summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rwxr-xr-xBuild-tools/Do-compile2
-rw-r--r--Docs/manual.texi47
-rw-r--r--mysql-test/t/rpl000017.test1
-rw-r--r--mysys/mf_iocache2.c1
-rw-r--r--sql/item_sum.cc2
-rw-r--r--sql/log_event.cc4
-rw-r--r--sql/mysqld.cc14
-rw-r--r--sql/sql_base.cc1
-rw-r--r--sql/sql_delete.cc2
-rw-r--r--sql/sql_select.cc34
-rw-r--r--sql/uniques.cc27
11 files changed, 70 insertions, 65 deletions
diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile
index 4cf2477f5bd..8a87d3d2f9d 100755
--- a/Build-tools/Do-compile
+++ b/Build-tools/Do-compile
@@ -143,7 +143,7 @@ if ($opt_stage <= 1)
if ($opt_stage <= 2)
{
- unlink($opt_distribution) if (!$opt_delete && !$opt_use_old_distribution);
+ unlink($opt_distribution) if ($opt_delete && !$opt_use_old_distribution);
safe_system("$make");
}
diff --git a/Docs/manual.texi b/Docs/manual.texi
index 17aab3c6764..68abd2ca398 100644
--- a/Docs/manual.texi
+++ b/Docs/manual.texi
@@ -770,9 +770,11 @@ databases that contain 50,000,000 records and we know of users that
uses MySQL with 60,000 tables and about 5,000,000,000 rows
@item
-All columns have default values. You can use @code{INSERT} to insert a
-subset of a table's columns; those columns that are not explicitly given
-values are set to their default values.
+@cindex default values
+All columns have default values.
+You can use @code{INSERT} to insert a subset of a table's columns; those
+columns that are not explicitly given values are set to their default
+values.
@item
Uses GNU Automake, Autoconf, and Libtool for portability.
@@ -24297,6 +24299,37 @@ takes more effort and hardware.
We are also working on some extensions to solve this problem for some
common application niches.
+MySQL can work with both transactional and non-transactional tables. To
+be able to work smoothly with non-transactional tables (which can't
+roll back if something goes wrong), MySQL has the following rules:
+
+@cindex default values
+@itemize @bullet
+@item
+All columns have default values.
+@item
+If you insert a 'wrong' value in a column, such as a @code{NULL} in a
+@code{NOT NULL} column or a too-large numerical value in a numerical
+column, MySQL will, instead of giving an error, set the column to
+the 'best possible value'. For numerical values this is 0, the smallest
+possible value, or the largest possible value. For strings this is
+either the empty string or the longest possible string that can be in
+the column.
+@item
+All calculated expressions return a value that can be used instead of
+signaling an error condition. For example, 1/0 returns @code{NULL}.
+@end itemize
+
+The reason for the above rules is that we can't check these conditions
+before the query starts to execute. If we encounter a problem after
+updating a few rows, we can't just roll back, as the table type may not
+support this. We can't stop, because in that case the update would be
+'half done', which is probably the worst possible scenario. In this case
+it's better to 'do the best you can' and then continue as if nothing
+happened.
+
+The above means that one should not use MySQL to check field content;
+one should do this in the application instead.
@node Portability, Internal use, Design Limitations, Optimize Overview
@subsection Portability
@@ -32569,11 +32602,18 @@ If you specify no column list for @code{INSERT ... VALUES} or @code{INSERT
the columns in the table, use @code{DESCRIBE tbl_name} to find out.
@item
+@cindex default values
Any column not explicitly given a value is set to its default value. For
example, if you specify a column list that doesn't name all the columns in
the table, unnamed columns are set to their default values. Default value
assignment is described in @ref{CREATE TABLE, , @code{CREATE TABLE}}.
+MySQL always has a default value for all fields. This is something
+that is imposed on MySQL so that it can work with both transactional
+and non-transactional tables.
+
+Our view is that checking of field content should be done in the
+application and not in the database server.
@item
An @code{expression} may refer to any column that was set earlier in a value
list. For example, you can say this:
@@ -33814,6 +33854,7 @@ as setting it to @code{NULL}, because @code{0} is a valid @code{TIMESTAMP}
value.
@item
+@cindex default values
If no @code{DEFAULT} value is specified for a column, MySQL
automatically assigns one.
diff --git a/mysql-test/t/rpl000017.test b/mysql-test/t/rpl000017.test
index 04a2bc9c78a..8e4e61cb9d6 100644
--- a/mysql-test/t/rpl000017.test
+++ b/mysql-test/t/rpl000017.test
@@ -3,6 +3,7 @@ connect (slave,localhost,root,,test,0,mysql-slave.sock);
connection master;
reset master;
grant file on *.* to replicate@localhost identified by 'aaaaaaaaaaaaaaab';
+grant file on *.* to replicate@127.0.0.1 identified by 'aaaaaaaaaaaaaaab';
connection slave;
slave start;
connection master;
diff --git a/mysys/mf_iocache2.c b/mysys/mf_iocache2.c
index 1397c14515b..76b8055bb45 100644
--- a/mysys/mf_iocache2.c
+++ b/mysys/mf_iocache2.c
@@ -96,6 +96,7 @@ uint my_b_fill(IO_CACHE *info)
** Read a string ended by '\n' into a buffer of 'max_length' size.
** Returns number of characters read, 0 on error.
** last byte is set to '\0'
+** If buffer is full then to[max_length-1] will be set to \0.
*/
uint my_b_gets(IO_CACHE *info, char *to, uint max_length)
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 712c0fa308e..431d8b56e6a 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -745,7 +745,7 @@ Item_sum_hybrid::min_max_update_int_field(int offset)
(ulonglong) old_nr > (ulonglong) nr :
old_nr > nr);
/* (cmp_sign > 0 && res) || (!(cmp_sign > 0) && !res) */
- if (cmp_sign > 0 ^ !res)
+ if ((cmp_sign > 0) ^ (!res))
old_nr=nr;
}
result_field->set_notnull();
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 2627e9a3997..057feee1304 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1493,10 +1493,10 @@ int Query_log_event::exec_event(struct st_master_info* mi)
(actual_error = thd->net.last_errno) && expected_error)
{
const char* errmsg = "Slave: did not get the expected error\
- running query from master - expected: '%s'(%d), got '%s'(%d)";
+ running query from master - expected: '%s' (%d), got '%s' (%d)";
sql_print_error(errmsg, ER_SAFE(expected_error),
expected_error,
- actual_error ? thd->net.last_error:"no error",
+ actual_error ? thd->net.last_error: "no error",
actual_error);
thd->query_error = 1;
}
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index bd5769c3237..b8e8c80d100 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -1202,13 +1202,13 @@ static sig_handler handle_segfault(int sig)
fprintf(stderr,"\
mysqld got signal %d;\n\
This could be because you hit a bug. It is also possible that this binary\n\
-or one of the libraries it was linked agaist is corrupt, improperly built,\n\
+or one of the libraries it was linked against is corrupt, improperly built,\n\
or misconfigured. This error can also be caused by malfunctioning hardware.\n",
sig);
fprintf(stderr, "\
We will try our best to scrape up some info that will hopefully help diagnose\n\
the problem, but since we have already crashed, something is definitely wrong\n\
-and this may fail\n\n");
+and this may fail.\n\n");
fprintf(stderr, "key_buffer_size=%ld\n", keybuff_size);
fprintf(stderr, "record_buffer=%ld\n", my_default_record_cache_size);
fprintf(stderr, "sort_buffer=%ld\n", sortbuff_size);
@@ -1219,15 +1219,15 @@ and this may fail\n\n");
key_buffer_size + (record_buffer + sort_buffer)*max_connections = %ld K\n\
bytes of memory\n", (keybuff_size + (my_default_record_cache_size +
sortbuff_size) * max_connections)/ 1024);
- fprintf(stderr, "Hope that's ok, if not, decrease some variables in the equation\n\n");
+ fprintf(stderr, "Hope that's ok; if not, decrease some variables in the equation.\n\n");
#if defined(HAVE_LINUXTHREADS)
if (sizeof(char*) == 4 && thread_count > UNSAFE_DEFAULT_LINUX_THREADS)
{
fprintf(stderr, "\
You seem to be running 32-bit Linux and have %d concurrent connections.\n\
-If you have not changed STACK_SIZE in LinuxThreads and build the binary \n\
-yourself, LinuxThreads is quite likely to steal a part of global heap for\n\
+If you have not changed STACK_SIZE in LinuxThreads and built the binary \n\
+yourself, LinuxThreads is quite likely to steal a part of the global heap for\n\
the thread stack. Please read http://www.mysql.com/doc/L/i/Linux.html\n\n",
thread_count);
}
@@ -1251,12 +1251,12 @@ Some pointers may be invalid and cause the dump to abort...\n");
fprintf(stderr, "\n
Successfully dumped variables, if you ran with --log, take a look at the\n\
details of what thread %ld did to cause the crash. In some cases of really\n\
-bad corruption, the values shown above may be invalid\n\n",
+bad corruption, the values shown above may be invalid.\n\n",
thd->thread_id);
}
fprintf(stderr, "\
The manual page at http://www.mysql.com/doc/C/r/Crashing.html contains\n\
-information that should help you find out what is causing the crash\n");
+information that should help you find out what is causing the crash.\n");
fflush(stderr);
#endif /* HAVE_STACKTRACE */
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 4c012804c3e..595bee99908 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1944,7 +1944,6 @@ int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds)
// TODO: This could be optimized to use hashed names if t2 had a hash
for (j=0 ; j < t2->fields ; j++)
{
- key_map tmp_map;
if (!my_strcasecmp(t1->field[i]->field_name,
t2->field[j]->field_name))
{
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 00ebef89fa5..1103e5590d8 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -309,7 +309,6 @@ bool multi_delete::send_data(List<Item> &values)
continue;
table->file->position(table->record[0]);
- int rl = table->file->ref_length;
if (secure_counter < 0)
{
@@ -397,7 +396,6 @@ int multi_delete::do_deletes (bool from_send_error)
table_being_deleted=table_being_deleted->next, counter++)
{
TABLE *table = table_being_deleted->table;
- int rl = table->file->ref_length;
if (tempfiles[counter]->get(table))
{
error=1;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 893c99efce1..c2bb282a624 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -107,7 +107,6 @@ static uint find_shortest_key(TABLE *table, key_map usable_keys);
static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,
ha_rows select_limit, bool no_changes);
static int create_sort_index(JOIN_TAB *tab,ORDER *order,ha_rows select_limit);
-static bool fix_having(JOIN *join, Item **having);
static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
Item *having);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
@@ -5443,39 +5442,6 @@ err:
DBUG_RETURN(-1);
}
-
-/*
-** Add the HAVING criteria to table->select
-*/
-
-static bool fix_having(JOIN *join, Item **having)
-{
- (*having)->update_used_tables(); // Some tables may have been const
- JOIN_TAB *table=&join->join_tab[join->const_tables];
- table_map used_tables= join->const_table_map | table->table->map;
-
- Item* sort_table_cond=make_cond_for_table(*having,used_tables,used_tables);
- if (sort_table_cond)
- {
- if (!table->select)
- if (!(table->select=new SQL_SELECT))
- return 1;
- if (!table->select->cond)
- table->select->cond=sort_table_cond;
- else // This should never happen
- if (!(table->select->cond=new Item_cond_and(table->select->cond,
- sort_table_cond)))
- return 1;
- table->select_cond=table->select->cond;
- DBUG_EXECUTE("where",print_where(table->select_cond,
- "select and having"););
- *having=make_cond_for_table(*having,~ (table_map) 0,~used_tables);
- DBUG_EXECUTE("where",print_where(*having,"having after make_cond"););
- }
- return 0;
-}
-
-
/*****************************************************************************
** Remove duplicates from tmp table
** This should be recoded to add a uniuqe index to the table and remove
diff --git a/sql/uniques.cc b/sql/uniques.cc
index bd3ca6db0d0..fcee97dbb1a 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -35,6 +35,19 @@
#include "sql_sort.h"
+int unique_write_to_file(gptr key, element_count count, Unique *unique)
+{
+ return my_b_write(&unique->file, (byte*) key,
+ unique->tree.size_of_element) ? 1 : 0;
+}
+
+int unique_write_to_ptrs(gptr key, element_count count, Unique *unique)
+{
+ memcpy(unique->record_pointers, key, unique->tree.size_of_element);
+ unique->record_pointers+=unique->tree.size_of_element;
+ return 0;
+}
+
Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
uint size, ulong max_in_memory_size_arg)
:max_in_memory_size(max_in_memory_size_arg),elements(0)
@@ -73,20 +86,6 @@ bool Unique::flush()
}
-int unique_write_to_file(gptr key, element_count count, Unique *unique)
-{
- return my_b_write(&unique->file, (byte*) key,
- unique->tree.size_of_element) ? 1 : 0;
-}
-
-int unique_write_to_ptrs(gptr key, element_count count, Unique *unique)
-{
- memcpy(unique->record_pointers, key, unique->tree.size_of_element);
- unique->record_pointers+=unique->tree.size_of_element;
- return 0;
-}
-
-
/*
Modify the TABLE element so that when one calls init_records()
the rows will be read in priority order.