path: root/sql
author     unknown <Sinisa@sinisa.nasamreza.org>   2003-04-26 16:44:42 +0300
committer  unknown <Sinisa@sinisa.nasamreza.org>   2003-04-26 16:44:42 +0300
commit     8ae605d7eefe1316d17ad59ff346592044a2fa54 (patch)
tree       19f5a276d5442805773ffb6aac88ff504dde58d4 /sql
parent     75ef233342e7bd61640e699b754efbbcb970b827 (diff)
parent     b5e40f6823528d5e99ca4e7f581e3db1b0a53c8c (diff)
download   mariadb-git-8ae605d7eefe1316d17ad59ff346592044a2fa54.tar.gz
merge fix
mysql-test/r/alter_table.result: Auto merged
mysql-test/r/join_outer.result: Auto merged
mysql-test/r/multi_update.result: Auto merged
mysql-test/t/alter_table.test: Auto merged
mysql-test/t/join_outer.test: Auto merged
mysql-test/t/multi_update.test: Auto merged
sql/ha_innodb.cc: Auto merged
sql/set_var.cc: Auto merged
sql/sql_select.cc: Auto merged
sql/share/czech/errmsg.txt: Auto merged
sql/share/danish/errmsg.txt: Auto merged
sql/share/dutch/errmsg.txt: Auto merged
sql/share/estonian/errmsg.txt: Auto merged
sql/share/french/errmsg.txt: Auto merged
sql/share/german/errmsg.txt: Auto merged
sql/share/greek/errmsg.txt: Auto merged
sql/share/hungarian/errmsg.txt: Auto merged
sql/share/italian/errmsg.txt: Auto merged
sql/share/korean/errmsg.txt: Auto merged
sql/share/norwegian-ny/errmsg.txt: Auto merged
sql/share/norwegian/errmsg.txt: Auto merged
sql/share/portuguese/errmsg.txt: Auto merged
sql/share/romanian/errmsg.txt: Auto merged
sql/share/russian/errmsg.txt: Auto merged
sql/share/slovak/errmsg.txt: Auto merged
sql/share/spanish/errmsg.txt: Auto merged
sql/share/ukrainian/errmsg.txt: Auto merged
sql/sql_table.cc: Auto merged
Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc                        2
-rw-r--r--  sql/filesort.cc                    11
-rw-r--r--  sql/ha_heap.h                       5
-rw-r--r--  sql/ha_innodb.cc                  128
-rw-r--r--  sql/ha_innodb.h                    12
-rw-r--r--  sql/handler.h                       3
-rw-r--r--  sql/item_sum.cc                     5
-rw-r--r--  sql/log_event.cc                   34
-rw-r--r--  sql/log_event.h                    22
-rw-r--r--  sql/mysqld.cc                       2
-rw-r--r--  sql/opt_range.cc                   12
-rw-r--r--  sql/records.cc                      2
-rw-r--r--  sql/share/english/errmsg.txt        2
-rw-r--r--  sql/share/japanese/errmsg.txt       2
-rw-r--r--  sql/share/swedish/errmsg.txt        2
-rw-r--r--  sql/sql_insert.cc                   2
-rw-r--r--  sql/sql_list.h                      2
-rw-r--r--  sql/sql_select.cc                  24
-rw-r--r--  sql/sql_select.h                    4
-rw-r--r--  sql/sql_table.cc                   21
-rw-r--r--  sql/sql_union.cc                    3
-rw-r--r--  sql/sql_update.cc                 145
22 files changed, 295 insertions, 150 deletions
diff --git a/sql/field.cc b/sql/field.cc
index ce5e240aba8..eb7d3dc5686 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -3322,11 +3322,11 @@ bool Field_newdate::get_date(TIME *ltime,bool fuzzydate)
if (is_null())
return 1;
uint32 tmp=(uint32) uint3korr(ptr);
- bzero((char*) ltime,sizeof(*ltime));
ltime->day= tmp & 31;
ltime->month= (tmp >> 5) & 15;
ltime->year= (tmp >> 9);
ltime->time_type=TIMESTAMP_DATE;
+ ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0;
return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0;
}
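
For reference, the bit layout that uint3korr() yields in this function, which the hunk above now clears field by field instead of with an up-front bzero(), is: day in the low 5 bits, month in the next 4, year in the rest. A small standalone sketch (illustrative code, not part of the patch) exercising that packing:

#include <cstdint>
#include <cstdio>

/*
  Standalone sketch (illustrative, not part of the patch) of the 3-byte
  NEWDATE packing decoded above: bits 0-4 hold the day, bits 5-8 the
  month, the remaining bits the year.
*/
struct UnpackedDate { unsigned year, month, day; };

static UnpackedDate unpack_newdate(uint32_t tmp)
{
  UnpackedDate d;
  d.day   = tmp & 31;          /* low 5 bits  */
  d.month = (tmp >> 5) & 15;   /* next 4 bits */
  d.year  = tmp >> 9;          /* the rest    */
  return d;
}

int main()
{
  uint32_t packed = (2003u << 9) | (4u << 5) | 26u;     /* 2003-04-26 */
  UnpackedDate d = unpack_newdate(packed);
  printf("%04u-%02u-%02u\n", d.year, d.month, d.day);   /* prints 2003-04-26 */
  return 0;
}
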
diff --git a/sql/filesort.cc b/sql/filesort.cc
index 86574e4dd57..1d30c23a15e 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -456,6 +456,7 @@ static void make_sortkey(register SORTPARAM *param,
sort_field != param->end ;
sort_field++)
{
+ bool maybe_null=0;
if ((field=sort_field->field))
{ // Field
if (field->maybe_null())
@@ -480,7 +481,7 @@ static void make_sortkey(register SORTPARAM *param,
switch (sort_field->result_type) {
case STRING_RESULT:
{
- if (item->maybe_null)
+ if ((maybe_null=item->maybe_null))
*to++=1;
/* All item->str() to use some extra byte for end null.. */
String tmp((char*) to,sort_field->length+4);
@@ -546,7 +547,7 @@ static void make_sortkey(register SORTPARAM *param,
case INT_RESULT:
{
longlong value=item->val_int();
- if (item->maybe_null)
+ if ((maybe_null=item->maybe_null))
*to++=1; /* purecov: inspected */
if (item->null_value)
{
@@ -580,13 +581,13 @@ static void make_sortkey(register SORTPARAM *param,
case REAL_RESULT:
{
double value=item->val();
- if (item->null_value)
+ if ((maybe_null=item->null_value))
{
bzero((char*) to,sort_field->length+1);
to++;
break;
}
- if (item->maybe_null)
+ if ((maybe_null=item->maybe_null))
*to++=1;
change_double_for_sort(value,(byte*) to);
break;
@@ -595,6 +596,8 @@ static void make_sortkey(register SORTPARAM *param,
}
if (sort_field->reverse)
{ /* Revers key */
+ if (maybe_null)
+ to[-1]= ~to[-1];
length=sort_field->length;
while (length--)
{
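
The maybe_null flag introduced above exists so that the one-byte NULL indicator written before a nullable sort value is inverted together with the value bytes when the field sorts in reverse (DESC). A self-contained toy (illustrative only, not the server code) showing why the indicator byte has to take part in the inversion for a plain memcmp() ordering to come out right:

#include <cstdint>
#include <cstdio>
#include <cstring>

/*
  Illustrative sketch, not the MySQL code: a nullable sort value is written
  as one indicator byte (0 = NULL, 1 = value follows) plus the value.  For
  a reverse (DESC) field every key byte is inverted so that a plain
  memcmp() ordering comes out reversed -- the added "to[-1]= ~to[-1]" makes
  sure the indicator byte is inverted too.
*/
static void invert(uint8_t *key, size_t len)
{
  for (size_t i = 0; i < len; i++)
    key[i] = (uint8_t) ~key[i];
}

int main()
{
  uint8_t null_key[2]  = {0, 0};     /* NULL          */
  uint8_t value_key[2] = {1, 10};    /* the value 10  */

  /* Ascending: memcmp() puts the NULL key first. */
  printf("ASC : NULL sorts %s 10\n",
         memcmp(null_key, value_key, 2) < 0 ? "before" : "after");

  /* Descending: invert both keys, indicator byte included. */
  invert(null_key, 2);
  invert(value_key, 2);
  printf("DESC: NULL sorts %s 10\n",
         memcmp(null_key, value_key, 2) < 0 ? "before" : "after");
  return 0;
}

With the indicator byte inverted, NULLs that sort first in ascending order correctly sort last in descending order.
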
diff --git a/sql/ha_heap.h b/sql/ha_heap.h
index 504f5262bf3..31126111d9d 100644
--- a/sql/ha_heap.h
+++ b/sql/ha_heap.h
@@ -54,8 +54,9 @@ class ha_heap: public handler
uint max_keys() const { return MAX_KEY; }
uint max_key_parts() const { return MAX_REF_PARTS; }
uint max_key_length() const { return HA_MAX_REC_LENGTH; }
- virtual double scan_time() { return (double) (records+deleted) / 20.0+10; }
- virtual double read_time(ha_rows rows) { return (double) rows / 20.0+1; }
+ double scan_time() { return (double) (records+deleted) / 20.0+10; }
+ double read_time(uint index, uint ranges, ha_rows rows)
+ { return (double) rows / 20.0+1; }
virtual bool fast_key_read() { return 1;}
int open(const char *name, int mode, uint test_if_locked);
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index 30d44f183a4..1363227605e 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -873,6 +873,7 @@ innobase_flush_logs(void)
DBUG_ENTER("innobase_flush_logs");
log_flush_up_to(ut_dulint_max, LOG_WAIT_ONE_GROUP);
+ log_flush_to_disk();
DBUG_RETURN(result);
}
@@ -1182,6 +1183,9 @@ ha_innobase::open(
last_query_id = (ulong)-1;
+ active_index = 0;
+ active_index_before_scan = (uint)-1; /* undefined value */
+
if (!(share=get_share(name)))
DBUG_RETURN(1);
@@ -2026,7 +2030,8 @@ calc_row_difference(
upd_t* uvect, /* in/out: update vector */
mysql_byte* old_row, /* in: old row in MySQL format */
mysql_byte* new_row, /* in: new row in MySQL format */
- struct st_table* table, /* in: table in MySQL data dictionary */
+ struct st_table* table, /* in: table in MySQL data
+ dictionary */
mysql_byte* upd_buff, /* in: buffer to use */
row_prebuilt_t* prebuilt, /* in: InnoDB prebuilt struct */
THD* thd) /* in: user thread */
@@ -2076,8 +2081,10 @@ calc_row_difference(
case DATA_VARCHAR:
case DATA_BINARY:
case DATA_VARMYSQL:
- o_ptr = row_mysql_read_var_ref_noninline(&o_len, o_ptr);
- n_ptr = row_mysql_read_var_ref_noninline(&n_len, n_ptr);
+ o_ptr = row_mysql_read_var_ref_noninline(&o_len,
+ o_ptr);
+ n_ptr = row_mysql_read_var_ref_noninline(&n_len,
+ n_ptr);
default:
;
}
@@ -2489,46 +2496,48 @@ ha_innobase::change_active_index(
index, even if it was internally generated by
InnoDB */
{
- row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
- KEY* key=0;
- statistic_increment(ha_read_key_count, &LOCK_status);
- DBUG_ENTER("change_active_index");
+ row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+ KEY* key=0;
+ statistic_increment(ha_read_key_count, &LOCK_status);
+ DBUG_ENTER("change_active_index");
- ut_a(prebuilt->trx ==
+ ut_a(prebuilt->trx ==
(trx_t*) current_thd->transaction.all.innobase_tid);
- active_index = keynr;
+ active_index = keynr;
- if (keynr != MAX_KEY && table->keys > 0) {
- key = table->key_info + active_index;
+ if (keynr != MAX_KEY && table->keys > 0) {
+ key = table->key_info + active_index;
- prebuilt->index = dict_table_get_index_noninline(
+ prebuilt->index = dict_table_get_index_noninline(
prebuilt->table,
key->name);
- } else {
- prebuilt->index = dict_table_get_first_index_noninline(
+ } else {
+ prebuilt->index = dict_table_get_first_index_noninline(
prebuilt->table);
- }
+ }
- if (!prebuilt->index) {
- sql_print_error("Innodb could not find key n:o %u with name %s from dict cache for table %s", keynr, key ? key->name : "NULL", prebuilt->table->name);
- DBUG_RETURN(1);
- }
+ if (!prebuilt->index) {
+ sql_print_error(
+"Innodb could not find key n:o %u with name %s from dict cache for table %s",
+ keynr, key ? key->name : "NULL", prebuilt->table->name);
+ DBUG_RETURN(1);
+ }
- assert(prebuilt->search_tuple != 0);
+ assert(prebuilt->search_tuple != 0);
- dtuple_set_n_fields(prebuilt->search_tuple, prebuilt->index->n_fields);
+ dtuple_set_n_fields(prebuilt->search_tuple, prebuilt->index->n_fields);
- dict_index_copy_types(prebuilt->search_tuple, prebuilt->index,
+ dict_index_copy_types(prebuilt->search_tuple, prebuilt->index,
prebuilt->index->n_fields);
- /* Maybe MySQL changes the active index for a handle also
- during some queries, we do not know: then it is safest to build
- the template such that all columns will be fetched. */
+ /* Maybe MySQL changes the active index for a handle also
+ during some queries, we do not know: then it is safest to build
+ the template such that all columns will be fetched. */
- build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
+ build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW);
- DBUG_RETURN(0);
+ DBUG_RETURN(0);
}
/**************************************************************************
@@ -2721,6 +2730,11 @@ ha_innobase::rnd_init(
row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+ /* Store the active index value so that we can restore the original
+ value after a scan */
+
+ active_index_before_scan = active_index;
+
if (prebuilt->clust_index_was_generated) {
err = change_active_index(MAX_KEY);
} else {
@@ -2733,13 +2747,25 @@ ha_innobase::rnd_init(
}
/*********************************************************************
-Ends a table scan ???????????????? */
+Ends a table scan. */
int
ha_innobase::rnd_end(void)
/*======================*/
/* out: 0 or error number */
{
+ /* Restore the old active_index back; MySQL may assume that a table
+ scan does not change active_index. We only restore the value if
+ MySQL has called rnd_init before: sometimes MySQL seems to call
+ rnd_end WITHOUT calling rnd_init. */
+
+ if (active_index_before_scan != (uint)-1) {
+
+ change_active_index(active_index_before_scan);
+
+ active_index_before_scan = (uint)-1;
+ }
+
return(index_end());
}
@@ -3517,10 +3543,8 @@ ha_innobase::records_in_range(
/*************************************************************************
Gives an UPPER BOUND to the number of rows in a table. This is used in
-filesort.cc and the upper bound must hold. TODO: Since the number of
-rows in a table may change after this function is called, we still may
-get a 'Sort aborted' error in filesort.cc of MySQL. The ultimate fix is to
-improve the algorithm of filesort.cc. */
+filesort.cc and its better if the upper bound hold.
+*/
ha_rows
ha_innobase::estimate_number_of_rows(void)
@@ -3586,6 +3610,29 @@ ha_innobase::scan_time()
return((double) (prebuilt->table->stat_clustered_index_size));
}
+/*
+ Calculate the time it takes to read a set of ranges through and index
+ This enables us to optimise reads for clustered indexes.
+*/
+
+double ha_innobase::read_time(uint index, uint ranges, ha_rows rows)
+{
+ ha_rows total_rows;
+ double time_for_scan;
+ if (index != table->primary_key)
+ return handler::read_time(index, ranges, rows); // Not clustered
+ if (rows <= 2)
+ return (double) rows;
+ /*
+ Assume that the read is proportional to scan time for all rows + one
+ seek per range.
+ */
+ time_for_scan= scan_time();
+ if ((total_rows= estimate_number_of_rows()) < rows)
+ return time_for_scan;
+ return (ranges + (double) rows / (double) total_rows * time_for_scan);
+}
+
/*************************************************************************
Returns statistics information of the table to the MySQL interpreter,
in various fields of the handle object. */
@@ -3710,6 +3757,23 @@ ha_innobase::info(
DBUG_VOID_RETURN;
}
+/**************************************************************************
+Updates index cardinalities of the table, based on 10 random dives into
+each index tree. This does NOT calculate exact statistics of the table. */
+
+int
+ha_innobase::analyze(
+/*=================*/
+ /* out: returns always 0 (success) */
+ THD* thd, /* in: connection thread handle */
+ HA_CHECK_OPT* check_opt) /* in: currently ignored */
+{
+ /* Simply call ::info() with all the flags */
+ info(HA_STATUS_TIME | HA_STATUS_CONST | HA_STATUS_VARIABLE);
+
+ return(0);
+}
+
/***********************************************************************
Tries to check that an InnoDB table is not corrupted. If corruption is
noticed, prints to stderr information about it. In case of corruption
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 8031fa0aa29..5677d22a2ca 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -58,7 +58,15 @@ class ha_innobase: public handler
ulong start_of_scan; /* this is set to 1 when we are
starting a table scan but have not
yet fetched any row, else 0 */
-
+ uint active_index_before_scan;
+ /* since a table scan in InnoDB is
+ always done through an index, a table
+ scan may change active_index; but
+ MySQL may assume that active_index
+ after a table scan is the same as
+ before; we store the value here so
+ that we can restore the value after
+ a scan */
uint last_match_mode;/* match mode of the latest search:
ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX,
or undefined */
@@ -118,6 +126,7 @@ class ha_innobase: public handler
void initialize(void);
int close(void);
double scan_time();
+ double read_time(uint index, uint ranges, ha_rows rows);
int write_row(byte * buf);
int update_row(const byte * old_data, byte * new_data);
@@ -143,6 +152,7 @@ class ha_innobase: public handler
void position(const byte *record);
void info(uint);
+ int analyze(THD* thd,HA_CHECK_OPT* check_opt);
int extra(enum ha_extra_function operation);
int reset(void);
int external_lock(THD *thd, int lock_type);
diff --git a/sql/handler.h b/sql/handler.h
index 8f1d00f64b5..72a05d7ebee 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -227,7 +227,8 @@ public:
void change_table_ptr(TABLE *table_arg) { table=table_arg; }
virtual double scan_time()
{ return ulonglong2double(data_file_length) / IO_SIZE + 1; }
- virtual double read_time(ha_rows rows) { return rows2double(rows); }
+ virtual double read_time(uint index, uint ranges, ha_rows rows)
+ { return rows2double(ranges+rows); }
virtual bool fast_key_read() { return 0;}
virtual key_map keys_to_use_for_scanning() { return 0; }
virtual bool has_transactions(){ return 0;}
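
The two read_time() implementations above (the handler default here and the InnoDB clustered-index override earlier in ha_innodb.cc) feed the range optimizer in opt_range.cc further down. A worked example with made-up numbers, mirroring the two formulas (sketch only, not server code):

#include <cstdio>

/*
  Worked example of the new 3-argument cost interface (all figures are
  made up).  handler::read_time() estimates one fetch per row plus one
  per range; the InnoDB override treats a read through the clustered
  index as a fraction of a full scan plus one seek per range.
*/
static double default_read_time(unsigned ranges, double rows)
{
  return ranges + rows;                       /* handler::read_time() */
}

static double clustered_read_time(unsigned ranges, double rows,
                                  double total_rows, double scan_time)
{
  if (rows <= 2)
    return rows;
  if (total_rows < rows)                      /* stale statistics guard */
    return scan_time;
  return ranges + rows / total_rows * scan_time;
}

int main()
{
  /* 10 ranges covering 50,000 of 1,000,000 rows; a full scan costs 100 */
  printf("default  : %.1f\n", default_read_time(10, 50000.0));
  printf("clustered: %.1f\n",
         clustered_read_time(10, 50000.0, 1000000.0, 100.0));
  return 0;
}

With these figures a 10-range read of 50,000 rows through the clustered index is costed at 15 units instead of 50,010, which is what lets the optimizer prefer range reads on an InnoDB primary key.
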
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 3a513505913..d88894d4fb4 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -1066,7 +1066,7 @@ bool Item_sum_count_distinct::setup(THD *thd)
int Item_sum_count_distinct::tree_to_myisam()
{
- if (create_myisam_from_heap(table, tmp_table_param,
+ if (create_myisam_from_heap(current_thd, table, tmp_table_param,
HA_ERR_RECORD_FILE_FULL, 1) ||
tree_walk(&tree, (tree_walk_action)&dump_leaf, (void*)this,
left_root_right))
@@ -1120,7 +1120,8 @@ bool Item_sum_count_distinct::add()
if (error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE)
{
- if (create_myisam_from_heap(table, tmp_table_param, error,1))
+ if (create_myisam_from_heap(current_thd, table, tmp_table_param, error,
+ 1))
return 1; // Not a table_is_full error
}
}
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 3b499b8d502..05d5788f5ae 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -659,9 +659,18 @@ Log_event* Log_event::read_log_event(const char* buf, int event_len,
}
if (!ev || !ev->is_valid())
{
- *error= "Found invalid event in binary log";
delete ev;
+#ifdef MYSQL_CLIENT
+ if (!force_opt)
+ {
+ *error= "Found invalid event in binary log";
+ return 0;
+ }
+ ev= new Unknown_log_event(buf, old_format);
+#else
+ *error= "Found invalid event in binary log";
return 0;
+#endif
}
ev->cached_event_len = event_len;
return ev;
@@ -1695,6 +1704,17 @@ void Execute_load_log_event::pack_info(String* packet)
}
#endif
+#ifdef MYSQL_CLIENT
+void Unknown_log_event::print(FILE* file, bool short_form, char* last_db)
+{
+ if (short_form)
+ return;
+ print_header(file);
+ fputc('\n', file);
+ fprintf(file, "# %s", "Unknown event\n");
+}
+#endif
+
#ifndef MYSQL_CLIENT
int Query_log_event::exec_event(struct st_relay_log_info* rli)
{
@@ -1921,13 +1941,13 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
close_thread_tables(thd);
if (thd->query_error)
{
- int sql_error = thd->net.last_errno;
+ int sql_error= thd->net.last_errno;
if (!sql_error)
- sql_error = ER_UNKNOWN_ERROR;
-
+ sql_error= ER_UNKNOWN_ERROR;
slave_print_error(rli,sql_error,
- "Slave: Error '%s' running load data infile ",
- ER_SAFE(sql_error));
+ "Error '%s' running load data infile",
+ sql_error ? thd->net.last_error :
+ ER_SAFE(ER_UNKNOWN_ERROR));
free_root(&thd->mem_root,0);
return 1;
}
@@ -1935,7 +1955,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli,
if (thd->fatal_error)
{
- sql_print_error("Slave: Fatal error running LOAD DATA INFILE ");
+ sql_print_error("Fatal error running LOAD DATA INFILE ");
return 1;
}
diff --git a/sql/log_event.h b/sql/log_event.h
index 5b9f30b3afd..b46f78d2ce0 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -201,10 +201,10 @@ struct sql_ex_info
enum Log_event_type
{
- START_EVENT = 1, QUERY_EVENT =2, STOP_EVENT=3, ROTATE_EVENT = 4,
- INTVAR_EVENT=5, LOAD_EVENT=6, SLAVE_EVENT=7, CREATE_FILE_EVENT=8,
- APPEND_BLOCK_EVENT=9, EXEC_LOAD_EVENT=10, DELETE_FILE_EVENT=11,
- NEW_LOAD_EVENT=12, RAND_EVENT=13
+ UNKNOWN_EVENT = 0, START_EVENT = 1, QUERY_EVENT =2, STOP_EVENT=3,
+ ROTATE_EVENT = 4, INTVAR_EVENT=5, LOAD_EVENT=6, SLAVE_EVENT=7,
+ CREATE_FILE_EVENT=8, APPEND_BLOCK_EVENT=9, EXEC_LOAD_EVENT=10,
+ DELETE_FILE_EVENT=11, NEW_LOAD_EVENT=12, RAND_EVENT=13
};
enum Int_event_type
@@ -714,4 +714,18 @@ public:
int write_data(IO_CACHE* file);
};
+#ifdef MYSQL_CLIENT
+class Unknown_log_event: public Log_event
+{
+public:
+ Unknown_log_event(const char* buf, bool old_format):
+ Log_event(buf, old_format)
+ {}
+ ~Unknown_log_event() {}
+ void print(FILE* file, bool short_form= 0, char* last_db= 0);
+ Log_event_type get_type_code() { return UNKNOWN_EVENT;}
+ bool is_valid() { return 1; }
+};
+#endif
+
#endif /* _log_event_h */
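
Together with the log_event.cc hunk above, this lets the MYSQL_CLIENT build (mysqlbinlog) keep reading past a corrupt event when force_opt is set, printing a placeholder instead of stopping. A toy model of that fallback (simplified, hypothetical names, not the real Log_event classes):

#include <cstdio>

/*
  Toy model of the fallback added above (simplified, hypothetical names --
  not the real Log_event classes): with force_opt set, an event that fails
  to parse is wrapped in a placeholder that prints "# Unknown event"
  instead of aborting with "Found invalid event in binary log".
*/
struct Event
{
  virtual ~Event() {}
  virtual bool is_valid() const { return true; }
  virtual void print(FILE *f) const = 0;
};

struct UnknownEvent : Event
{
  void print(FILE *f) const { fprintf(f, "# Unknown event\n"); }
};

static Event *parse(const char *buf);         /* stands in for the real parser */

static Event *read_event(const char *buf, bool force, const char **error)
{
  Event *ev = parse(buf);
  if (ev && ev->is_valid())
    return ev;
  delete ev;
  if (!force)
  {
    *error = "Found invalid event in binary log";
    return 0;                                 /* caller stops reading */
  }
  return new UnknownEvent();                  /* keep going with a placeholder */
}

static Event *parse(const char *)             /* pretend every event is corrupt */
{
  return 0;
}

int main()
{
  const char *error = 0;
  Event *ev = read_event("garbage", /* force */ true, &error);
  if (ev)
  {
    ev->print(stdout);                        /* prints "# Unknown event" */
    delete ev;
  }
  else
    fprintf(stderr, "%s\n", error);
  return 0;
}
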
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index df83d1909cb..3910bfc880b 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -42,7 +42,7 @@
#define MAIN_THD
#define SIGNAL_THD
-#ifdef PURIFY
+#ifdef HAVE_purify
#define IF_PURIFY(A,B) (A)
#else
#define IF_PURIFY(A,B) (B)
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index aeeabb7d29c..cc45e18e54c 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -284,7 +284,7 @@ typedef struct st_qsel_param {
KEY_PART *key_parts,*key_parts_end,*key[MAX_KEY];
MEM_ROOT *mem_root;
table_map prev_tables,read_tables,current_table;
- uint baseflag,keys,max_key_part;
+ uint baseflag, keys, max_key_part, range_count;
uint real_keynr[MAX_KEY];
char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH];
@@ -710,8 +710,10 @@ int SQL_SELECT::test_quick_select(key_map keys_to_use, table_map prev_tables,
(double) keys_per_block);
}
else
- found_read_time= head->file->read_time(found_records)+
- (double) found_records / TIME_FOR_COMPARE;
+ found_read_time= (head->file->read_time(keynr,
+ param.range_count,
+ found_records)+
+ (double) found_records / TIME_FOR_COMPARE);
if (read_time > found_read_time)
{
read_time=found_read_time;
@@ -2113,11 +2115,12 @@ check_quick_select(PARAM *param,uint idx,SEL_ARG *tree)
if (!tree)
DBUG_RETURN(HA_POS_ERROR); // Can't use it
+ param->max_key_part=0;
+ param->range_count=0;
if (tree->type == SEL_ARG::IMPOSSIBLE)
DBUG_RETURN(0L); // Impossible select. return
if (tree->type != SEL_ARG::KEY_RANGE || tree->part != 0)
DBUG_RETURN(HA_POS_ERROR); // Don't use tree
- param->max_key_part=0;
records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0);
if (records != HA_POS_ERROR)
{
@@ -2185,6 +2188,7 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree,
}
keynr=param->real_keynr[idx];
+ param->range_count++;
if (!tmp_min_flag && ! tmp_max_flag &&
(uint) key_tree->part+1 == param->table->key_info[keynr].key_parts &&
(param->table->key_info[keynr].flags & HA_NOSAME) &&
diff --git a/sql/records.cc b/sql/records.cc
index 22c4d54550c..fd46506203f 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -15,7 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/* Functions to read, write and lock records */
+/* Functions for easy reading of records, possible through a cache */
#include "mysql_priv.h"
diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt
index 19de162b3fb..b7910bd07b3 100644
--- a/sql/share/english/errmsg.txt
+++ b/sql/share/english/errmsg.txt
@@ -105,7 +105,7 @@
"BLOB column '%-.64s' can't have a default value",
"Incorrect database name '%-.100s'",
"Incorrect table name '%-.100s'",
-"The SELECT would examine too many records and probably take a very long time. Check your WHERE and use SET SQL_BIG_SELECTS=1 if the SELECT is ok",
+"The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok",
"Unknown error",
"Unknown procedure '%-.64s'",
"Incorrect parameter count to procedure '%-.64s'",
diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt
index 850fc5f22bc..1b04ee5c2e4 100644
--- a/sql/share/japanese/errmsg.txt
+++ b/sql/share/japanese/errmsg.txt
@@ -107,7 +107,7 @@
"BLOB column '%-.64s' can't have a default value",
"»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹",
"»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹",
-"The SELECT would examine too many records and probably take a very long time. Check your WHERE and use SET SQL_BIG_SELECTS=1 if the SELECT is ok",
+"The SELECT would examine more rows than MAX_JOIN_SIZE. Check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is ok",
"Unknown error",
"Unknown procedure '%-.64s'",
"Incorrect parameter count to procedure '%-.64s'",
diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt
index 11ae22d3013..7ef663839c9 100644
--- a/sql/share/swedish/errmsg.txt
+++ b/sql/share/swedish/errmsg.txt
@@ -105,7 +105,7 @@
"BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde",
"Felaktigt databasnamn '%-.64s'",
"Felaktigt tabellnamn '%-.64s'",
-"Den angivna frågan skulle troligen ta mycket lång tid! Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 ifall du vill hantera stora joins",
+"Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins",
"Oidentifierat fel",
"Okänd procedur: %s",
"Felaktigt antal parametrar till procedur %s",
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index ace15771449..0e7a487276d 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -1392,7 +1392,7 @@ bool select_insert::send_eof()
thd->cuted_fields);
if (last_insert_id)
thd->insert_id(last_insert_id); // For update log
- ::send_ok(&thd->net,info.copied,last_insert_id,buff);
+ ::send_ok(&thd->net,info.copied+info.deleted,last_insert_id,buff);
mysql_update_log.write(thd,thd->query,thd->query_length);
return 0;
}
diff --git a/sql/sql_list.h b/sql/sql_list.h
index d42c344957c..15bbb5c5ec7 100644
--- a/sql/sql_list.h
+++ b/sql/sql_list.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB
+/* Copyright (C) 2000-2003 MySQL AB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index a35f5cc2314..f870f8f5178 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1839,7 +1839,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
{
ha_rows rec;
double tmp;
- THD *thd= current_thd;
+ THD *thd= join->thd;
if (!rest_tables)
{
@@ -1970,7 +1970,10 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
if (table->quick_keys & ((key_map) 1 << key))
records= (double) table->quick_rows[key];
else
- records= (double) s->records/rec; // quick_range couldn't use key!
+ {
+ /* quick_range couldn't use key! */
+ records= (double) s->records/rec;
+ }
}
else
{
@@ -4316,12 +4319,11 @@ free_tmp_table(THD *thd, TABLE *entry)
* If a HEAP table gets full, create a MyISAM table and copy all rows to this
*/
-bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
- bool ignore_last_dupp_key_error)
+bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param,
+ int error, bool ignore_last_dupp_key_error)
{
TABLE new_table;
const char *save_proc_info;
- THD *thd=current_thd;
int write_err;
DBUG_ENTER("create_myisam_from_heap");
@@ -5328,7 +5330,8 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
if (error == HA_ERR_FOUND_DUPP_KEY ||
error == HA_ERR_FOUND_DUPP_UNIQUE)
goto end;
- if (create_myisam_from_heap(table, &join->tmp_table_param, error,1))
+ if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
+ error,1))
DBUG_RETURN(-1); // Not a table_is_full error
table->uniques=0; // To ensure rows are the same
}
@@ -5405,7 +5408,8 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
copy_funcs(join->tmp_table_param.items_to_copy);
if ((error=table->file->write_row(table->record[0])))
{
- if (create_myisam_from_heap(table, &join->tmp_table_param, error, 0))
+ if (create_myisam_from_heap(join->thd, table, &join->tmp_table_param,
+ error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
/* Change method to update rows */
table->file->index_init(0);
@@ -5499,8 +5503,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
{
if ((error=table->file->write_row(table->record[0])))
{
- if (create_myisam_from_heap(table, &join->tmp_table_param,
- error, 0))
+ if (create_myisam_from_heap(join->thd, table,
+ &join->tmp_table_param, error, 0))
DBUG_RETURN(-1); // Not a table_is_full error
}
else
@@ -6029,7 +6033,7 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
int error;
ulong reclength,offset;
uint field_count;
- THD *thd= current_thd;
+ THD *thd= join->thd;
DBUG_ENTER("remove_duplicates");
entry->reginfo.lock_type=TL_WRITE;
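
All of the create_myisam_from_heap() call sites changed above follow the pattern described by the comment "If a HEAP table gets full, create a MyISAM table and copy all rows to this": the in-memory table is converted to an on-disk one the moment an insert reports it is full, and the failed row is retried there. A toy, self-contained version of that overflow pattern (illustrative only; sizes and names are made up):

#include <cstdio>
#include <vector>

/*
  Toy version of the pattern behind create_myisam_from_heap(): rows go to a
  bounded in-memory store first; once an insert reports "full" (the
  HA_ERR_RECORD_FILE_FULL case), everything is copied to an unbounded
  on-disk store, the in-memory copy is dropped, and the failed insert is
  retried there.  Sizes and names are illustrative.
*/
struct MemStore
{
  std::vector<int> rows;
  size_t cap = 3;
  bool insert(int r)
  {
    if (rows.size() >= cap)
      return false;                       /* "table is full" */
    rows.push_back(r);
    return true;
  }
};

struct DiskStore
{
  std::vector<int> rows;
  void insert(int r) { rows.push_back(r); }
};

int main()
{
  MemStore heap;
  DiskStore myisam;
  bool spilled = false;

  for (int r = 1; r <= 5; r++)
  {
    if (!spilled && heap.insert(r))
      continue;                           /* still fits in memory */
    if (!spilled)
    {
      for (int kept : heap.rows)          /* copy existing rows over */
        myisam.insert(kept);
      heap.rows.clear();                  /* in-memory table is dropped */
      spilled = true;
    }
    myisam.insert(r);                     /* retry the failed row on disk */
  }
  printf("in memory: %zu rows, on disk: %zu rows\n",
         heap.rows.size(), myisam.rows.size());
  return 0;
}
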
diff --git a/sql/sql_select.h b/sql/sql_select.h
index 332778aafe6..5c987e74163 100644
--- a/sql/sql_select.h
+++ b/sql/sql_select.h
@@ -195,8 +195,8 @@ void count_field_types(TMP_TABLE_PARAM *param, List<Item> &fields,
bool setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,List<Item> &fields);
void copy_fields(TMP_TABLE_PARAM *param);
void copy_funcs(Item **func_ptr);
-bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
- bool ignore_last_dupp_error);
+bool create_myisam_from_heap(THD *Thd, TABLE *table, TMP_TABLE_PARAM *param,
+ int error, bool ignore_last_dupp_error);
/* functions from opt_sum.cc */
int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds);
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 8cec738edb0..ec4f55f246d 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -569,6 +569,14 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
column->field_name);
DBUG_RETURN(-1);
}
+ /* for fulltext keys keyseg length is 1 for blobs (it's ignored in
+ ft code anyway, and 0 (set to column width later) for char's.
+ it has to be correct col width for char's, as char data are not
+ prefixed with length (unlike blobs, where ft code takes data length
+ from a data prefix, ignoring column->length).
+ */
+ if (key->type == Key::FULLTEXT)
+ column->length=test(f_is_blob(sql_field->pack_flag));
if (f_is_blob(sql_field->pack_flag))
{
if (!(file->table_flags() & HA_BLOB_KEY))
@@ -579,15 +587,10 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name,
}
if (!column->length)
{
- if (key->type == Key::FULLTEXT)
- column->length=1; /* ft-code ignores it anyway :-) */
- else
- {
- my_printf_error(ER_BLOB_KEY_WITHOUT_LENGTH,
- ER(ER_BLOB_KEY_WITHOUT_LENGTH),MYF(0),
- column->field_name);
- DBUG_RETURN(-1);
- }
+ my_printf_error(ER_BLOB_KEY_WITHOUT_LENGTH,
+ ER(ER_BLOB_KEY_WITHOUT_LENGTH),MYF(0),
+ column->field_name);
+ DBUG_RETURN(-1);
}
}
if (!(sql_field->flags & NOT_NULL_FLAG))
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index faa106d4f42..e7afa7fbd23 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -262,7 +262,8 @@ bool select_union::send_data(List<Item> &values)
fill_record(table->field,values);
if ((write_record(table,&info)))
{
- if (create_myisam_from_heap(table, tmp_table_param, info.last_errno, 0))
+ if (create_myisam_from_heap(thd, table, tmp_table_param, info.last_errno,
+ 0))
return 1;
}
return 0;
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 19961f5d9e1..462206b25f3 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -66,7 +66,10 @@ int mysql_update(THD *thd,
TABLE *table;
SQL_SELECT *select;
READ_RECORD info;
+ TABLE_LIST tables;
+ List<Item> all_fields;
DBUG_ENTER("mysql_update");
+
LINT_INIT(used_index);
LINT_INIT(timestamp_query_id);
@@ -80,8 +83,13 @@ int mysql_update(THD *thd,
table->quick_keys=0;
want_privilege=table->grant.want_privilege;
table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege);
- if (setup_tables(table_list) || setup_conds(thd,table_list,&conds)
- || setup_ftfuncs(thd))
+
+ bzero((char*) &tables,sizeof(tables)); // For ORDER BY
+ tables.table = table;
+
+ if (setup_tables(table_list) || setup_conds(thd,table_list,&conds) ||
+ setup_order(thd, &tables, all_fields, all_fields, order) ||
+ setup_ftfuncs(thd))
DBUG_RETURN(-1); /* purecov: inspected */
old_used_keys=table->used_keys; // Keys used in WHERE
@@ -159,13 +167,6 @@ int mysql_update(THD *thd,
matching rows before updating the table!
*/
table->file->extra(HA_EXTRA_DONT_USE_CURSOR_TO_UPDATE);
- IO_CACHE tempfile;
- if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
- DISK_BUFFER_SIZE, MYF(MY_WME)))
- {
- delete select; /* purecov: inspected */
- DBUG_RETURN(-1);
- }
if (old_used_keys & ((key_map) 1 << used_index))
{
table->key_read=1;
@@ -174,81 +175,97 @@ int mysql_update(THD *thd,
if (order)
{
+ /*
+ Doing an ORDER BY; Let filesort find and sort the rows we are going
+ to update
+ */
uint length;
SORT_FIELD *sortorder;
- TABLE_LIST tables;
List<Item> fields;
- List<Item> all_fields;
ha_rows examined_rows;
- bzero((char*) &tables,sizeof(tables));
- tables.table = table;
-
table->io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
MYF(MY_FAE | MY_ZEROFILL));
- if (setup_order(thd, &tables, fields, all_fields, order) ||
- !(sortorder=make_unireg_sortorder(order, &length)) ||
+ if (!(sortorder=make_unireg_sortorder(order, &length)) ||
(table->found_records = filesort(table, sortorder, length,
- (SQL_SELECT *) 0, 0L,
- HA_POS_ERROR, &examined_rows))
- == HA_POS_ERROR)
+ select, 0L,
+ limit, &examined_rows)) ==
+ HA_POS_ERROR)
{
delete select;
+ free_io_cache(table);
DBUG_RETURN(-1);
}
+ /*
+ Filesort has already found and selected the rows we want to update,
+ so we don't need the where clause
+ */
+ delete select;
+ select= 0;
}
-
- init_read_record(&info,thd,table,select,0,1);
- thd->proc_info="Searching rows for update";
-
- while (!(error=info.read_record(&info)) && !thd->killed)
+ else
{
- if (!(select && select->skipp_record()))
+ /*
+ We are doing a search on a key that is updated. In this case
+ we go trough the matching rows, save a pointer to them and
+ update these in a separate loop based on the pointer.
+ */
+
+ IO_CACHE tempfile;
+ if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
+ DISK_BUFFER_SIZE, MYF(MY_WME)))
{
- table->file->position(table->record[0]);
- if (my_b_write(&tempfile,table->file->ref,
- table->file->ref_length))
+ delete select; /* purecov: inspected */
+ DBUG_RETURN(-1);
+ }
+
+ init_read_record(&info,thd,table,select,0,1);
+ thd->proc_info="Searching rows for update";
+ uint tmp_limit= limit;
+ while (!(error=info.read_record(&info)) && !thd->killed)
+ {
+ if (!(select && select->skipp_record()))
{
- error=1; /* purecov: inspected */
- break; /* purecov: inspected */
+ table->file->position(table->record[0]);
+ if (my_b_write(&tempfile,table->file->ref,
+ table->file->ref_length))
+ {
+ error=1; /* purecov: inspected */
+ break; /* purecov: inspected */
+ }
+ if (!--limit && using_limit)
+ break;
}
}
+ end_read_record(&info);
+ /* Change select to use tempfile */
+ if (select)
+ {
+ delete select->quick;
+ if (select->free_cond)
+ delete select->cond;
+ select->quick=0;
+ select->cond=0;
+ }
else
{
- if (!(test_flags & 512)) /* For debugging */
- {
- DBUG_DUMP("record",(char*) table->record[0],table->reclength);
- }
+ select= new SQL_SELECT;
+ select->head=table;
+ }
+ if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
+ error=1; /* purecov: inspected */
+ select->file=tempfile; // Read row ptrs from this file
+ if (error >= 0)
+ {
+ delete select;
+ DBUG_RETURN(-1);
}
}
- end_read_record(&info);
if (table->key_read)
{
table->key_read=0;
table->file->extra(HA_EXTRA_NO_KEYREAD);
}
- /* Change select to use tempfile */
- if (select)
- {
- delete select->quick;
- if (select->free_cond)
- delete select->cond;
- select->quick=0;
- select->cond=0;
- }
- else
- {
- select= new SQL_SELECT;
- select->head=table;
- }
- if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
- error=1; /* purecov: inspected */
- select->file=tempfile; // Read row ptrs from this file
- if (error >= 0)
- {
- delete select;
- DBUG_RETURN(-1);
- }
}
if (handle_duplicates == DUP_IGNORE)
@@ -275,11 +292,6 @@ int mysql_update(THD *thd,
(byte*) table->record[0])))
{
updated++;
- if (!--limit && using_limit)
- {
- error= -1;
- break;
- }
}
else if (handle_duplicates != DUP_IGNORE ||
error != HA_ERR_FOUND_DUPP_KEY)
@@ -289,11 +301,17 @@ int mysql_update(THD *thd,
break;
}
}
+ if (!--limit && using_limit)
+ {
+ error= -1; // Simulate end of file
+ break;
+ }
}
else
table->file->unlock_row();
}
end_read_record(&info);
+ free_io_cache(table); // If ORDER BY
thd->proc_info="end";
VOID(table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY));
transactional_table= table->file->has_transactions();
@@ -743,7 +761,8 @@ bool multi_update::send_data(List<Item> &not_used_values)
(error != HA_ERR_FOUND_DUPP_KEY &&
error != HA_ERR_FOUND_DUPP_UNIQUE))
{
- if (create_myisam_from_heap(tmp_table, tmp_table_param + offset, error, 1))
+ if (create_myisam_from_heap(thd, tmp_table, tmp_table_param + offset,
+ error, 1))
{
do_update=0;
DBUG_RETURN(1); // Not a table_is_full error
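
Inside the branch where the rows to change must be located before any of them is updated, the rewritten mysql_update() above takes one of two paths: with an ORDER BY, filesort() finds and orders the rows (applying the WHERE clause and LIMIT) so the SQL_SELECT can be dropped before updating; without one, row pointers are first collected into a tempfile, stopping once LIMIT rows have matched, and the rows are updated in a second pass from those pointers. A toy sketch of that second, two-pass strategy (illustrative only, not the server code):

#include <cstdio>
#include <vector>

/*
  Toy illustration (not the server code) of the non-ORDER BY branch above:
  when the UPDATE would modify the very key it scans, the matching row
  positions are collected first -- stopping once LIMIT rows have been
  found -- and the rows are updated in a second pass from the saved
  positions, so the scan never sees its own updates.
*/
struct Row { int key; int value; };

int main()
{
  std::vector<Row> table = {{1, 10}, {2, 20}, {3, 30}, {4, 40}};
  const size_t limit = 2;                 /* UPDATE ... WHERE key < 4 LIMIT 2 */

  std::vector<size_t> positions;          /* stands in for the tempfile of row refs */
  for (size_t i = 0; i < table.size() && positions.size() < limit; i++)
    if (table[i].key < 4)
      positions.push_back(i);

  for (size_t pos : positions)            /* second pass: safe to touch the key */
    table[pos].key += 100;

  for (const Row &r : table)
    printf("key=%d value=%d\n", r.key, r.value);
  return 0;
}
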