author     Oleksandr Byelkin <sanja@mariadb.com>  2020-08-02 11:05:29 +0200
committer  Oleksandr Byelkin <sanja@mariadb.com>  2020-08-02 11:05:29 +0200
commit     ef7cb0a0b5108b74c23bf6190f7df2cbfe2996a6 (patch)
tree       5c5c70ee11cdf4414a9cc9a5eacdae881933c70a /sql
parent     5ec40fbb2704a0bf1369836d88a5def4721809c8 (diff)
parent     09ec8e2e2246f9fb67fd41631c5669d9ae26b2e5 (diff)
download   mariadb-git-ef7cb0a0b5108b74c23bf6190f7df2cbfe2996a6.tar.gz
Merge branch '10.1' into 10.2
Diffstat (limited to 'sql')
-rw-r--r--  sql/compat56.cc             2
-rw-r--r--  sql/debug_sync.cc           4
-rw-r--r--  sql/derror.cc               2
-rw-r--r--  sql/encryption.cc           4
-rw-r--r--  sql/event_data_objects.cc   2
-rw-r--r--  sql/event_db_repository.cc  2
-rw-r--r--  sql/event_parse_data.cc     6
-rw-r--r--  sql/event_queue.cc          2
-rw-r--r--  sql/events.cc               2
-rw-r--r--  sql/field.cc               99
-rw-r--r--  sql/field.h                30
-rw-r--r--  sql/field_conv.cc           2
-rw-r--r--  sql/filesort.cc             4
-rw-r--r--  sql/gcalc_slicescan.cc      2
-rw-r--r--  sql/gcalc_slicescan.h       6
-rw-r--r--  sql/gcalc_tools.cc          4
-rw-r--r--  sql/ha_partition.cc        32
-rw-r--r--  sql/ha_partition.h          6
-rw-r--r--  sql/handler.cc             12
-rw-r--r--  sql/handler.h              22
-rw-r--r--  sql/item.cc                16
-rw-r--r--  sql/item.h                 16
-rw-r--r--  sql/item_buff.cc            2
-rw-r--r--  sql/item_cmpfunc.cc        14
-rw-r--r--  sql/item_cmpfunc.h          2
-rw-r--r--  sql/item_func.cc           14
-rw-r--r--  sql/item_inetfunc.cc        8
-rw-r--r--  sql/item_strfunc.cc         6
-rw-r--r--  sql/item_subselect.cc      50
-rw-r--r--  sql/item_subselect.h        8
-rw-r--r--  sql/item_sum.cc            14
-rw-r--r--  sql/item_sum.h              2
-rw-r--r--  sql/item_timefunc.cc       24
-rw-r--r--  sql/item_xmlfunc.cc        20
-rw-r--r--  sql/key.cc                  6
-rw-r--r--  sql/lex.h                   2
-rw-r--r--  sql/lock.cc                 2
-rw-r--r--  sql/log.cc                 18
-rw-r--r--  sql/log_event.cc           36
-rw-r--r--  sql/log_event.h            18
-rw-r--r--  sql/log_event_old.cc        4
-rw-r--r--  sql/sql_acl.cc             65
-rw-r--r--  sql/sql_admin.cc           10
-rw-r--r--  sql/sql_base.cc            14
-rw-r--r--  sql/sql_lex.cc              3
-rw-r--r--  sql/sql_select.cc           2
-rw-r--r--  sql/sql_update.cc           3
-rw-r--r--  sql/sql_view.cc             5
48 files changed, 344 insertions, 285 deletions
diff --git a/sql/compat56.cc b/sql/compat56.cc
index 1ae654338c0..8a17ae16ff3 100644
--- a/sql/compat56.cc
+++ b/sql/compat56.cc
@@ -290,7 +290,7 @@ uint my_datetime_binary_length(uint dec)
/*
On disk we store as unsigned number with DATETIMEF_INT_OFS offset,
- for HA_KETYPE_BINARY compatibilty purposes.
+ for HA_KETYPE_BINARY compatibility purposes.
*/
#define DATETIMEF_INT_OFS 0x8000000000LL
diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc
index aafce9065e3..e50ab0891ed 100644
--- a/sql/debug_sync.cc
+++ b/sql/debug_sync.cc
@@ -33,7 +33,7 @@
/*
Action to perform at a synchronization point.
NOTE: This structure is moved around in memory by realloc(), qsort(),
- and memmove(). Do not add objects with non-trivial constuctors
+ and memmove(). Do not add objects with non-trivial constructors
or destructors, which might prevent moving of this structure
with these functions.
*/
@@ -542,7 +542,7 @@ static void debug_sync_reset(THD *thd)
@description
Removing an action mainly means to decrement the ds_active counter.
But if the action is between other active action in the array, then
- the array needs to be shrinked. The active actions above the one to
+ the array needs to be shrunk. The active actions above the one to
be removed have to be moved down by one slot.
*/
diff --git a/sql/derror.cc b/sql/derror.cc
index d6ca47e9054..932c01b9dda 100644
--- a/sql/derror.cc
+++ b/sql/derror.cc
@@ -236,7 +236,7 @@ static File open_error_msg_file(const char *file_name, const char *language,
MYF(0))) < 0)
{
/*
- Trying pre-5.4 sematics of the --language parameter.
+ Trying pre-5.4 semantics of the --language parameter.
It included the language-specific part, e.g.:
--language=/path/to/english/
*/
diff --git a/sql/encryption.cc b/sql/encryption.cc
index ad23c80b8ab..4fac36dc97e 100644
--- a/sql/encryption.cc
+++ b/sql/encryption.cc
@@ -75,8 +75,8 @@ int initialize_encryption_plugin(st_plugin_int *plugin)
(struct st_mariadb_encryption*) plugin->plugin->info;
/*
- Copmiler on Spark doesn't like the '?' operator here as it
- belives the (uint (*)...) implies the C++ call model.
+ Compiler on Spark doesn't like the '?' operator here as it
+ believes the (uint (*)...) implies the C++ call model.
*/
if (handle->crypt_ctx_size)
encryption_handler.encryption_ctx_size_func= handle->crypt_ctx_size;
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index dab09161ca0..4100a031c91 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -159,7 +159,7 @@ Event_creation_ctx::load_from_db(THD *thd,
/*************************************************************************/
/*
- Initiliazes dbname and name of an Event_queue_element_for_exec
+ Initializes dbname and name of an Event_queue_element_for_exec
object
SYNOPSIS
diff --git a/sql/event_db_repository.cc b/sql/event_db_repository.cc
index 5188f86c01b..ea52f68acc9 100644
--- a/sql/event_db_repository.cc
+++ b/sql/event_db_repository.cc
@@ -669,7 +669,7 @@ Event_db_repository::create_event(THD *thd, Event_parse_data *parse_data,
DBUG_PRINT("info", ("name: %.*s", (int) parse_data->name.length,
parse_data->name.str));
- DBUG_PRINT("info", ("check existance of an event with the same name"));
+ DBUG_PRINT("info", ("check existence of an event with the same name"));
if (!find_named_event(parse_data->dbname, parse_data->name, table))
{
if (thd->lex->create_info.or_replace())
diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc
index e88ace53149..3606fdbdd3d 100644
--- a/sql/event_parse_data.cc
+++ b/sql/event_parse_data.cc
@@ -100,7 +100,7 @@ Event_parse_data::init_name(THD *thd, sp_name *spn)
ENDS or AT is in the past, we are trying to create an event that
will never be executed. If it has ON COMPLETION NOT PRESERVE
(default), then it would normally be dropped already, so on CREATE
- EVENT we give a warning, and do not create anyting. On ALTER EVENT
+ EVENT we give a warning, and do not create anything. On ALTER EVENT
we give a error, and do not change the event.
If the event has ON COMPLETION PRESERVE, then we see if the event is
@@ -359,7 +359,7 @@ wrong_value:
EVERY 5 MINUTE STARTS "2004-12-12 10:00:00" means that
the event will be executed every 5 minutes but this will
start at the date shown above. Expressions are possible :
- DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tommorow at
+ DATE_ADD(NOW(), INTERVAL 1 DAY) -- start tomorrow at
same time.
RETURN VALUE
@@ -413,7 +413,7 @@ wrong_value:
EVERY 5 MINUTE ENDS "2004-12-12 10:00:00" means that
the event will be executed every 5 minutes but this will
end at the date shown above. Expressions are possible :
- DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tommorow at
+ DATE_ADD(NOW(), INTERVAL 1 DAY) -- end tomorrow at
same time.
RETURN VALUE
diff --git a/sql/event_queue.cc b/sql/event_queue.cc
index 6cc2179be5b..904efe26f36 100644
--- a/sql/event_queue.cc
+++ b/sql/event_queue.cc
@@ -357,7 +357,7 @@ Event_queue::drop_matching_events(THD *thd, LEX_STRING pattern,
We don't call mysql_cond_broadcast(&COND_queue_state);
If we remove the top event:
1. The queue is empty. The scheduler will wake up at some time and
- realize that the queue is empty. If create_event() comes inbetween
+ realize that the queue is empty. If create_event() comes in between
it will signal the scheduler
2. The queue is not empty, but the next event after the previous top,
won't be executed any time sooner than the element we removed. Hence,
diff --git a/sql/events.cc b/sql/events.cc
index c5dc51ab83d..abac2833833 100644
--- a/sql/events.cc
+++ b/sql/events.cc
@@ -128,7 +128,7 @@ bool Events::check_if_system_tables_error()
/**
Reconstructs interval expression from interval type and expression
- value that is in form of a value of the smalles entity:
+ value that is in form of a value of the smallest entity:
For
YEAR_MONTH - expression is in months
DAY_MINUTE - expression is in minutes
diff --git a/sql/field.cc b/sql/field.cc
index 9a1779dea5f..bdaaecc2026 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -47,7 +47,7 @@
#define MAX_EXPONENT 1024
/*****************************************************************************
- Instansiate templates and static variables
+ Instantiate templates and static variables
*****************************************************************************/
static const char *zero_timestamp="0000-00-00 00:00:00.000000";
@@ -91,7 +91,7 @@ const char field_separator=',';
/*
Rules for merging different types of fields in UNION
- NOTE: to avoid 256*256 table, gap in table types numeration is skiped
+ NOTE: to avoid 256*256 table, gap in table types numeration is skipped
following #defines describe that gap and how to canculate number of fields
and index of field in this array.
*/
@@ -1526,7 +1526,7 @@ Item *Field_num::get_equal_zerofill_const_item(THD *thd, const Context &ctx,
/**
- Contruct warning parameters using thd->no_errors
+ Construct warning parameters using thd->no_errors
to determine whether to generate or suppress warnings.
We can get here in a query like this:
SELECT COUNT(@@basedir);
@@ -1574,7 +1574,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd,
if (filter.want_warning_edom())
{
/*
- We can use err.ptr() here as ErrConvString is guranteed to put an
+ We can use err.ptr() here as ErrConvString is guaranteed to put an
end \0 here.
*/
THD *wthd= thd ? thd : current_thd;
@@ -1606,7 +1606,7 @@ Value_source::Converter_string_to_number::check_edom_and_truncation(THD *thd,
- found garbage at the end of the string.
@param type Data type name (e.g. "decimal", "integer", "double")
- @param edom Indicates that the string-to-number routine retuned
+ @param edom Indicates that the string-to-number routine returned
an error code equivalent to EDOM (value out of domain),
i.e. the string fully consisted of garbage and the
conversion routine could not get any digits from it.
@@ -1670,7 +1670,7 @@ int Field_num::check_edom_and_truncation(const char *type, bool edom,
/*
- Conver a string to an integer then check bounds.
+ Convert a string to an integer then check bounds.
SYNOPSIS
Field_num::get_int
@@ -2671,7 +2671,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
We only have to generate warnings if count_cuted_fields is set.
This is to avoid extra checks of the number when they are not needed.
Even if this flag is not set, it's OK to increment warnings, if
- it makes the code easer to read.
+ it makes the code easier to read.
*/
if (get_thd()->count_cuted_fields)
@@ -2754,7 +2754,7 @@ int Field_decimal::store(const char *from_arg, uint len, CHARSET_INFO *cs)
}
/*
- Now write the formated number
+ Now write the formatted number
First the digits of the int_% parts.
Do we have enough room to write these digits ?
@@ -3331,7 +3331,7 @@ int Field_new_decimal::store(const char *from, uint length,
If check_decimal() failed because of EDOM-alike error,
(e.g. E_DEC_BAD_NUM), we have to initialize decimal_value to zero.
Note: if check_decimal() failed because of truncation,
- decimal_value is alreay properly initialized.
+ decimal_value is already properly initialized.
*/
my_decimal_set_zero(&decimal_value);
/*
@@ -4835,11 +4835,12 @@ int truncate_double(double *nr, uint field_length, uint dec,
{
uint order= field_length - dec;
uint step= array_elements(log_10) - 1;
- max_value= 1.0;
+ double max_value_by_dec= 1.0;
for (; order > step; order-= step)
- max_value*= log_10[step];
- max_value*= log_10[order];
- max_value-= 1.0 / log_10[dec];
+ max_value_by_dec*= log_10[step];
+ max_value_by_dec*= log_10[order];
+ max_value_by_dec-= 1.0 / log_10[dec];
+ set_if_smaller(max_value, max_value_by_dec);
/* Check for infinity so we don't get NaN in calculations */
if (!std::isinf(res))
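For illustration only (not part of the patch): the truncate_double() hunk above computes the precision-derived cap 10^(field_length - dec) - 10^(-dec) into max_value_by_dec and then merely lowers the pre-existing max_value via set_if_smaller(), so a generous column definition can no longer raise the cap above the data type's own maximum. A minimal standalone sketch of that arithmetic, with hypothetical names and plain pow() in place of MariaDB's log_10 table:

#include <algorithm>
#include <cmath>
#include <iomanip>
#include <iostream>

// Hypothetical helper, not MariaDB code: combine the data type's own maximum
// (e.g. FLT_MAX for FLOAT) with the maximum implied by the declared
// precision, keeping the smaller of the two, as set_if_smaller() does above.
static double capped_max(double type_max, unsigned field_length, unsigned dec)
{
  double max_by_dec= std::pow(10.0, (double) (field_length - dec)) -
                     1.0 / std::pow(10.0, (double) dec);
  return std::min(type_max, max_by_dec);
}

int main()
{
  // FLOAT(7,4): the precision cap 999.9999 is far below FLT_MAX and wins.
  std::cout << std::setprecision(10)
            << capped_max(3.402823466e+38, 7, 4) << "\n";   // prints 999.9999
}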
@@ -5134,7 +5135,7 @@ Field_timestamp::Field_timestamp(uchar *ptr_arg, uint32 len_arg,
{
/*
We mark the flag with TIMESTAMP_FLAG to indicate to the client that
- this field will be automaticly updated on insert.
+ this field will be automatically updated on insert.
*/
flags|= TIMESTAMP_FLAG;
if (unireg_check != TIMESTAMP_DN_FIELD)
@@ -7589,7 +7590,7 @@ Field_string::unpack(uchar *to, const uchar *from, const uchar *from_end,
with the real type. Since all allowable types have 0xF as most
significant bits of the metadata word, lengths <256 will not affect
the real type at all, while all other values will result in a
- non-existant type in the range 17-244.
+ non-existent type in the range 17-244.
@see Field_string::unpack
@@ -7781,8 +7782,7 @@ void Field_varstring::mark_unused_memory_as_defined()
#endif
-int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
- uint max_len)
+int Field_varstring::cmp(const uchar *a_ptr, const uchar *b_ptr)
{
uint a_length, b_length;
int diff;
@@ -7797,8 +7797,8 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
a_length= uint2korr(a_ptr);
b_length= uint2korr(b_ptr);
}
- set_if_smaller(a_length, max_len);
- set_if_smaller(b_length, max_len);
+ set_if_smaller(a_length, field_length);
+ set_if_smaller(b_length, field_length);
diff= field_charset->coll->strnncollsp(field_charset,
a_ptr+
length_bytes,
@@ -7810,6 +7810,43 @@ int Field_varstring::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
}
+static int cmp_str_prefix(const uchar *ua, size_t alen, const uchar *ub,
+ size_t blen, size_t prefix, CHARSET_INFO *cs)
+{
+ const char *a= (char*)ua, *b= (char*)ub;
+ MY_STRCOPY_STATUS status;
+ prefix/= cs->mbmaxlen;
+ alen= cs->cset->well_formed_char_length(cs, a, a + alen, prefix, &status);
+ blen= cs->cset->well_formed_char_length(cs, b, b + blen, prefix, &status);
+ return cs->coll->strnncollsp(cs, ua, alen, ub, blen);
+}
+
+
+
+int Field_varstring::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
+ size_t prefix_len)
+{
+ /* avoid expensive well_formed_char_length if possible */
+ if (prefix_len == table->field[field_index]->field_length)
+ return Field_varstring::cmp(a_ptr, b_ptr);
+
+ size_t a_length, b_length;
+
+ if (length_bytes == 1)
+ {
+ a_length= *a_ptr;
+ b_length= *b_ptr;
+ }
+ else
+ {
+ a_length= uint2korr(a_ptr);
+ b_length= uint2korr(b_ptr);
+ }
+ return cmp_str_prefix(a_ptr+length_bytes, a_length, b_ptr+length_bytes,
+ b_length, prefix_len, field_charset);
+}
+
+
/**
@note
varstring and blob keys are ALWAYS stored with a 2 byte length prefix
@@ -8374,16 +8411,24 @@ int Field_blob::cmp(const uchar *a,uint32 a_length, const uchar *b,
}
-int Field_blob::cmp_max(const uchar *a_ptr, const uchar *b_ptr,
- uint max_length)
+int Field_blob::cmp(const uchar *a_ptr, const uchar *b_ptr)
+{
+ uchar *blob1,*blob2;
+ memcpy(&blob1, a_ptr+packlength, sizeof(char*));
+ memcpy(&blob2, b_ptr+packlength, sizeof(char*));
+ size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr);
+ return cmp(blob1, a_len, blob2, b_len);
+}
+
+
+int Field_blob::cmp_prefix(const uchar *a_ptr, const uchar *b_ptr,
+ size_t prefix_len)
{
uchar *blob1,*blob2;
memcpy(&blob1, a_ptr+packlength, sizeof(char*));
memcpy(&blob2, b_ptr+packlength, sizeof(char*));
- uint a_len= get_length(a_ptr), b_len= get_length(b_ptr);
- set_if_smaller(a_len, max_length);
- set_if_smaller(b_len, max_length);
- return Field_blob::cmp(blob1,a_len,blob2,b_len);
+ size_t a_len= get_length(a_ptr), b_len= get_length(b_ptr);
+ return cmp_str_prefix(blob1, a_len, blob2, b_len, prefix_len, field_charset);
}
@@ -9709,7 +9754,7 @@ my_decimal *Field_bit::val_decimal(my_decimal *deciaml_value)
The a and b pointer must be pointers to the field in a record
(not the table->record[0] necessarily)
*/
-int Field_bit::cmp_max(const uchar *a, const uchar *b, uint max_len)
+int Field_bit::cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len)
{
my_ptrdiff_t a_diff= a - ptr;
my_ptrdiff_t b_diff= b - ptr;
@@ -10278,7 +10323,7 @@ bool Column_definition::check(THD *thd)
break;
case MYSQL_TYPE_VARCHAR:
/*
- Long VARCHAR's are automaticly converted to blobs in mysql_prepare_table
+ Long VARCHAR's are automatically converted to blobs in mysql_prepare_table
if they don't have a default value
*/
max_field_charlength= MAX_FIELD_VARCHARLENGTH;
diff --git a/sql/field.h b/sql/field.h
index d5e2cb25788..18e44f1d9d4 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -270,7 +270,7 @@ protected:
};
- // String-to-number convertion methods for the old code compatibility
+ // String-to-number conversion methods for the old code compatibility
longlong longlong_from_string_with_check(CHARSET_INFO *cs, const char *cptr,
const char *end) const
{
@@ -351,7 +351,7 @@ public:
/*
Item context attributes.
Comparison functions pass their attributes to propagate_equal_fields().
- For exmple, for string comparison, the collation of the comparison
+ For example, for string comparison, the collation of the comparison
operation is important inside propagate_equal_fields().
*/
class Context
@@ -484,7 +484,7 @@ inline bool is_timestamp_type(enum_field_types type)
/**
- Convert temporal real types as retuned by field->real_type()
+ Convert temporal real types as returned by field->real_type()
to field type as returned by field->type().
@param real_type Real type.
@@ -1055,9 +1055,13 @@ public:
return type();
}
inline int cmp(const uchar *str) { return cmp(ptr,str); }
- virtual int cmp_max(const uchar *a, const uchar *b, uint max_len)
- { return cmp(a, b); }
virtual int cmp(const uchar *,const uchar *)=0;
+ /*
+ The following method is used for comparing prefix keys.
+ Currently it's only used in partitioning.
+ */
+ virtual int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len)
+ { return cmp(a, b); }
virtual int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U)
{ return memcmp(a,b,pack_length()); }
virtual int cmp_offset(uint row_offset)
@@ -3268,11 +3272,8 @@ public:
longlong val_int(void);
String *val_str(String*,String *);
my_decimal *val_decimal(my_decimal *);
- int cmp_max(const uchar *, const uchar *, uint max_length);
- int cmp(const uchar *a,const uchar *b)
- {
- return cmp_max(a, b, ~0U);
- }
+ int cmp(const uchar *a,const uchar *b);
+ int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
void sort_string(uchar *buff,uint length);
uint get_key_image(uchar *buff,uint length, imagetype type);
void set_key_image(const uchar *buff,uint length);
@@ -3389,9 +3390,8 @@ public:
longlong val_int(void);
String *val_str(String*,String *);
my_decimal *val_decimal(my_decimal *);
- int cmp_max(const uchar *, const uchar *, uint max_length);
- int cmp(const uchar *a,const uchar *b)
- { return cmp_max(a, b, ~0U); }
+ int cmp(const uchar *a,const uchar *b);
+ int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
int cmp(const uchar *a, uint32 a_length, const uchar *b, uint32 b_length);
int cmp_binary(const uchar *a,const uchar *b, uint32 max_length=~0U);
int key_cmp(const uchar *,const uchar*);
@@ -3725,7 +3725,7 @@ private:
This is the reason:
- Field_bit::cmp_binary() is only implemented in the base class
(Field::cmp_binary()).
- - Field::cmp_binary() currenly use pack_length() to calculate how
+ - Field::cmp_binary() currently uses pack_length() to calculate how
long the data is.
- pack_length() includes size of the bits stored in the NULL bytes
of the record.
@@ -3780,7 +3780,7 @@ public:
}
int cmp_binary_offset(uint row_offset)
{ return cmp_offset(row_offset); }
- int cmp_max(const uchar *a, const uchar *b, uint max_length);
+ int cmp_prefix(const uchar *a, const uchar *b, size_t prefix_len);
int key_cmp(const uchar *a, const uchar *b)
{ return cmp_binary((uchar *) a, (uchar *) b); }
int key_cmp(const uchar *str, uint length);
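Taken together, the field.cc and field.h hunks above replace the byte-oriented cmp_max(a, b, max_len) with cmp_prefix(a, b, prefix_len), which partitioning uses for prefix keys: the prefix length arrives in bytes, is divided by the charset's mbmaxlen, and both operands are cut at that many well-formed characters before strnncollsp() runs. A rough standalone sketch of that semantics, assuming a one-byte-per-character charset and using std::string in place of CHARSET_INFO (hypothetical names, not MariaDB APIs):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>

// Hypothetical sketch, not MariaDB code: mirrors the shape of the new
// cmp_str_prefix()/Field_*::cmp_prefix() pair.  The prefix length is given
// in bytes, converted to a character count, and both operands are truncated
// to that many characters before the collation comparison.
static int cmp_str_prefix_sketch(const std::string &a, const std::string &b,
                                 size_t prefix_bytes, size_t mbmaxlen)
{
  size_t prefix_chars= prefix_bytes / mbmaxlen;          // bytes -> characters
  std::string ap= a.substr(0, std::min(a.size(), prefix_chars));
  std::string bp= b.substr(0, std::min(b.size(), prefix_chars));
  return ap.compare(bp);                                 // stand-in for strnncollsp()
}

int main()
{
  // With a 3-byte prefix key the two values compare equal; with 6 they differ.
  std::cout << cmp_str_prefix_sketch("partition", "particle", 3, 1) << "\n";        // 0
  std::cout << (cmp_str_prefix_sketch("partition", "particle", 6, 1) != 0) << "\n"; // 1
}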
diff --git a/sql/field_conv.cc b/sql/field_conv.cc
index e92f1930a7c..39136b830ba 100644
--- a/sql/field_conv.cc
+++ b/sql/field_conv.cc
@@ -229,7 +229,7 @@ static void do_skip(Copy_field *copy __attribute__((unused)))
note: if the record we're copying from is NULL-complemetned (i.e.
from_field->table->null_row==1), it will also have all NULLable columns to be
- set to NULLs, so we dont need to check table->null_row here.
+ set to NULLs, so we don't need to check table->null_row here.
*/
static void do_copy_null(Copy_field *copy)
diff --git a/sql/filesort.cc b/sql/filesort.cc
index f6bea1870c0..d76c39c3bd4 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -885,12 +885,12 @@ static ha_rows find_all_keys(THD *thd, Sort_param *param, SQL_SELECT *select,
}
if (!quick_select)
{
- (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */
+ (void) file->extra(HA_EXTRA_NO_CACHE); /* End caching of records */
if (!next_pos)
file->ha_rnd_end();
}
- /* Signal we should use orignal column read and write maps */
+ /* Signal we should use original column read and write maps */
sort_form->column_bitmaps_set(save_read_set, save_write_set, save_vcol_set);
if (thd->is_error())
diff --git a/sql/gcalc_slicescan.cc b/sql/gcalc_slicescan.cc
index d42babebf85..f63a3e717b4 100644
--- a/sql/gcalc_slicescan.cc
+++ b/sql/gcalc_slicescan.cc
@@ -1877,7 +1877,7 @@ int Gcalc_scan_iterator::add_eq_node(Gcalc_heap::Info *node, point *sp)
if (!en)
GCALC_DBUG_RETURN(1);
- /* eq_node iserted after teh equal point. */
+ /* eq_node inserted after the equal point. */
en->next= node->get_next();
node->next= en;
diff --git a/sql/gcalc_slicescan.h b/sql/gcalc_slicescan.h
index 54b12962d2a..b5188f29dfd 100644
--- a/sql/gcalc_slicescan.h
+++ b/sql/gcalc_slicescan.h
@@ -362,9 +362,9 @@ enum Gcalc_scan_events
/*
- Gcalc_scan_iterator incapsulates the slisescan algorithm.
- It takes filled Gcalc_heap as an datasource. Then can be
- iterated trought the vertexes and intersection points with
+ Gcalc_scan_iterator incapsulates the slicescan algorithm.
+ It takes filled Gcalc_heap as a datasource. Then can be
+ iterated through the vertexes and intersection points with
the step() method. After the 'step()' one usually observes
the current 'slice' to do the necessary calculations, like
looking for intersections, calculating the area, whatever.
diff --git a/sql/gcalc_tools.cc b/sql/gcalc_tools.cc
index 8bd6a45f7aa..134032b0ffe 100644
--- a/sql/gcalc_tools.cc
+++ b/sql/gcalc_tools.cc
@@ -1184,14 +1184,14 @@ int Gcalc_operation_reducer::connect_threads(
{
rp0->outer_poly= prev_range->thread_start;
tb->thread_start= prev_range->thread_start;
- /* Chack if needed */
+ /* Check if needed */
ta->thread_start= prev_range->thread_start;
}
else
{
rp0->outer_poly= 0;
ta->thread_start= rp0;
- /* Chack if needed */
+ /* Check if needed */
tb->thread_start= rp0;
}
GCALC_DBUG_RETURN(0);
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index 453a1c9ff89..daac3f6a86d 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -546,7 +546,7 @@ bool ha_partition::initialize_partition(MEM_ROOT *mem_root)
point.
If you do not implement this, the default delete_table() is called from
- handler.cc and it will delete all files with the file extentions returned
+ handler.cc and it will delete all files with the file extensions returned
by bas_ext().
Called from handler.cc by delete_table and ha_create_table(). Only used
@@ -578,7 +578,7 @@ int ha_partition::delete_table(const char *name)
Renames a table from one name to another from alter table call.
If you do not implement this, the default rename_table() is called from
- handler.cc and it will rename all files with the file extentions returned
+ handler.cc and it will rename all files with the file extensions returned
by bas_ext().
Called from sql_table.cc by mysql_rename_table().
@@ -1432,7 +1432,7 @@ int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt,
/**
- @brief Check and repair the table if neccesary
+ @brief Check and repair the table if necessary
@param thd Thread object
@@ -2912,7 +2912,7 @@ error_end:
/**
Read the .par file to get the partitions engines and names
- @param name Name of table file (without extention)
+ @param name Name of table file (without extension)
@return Operation status
@retval true Failure
@@ -3140,7 +3140,7 @@ static uchar *get_part_name(PART_NAME_DEF *part, size_t *length,
@return Operation status
@retval true Failure
- @retval false Sucess
+ @retval false Success
*/
bool ha_partition::insert_partition_name_in_hash(const char *name, uint part_id,
@@ -3266,7 +3266,7 @@ err:
@return Operation status
@retval true Failure
- @retval false Sucess
+ @retval false Success
*/
bool ha_partition::set_ha_share_ref(Handler_share **ha_share_arg)
@@ -4151,7 +4151,7 @@ int ha_partition::write_row(uchar * buf)
/*
If we have failed to set the auto-increment value for this row,
it is highly likely that we will not be able to insert it into
- the correct partition. We must check and fail if neccessary.
+ the correct partition. We must check and fail if necessary.
*/
if (error)
goto exit;
@@ -4221,7 +4221,7 @@ exit:
have the previous row record in it, while new_data will have the newest
data in it.
Keep in mind that the server can do updates based on ordering if an
- ORDER BY clause was used. Consecutive ordering is not guarenteed.
+ ORDER BY clause was used. Consecutive ordering is not guaranteed.
Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
new_data is always record[0]
@@ -4363,7 +4363,7 @@ exit:
(from either a previous rnd_xxx() or index_xxx() call).
If you keep a pointer to the last row or can access a primary key it will
make doing the deletion quite a bit easier.
- Keep in mind that the server does no guarentee consecutive deletions.
+ Keep in mind that the server does no guarantee consecutive deletions.
ORDER BY clauses can be used.
Called in sql_acl.cc and sql_udf.cc to manage internal table information.
@@ -4747,7 +4747,7 @@ int ha_partition::end_bulk_insert()
When scan is used we will scan one handler partition at a time.
When preparing for rnd_pos we will init all handler partitions.
- No extra cache handling is needed when scannning is not performed.
+ No extra cache handling is needed when scanning is not performed.
Before initialising we will call rnd_end to ensure that we clean up from
any previous incarnation of a table scan.
@@ -5843,7 +5843,7 @@ int ha_partition::read_range_next()
SYNOPSIS
ha_partition::partition_scan_set_up()
buf Buffer to later return record in (this function
- needs it to calculcate partitioning function
+ needs it to calculate partitioning function
values)
idx_read_flag TRUE <=> m_start_key has range start endpoint which
@@ -6878,7 +6878,7 @@ static int end_keyread_cb(handler* h, void *unused)
function after completing a query.
3) It is called when deleting the QUICK_RANGE_SELECT object if the
QUICK_RANGE_SELECT object had its own handler object. It is called
- immediatley before close of this local handler object.
+ immediately before close of this local handler object.
HA_EXTRA_KEYREAD:
HA_EXTRA_NO_KEYREAD:
These parameters are used to provide an optimisation hint to the handler.
@@ -6915,7 +6915,7 @@ static int end_keyread_cb(handler* h, void *unused)
HA_EXTRA_IGNORE_DUP_KEY:
HA_EXTRA_NO_IGNORE_DUP_KEY:
Informs the handler to we will not stop the transaction if we get an
- duplicate key errors during insert/upate.
+ duplicate key errors during insert/update.
Always called in pair, triggered by INSERT IGNORE and other similar
SQL constructs.
Not used by MyISAM.
@@ -8334,7 +8334,7 @@ bool ha_partition::prepare_inplace_alter_table(TABLE *altered_table,
/*
Changing to similar partitioning, only update metadata.
- Non allowed changes would be catched in prep_alter_part_table().
+ Non allowed changes would be caught in prep_alter_part_table().
*/
if (ha_alter_info->alter_info->flags == Alter_info::ALTER_PARTITION)
DBUG_RETURN(false);
@@ -8367,7 +8367,7 @@ bool ha_partition::inplace_alter_table(TABLE *altered_table,
/*
Changing to similar partitioning, only update metadata.
- Non allowed changes would be catched in prep_alter_part_table().
+ Non allowed changes would be caught in prep_alter_part_table().
*/
if (ha_alter_info->alter_info->flags == Alter_info::ALTER_PARTITION)
DBUG_RETURN(false);
@@ -8412,7 +8412,7 @@ bool ha_partition::commit_inplace_alter_table(TABLE *altered_table,
/*
Changing to similar partitioning, only update metadata.
- Non allowed changes would be catched in prep_alter_part_table().
+ Non allowed changes would be caught in prep_alter_part_table().
*/
if (ha_alter_info->alter_info->flags == Alter_info::ALTER_PARTITION)
DBUG_RETURN(false);
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 1bd3209d82d..a48aa639237 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -285,7 +285,7 @@ public:
-------------------------------------------------------------------------
MODULE create/delete handler object
-------------------------------------------------------------------------
- Object create/delete methode. The normal called when a table object
+ Object create/delete method. Normally called when a table object
exists. There is also a method to create the handler object with only
partition information. This is used from mysql_create_table when the
table is to be created and the engine type is deduced to be the
@@ -583,7 +583,7 @@ public:
/**
@breif
- Positions an index cursor to the index specified in the hanlde. Fetches the
+ Positions an index cursor to the index specified in the handle. Fetches the
row if available. If the key value is null, begin at first key of the
index.
*/
@@ -800,7 +800,7 @@ public:
HA_REC_NOT_IN_SEQ:
This flag is set for handlers that cannot guarantee that the rows are
- returned accroding to incremental positions (0, 1, 2, 3...).
+ returned according to incremental positions (0, 1, 2, 3...).
This also means that rnd_next() should return HA_ERR_RECORD_DELETED
if it finds a deleted row.
(MyISAM (not fixed length row), HEAP, InnoDB)
diff --git a/sql/handler.cc b/sql/handler.cc
index d6e1680143c..0a5ed2dffbb 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -724,7 +724,7 @@ int ha_end()
/*
- This should be eventualy based on the graceful shutdown flag.
+ This should be eventually based on the graceful shutdown flag.
So if flag is equal to HA_PANIC_CLOSE, the deallocate
the errors.
*/
@@ -1333,8 +1333,8 @@ int ha_commit_trans(THD *thd, bool all)
THD_TRANS *trans= all ? &thd->transaction.all : &thd->transaction.stmt;
/*
"real" is a nick name for a transaction for which a commit will
- make persistent changes. E.g. a 'stmt' transaction inside a 'all'
- transation is not 'real': even though it's possible to commit it,
+ make persistent changes. E.g. a 'stmt' transaction inside an 'all'
+ transaction is not 'real': even though it's possible to commit it,
the changes are not durable as they might be rolled back if the
enclosing 'all' transaction is rolled back.
*/
@@ -2494,7 +2494,7 @@ handler *handler::clone(const char *name, MEM_ROOT *mem_root)
/*
TODO: Implement a more efficient way to have more than one index open for
- the same table instance. The ha_open call is not cachable for clone.
+ the same table instance. The ha_open call is not cacheable for clone.
This is not critical as the engines already have the table open
and should be able to use the original instance of the table.
@@ -3308,7 +3308,7 @@ int handler::update_auto_increment()
index_init() or rnd_init() and in any column_bitmaps_signal() call after
this.
- The handler is allowd to do changes to the bitmap after a index_init or
+ The handler is allowed to do changes to the bitmap after a index_init or
rnd_init() call is made as after this, MySQL will not use the bitmap
for any program logic checking.
*/
@@ -3371,7 +3371,7 @@ void handler::get_auto_increment(ulonglong offset, ulonglong increment,
{ // Autoincrement at key-start
error= ha_index_last(table->record[1]);
/*
- MySQL implicitely assumes such method does locking (as MySQL decides to
+ MySQL implicitly assumes such method does locking (as MySQL decides to
use nr+increment without checking again with the handler, in
handler::update_auto_increment()), so reserves to infinite.
*/
diff --git a/sql/handler.h b/sql/handler.h
index d1c4b79ee48..0aa56afe1a5 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -212,7 +212,7 @@ enum enum_alter_inplace_result {
this flag must implement start_read_removal() and end_read_removal().
The handler may return "fake" rows constructed from the key of the row
asked for. This is used to optimize UPDATE and DELETE by reducing the
- numer of roundtrips between handler and storage engine.
+ number of roundtrips between handler and storage engine.
Example:
UPDATE a=1 WHERE pk IN (<keys>)
@@ -485,7 +485,7 @@ enum enum_binlog_command {
/* Bits in used_fields */
#define HA_CREATE_USED_AUTO (1UL << 0)
-#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer availble
+#define HA_CREATE_USED_RAID (1UL << 1) //RAID is no longer available
#define HA_CREATE_USED_UNION (1UL << 2)
#define HA_CREATE_USED_INSERT_METHOD (1UL << 3)
#define HA_CREATE_USED_MIN_ROWS (1UL << 4)
@@ -953,7 +953,7 @@ struct handler_iterator {
/*
Pointer to buffer for the iterator to use.
Should be allocated by function which created the iterator and
- destroied by freed by above "destroy" call
+ destroyed by freed by above "destroy" call
*/
void *buffer;
};
@@ -1169,7 +1169,7 @@ struct handlerton
"cookie".
The flush and call of commit_checkpoint_notify_ha() need not happen
- immediately - it can be scheduled and performed asynchroneously (ie. as
+ immediately - it can be scheduled and performed asynchronously (ie. as
part of next prepare(), or sync every second, or whatever), but should
not be postponed indefinitely. It is however also permissible to do it
immediately, before returning from commit_checkpoint_request().
@@ -1254,13 +1254,13 @@ struct handlerton
Used by open_table_error(), by the default rename_table and delete_table
handler methods, and by the default discovery implementation.
- For engines that have more than one file name extentions (separate
+ For engines that have more than one file name extensions (separate
metadata, index, and/or data files), the order of elements is relevant.
- First element of engine file name extentions array should be metadata
- file extention. This is implied by the open_table_error()
+ First element of engine file name extensions array should be metadata
+ file extension. This is implied by the open_table_error()
and the default discovery implementation.
- Second element - data file extention. This is implied
+ Second element - data file extension. This is implied
assumed by REPAIR TABLE ... USE_FRM implementation.
*/
const char **tablefile_extensions; // by default - empty list
@@ -1761,7 +1761,7 @@ struct HA_CREATE_INFO: public Table_scope_and_contents_source_st,
CONVERT TO CHARACTER SET DEFAULT
to
CONVERT TO CHARACTER SET <character-set-of-the-current-database>
- TODO: Should't we postpone resolution of DEFAULT until the
+ TODO: Shouldn't we postpone resolution of DEFAULT until the
character set of the table owner database is loaded from its db.opt?
*/
DBUG_ASSERT(cs);
@@ -2653,7 +2653,7 @@ public:
ha_statistics stats;
/** MultiRangeRead-related members: */
- range_seq_t mrr_iter; /* Interator to traverse the range sequence */
+ range_seq_t mrr_iter; /* Iterator to traverse the range sequence */
RANGE_SEQ_IF mrr_funcs; /* Range sequence traversal functions */
HANDLER_BUFFER *multi_range_buffer; /* MRR buffer info */
uint ranges_in_seq; /* Total number of ranges in the traversed sequence */
@@ -3490,7 +3490,7 @@ public:
This method offers the storage engine, the possibility to store a reference
to a table name which is going to be used with query cache.
The method is called each time a statement is written to the cache and can
- be used to verify if a specific statement is cachable. It also offers
+ be used to verify if a specific statement is cacheable. It also offers
the possibility to register a generic (but static) call back function which
is called each time a statement is matched against the query cache.
diff --git a/sql/item.cc b/sql/item.cc
index 2915b0cfb4d..140eb5244ec 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -1427,7 +1427,7 @@ err:
bool Item::make_zero_date(MYSQL_TIME *ltime, ulonglong fuzzydate)
{
/*
- if the item was not null and convertion failed, we return a zero date
+ if the item was not null and conversion failed, we return a zero date
if allowed, otherwise - null.
*/
bzero((char*) ltime,sizeof(*ltime));
@@ -4719,7 +4719,7 @@ static bool mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current,
@note
We have to mark all items between current_sel (including) and
- last_select (excluding) as dependend (select before last_select should
+ last_select (excluding) as dependent (select before last_select should
be marked with actual table mask used by resolved item, all other with
OUTER_REF_TABLE_BIT) and also write dependence information to Item of
resolved identifier.
@@ -5095,7 +5095,7 @@ Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference)
bool upward_lookup= FALSE;
TABLE_LIST *table_list;
- /* Calulate the TABLE_LIST for the table */
+ /* Calculate the TABLE_LIST for the table */
table_list= (cached_table ? cached_table :
field_found && (*from_field) != view_ref_found ?
(*from_field)->table->pos_in_table_list : 0);
@@ -5824,7 +5824,7 @@ Item *Item_field::propagate_equal_fields(THD *thd,
but failed to create a valid DATE literal from the given string literal.
Do not do constant propagation in such cases and unlink
- "this" from the found Item_equal (as this equality not usefull).
+ "this" from the found Item_equal (as this equality not useful).
*/
item_equal= NULL;
return this;
@@ -7614,7 +7614,7 @@ bool Item_ref::fix_fields(THD *thd, Item **reference)
/*
Due to cache, find_field_in_tables() can return field which
doesn't belong to provided outer_context. In this case we have
- to find proper field context in order to fix field correcly.
+ to find proper field context in order to fix field correctly.
*/
do
{
@@ -7799,9 +7799,9 @@ Item* Item_ref::transform(THD *thd, Item_transformer transformer, uchar *arg)
callback functions.
First the function applies the analyzer to the Item_ref object. Then
- if the analizer succeeeds we first applies the compile method to the
+ if the analyzer succeeds we first apply the compile method to the
object the Item_ref object is referencing. If this returns a new
- item the old item is substituted for a new one. After this the
+ item the old item is substituted for a new one. After this the
transformer is applied to the Item_ref object itself.
The compile function is not called if the analyzer returns NULL
in the parameter arg_p.
@@ -10529,7 +10529,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item)
}
/**
- Calculate lenth for merging result for given Item type.
+ Calculate length for merging result for given Item type.
@param item Item for length detection
diff --git a/sql/item.h b/sql/item.h
index 7338c8be47b..4a761bfd70a 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -244,7 +244,7 @@ void dummy_error_processor(THD *thd, void *data);
void view_error_processor(THD *thd, void *data);
/*
- Instances of Name_resolution_context store the information necesary for
+ Instances of Name_resolution_context store the information necessary for
name resolution of Items and other context analysis of a query made in
fix_fields().
@@ -402,7 +402,7 @@ public:
Monotonicity is defined only for Item* trees that represent table
partitioning expressions (i.e. have no subselects/user vars/PS parameters
etc etc). An Item* tree is assumed to have the same monotonicity properties
- as its correspoinding function F:
+ as its corresponding function F:
[signed] longlong F(field1, field2, ...) {
put values of field_i into table record buffer;
@@ -746,7 +746,7 @@ protected:
return rc;
}
/*
- This method is used if the item was not null but convertion to
+ This method is used if the item was not null but conversion to
TIME/DATE/DATETIME failed. We return a zero date if allowed,
otherwise - null.
*/
@@ -947,7 +947,7 @@ public:
/*
real_type() is the type of base item. This is same as type() for
most items, except Item_ref() and Item_cache_wrapper() where it
- shows the type for the underlaying item.
+ shows the type for the underlying item.
*/
virtual enum Type real_type() const { return type(); }
@@ -1054,7 +1054,7 @@ public:
The caller can modify the returned String, if it's not marked
"const" (with the String::mark_as_const() method). That means that
if the item returns its own internal buffer (e.g. tmp_value), it
- *must* be marked "const" [1]. So normally it's preferrable to
+ *must* be marked "const" [1]. So normally it's preferable to
return the result value in the String, that was passed as an
argument. But, for example, SUBSTR() returns a String that simply
points into the buffer of SUBSTR()'s args[0]->val_str(). Such a
@@ -1431,7 +1431,7 @@ public:
@param cond_ptr[OUT] Store a replacement item here if the condition
can be simplified, e.g.:
WHERE part1 OR part2 OR part3
- with one of the partN evalutating to SEL_TREE::ALWAYS.
+ with one of the partN evaluating to SEL_TREE::ALWAYS.
*/
virtual SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr);
/*
@@ -2011,7 +2011,7 @@ public:
virtual bool is_outer_field() const { DBUG_ASSERT(fixed); return FALSE; }
/**
- Checks if this item or any of its decendents contains a subquery.
+ Checks if this item or any of its descendents contains a subquery.
*/
virtual bool has_subquery() const { return with_subselect; }
@@ -5209,7 +5209,7 @@ public:
This is the method that updates the cached value.
It must be explicitly called by the user of this class to store the value
- of the orginal item in the cache.
+ of the original item in the cache.
*/
virtual void copy() = 0;
diff --git a/sql/item_buff.cc b/sql/item_buff.cc
index c0fcb93cbab..bc888c2b679 100644
--- a/sql/item_buff.cc
+++ b/sql/item_buff.cc
@@ -192,7 +192,7 @@ bool Cached_item_field::cmp(void)
/*
If value is not null and value changed (from null to not null or
- becasue of value change), then copy the new value to buffer.
+ because of value change), then copy the new value to buffer.
*/
if (! null_value && (tmp || (tmp= (field->cmp(buff) != 0))))
field->get_image(buff,length,field->charset());
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index a28daf36f05..03648a323d5 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -1424,7 +1424,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, Item **ref)
@note
Item_in_optimizer should work as pass-through for
- subqueries that were processed by ALL/ANY->MIN/MAX rewrite
- - subqueries taht were originally EXISTS subqueries (and were coverted by
+ - subqueries that were originally EXISTS subqueries (and were converted by
the EXISTS->IN rewrite)
When Item_in_optimizer is not not working as a pass-through, it
@@ -2017,8 +2017,8 @@ longlong Item_func_interval::val_int()
interval_range *range= intervals + mid;
my_bool cmp_result;
/*
- The values in the range intervall may have different types,
- Only do a decimal comparision of the first argument is a decimal
+ The values in the range interval may have different types,
+ Only do a decimal comparison if the first argument is a decimal
and we are comparing against a decimal
*/
if (dec && range->type == DECIMAL_RESULT)
@@ -2684,7 +2684,7 @@ Item_func_nullif::fix_length_and_dec()
Some examples of what NULLIF can end up with after argument
substitution (we don't mention args[1] in some cases for simplicity):
- 1. l_expr is not an aggragate function:
+ 1. l_expr is not an aggregate function:
a. No conversion happened.
args[0] and args[2] were not replaced to something else
@@ -2808,7 +2808,7 @@ Item_func_nullif::fix_length_and_dec()
In this case we remember and reuse m_arg0 during EXECUTE time as args[2].
QQ: How to make sure that m_args0 does not point
- to something temporary which will be destoyed between PREPARE and EXECUTE.
+ to something temporary which will be destroyed between PREPARE and EXECUTE.
The condition below should probably be more strict and somehow check that:
- change_item_tree() was called for the new args[0]
- m_args0 is referenced from inside args[0], e.g. as a function argument,
@@ -3243,7 +3243,7 @@ bool Item_func_case::fix_length_and_dec()
If we'll do string comparison, we also need to aggregate
character set and collation for first/WHEN items and
install converters for some of them to cmp_collation when necessary.
- This is done because cmp_item compatators cannot compare
+ This is done because cmp_item comparators cannot compare
strings in two different character sets.
Some examples when we install converters:
@@ -6959,7 +6959,7 @@ Item* Item_equal::get_first(JOIN_TAB *context, Item *field_item)
and not ot2.col.
eliminate_item_equal() also has code that deals with equality substitution
- in presense of SJM nests.
+ in presence of SJM nests.
*/
TABLE_LIST *emb_nest;
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h
index 613fb75bcd6..cad179dff74 100644
--- a/sql/item_cmpfunc.h
+++ b/sql/item_cmpfunc.h
@@ -155,7 +155,7 @@ protected:
/*
Return the full select tree for "field_item" and "value":
- a single SEL_TREE if the field is not in a multiple equality, or
- - a conjuction of all SEL_TREEs for all fields from
+ - a conjunction of all SEL_TREEs for all fields from
the same multiple equality with "field_item".
*/
SEL_TREE *get_full_func_mm_tree(RANGE_OPT_PARAM *param,
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 4f8b83e530c..9d588ce0eb1 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -366,7 +366,7 @@ Item *Item_func::transform(THD *thd, Item_transformer transformer, uchar *argume
callback functions.
First the function applies the analyzer to the root node of
- the Item_func object. Then if the analizer succeeeds (returns TRUE)
+ the Item_func object. Then if the analyzer succeeds (returns TRUE)
the function recursively applies the compile method to each argument
of the Item_func node.
If the call of the method for an argument item returns a new item
@@ -1701,6 +1701,8 @@ my_decimal *Item_func_div::decimal_op(my_decimal *decimal_value)
null_value= 1;
return 0;
}
+ my_decimal_round(E_DEC_FATAL_ERROR, decimal_value,
+ decimals, FALSE, decimal_value);
return decimal_value;
}
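The two added lines round the decimal quotient to the item's declared number of fractional digits (decimals) before it is returned, rather than leaving it at the wider internal division scale. A rough illustration of the effect using plain doubles (hypothetical helper, not my_decimal_round()):

#include <cmath>
#include <iomanip>
#include <iostream>

// Hypothetical illustration with doubles, not MariaDB's my_decimal:
// round a quotient to a fixed number of fractional digits.
static double round_to_scale(double value, int decimals)
{
  double scale= std::pow(10.0, decimals);
  return std::round(value * scale) / scale;
}

int main()
{
  // e.g. a result declared with 4 fractional digits
  std::cout << std::setprecision(10) << round_to_scale(1.0 / 3.0, 4) << "\n";  // 0.3333
}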
@@ -1769,7 +1771,7 @@ bool Item_func_div::fix_length_and_dec()
case TIME_RESULT:
DBUG_ASSERT(0);
}
- maybe_null= 1; // devision by zero
+ maybe_null= 1; // division by zero
DBUG_RETURN(FALSE);
}
@@ -1843,7 +1845,7 @@ longlong Item_func_int_div::val_int()
bool Item_func_int_div::fix_length_and_dec()
{
Item_result argtype= args[0]->result_type();
- /* use precision ony for the data type it is applicable for and valid */
+ /* use precision only for the data type it is applicable for and valid */
uint32 char_length= args[0]->max_char_length() -
(argtype == DECIMAL_RESULT || argtype == INT_RESULT ?
args[0]->decimals : 0);
@@ -4822,7 +4824,7 @@ bool Item_func_set_user_var::register_field_in_bitmap(void *arg)
@param type type of new value
@param cs charset info for new value
@param dv derivation for new value
- @param unsigned_arg indiates if a value of type INT_RESULT is unsigned
+ @param unsigned_arg indicates if a value of type INT_RESULT is unsigned
@note Sets error and fatal error if allocation fails.
@@ -6567,7 +6569,7 @@ void my_missing_function_error(const LEX_STRING &token, const char *func_name)
@brief Initialize the result field by creating a temporary dummy table
and assign it to a newly created field object. Meta data used to
create the field is fetched from the sp_head belonging to the stored
- proceedure found in the stored procedure functon cache.
+ procedure found in the stored procedure function cache.
@note This function should be called from fix_fields to init the result
field. It is some what related to Item_field.
@@ -6871,7 +6873,7 @@ Item_func_sp::fix_fields(THD *thd, Item **ref)
/*
Here we check privileges of the stored routine only during view
creation, in order to validate the view. A runtime check is
- perfomed in Item_func_sp::execute(), and this method is not
+ performed in Item_func_sp::execute(), and this method is not
called during context analysis. Notice, that during view
creation we do not infer into stored routine bodies and do not
check privileges of its statements, which would probably be a
diff --git a/sql/item_inetfunc.cc b/sql/item_inetfunc.cc
index 3643daf903f..bb00ccf2771 100644
--- a/sql/item_inetfunc.cc
+++ b/sql/item_inetfunc.cc
@@ -206,7 +206,7 @@ String *Item_func_inet_str_base::val_str_ascii(String *buffer)
@return Completion status.
@retval false Given string does not represent an IPv4-address.
- @retval true The string has been converted sucessfully.
+ @retval true The string has been converted successfully.
@note The problem with inet_pton() is that it treats leading zeros in
IPv4-part differently on different platforms.
@@ -331,7 +331,7 @@ static bool str_to_ipv4(const char *str, size_t str_length, in_addr *ipv4_addres
@return Completion status.
@retval false Given string does not represent an IPv6-address.
- @retval true The string has been converted sucessfully.
+ @retval true The string has been converted successfully.
@note The problem with inet_pton() is that it treats leading zeros in
IPv4-part differently on different platforms.
@@ -677,7 +677,7 @@ static void ipv6_to_str(const in6_addr *ipv6, char *str)
@return Completion status.
@retval false Given string does not represent an IP-address.
- @retval true The string has been converted sucessfully.
+ @retval true The string has been converted successfully.
*/
bool Item_func_inet6_aton::calc_value(const String *arg, String *buffer)
@@ -717,7 +717,7 @@ bool Item_func_inet6_aton::calc_value(const String *arg, String *buffer)
@return Completion status.
@retval false The argument does not correspond to IP-address.
- @retval true The string has been converted sucessfully.
+ @retval true The string has been converted successfully.
*/
bool Item_func_inet6_ntoa::calc_value(const String *arg, String *buffer)
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 8738af7ac56..f5a8a649ac2 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -929,7 +929,7 @@ String *Item_func_concat_ws::val_str(String *str)
goto null; // Must be a blob
}
else if (res2 == &tmp_value)
- { // This can happend only 1 time
+ { // This can happen only 1 time
if (tmp_value.replace(0,0,*sep_str) || tmp_value.replace(0,0,*res))
goto null;
res= &tmp_value;
@@ -1079,7 +1079,7 @@ bool Item_func_reverse::fix_length_and_dec()
}
/**
- Replace all occurences of string2 in string1 with string3.
+ Replace all occurrences of string2 in string1 with string3.
Don't reallocate val_str() if not needed.
@@ -3944,7 +3944,7 @@ bool Item_func_export_set::fix_length_and_dec()
using in a SQL statement.
Adds a \\ before all characters that needs to be escaped in a SQL string.
- We also escape '^Z' (END-OF-FILE in windows) to avoid probelms when
+ We also escape '^Z' (END-OF-FILE in windows) to avoid problems when
running commands from a file in windows.
This function is very useful when you want to generate SQL statements.
diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc
index 92a86063d04..0499a677be9 100644
--- a/sql/item_subselect.cc
+++ b/sql/item_subselect.cc
@@ -1111,12 +1111,12 @@ void Item_singlerow_subselect::reset()
/**
@todo
- - We cant change name of Item_field or Item_ref, because it will
- prevent it's correct resolving, but we should save name of
+ - We can't change name of Item_field or Item_ref, because it will
+ prevent its correct resolving, but we should save name of
removed item => we do not make optimization if top item of
list is field or reference.
- switch off this optimization for prepare statement,
- because we do not rollback this changes.
+ because we do not rollback these changes.
Make rollback for it, or special name resolving mode in 5.0.
@param join Join object of the subquery (i.e. 'child' join).
@@ -1139,8 +1139,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join)
select_lex->item_list.elements == 1 &&
!select_lex->item_list.head()->with_sum_func &&
/*
- We cant change name of Item_field or Item_ref, because it will
- prevent it's correct resolving, but we should save name of
+ We can't change name of Item_field or Item_ref, because it will
+ prevent its correct resolving, but we should save name of
removed item => we do not make optimization if top item of
list is field or reference.
TODO: solve above problem
@@ -1631,7 +1631,7 @@ longlong Item_exists_subselect::val_int()
Return the result of EXISTS as a string value
Converts the true/false result into a string value.
- Note that currently this cannot be NULL, so if the query exection fails
+ Note that currently this cannot be NULL, so if the query execution fails
it will return 0.
@param decimal_value[out] buffer to hold the resulting string value
@@ -1654,7 +1654,7 @@ String *Item_exists_subselect::val_str(String *str)
Return the result of EXISTS as a decimal value
Converts the true/false result into a decimal value.
- Note that currently this cannot be NULL, so if the query exection fails
+ Note that currently this cannot be NULL, so if the query execution fails
it will return 0.
@param decimal_value[out] Buffer to hold the resulting decimal value
@@ -2350,7 +2350,7 @@ Item_in_subselect::row_value_transformer(JOIN *join)
is_not_null_test(v3))
where is_not_null_test registers NULLs values but reject rows.
- in case when we do not need correct NULL, we have simplier construction:
+ in case when we do not need correct NULL, we have simpler construction:
EXISTS (SELECT ... WHERE where and
(l1 = v1) and
(l2 = v2) and
@@ -2753,6 +2753,8 @@ bool Item_exists_subselect::select_prepare_to_be_in()
Check if 'func' is an equality in form "inner_table.column = outer_expr"
@param func Expression to check
+ @param allow_subselect If true, the outer_expr part can have a subquery
+ If false, it cannot.
@param local_field OUT Return "inner_table.column" here
@param outer_expr OUT Return outer_expr here
@@ -2760,6 +2762,7 @@ bool Item_exists_subselect::select_prepare_to_be_in()
*/
static bool check_equality_for_exist2in(Item_func *func,
+ bool allow_subselect,
Item_ident **local_field,
Item **outer_exp)
{
@@ -2770,7 +2773,8 @@ static bool check_equality_for_exist2in(Item_func *func,
args= func->arguments();
if (args[0]->real_type() == Item::FIELD_ITEM &&
args[0]->all_used_tables() != OUTER_REF_TABLE_BIT &&
- args[1]->all_used_tables() == OUTER_REF_TABLE_BIT)
+ args[1]->all_used_tables() == OUTER_REF_TABLE_BIT &&
+ (allow_subselect || !args[1]->has_subquery()))
{
/* It is Item_field or Item_direct_view_ref) */
DBUG_ASSERT(args[0]->type() == Item::FIELD_ITEM ||
@@ -2781,7 +2785,8 @@ static bool check_equality_for_exist2in(Item_func *func,
}
else if (args[1]->real_type() == Item::FIELD_ITEM &&
args[1]->all_used_tables() != OUTER_REF_TABLE_BIT &&
- args[0]->all_used_tables() == OUTER_REF_TABLE_BIT)
+ args[0]->all_used_tables() == OUTER_REF_TABLE_BIT &&
+ (allow_subselect || !args[0]->has_subquery()))
{
/* It is Item_field or Item_direct_view_ref) */
DBUG_ASSERT(args[1]->type() == Item::FIELD_ITEM ||
@@ -2810,6 +2815,13 @@ typedef struct st_eq_field_outer
outer1=inner_tbl1.col1 AND ... AND outer2=inner_tbl1.col2 AND remainder_cond
+ if there is just one outer_expr=inner_expr pair, then outer_expr can have a
+ subselect in it. If there are many such pairs, then none of outer_expr can
+ have a subselect in it. If we allow this, the query will fail with an error:
+
+ This version of MariaDB doesn't yet support 'SUBQUERY in ROW in left
+ expression of IN/ALL/ANY'
+
@param conds Condition to be checked
@parm result Array to collect EQ_FIELD_OUTER elements describing
inner-vs-outer equalities the function has found.
@@ -2827,14 +2839,17 @@ static bool find_inner_outer_equalities(Item **conds,
{
List_iterator<Item> li(*((Item_cond*)*conds)->argument_list());
Item *item;
+ bool allow_subselect= true;
while ((item= li++))
{
if (item->type() == Item::FUNC_ITEM &&
check_equality_for_exist2in((Item_func *)item,
+ allow_subselect,
&element.local_field,
&element.outer_exp))
{
found= TRUE;
+ allow_subselect= false;
element.eq_ref= li.ref();
if (result.append(element))
goto alloc_err;
@@ -2843,6 +2858,7 @@ static bool find_inner_outer_equalities(Item **conds,
}
else if ((*conds)->type() == Item::FUNC_ITEM &&
check_equality_for_exist2in((Item_func *)*conds,
+ true,
&element.local_field,
&element.outer_exp))
{
@@ -3205,7 +3221,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join)
/*
In some optimisation cases we will not need this Item_in_optimizer
object, but we can't know it here, but here we need address correct
- reference on left expresion.
+ reference on left expression.
note: we won't need Item_in_optimizer when handling degenerate cases
like "... IN (SELECT 1)"
@@ -3237,7 +3253,7 @@ Item_in_subselect::select_in_like_transformer(JOIN *join)
and all that items do not make permanent changes in current item arena
which allow to us call them with changed arena (if we do not know nature
of Item, we have to call fix_fields() for it only with original arena to
- avoid memory leack)
+ avoid memory leak)
*/
if (left_expr->cols() == 1)
trans_res= single_value_transformer(join);
@@ -3400,7 +3416,7 @@ bool Item_in_subselect::setup_mat_engine()
/*
The select_engine (that executes transformed IN=>EXISTS subselects) is
- pre-created at parse time, and is stored in statment memory (preserved
+ pre-created at parse time, and is stored in statement memory (preserved
across PS executions).
*/
DBUG_ASSERT(engine->engine_type() == subselect_engine::SINGLE_SELECT_ENGINE);
@@ -3871,7 +3887,7 @@ int subselect_single_select_engine::exec()
For at least one of the pushed predicates the following is true:
We should not apply optimizations based on the condition that was
pushed down into the subquery. Those optimizations are ref[_or_null]
- acceses. Change them to be full table scans.
+ accesses. Change them to be full table scans.
*/
JOIN_TAB *tab;
for (tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
@@ -6108,7 +6124,7 @@ int subselect_partial_match_engine::exec()
if (has_covering_null_row)
{
/*
- If there is a NULL-only row that coveres all columns the result of IN
+ If there is a NULL-only row that covers all columns the result of IN
is UNKNOWN.
*/
item_in->value= 0;
@@ -6308,7 +6324,7 @@ subselect_rowid_merge_engine::init(MY_BITMAP *non_null_key_parts,
for (uint i= (non_null_key ? 1 : 0); i < merge_keys_count; i++)
{
/*
- Check if the first and only indexed column contains NULL in the curent
+ Check if the first and only indexed column contains NULL in the current
row, and add the row number to the corresponding key.
*/
if (merge_keys[i]->get_field(0)->is_null())
@@ -6520,7 +6536,7 @@ bool subselect_rowid_merge_engine::partial_match()
}
/*
- If all nullable columns contain only NULLs, then there is a guranteed
+ If all nullable columns contain only NULLs, then there is a guaranteed
partial match, and we don't need to search for a matching row.
*/
if (has_covering_null_columns)
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index 0ff7ee4997e..23bdeacade9 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -560,7 +560,7 @@ public:
bool jtbm_const_row_found;
/*
- TRUE<=>this is a flattenable semi-join, false overwise.
+ TRUE<=>this is a flattenable semi-join, false otherwise.
*/
bool is_flattenable_semijoin;
@@ -987,7 +987,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
/* FALSE for 'ref', TRUE for 'ref-or-null'. */
bool check_null;
/*
- The "having" clause. This clause (further reffered to as "artificial
+ The "having" clause. This clause (further referred to as "artificial
having") was inserted by subquery transformation code. It contains
Item(s) that have a side-effect: they record whether the subquery has
produced a row with NULL certain components. We need to use it for cases
@@ -1008,7 +1008,7 @@ class subselect_indexsubquery_engine: public subselect_uniquesubquery_engine
However, subqueries like the above are currently not handled by index
lookup-based subquery engines, the engine applicability check misses
them: it doesn't switch the engine for case of artificial having and
- [eq_]ref access (only for artifical having + ref_or_null or no having).
+ [eq_]ref access (only for artificial having + ref_or_null or no having).
The above example subquery is handled as a full-blown SELECT with eq_ref
access to one table.
@@ -1079,7 +1079,7 @@ public:
*/
JOIN *materialize_join;
/*
- A conjunction of all the equality condtions between all pairs of expressions
+ A conjunction of all the equality conditions between all pairs of expressions
that are arguments of an IN predicate. We need these to post-filter some
IN results because index lookups sometimes match values that are actually
not equal to the search key in SQL terms.
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 06c01c58948..e50822e71f2 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -697,7 +697,7 @@ int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2)
C_MODE_START
-/* Declarations for auxilary C-callbacks */
+/* Declarations for auxiliary C-callbacks */
int simple_raw_key_cmp(void* arg, const void* key1, const void* key2)
{
@@ -729,7 +729,7 @@ C_MODE_END
@param thd Thread descriptor
@return status
@retval FALSE success
- @retval TRUE faliure
+ @retval TRUE failure
Prepares Aggregator_distinct to process the incoming stream.
Creates the temporary table and the Unique class if needed.
@@ -1184,7 +1184,7 @@ Item_sum_hybrid::fix_fields(THD *thd, Item **ref)
};
if (!is_window_func_sum_expr())
setup_hybrid(thd, args[0], NULL);
- /* MIN/MAX can return NULL for empty set indepedent of the used column */
+ /* MIN/MAX can return NULL for empty set independent of the used column */
maybe_null= 1;
result_field=0;
null_value=1;
@@ -1660,7 +1660,7 @@ void Item_sum_count::cleanup()
/*
- Avgerage
+ Average
*/
bool Item_sum_avg::fix_length_and_dec()
{
@@ -1906,7 +1906,7 @@ bool Item_sum_variance::fix_length_and_dec()
/*
According to the SQL2003 standard (Part 2, Foundations; sec 10.9,
aggregate function; paragraph 7h of Syntax Rules), "the declared
- type of the result is an implementation-defined aproximate numeric
+ type of the result is an implementation-defined approximate numeric
type.
*/
@@ -1997,7 +1997,7 @@ double Item_sum_variance::val_real()
is one or zero. If it's zero, i.e. a population variance, then we only
set nullness when the count is zero.
- Another way to read it is that 'sample' is the numerical threshhold, at and
+ Another way to read it is that 'sample' is the numerical threshold, at and
below which a 'count' number of items is called NULL.
*/
DBUG_ASSERT((sample == 0) || (sample == 1));
@@ -3723,7 +3723,7 @@ bool Item_func_group_concat::setup(THD *thd)
{
/*
Force the create_tmp_table() to convert BIT columns to INT
- as we cannot compare two table records containg BIT fields
+ as we cannot compare two table records containing BIT fields
stored in the the tree used for distinct/order by.
Moreover we don't even save in the tree record null bits
where BIT fields store parts of their data.
diff --git a/sql/item_sum.h b/sql/item_sum.h
index 290aa5c50e3..4be8ea58742 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -251,7 +251,7 @@ class Window_spec;
The field 'aggr_level' is to contain the nest level of the subquery
where the set function is aggregated.
- The field 'max_arg_level' is for the maximun of the nest levels of the
+ The field 'max_arg_level' is for the maximum of the nest levels of the
unbound column references occurred in the set function. A column reference
is unbound within a set function if it is not bound by any subquery
used as a subexpression in this function. A column reference is bound by
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index f2f3fdaafe3..194933a4c54 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -452,7 +452,7 @@ err:
/**
- Create a formated date/time value in a string.
+ Create a formatted date/time value in a string.
*/
static bool make_date_time(const LEX_CSTRING &format, MYSQL_TIME *l_time,
@@ -1041,7 +1041,7 @@ uint week_mode(uint mode)
a date at start of january) In this case one can get 53 for the
first week of next year. This flag ensures that the week is
relevant for the given year. Note that this flag is only
- releveant if WEEK_JANUARY is not set.
+ relevant if WEEK_JANUARY is not set.
If set Week is in range 1-53.
@@ -1357,7 +1357,7 @@ bool get_interval_value(Item *args,interval_type int_type, INTERVAL *interval)
if (!(res= args->val_str_ascii(&str_value)))
return (1);
- /* record negative intervalls in interval->neg */
+ /* record negative intervals in interval->neg */
str=res->ptr();
cs= res->charset();
const char *end=str+res->length();
@@ -1608,7 +1608,7 @@ bool Item_func_from_days::get_date(MYSQL_TIME *ltime, ulonglong fuzzy_date)
/**
- Converts current time in my_time_t to MYSQL_TIME represenatation for local
+ Converts current time in my_time_t to MYSQL_TIME representation for local
time zone. Defines time zone (local) used for whole CURDATE function.
*/
void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
@@ -1619,7 +1619,7 @@ void Item_func_curdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
/**
- Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
+ Converts current time in my_time_t to MYSQL_TIME representation for UTC
time zone. Defines time zone (UTC) used for whole UTC_DATE function.
*/
void Item_func_curdate_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
@@ -1699,7 +1699,7 @@ static void set_sec_part(ulong sec_part, MYSQL_TIME *ltime, Item *item)
}
/**
- Converts current time in my_time_t to MYSQL_TIME represenatation for local
+ Converts current time in my_time_t to MYSQL_TIME representation for local
time zone. Defines time zone (local) used for whole CURTIME function.
*/
void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
@@ -1713,7 +1713,7 @@ void Item_func_curtime_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
/**
- Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
+ Converts current time in my_time_t to MYSQL_TIME representation for UTC
time zone. Defines time zone (UTC) used for whole UTC_TIME function.
*/
void Item_func_curtime_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
@@ -1768,7 +1768,7 @@ int Item_func_now_local::save_in_field(Field *field, bool no_conversions)
/**
- Converts current time in my_time_t to MYSQL_TIME represenatation for local
+ Converts current time in my_time_t to MYSQL_TIME representation for local
time zone. Defines time zone (local) used for whole NOW function.
*/
void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
@@ -1780,7 +1780,7 @@ void Item_func_now_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
/**
- Converts current time in my_time_t to MYSQL_TIME represenatation for UTC
+ Converts current time in my_time_t to MYSQL_TIME representation for UTC
time zone. Defines time zone (UTC) used for whole UTC_TIMESTAMP function.
*/
void Item_func_now_utc::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
@@ -1811,7 +1811,7 @@ bool Item_func_now::get_date(MYSQL_TIME *res,
/**
- Converts current time in my_time_t to MYSQL_TIME represenatation for local
+ Converts current time in my_time_t to MYSQL_TIME representation for local
time zone. Defines time zone (local) used for whole SYSDATE function.
*/
void Item_func_sysdate_local::store_now_in_TIME(THD *thd, MYSQL_TIME *now_time)
@@ -2122,7 +2122,7 @@ bool Item_func_convert_tz::get_date(MYSQL_TIME *ltime,
uint not_used;
my_time_tmp= from_tz->TIME_to_gmt_sec(ltime, &not_used);
ulong sec_part= ltime->second_part;
- /* my_time_tmp is guranteed to be in the allowed range */
+ /* my_time_tmp is guaranteed to be in the allowed range */
if (my_time_tmp)
to_tz->gmt_sec_to_TIME(ltime, my_time_tmp);
/* we rely on the fact that no timezone conversion can change sec_part */
@@ -2559,7 +2559,7 @@ bool Item_char_typecast::fix_length_and_dec()
uint32 char_length;
/*
We always force character set conversion if cast_cs
- is a multi-byte character set. It garantees that the
+ is a multi-byte character set. It guarantees that the
result of CAST is a well-formed string.
For single-byte character sets we allow just to copy
from the argument. A single-byte character sets string
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index f43402dfe48..f0d20b045d0 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -64,7 +64,7 @@ typedef struct my_xml_node_st
} MY_XML_NODE;
-/* Lexical analizer token */
+/* Lexical analyzer token */
typedef struct my_xpath_lex_st
{
int term; /* token type, see MY_XPATH_LEX_XXXXX below */
@@ -1101,7 +1101,7 @@ static Item* nametestfunc(MY_XPATH *xpath,
/*
- Tokens consisting of one character, for faster lexical analizer.
+ Tokens consisting of one character, for faster lexical analyzer.
*/
static char simpletok[128]=
{
@@ -1421,7 +1421,7 @@ my_xpath_function(const char *beg, const char *end)
}
-/* Initialize a lex analizer token */
+/* Initialize a lex analyzer token */
static void
my_xpath_lex_init(MY_XPATH_LEX *lex,
const char *str, const char *strend)
@@ -1452,7 +1452,7 @@ my_xdigit(int c)
SYNOPSYS
Scan the next token from the input.
lex->term is set to the scanned token type.
- lex->beg and lex->end are set to the beginnig
+ lex->beg and lex->end are set to the beginning
and to the end of the token.
RETURN
N/A
@@ -1478,7 +1478,7 @@ my_xpath_lex_scan(MY_XPATH *xpath,
(const uchar*) end)) > 0 &&
((ctype & (_MY_L | _MY_U)) || *beg == '_'))
{
- // scan untill the end of the idenfitier
+ // scan until the end of the identifier
for (beg+= length;
(length= xpath->cs->cset->ctype(xpath->cs, &ctype,
(const uchar*) beg,
@@ -1607,7 +1607,7 @@ static int my_xpath_parse_AxisName(MY_XPATH *xpath)
** Grammar rules, according to http://www.w3.org/TR/xpath
** Implemented using recursive descendant method.
** All the following grammar processing functions accept
-** a signle "xpath" argument and return 1 on success and 0 on error.
+** a single "xpath" argument and return 1 on success and 0 on error.
** They also modify "xpath" argument by creating new items.
*/
@@ -2487,7 +2487,7 @@ static int my_xpath_parse_UnaryExpr(MY_XPATH *xpath)
as it is in conflict with abbreviated step.
1 + .123 does not work,
1 + 0.123 does.
- Perhaps it is better to move this code into lex analizer.
+ Perhaps it is better to move this code into lex analyzer.
RETURN
1 - success
@@ -2838,7 +2838,7 @@ append_node(String *str, MY_XML_NODE *node)
SYNOPSYS
A call-back function executed when XML parser
- is entering a tag or an attribue.
+ is entering a tag or an attribute.
Appends the new node into data->pxml.
Increments data->level.
@@ -2874,7 +2874,7 @@ int xml_enter(MY_XML_PARSER *st,const char *attr, size_t len)
SYNOPSYS
A call-back function executed when XML parser
- is entering into a tag or an attribue textual value.
+ is entering into a tag or an attribute textual value.
The value is appended into data->pxml.
RETURN
@@ -2902,7 +2902,7 @@ int xml_value(MY_XML_PARSER *st,const char *attr, size_t len)
SYNOPSYS
A call-back function executed when XML parser
- is leaving a tag or an attribue.
+ is leaving a tag or an attribute.
Decrements data->level.
RETURN
diff --git a/sql/key.cc b/sql/key.cc
index 0eeb256dd87..18806efc18f 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -228,7 +228,7 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info,
{
/*
This in fact never happens, as we have only partial BLOB
- keys yet anyway, so it's difficult to find any sence to
+ keys yet anyway, so it's difficult to find any sense to
restore the part of a record.
Maybe this branch is to be removed, but now we
have to ignore GCov compaining.
@@ -610,8 +610,8 @@ int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec)
max length. The exceptions are the BLOB and VARCHAR field types
that take the max length into account.
*/
- if ((result= field->cmp_max(field->ptr+first_diff, field->ptr+sec_diff,
- key_part->length)))
+ if ((result= field->cmp_prefix(field->ptr+first_diff, field->ptr+sec_diff,
+ key_part->length)))
DBUG_RETURN(result);
next_loop:
key_part++;
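(Toy illustration, assuming a plain byte comparison rather than the real Field::cmp_prefix() semantics, of what the hunk above changes: the two field images are compared only up to the key part's defined length.)

#include <cstdio>
#include <cstring>

static int prefix_cmp(const unsigned char *a, const unsigned char *b, size_t prefix_len)
{
  return std::memcmp(a, b, prefix_len);   // look at the key prefix only
}

int main()
{
  const unsigned char rec1[]= "abcdef";
  const unsigned char rec2[]= "abcxyz";
  // a 3-byte prefix key treats these as equal although the full values differ
  std::printf("%d\n", prefix_cmp(rec1, rec2, 3));   // prints 0
  return 0;
}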
diff --git a/sql/lex.h b/sql/lex.h
index 8efaff665ac..ca7c6635329 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -23,7 +23,7 @@
#include "lex_symbol.h"
SYM_GROUP sym_group_common= {"", ""};
-SYM_GROUP sym_group_geom= {"Spatial extentions", "HAVE_SPATIAL"};
+SYM_GROUP sym_group_geom= {"Spatial extensions", "HAVE_SPATIAL"};
SYM_GROUP sym_group_rtree= {"RTree keys", "HAVE_RTREE_KEYS"};
/* We don't want to include sql_yacc.h into gen_lex_hash */
diff --git a/sql/lock.cc b/sql/lock.cc
index 92e1d2d80fd..c882a84f3b8 100644
--- a/sql/lock.cc
+++ b/sql/lock.cc
@@ -1099,7 +1099,7 @@ bool Global_read_lock::make_global_read_lock_block_commit(THD *thd)
MDL_request mdl_request;
DBUG_ENTER("make_global_read_lock_block_commit");
/*
- If we didn't succeed lock_global_read_lock(), or if we already suceeded
+ If we didn't succeed lock_global_read_lock(), or if we already succeeded
make_global_read_lock_block_commit(), do nothing.
*/
diff --git a/sql/log.cc b/sql/log.cc
index 9865357a4d2..cb484210c50 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -149,7 +149,7 @@ void setup_log_handling()
/**
purge logs, master and slave sides both, related error code
- convertor.
+ converter.
Called from @c purge_error_message(), @c MYSQL_BIN_LOG::reset_logs()
@param res an internal to purging routines error code
@@ -358,7 +358,7 @@ public:
never zero.
This is done while calling the constructor binlog_cache_mngr.
- We cannot set informaton in the constructor binlog_cache_data
+ We cannot set information in the constructor binlog_cache_data
because the space for binlog_cache_mngr is allocated through
a placement new.
@@ -2941,7 +2941,7 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
mysql_mutex_lock(&LOCK_log);
if (is_open())
- { // Safety agains reopen
+ { // Safety against reopen
int tmp_errno= 0;
char buff[80], *end;
char query_time_buff[22+7], lock_time_buff[22+7];
@@ -3222,7 +3222,7 @@ void MYSQL_BIN_LOG::cleanup()
/*
Free data for global binlog state.
- We can't do that automaticly as we need to do this before
+ We can't do that automatically as we need to do this before
safemalloc is shut down
*/
if (!is_relay_log)
@@ -3993,7 +3993,7 @@ err:
/**
- Delete all logs refered to in the index file.
+ Delete all logs referred to in the index file.
The new index file will only contain this file.
@@ -5556,7 +5556,7 @@ binlog_cache_mngr *THD::binlog_setup_trx_data()
- Start a statement transaction to allow us to truncate the cache.
- - Save the currrent binlog position so that we can roll back the
+ - Save the current binlog position so that we can roll back the
statement by truncating the cache.
We only update the saved position if the old one was undefined,
@@ -6754,7 +6754,7 @@ static const char* get_first_binlog(char* buf_arg)
}
if (normalize_binlog_name(buf_arg, fname, false))
{
- errmsg= "cound not normalize the first file name in the binlog index";
+ errmsg= "could not normalize the first file name in the binlog index";
goto end;
}
end:
@@ -9754,7 +9754,7 @@ TC_LOG_BINLOG::mark_xid_done(ulong binlog_id, bool write_checkpoint)
than compare all found against each other to find the one pointing to the
most recent binlog.
- Note also that we need to first release LOCK_xid_list, then aquire
+ Note also that we need to first release LOCK_xid_list, then acquire
LOCK_log, then re-aquire LOCK_xid_list. If we were to take LOCK_log while
holding LOCK_xid_list, we might deadlock with other threads that take the
locks in the opposite order.
@@ -9839,7 +9839,7 @@ TC_LOG_BINLOG::commit_checkpoint_notify(void *cookie)
necessary stuff.
In the future, this thread could also be used to do log rotation in the
- background, which could elimiate all stalls around binlog rotations.
+ background, which could eliminate all stalls around binlog rotations.
*/
pthread_handler_t
binlog_background_thread(void *arg __attribute__((unused)))
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 1c59cca8e83..26c9cb6f9cb 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -4205,7 +4205,7 @@ get_str_len_and_pointer(const Log_event::Byte **src,
const Log_event::Byte *end)
{
if (*src >= end)
- return -1; // Will be UINT_MAX in two-complement arithmetics
+    return -1; // Will be UINT_MAX in two's complement arithmetic
uint length= **src;
if (length > 0)
{
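(Tiny self-contained illustration, not server code, of the comment fixed above: a -1 return value read back as an unsigned int becomes UINT_MAX.)

#include <climits>
#include <cstdio>

static int str_len_or_error(bool past_end) { return past_end ? -1 : 42; }

int main()
{
  unsigned int len= (unsigned int) str_len_or_error(true);
  std::printf("len == UINT_MAX: %d\n", len == UINT_MAX);   // prints 1
  return 0;
}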
@@ -4571,7 +4571,7 @@ Query_log_event::Query_log_event(const char* buf, uint event_len,
/* A 2nd variable part; this is common to all versions */
memcpy((char*) start, end, data_len); // Copy db and query
- start[data_len]= '\0'; // End query with \0 (For safetly)
+ start[data_len]= '\0'; // End query with \0 (For safety)
db= (char *)start;
query= (char *)(start + db_len + 1);
q_len= data_len - db_len -1;
@@ -6225,7 +6225,7 @@ int Format_description_log_event::do_update_pos(rpl_group_info *rgi)
If we do not skip stepping the group log position (and the
server id was changed when restarting the server), it might well
be that we start executing at a position that is invalid, e.g.,
- at a Rows_log_event or a Query_log_event preceeded by a
+ at a Rows_log_event or a Query_log_event preceded by a
Intvar_log_event instead of starting at a Table_map_log_event or
the Intvar_log_event respectively.
*/
@@ -6337,7 +6337,7 @@ Format_description_log_event::is_version_before_checksum(const master_version_sp
@return the version-safe checksum alg descriptor where zero
designates no checksum, 255 - the orginator is
- checksum-unaware (effectively no checksum) and the actuall
+ checksum-unaware (effectively no checksum) and the actual
[1-254] range alg descriptor.
*/
enum enum_binlog_checksum_alg get_checksum_alg(const char* buf, ulong len)
@@ -7043,7 +7043,7 @@ int Load_log_event::do_apply_event(NET* net, rpl_group_info *rgi,
/*
When replication is running fine, if it was DUP_ERROR on the
master then we could choose IGNORE here, because if DUP_ERROR
- suceeded on master, and data is identical on the master and slave,
+ succeeded on master, and data is identical on the master and slave,
then there should be no uniqueness errors on slave, so IGNORE is
the same as DUP_ERROR. But in the unlikely case of uniqueness errors
(because the data on the master and slave happen to be different
@@ -7580,7 +7580,7 @@ Gtid_log_event::Gtid_log_event(THD *thd_arg, uint64 seq_no_arg,
/*
Used to record GTID while sending binlog to slave, without having to
- fully contruct every Gtid_log_event() needlessly.
+ fully construct every Gtid_log_event() needlessly.
*/
bool
Gtid_log_event::peek(const char *event_start, size_t event_len,
@@ -8103,7 +8103,7 @@ Gtid_list_log_event::print(FILE *file, PRINT_EVENT_INFO *print_event_info)
/*
Used to record gtid_list event while sending binlog to slave, without having to
- fully contruct the event object.
+ fully construct the event object.
*/
bool
Gtid_list_log_event::peek(const char *event_start, uint32 event_len,
@@ -8183,7 +8183,7 @@ Intvar_log_event::Intvar_log_event(const char* buf,
const Format_description_log_event* description_event)
:Log_event(buf, description_event)
{
- /* The Post-Header is empty. The Varible Data part begins immediately. */
+ /* The Post-Header is empty. The Variable Data part begins immediately. */
buf+= description_event->common_header_len +
description_event->post_header_len[INTVAR_EVENT-1];
type= buf[I_TYPE_OFFSET];
@@ -9421,7 +9421,7 @@ void Create_file_log_event::pack_info(Protocol *protocol)
/**
Create_file_log_event::do_apply_event()
- Constructor for Create_file_log_event to intantiate an event
+ Constructor for Create_file_log_event to instantiate an event
from the relay log on the slave.
@retval
@@ -10471,7 +10471,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
DBUG_VOID_RETURN;
}
- /* if my_bitmap_init fails, catched in is_valid() */
+ /* if my_bitmap_init fails, caught in is_valid() */
if (likely(!my_bitmap_init(&m_cols,
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
m_width,
@@ -10888,7 +10888,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
*/
{
- DBUG_PRINT("debug", ("Checking compability of tables to lock - tables_to_lock: %p",
+ DBUG_PRINT("debug", ("Checking compatibility of tables to lock - tables_to_lock: %p",
rgi->tables_to_lock));
/**
@@ -10943,7 +10943,7 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
ptr->table->s->table_name.str));
/*
We should not honour --slave-skip-errors at this point as we are
- having severe errors which should not be skiped.
+ having severe errors which should not be skipped.
*/
thd->is_slave_error= 1;
/* remove trigger's tables */
@@ -11324,7 +11324,7 @@ static int rows_event_stmt_cleanup(rpl_group_info *rgi, THD * thd)
/**
The method either increments the relay log position or
commits the current statement and increments the master group
- possition if the event is STMT_END_F flagged and
+ position if the event is STMT_END_F flagged and
the statement corresponds to the autocommit query (i.e replicated
without wrapping in BEGIN/COMMIT)
@@ -11470,7 +11470,7 @@ public:
/**
Print an event "body" cache to @c file possibly in two fragments.
- Each fragement is optionally per @c do_wrap to produce an SQL statement.
+ Each fragment is optionally per @c do_wrap to produce an SQL statement.
@param file a file to print to
@param body the "body" IO_CACHE of event
@@ -13185,7 +13185,7 @@ record_compare_exit:
Find the best key to use when locating the row in @c find_row().
A primary key is preferred if it exists; otherwise a unique index is
- preferred. Else we pick the index with the smalles rec_per_key value.
+ preferred. Else we pick the index with the smallest rec_per_key value.
If a suitable key is found, set @c m_key, @c m_key_nr and @c m_key_info
member fields appropriately.
@@ -13318,7 +13318,7 @@ static int row_not_found_error(rpl_group_info *rgi)
Locate the current row in event's table.
The current row is pointed by @c m_curr_row. Member @c m_width tells
- how many columns are there in the row (this can be differnet from
+ how many columns are there in the row (this can be different from
the number of columns in the table). It is assumed that event's
table is already open and pointed by @c m_table.
@@ -13359,7 +13359,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
rpl_row_tabledefs.test specifies that
if the extra field on the slave does not have a default value
and this is okay with Delete or Update events.
- Todo: fix wl3228 hld that requires defauls for all types of events
+ Todo: fix wl3228 hld that requires defaults for all types of events
*/
prepare_record(table, m_width, FALSE);
@@ -13603,7 +13603,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi)
while (record_compare(table));
/*
- Note: above record_compare will take into accout all record fields
+ Note: above record_compare will take into account all record fields
which might be incorrect in case a partial row was given in the event
*/
diff --git a/sql/log_event.h b/sql/log_event.h
index 863d81c047a..1fae201057f 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -455,7 +455,7 @@ class String;
/**
@def LOG_EVENT_ARTIFICIAL_F
- Artificial events are created arbitarily and not written to binary
+ Artificial events are created arbitrarily and not written to binary
log
These events should not update the master log position when slave
@@ -932,13 +932,13 @@ private:
};
/**
- the struct aggregates two paramenters that identify an event
+ the struct aggregates two parameters that identify an event
uniquely in scope of communication of a particular master and slave couple.
I.e there can not be 2 events from the same staying connected master which
have the same coordinates.
@note
Such identifier is not yet unique generally as the event originating master
- is resetable. Also the crashed master can be replaced with some other.
+ is resettable. Also the crashed master can be replaced with some other.
*/
typedef struct event_coordinates
{
@@ -2730,7 +2730,7 @@ public:
uint8 number_of_event_types;
/*
The list of post-headers' lengths followed
- by the checksum alg decription byte
+ by the checksum alg description byte
*/
uint8 *post_header_len;
struct master_version_split {
@@ -3070,7 +3070,7 @@ public:
*/
bool is_deferred() { return deferred; }
/*
- In case of the deffered applying the variable instance is flagged
+ In case of the deferred applying the variable instance is flagged
and the parsing time query id is stored to be used at applying time.
*/
void set_deferred(query_id_t qid) { deferred= true; query_id= qid; }
@@ -3564,7 +3564,7 @@ public:
bool write_data_header();
bool write_data_body();
/*
- Cut out Create_file extentions and
+ Cut out Create_file extensions and
write it as Load event - used on the slave
*/
bool write_base();
@@ -4938,7 +4938,7 @@ private:
/**
@class Incident_log_event
- Class representing an incident, an occurance out of the ordinary,
+  Class representing an incident, an occurrence out of the ordinary,
that happened on the master.
The event is used to inform the slave that something out of the
@@ -4982,7 +4982,7 @@ public:
m_message.str= NULL; /* Just as a precaution */
m_message.length= 0;
set_direct_logging();
- /* Replicate the incident irregardless of @@skip_replication. */
+ /* Replicate the incident regardless of @@skip_replication. */
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
DBUG_VOID_RETURN;
}
@@ -5003,7 +5003,7 @@ public:
strmake(m_message.str, msg.str, msg.length);
m_message.length= msg.length;
set_direct_logging();
- /* Replicate the incident irregardless of @@skip_replication. */
+ /* Replicate the incident regardless of @@skip_replication. */
flags&= ~LOG_EVENT_SKIP_REPLICATION_F;
DBUG_VOID_RETURN;
}
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 55d8660abad..b655d510bd5 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -848,7 +848,7 @@ int Write_rows_log_event_old::do_after_row_operations(TABLE *table, int error)
table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
table->file->extra(HA_EXTRA_WRITE_CANNOT_REPLACE);
/*
- reseting the extra with
+ resetting the extra with
table->file->extra(HA_EXTRA_NO_IGNORE_NO_KEY);
fires bug#27077
todo: explain or fix
@@ -1240,7 +1240,7 @@ Old_rows_log_event::Old_rows_log_event(const char *buf, uint event_len,
DBUG_VOID_RETURN;
}
- /* if my_bitmap_init fails, catched in is_valid() */
+ /* if my_bitmap_init fails, caught in is_valid() */
if (likely(!my_bitmap_init(&m_cols,
m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL,
m_width,
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index e4e616178d0..894988c4931 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -4102,13 +4102,15 @@ static int replace_user_table(THD *thd, const User_table &user_table,
table->key_info->key_length);
if (table->file->ha_index_read_idx_map(table->record[0], 0, user_key,
- HA_WHOLE_KEY,
- HA_READ_KEY_EXACT))
+ HA_WHOLE_KEY, HA_READ_KEY_EXACT))
{
/* what == 'N' means revoke */
if (what == 'N')
{
- my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str);
+ if (combo.host.length)
+ my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str);
+ else
+ my_error(ER_INVALID_ROLE, MYF(0), combo.user.str);
goto end;
}
/*
@@ -5623,6 +5625,8 @@ static void propagate_role_grants(ACL_ROLE *role,
enum PRIVS_TO_MERGE::what what,
const char *db= 0, const char *name= 0)
{
+ if (!role)
+ return;
mysql_mutex_assert_owner(&acl_cache->lock);
PRIVS_TO_MERGE data= { what, db, name };
@@ -7796,6 +7800,21 @@ err:
}
+static void check_grant_column_int(GRANT_TABLE *grant_table, const char *name,
+ uint length, ulong *want_access)
+{
+ if (grant_table)
+ {
+ *want_access&= ~grant_table->privs;
+ if (*want_access & grant_table->cols)
+ {
+ GRANT_COLUMN *grant_column= column_hash_search(grant_table, name, length);
+ if (grant_column)
+ *want_access&= ~grant_column->rights;
+ }
+ }
+}
+
/*
Check column rights in given security context
@@ -7818,9 +7837,6 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant,
const char *db_name, const char *table_name,
const char *name, uint length, Security_context *sctx)
{
- GRANT_TABLE *grant_table;
- GRANT_TABLE *grant_table_role;
- GRANT_COLUMN *grant_column;
ulong want_access= grant->want_privilege & ~grant->privilege;
DBUG_ENTER("check_grant_column");
DBUG_PRINT("enter", ("table: %s want_access: %lu", table_name, want_access));
@@ -7845,45 +7861,18 @@ bool check_grant_column(THD *thd, GRANT_INFO *grant,
grant->version= grant_version; /* purecov: inspected */
}
- grant_table= grant->grant_table_user;
- grant_table_role= grant->grant_table_role;
+ check_grant_column_int(grant->grant_table_user, name, length, &want_access);
+ check_grant_column_int(grant->grant_table_role, name, length, &want_access);
- if (!grant_table && !grant_table_role)
- goto err;
-
- if (grant_table)
- {
- grant_column= column_hash_search(grant_table, name, length);
- if (grant_column)
- {
- want_access&= ~grant_column->rights;
- }
- }
- if (grant_table_role)
- {
- grant_column= column_hash_search(grant_table_role, name, length);
- if (grant_column)
- {
- want_access&= ~grant_column->rights;
- }
- }
+ mysql_rwlock_unlock(&LOCK_grant);
if (!want_access)
- {
- mysql_rwlock_unlock(&LOCK_grant);
DBUG_RETURN(0);
- }
-err:
- mysql_rwlock_unlock(&LOCK_grant);
char command[128];
get_privilege_desc(command, sizeof(command), want_access);
/* TODO perhaps error should print current rolename aswell */
- my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0),
- command,
- sctx->priv_user,
- sctx->host_or_ip,
- name,
- table_name);
+ my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), command, sctx->priv_user,
+ sctx->host_or_ip, name, table_name);
DBUG_RETURN(1);
}
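(Simplified analogue of the check_grant_column_int() helper introduced above, with hypothetical flattened types: the per-column rights are reduced to a single mask instead of the real column_hash_search() lookup. Table-wide privileges are subtracted first, and the column rights are consulted only for bits that are still wanted and grantable at column level; the helper is then applied once for the user's grant table and once for the role's.)

#include <cstdio>

typedef unsigned long privs_t;

struct GrantTable
{
  privs_t privs;           // privileges granted on the whole table
  privs_t cols;            // privileges that exist as column grants
  privs_t column_rights;   // rights on the column being checked (flattened)
};

static void clear_granted(const GrantTable *grant_table, privs_t *want_access)
{
  if (!grant_table)
    return;
  *want_access&= ~grant_table->privs;
  if (*want_access & grant_table->cols)
    *want_access&= ~grant_table->column_rights;
}

int main()
{
  const privs_t SELECT_PRIV= 1, UPDATE_PRIV= 2;
  GrantTable user_grants= { 0, SELECT_PRIV | UPDATE_PRIV, SELECT_PRIV };
  GrantTable role_grants= { 0, UPDATE_PRIV, UPDATE_PRIV };
  privs_t want= SELECT_PRIV | UPDATE_PRIV;
  clear_granted(&user_grants, &want);   // the user has SELECT on this column
  clear_granted(&role_grants, &want);   // the role adds UPDATE on it
  std::printf("remaining want_access: %lu\n", want);   // prints 0: access allowed
  return 0;
}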
diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc
index 7ace3144dc3..ed0f37b8e75 100644
--- a/sql/sql_admin.cc
+++ b/sql/sql_admin.cc
@@ -765,8 +765,18 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
{
compl_result_code= result_code= HA_ADMIN_INVALID;
}
+
+ /*
+ The check for Alter_info::ALTER_ADMIN_PARTITION implements this logic:
+ do not collect EITS STATS for this syntax:
+ ALTER TABLE ... ANALYZE PARTITION p
+      EITS statistics are global (not per-partition). Collecting global stats
+      is much more expensive than processing just one partition, so the most
+ appropriate action is to just not collect EITS stats for this command.
+ */
collect_eis=
(table->table->s->table_category == TABLE_CATEGORY_USER &&
+         !(lex->alter_info.flags & Alter_info::ALTER_ADMIN_PARTITION) &&
(get_use_stat_tables_mode(thd) > NEVER ||
lex->with_persistent_for_clause));
}
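(Small sketch with a hypothetical flag bit, not the server's Alter_info values, of the test added above: the partition-admin bit is probed with a bitwise AND, so EITS collection is skipped for ALTER TABLE ... ANALYZE PARTITION while the flag word itself is left untouched.)

#include <cstdio>

typedef unsigned long alter_flags_t;
static const alter_flags_t ALTER_ADMIN_PARTITION= 1UL << 5;   // hypothetical bit value

int main()
{
  alter_flags_t flags= ALTER_ADMIN_PARTITION;   // e.g. ALTER TABLE t ANALYZE PARTITION p
  bool collect_eis= !(flags & ALTER_ADMIN_PARTITION);   // & only tests, never modifies flags
  std::printf("collect EITS stats: %d\n", collect_eis);  // prints 0: skipped
  return 0;
}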
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 674f6db8358..cc77b58cb3e 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1233,7 +1233,7 @@ bool wait_while_table_is_used(THD *thd, TABLE *table,
FALSE);
/* extra() call must come only after all instances above are closed */
if (function != HA_EXTRA_NOT_USED)
- (void) table->file->extra(function);
+ DBUG_RETURN(table->file->extra(function));
DBUG_RETURN(FALSE);
}
@@ -7506,15 +7506,11 @@ bool setup_tables(THD *thd, Name_resolution_context *context,
FALSE ok; In this case *map will include the chosen index
TRUE error
*/
-bool setup_tables_and_check_access(THD *thd,
- Name_resolution_context *context,
+bool setup_tables_and_check_access(THD *thd, Name_resolution_context *context,
List<TABLE_LIST> *from_clause,
- TABLE_LIST *tables,
- List<TABLE_LIST> &leaves,
- bool select_insert,
- ulong want_access_first,
- ulong want_access,
- bool full_table_list)
+ TABLE_LIST *tables, List<TABLE_LIST> &leaves,
+ bool select_insert, ulong want_access_first,
+ ulong want_access, bool full_table_list)
{
DBUG_ENTER("setup_tables_and_check_access");
diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc
index c39afdf9e10..e1e307330e3 100644
--- a/sql/sql_lex.cc
+++ b/sql/sql_lex.cc
@@ -3868,7 +3868,8 @@ bool st_select_lex::optimize_unflattened_subqueries(bool const_only)
sl->options|= SELECT_DESCRIBE;
inner_join->select_options|= SELECT_DESCRIBE;
}
- res= inner_join->optimize();
+ if ((res= inner_join->optimize()))
+ return TRUE;
if (!inner_join->cleaned)
sl->update_used_tables();
sl->update_correlated_cache();
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index c33e554aaca..379a109c57c 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -1109,11 +1109,11 @@ int JOIN::optimize()
if (optimization_state != JOIN::NOT_OPTIMIZED)
return FALSE;
optimization_state= JOIN::OPTIMIZATION_IN_PROGRESS;
+ create_explain_query_if_not_exists(thd->lex, thd->mem_root);
int res= optimize_inner();
if (!res && have_query_plan != QEP_DELETED)
{
- create_explain_query_if_not_exists(thd->lex, thd->mem_root);
have_query_plan= QEP_AVAILABLE;
/*
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 65a828147ae..2e9752eeabd 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -1104,8 +1104,7 @@ bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
DBUG_RETURN(TRUE);
if (setup_tables_and_check_access(thd, &select_lex->context,
- &select_lex->top_join_list,
- table_list,
+ &select_lex->top_join_list, table_list,
select_lex->leaf_tables,
FALSE, UPDATE_ACL, SELECT_ACL, TRUE) ||
setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index b1c5fd1d8aa..8bfe6896ea2 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -450,9 +450,8 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
if (thd->open_temporary_tables(lex->query_tables) ||
open_and_lock_tables(thd, lex->query_tables, TRUE, 0))
{
- view= lex->unlink_first_table(&link_to_local);
res= TRUE;
- goto err;
+ goto err_no_relink;
}
view= lex->unlink_first_table(&link_to_local);
@@ -703,10 +702,12 @@ bool mysql_create_view(THD *thd, TABLE_LIST *views,
WSREP_ERROR_LABEL:
res= TRUE;
+ goto err_no_relink;
err:
THD_STAGE_INFO(thd, stage_end);
lex->link_first_table_back(view, link_to_local);
+err_no_relink:
unit->cleanup();
DBUG_RETURN(res || thd->is_error());
}
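(Hedged sketch with toy stand-ins, not the server's LEX/TABLE_LIST machinery, of the label layout adopted above: failures raised before the first table has been unlinked jump to err_no_relink, so only the paths that really unlinked it link it back before cleanup.)

#include <cstdio>

struct ToyView { bool linked; };

static bool create_view_like(bool open_fails, bool body_fails)
{
  ToyView view= { true };
  bool res= false;

  if (open_fails)            // open_and_lock_tables() failed:
  {                          // the view was never unlinked, do not relink it
    res= true;
    goto err_no_relink;
  }

  view.linked= false;        // lex->unlink_first_table()

  if (body_fails)
  {
    res= true;
    goto err;
  }

err:
  view.linked= true;         // lex->link_first_table_back()
err_no_relink:
  std::printf("open_fails=%d body_fails=%d -> linked=%d res=%d\n",
              open_fails, body_fails, view.linked, res);
  return res;
}

int main()
{
  create_view_like(true, false);   // early failure: relinking would be wrong
  create_view_like(false, true);   // later failure: relinked before cleanup
  return 0;
}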