author    unknown <df@kahlann.erinye.com>  2006-12-01 10:39:48 +0100
committer unknown <df@kahlann.erinye.com>  2006-12-01 10:39:48 +0100
commit    cf245c8f3a5c285af6e53a0035d05262ea5fbe31 (patch)
tree      64ea9b0e817c69d36e0263d377d1d828ed553be9 /sql
parent    5176be8db85e2fbc3e33a35bb6f02905fdf3ede9 (diff)
parent    b6004e6295717c3b616b58416544557bd858afca (diff)
download  mariadb-git-cf245c8f3a5c285af6e53a0035d05262ea5fbe31.tar.gz
Merge dfischer@bk-internal.mysql.com:/home/bk/mysql-5.1
into kahlann.erinye.com:/home/df/mysql/build/mysql-5.1-build

client/mysqltest.c: Auto merged
mysql-test/mysql-test-run.pl: Auto merged
sql/field.cc: Auto merged
sql/item.cc: Auto merged
sql/item_sum.cc: Auto merged
sql/opt_range.cc: Auto merged
sql/sql_base.cc: Auto merged
sql/table.cc: Auto merged
Diffstat (limited to 'sql')
-rw-r--r--sql/event_data_objects.cc16
-rw-r--r--sql/event_scheduler.cc1
-rw-r--r--sql/field.cc32
-rw-r--r--sql/field.h1
-rw-r--r--sql/gen_lex_hash.cc5
-rw-r--r--sql/ha_partition.h6
-rw-r--r--sql/handler.cc6
-rw-r--r--sql/item.cc6
-rw-r--r--sql/item_func.cc10
-rw-r--r--sql/item_strfunc.cc32
-rw-r--r--sql/item_sum.cc4
-rw-r--r--sql/item_timefunc.cc1
-rw-r--r--sql/item_xmlfunc.cc2
-rw-r--r--sql/log.cc1
-rw-r--r--sql/log_event.cc10
-rw-r--r--sql/log_event.h2
-rw-r--r--sql/mysql_priv.h8
-rw-r--r--sql/mysqld.cc32
-rw-r--r--sql/net_serv.cc64
-rw-r--r--sql/opt_range.cc16
-rw-r--r--sql/partition_info.cc6
-rw-r--r--sql/rpl_injector.h2
-rw-r--r--sql/set_var.cc20
-rw-r--r--sql/slave.cc2
-rw-r--r--sql/sp_head.cc10
-rw-r--r--sql/sql_base.cc2
-rw-r--r--sql/sql_cache.cc5
-rw-r--r--sql/sql_class.h17
-rw-r--r--sql/sql_db.cc1
-rw-r--r--sql/sql_delete.cc2
-rw-r--r--sql/sql_insert.cc8
-rw-r--r--sql/sql_lex.h2
-rw-r--r--sql/sql_parse.cc8
-rw-r--r--sql/sql_partition.cc21
-rw-r--r--sql/sql_plugin.cc2
-rw-r--r--sql/sql_prepare.cc1
-rw-r--r--sql/sql_rename.cc4
-rw-r--r--sql/sql_select.cc30
-rw-r--r--sql/sql_show.cc11
-rw-r--r--sql/sql_table.cc2
-rw-r--r--sql/sql_trigger.cc9
-rw-r--r--sql/sql_union.cc2
-rw-r--r--sql/sql_update.cc2
-rw-r--r--sql/sql_view.cc6
-rw-r--r--sql/sql_yacc.yy11
-rw-r--r--sql/table.cc4
-rw-r--r--sql/table.h1
-rw-r--r--sql/tztime.cc8
-rw-r--r--sql/uniques.cc46
49 files changed, 250 insertions, 250 deletions
diff --git a/sql/event_data_objects.cc b/sql/event_data_objects.cc
index 0de90e4145b..ad7f0ab4e41 100644
--- a/sql/event_data_objects.cc
+++ b/sql/event_data_objects.cc
@@ -1095,7 +1095,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
DBUG_PRINT("error", ("negative difference"));
DBUG_ASSERT(0);
}
- uint multiplier= seconds_diff / seconds;
+ uint multiplier= (uint) (seconds_diff / seconds);
/*
Increase the multiplier is the modulus is not zero to make round up.
Or if time_now==start then we should not execute the same
@@ -1128,7 +1128,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
directly with +1 we will be after the current date but it could be that
we will be 1 month ahead, so 2 steps are necessary.
*/
- interval.month= (diff_months / months)*months;
+ interval.month= (ulong) ((diff_months / months)*months);
/*
Check if the same month as last_exec (always set - prerequisite)
An event happens at most once per month so there is no way to
@@ -1141,7 +1141,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
*/
if (time_now->year == last_exec->year &&
time_now->month == last_exec->month)
- interval.month+= months;
+ interval.month+= (ulong) months;
tmp= *start;
if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval)))
@@ -1150,7 +1150,7 @@ bool get_next_time(TIME *next, TIME *start, TIME *time_now, TIME *last_exec,
/* If `tmp` is still before time_now just add one more time the interval */
if (my_time_compare(&tmp, time_now) == -1)
{
- interval.month+= months;
+ interval.month+= (ulong) months;
tmp= *start;
if ((ret= date_add_interval(&tmp, INTERVAL_MONTH, interval)))
goto done;
@@ -1283,7 +1283,7 @@ Event_queue_element::compute_next_execution_time()
if (get_next_time(&next_exec, &starts, &time_now,
last_executed.year? &last_executed:&starts,
- expression, interval))
+ (int) expression, interval))
goto err;
/* There was previous execution */
@@ -1321,7 +1321,7 @@ Event_queue_element::compute_next_execution_time()
{
TIME next_exec;
if (get_next_time(&next_exec, &starts, &time_now, &last_executed,
- expression, interval))
+ (int) expression, interval))
goto err;
execute_at= next_exec;
DBUG_PRINT("info",("Next[%lu]",
@@ -1356,7 +1356,7 @@ Event_queue_element::compute_next_execution_time()
TIME next_exec;
if (get_next_time(&next_exec, &starts, &time_now,
last_executed.year? &last_executed:&starts,
- expression, interval))
+ (int) expression, interval))
goto err;
execute_at= next_exec;
DBUG_PRINT("info",("Next[%lu]",
@@ -1382,7 +1382,7 @@ Event_queue_element::compute_next_execution_time()
TIME next_exec;
if (get_next_time(&next_exec, &starts, &time_now, &last_executed,
- expression, interval))
+ (int) expression, interval))
goto err;
if (my_time_compare(&ends, &next_exec) == -1)
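
The casts above all land in get_next_time()'s interval arithmetic. A minimal standalone sketch of the round-up rule described in the surrounding comments, with plain C types standing in for MySQL's uint/longlong typedefs:

/* Round the interval count up so the next execution falls strictly
   after time_now; also step once when time_now == start. */
static unsigned next_multiplier(long long seconds_diff, long long seconds)
{
  unsigned multiplier= (unsigned) (seconds_diff / seconds);
  if (seconds_diff % seconds != 0 || seconds_diff == 0)
    multiplier++;
  return multiplier;
}
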
diff --git a/sql/event_scheduler.cc b/sql/event_scheduler.cc
index 9be2f2d1125..1e9526b3364 100644
--- a/sql/event_scheduler.cc
+++ b/sql/event_scheduler.cc
@@ -442,7 +442,6 @@ bool
Event_scheduler::run(THD *thd)
{
int res= FALSE;
- struct timespec abstime;
Event_job_data *job_data;
DBUG_ENTER("Event_scheduler::run");
diff --git a/sql/field.cc b/sql/field.cc
index 7eafab68101..4245d401f53 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -2567,7 +2567,6 @@ uint Field_new_decimal::is_equal(create_field *new_field)
int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
- int not_used; // We can ignore result from str2int
char *end;
int error;
@@ -2777,7 +2776,6 @@ void Field_tiny::sql_type(String &res) const
int Field_short::store(const char *from,uint len,CHARSET_INFO *cs)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
- int not_used; // We can ignore result from str2int
char *end;
int error;
@@ -3064,7 +3062,6 @@ void Field_short::sql_type(String &res) const
int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
- int not_used; // We can ignore result from str2int
char *end;
int error;
@@ -3306,8 +3303,6 @@ static bool test_if_minus(CHARSET_INFO *cs,
int Field_long::store(const char *from,uint len,CHARSET_INFO *cs)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
- ulong tmp_scan;
- longlong tmp;
long store_tmp;
int error;
char *end;
@@ -5535,11 +5530,11 @@ int Field_newdate::store_time(TIME *ltime,timestamp_type type)
if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
{
tmp=ltime->year*16*32+ltime->month*32+ltime->day;
- if ((my_bool)check_date(ltime, tmp,
- (TIME_FUZZY_DATE |
- (current_thd->variables.sql_mode &
- (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))), &error))
+ if (check_date(ltime, tmp != 0,
+ (TIME_FUZZY_DATE |
+ (current_thd->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES))), &error))
{
char buff[12];
String str(buff, sizeof(buff), &my_charset_latin1);
@@ -5765,11 +5760,11 @@ int Field_datetime::store_time(TIME *ltime,timestamp_type type)
{
tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+
(ltime->hour*10000L+ltime->minute*100+ltime->second));
- if ((my_bool)check_date(ltime, tmp,
- (TIME_FUZZY_DATE |
- (current_thd->variables.sql_mode &
- (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))), &error))
+ if (check_date(ltime, tmp != 0,
+ (TIME_FUZZY_DATE |
+ (current_thd->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES))), &error))
{
char buff[19];
String str(buff, sizeof(buff), &my_charset_latin1);
@@ -8528,8 +8523,9 @@ const char *Field_bit::unpack(char *to, const char *from)
void Field_bit::set_default()
{
- my_ptrdiff_t const offset= table->s->default_values - table->record[0];
- uchar bits= get_rec_bits(bit_ptr + offset, bit_ofs, bit_len);
+ my_ptrdiff_t const offset= (my_ptrdiff_t) (table->s->default_values -
+ table->record[0]);
+ uchar bits= (uchar) get_rec_bits(bit_ptr + offset, bit_ofs, bit_len);
set_rec_bits(bits, bit_ptr, bit_ofs, bit_len);
Field::set_default();
}
@@ -8554,7 +8550,7 @@ int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
{
ASSERT_COLUMN_MARKED_FOR_WRITE;
int delta;
- uchar bits= field_length & 7;
+ uchar bits= (uchar) (field_length & 7);
for (; length && !*from; from++, length--); // skip left 0's
delta= bytes_in_rec - length;
diff --git a/sql/field.h b/sql/field.h
index 6ff6882ed87..3e2be248627 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1361,7 +1361,6 @@ public:
int store(const char *to,uint length,CHARSET_INFO *charset);
int store(double nr);
int store(longlong nr, bool unsigned_val);
- void reset() { bzero(ptr,packlength); }
double val_real(void);
longlong val_int(void);
String *val_str(String*,String *);
diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc
index 5a8bd48d699..2674b2e65f7 100644
--- a/sql/gen_lex_hash.cc
+++ b/sql/gen_lex_hash.cc
@@ -206,9 +206,10 @@ void insert_symbols()
void insert_sql_functions()
{
- size_t i= 0;
+ int i= 0;
SYMBOL *cur;
- for (cur= sql_functions; i<array_elements(sql_functions); cur++, i++){
+ for (cur= sql_functions; i < (int) array_elements(sql_functions); cur++, i++)
+ {
hash_lex_struct *root=
get_hash_struct_by_len(&root_by_len,cur->length,&max_len);
insert_into_hash(root,cur->name,0,-i-1,1);
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index c0c99a7a82b..40af30bf08c 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -76,12 +76,12 @@ private:
for this since the MySQL Server sometimes allocating the handler object
without freeing them.
*/
- u_long m_table_flags;
- u_long m_low_byte_first;
+ longlong m_table_flags;
+ ulong m_low_byte_first;
uint m_reorged_parts; // Number of reorganised parts
uint m_tot_parts; // Total number of partitions;
- uint m_no_locks; // For engines like ha_blackhole, which needs no locks
+ uint m_no_locks; // For engines like ha_blackhole, which needs no locks
uint m_last_part; // Last file that we update,write
int m_lock_type; // Remembers type of last
// external_lock
diff --git a/sql/handler.cc b/sql/handler.cc
index 85345c70e36..b134814db1e 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -2778,7 +2778,7 @@ int ha_change_key_cache(KEY_CACHE *old_key_cache,
>0 : error. frmblob and frmlen may not be set
*/
-typedef struct st_discover_args
+struct st_discover_args
{
const char *db;
const char *name;
@@ -2826,7 +2826,7 @@ int ha_discover(THD *thd, const char *db, const char *name,
to ask engine if there are any new tables that should be written to disk
or any dropped tables that need to be removed from disk
*/
-typedef struct st_find_files_args
+struct st_find_files_args
{
const char *db;
const char *path;
@@ -2877,7 +2877,7 @@ ha_find_files(THD *thd,const char *db,const char *path,
*/
-typedef struct st_table_exists_in_engine_args
+struct st_table_exists_in_engine_args
{
const char *db;
const char *name;
diff --git a/sql/item.cc b/sql/item.cc
index e96111708eb..b310c81af5c 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -273,7 +273,6 @@ my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
TIME ltime;
- longlong date;
if (get_date(&ltime, TIME_FUZZY_DATE))
{
my_decimal_set_zero(decimal_value);
@@ -287,7 +286,6 @@ my_decimal *Item::val_decimal_from_time(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
TIME ltime;
- longlong date;
if (get_time(&ltime))
{
my_decimal_set_zero(decimal_value);
@@ -1542,7 +1540,7 @@ bool agg_item_collations_for_comparison(DTCollation &c, const char *fname,
bool agg_item_charsets(DTCollation &coll, const char *fname,
Item **args, uint nargs, uint flags, int item_sep)
{
- Item **arg, **last, *safe_args[2];
+ Item **arg, *safe_args[2];
LINT_INIT(safe_args[0]);
LINT_INIT(safe_args[1]);
@@ -5749,7 +5747,7 @@ void Item_trigger_field::set_required_privilege(bool rw)
}
-bool Item_trigger_field::set_value(THD *thd, sp_rcontext */*ctx*/, Item **it)
+bool Item_trigger_field::set_value(THD *thd, sp_rcontext * /*ctx*/, Item **it)
{
Item *item= sp_prepare_func_item(thd, it);
diff --git a/sql/item_func.cc b/sql/item_func.cc
index e658a70d7cb..68517ea976c 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -2350,7 +2350,7 @@ longlong Item_func_locate::val_int()
return 0;
/* start is now sufficiently valid to pass to charpos function */
- start= a->charpos(start);
+ start= a->charpos((int) start);
if (start + b->length() > a->length())
return 0;
@@ -2360,7 +2360,8 @@ longlong Item_func_locate::val_int()
return start + 1;
if (!cmp_collation.collation->coll->instr(cmp_collation.collation,
- a->ptr()+start, a->length()-start,
+ a->ptr()+start,
+ (uint) (a->length()-start),
b->ptr(), b->length(),
&match, 1))
return 0;
@@ -3841,8 +3842,7 @@ bool
Item_func_set_user_var::check(bool use_result_field)
{
DBUG_ENTER("Item_func_set_user_var::check");
- if (use_result_field)
- DBUG_ASSERT(result_field);
+ DBUG_ASSERT(!use_result_field || result_field);
switch (cached_result_type) {
case REAL_RESULT:
@@ -4286,7 +4286,7 @@ bool Item_func_get_user_var::eq(const Item *item, bool binary_cmp) const
bool Item_func_get_user_var::set_value(THD *thd,
- sp_rcontext */*ctx*/, Item **it)
+ sp_rcontext * /*ctx*/, Item **it)
{
Item_func_set_user_var *suv= new Item_func_set_user_var(get_name(), *it);
/*
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 32b283fca57..e8c87893471 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -983,8 +983,8 @@ String *Item_func_insert::val_str(String *str)
length= res->length() + 1;
/* start and length are now sufficiently valid to pass to charpos function */
- start= res->charpos(start);
- length= res->charpos(length, start);
+ start= res->charpos((int) start);
+ length= res->charpos((int) length, (uint32) start);
/* Re-testing with corrected params */
if (start > res->length() + 1)
@@ -1002,7 +1002,7 @@ String *Item_func_insert::val_str(String *str)
goto null;
}
res=copy_if_not_alloced(str,res,res->length());
- res->replace(start,length,*res2);
+ res->replace((uint32) start,(uint32) length,*res2);
return res;
null:
null_value=1;
@@ -1078,7 +1078,7 @@ String *Item_func_left::val_str(String *str)
return &my_empty_string;
if ((res->length() <= (ulonglong) length) ||
- (res->length() <= (char_pos= res->charpos(length))))
+ (res->length() <= (char_pos= res->charpos((int) length))))
return res;
tmp_value.set(*res, 0, char_pos);
@@ -1170,17 +1170,17 @@ String *Item_func_substr::val_str(String *str)
return &my_empty_string;
start= ((start < 0) ? res->numchars() + start : start - 1);
- start= res->charpos(start);
+ start= res->charpos((int) start);
if ((start < 0) || ((uint) start + 1 > res->length()))
return &my_empty_string;
- length= res->charpos(length, start);
+ length= res->charpos((int) length, (uint32) start);
tmp_length= res->length() - start;
length= min(length, tmp_length);
- if (!start && res->length() == (ulonglong) length)
+ if (!start && (longlong) res->length() == length)
return res;
- tmp_value.set(*res, (ulonglong) start, (ulonglong) length);
+ tmp_value.set(*res, (uint32) start, (uint32) length);
return &tmp_value;
}
@@ -2273,7 +2273,7 @@ String *Item_func_repeat::val_str(String *str)
char *to;
/* must be longlong to avoid truncation */
longlong tmp_count= args[1]->val_int();
- long count= tmp_count;
+ long count= (long) tmp_count;
String *res= args[0]->val_str(str);
/* Assumes that the maximum length of a String is < INT_MAX32. */
@@ -2375,7 +2375,7 @@ String *Item_func_rpad::val_str(String *str)
if (count <= (res_char_length= res->numchars()))
{ // String to pad is big enough
- res->length(res->charpos(count)); // Shorten result if longer
+ res->length(res->charpos((int) count)); // Shorten result if longer
return (res);
}
pad_char_length= rpad->numchars();
@@ -2392,7 +2392,7 @@ String *Item_func_rpad::val_str(String *str)
if (args[2]->null_value || !pad_char_length)
goto err;
res_byte_length= res->length(); /* Must be done before alloc_buffer */
- if (!(res= alloc_buffer(res,str,&tmp_value,byte_count)))
+ if (!(res= alloc_buffer(res,str,&tmp_value, (ulong) byte_count)))
goto err;
to= (char*) res->ptr()+res_byte_length;
@@ -2406,7 +2406,7 @@ String *Item_func_rpad::val_str(String *str)
}
if (count)
{
- pad_byte_length= rpad->charpos(count);
+ pad_byte_length= rpad->charpos((int) count);
memcpy(to,ptr_pad,(size_t) pad_byte_length);
to+= pad_byte_length;
}
@@ -2478,7 +2478,7 @@ String *Item_func_lpad::val_str(String *str)
if (count <= res_char_length)
{
- res->length(res->charpos(count));
+ res->length(res->charpos((int) count));
return res;
}
@@ -2494,7 +2494,8 @@ String *Item_func_lpad::val_str(String *str)
goto err;
}
- if (args[2]->null_value || !pad_char_length || str->alloc(byte_count))
+ if (args[2]->null_value || !pad_char_length ||
+ str->alloc((uint32) byte_count))
goto err;
str->length(0);
@@ -2506,7 +2507,7 @@ String *Item_func_lpad::val_str(String *str)
count-= pad_char_length;
}
if (count > 0)
- str->append(pad->ptr(), pad->charpos(count), collation.collation);
+ str->append(pad->ptr(), pad->charpos((int) count), collation.collation);
str->append(*res);
null_value= 0;
@@ -3326,4 +3327,3 @@ String *Item_func_uuid::val_str(String *str)
strmov(s+18, clock_seq_and_node_str);
return str;
}
-
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index 1ac872b6c07..7ea13e61c23 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -3434,8 +3434,8 @@ bool Item_func_group_concat::setup(THD *thd)
duplicate values (according to the syntax of this function). If there
is no DISTINCT or ORDER BY clauses, we don't create this tree.
*/
- init_tree(tree, min(thd->variables.max_heap_table_size,
- thd->variables.sortbuff_size/16), 0,
+ init_tree(tree, (uint) min(thd->variables.max_heap_table_size,
+ thd->variables.sortbuff_size/16), 0,
tree_key_length, compare_key, 0, NULL, (void*) this);
}
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 6771b44ccd3..76ab70cca96 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -110,7 +110,6 @@ static bool make_datetime_with_warn(date_time_format_types format, TIME *ltime,
String *str)
{
int warning= 0;
- bool rc;
if (make_datetime(format, ltime, str))
return 1;
diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc
index 21239a13735..966bae43984 100644
--- a/sql/item_xmlfunc.cc
+++ b/sql/item_xmlfunc.cc
@@ -777,7 +777,7 @@ String *Item_nodeset_func_elementbyindex::val_nodeset(String *nodeset)
((XPathFilter*)(&nodeset_func->context_cache))->append_element(flt->num,
flt->pos,
size);
- int index= args[1]->val_int() - 1;
+ int index= (int) (args[1]->val_int()) - 1;
if (index >= 0 && (flt->pos == (uint) index || args[1]->is_bool_func()))
((XPathFilter*)nodeset)->append_element(flt->num, pos++);
}
diff --git a/sql/log.cc b/sql/log.cc
index b12eca9bb07..a5f7b48aa75 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -2418,7 +2418,6 @@ bool MYSQL_BIN_LOG::open(const char *log_name,
ulong max_size_arg,
bool null_created_arg)
{
- char buff[FN_REFLEN];
File file= -1;
int open_flags = O_CREAT | O_BINARY;
DBUG_ENTER("MYSQL_BIN_LOG::open");
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 112f4aee135..44cba324a02 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1162,11 +1162,11 @@ void Log_event::print_base64(IO_CACHE* file,
bool more)
{
const uchar *ptr= (const uchar *)temp_buf;
- my_off_t size= uint4korr(ptr + EVENT_LEN_OFFSET);
+ uint32 size= uint4korr(ptr + EVENT_LEN_OFFSET);
DBUG_ENTER("Log_event::print_base64");
- size_t const tmp_str_sz= base64_needed_encoded_length(size);
+ size_t const tmp_str_sz= base64_needed_encoded_length((int) size);
char *const tmp_str= (char *) my_malloc(tmp_str_sz, MYF(MY_WME));
if (!tmp_str) {
fprintf(stderr, "\nError: Out of memory. "
@@ -1174,7 +1174,7 @@ void Log_event::print_base64(IO_CACHE* file,
DBUG_VOID_RETURN;
}
- int const res= base64_encode(ptr, size, tmp_str);
+ int const res= base64_encode(ptr, (size_t) size, tmp_str);
DBUG_ASSERT(res == 0);
if (my_b_tell(file) == 0)
@@ -5360,7 +5360,7 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len,
}
else
{
- m_table_id= uint6korr(post_start);
+ m_table_id= (ulong) uint6korr(post_start);
post_start+= RW_FLAGS_OFFSET;
}
@@ -6098,7 +6098,7 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len,
else
{
DBUG_ASSERT(post_header_len == TABLE_MAP_HEADER_LEN);
- m_table_id= uint6korr(post_start);
+ m_table_id= (ulong) uint6korr(post_start);
post_start+= TM_FLAGS_OFFSET;
}
diff --git a/sql/log_event.h b/sql/log_event.h
index c3f015e723c..4b74bf7c7ee 100644
--- a/sql/log_event.h
+++ b/sql/log_event.h
@@ -884,6 +884,8 @@ public:
bool write(IO_CACHE* file) { return(false); };
virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; }
+#else
+ Muted_query_log_event() {}
#endif
};
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index 915e4200fd1..a75f204ae33 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -893,7 +893,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
bool mysql_select(THD *thd, Item ***rref_pointer_array,
TABLE_LIST *tables, uint wild_num, List<Item> &list,
COND *conds, uint og_num, ORDER *order, ORDER *group,
- Item *having, ORDER *proc_param, ulong select_type,
+ Item *having, ORDER *proc_param, ulonglong select_type,
select_result *result, SELECT_LEX_UNIT *unit,
SELECT_LEX *select_lex);
void free_underlaid_joins(THD *thd, SELECT_LEX *select);
@@ -919,7 +919,7 @@ void sp_prepare_create_field(THD *thd, create_field *sql_field);
int prepare_create_field(create_field *sql_field,
uint *blob_columns,
int *timestamps, int *timestamps_with_niladic,
- uint table_flags);
+ longlong table_flags);
bool mysql_create_table(THD *thd,const char *db, const char *table_name,
HA_CREATE_INFO *create_info,
List<create_field> &fields, List<Key> &keys,
@@ -1717,7 +1717,7 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list,
/* old unireg functions */
void unireg_init(ulong options);
-void unireg_end(void);
+void unireg_end(void) __attribute__((noreturn));
bool mysql_create_frm(THD *thd, const char *file_name,
const char *db, const char *table,
HA_CREATE_INFO *create_info,
@@ -2002,7 +2002,7 @@ inline bool is_user_table(TABLE * table)
*/
#ifndef EMBEDDED_LIBRARY
-extern "C" void unireg_abort(int exit_code);
+extern "C" void unireg_abort(int exit_code) __attribute__((noreturn));
void kill_delayed_threads(void);
bool check_stack_overrun(THD *thd, long margin, char *dummy);
#else
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 77679a9795e..6921fe3e6d8 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -68,6 +68,12 @@
#define IF_PURIFY(A,B) (B)
#endif
+#if SIZEOF_CHARP == 4
+#define MAX_MEM_TABLE_SIZE ~(ulong) 0
+#else
+#define MAX_MEM_TABLE_SIZE ~(ulonglong) 0
+#endif
+
/* stack traces are only supported on linux intel */
#if defined(__linux__) && defined(__i386__) && defined(USE_PSTACK)
#define HAVE_STACK_TRACE_ON_SEGV
@@ -1056,9 +1062,6 @@ static void __cdecl kill_server(int sig_ptr)
}
#endif
-#if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__))
- my_thread_init(); // If this is a new thread
-#endif
close_connections();
if (sig != MYSQL_KILL_SIGNAL &&
#ifdef __WIN__
@@ -1069,16 +1072,15 @@ static void __cdecl kill_server(int sig_ptr)
else
unireg_end();
+ /* purecov: begin deadcode */
#ifdef __NETWARE__
if (!event_flag)
pthread_join(select_thread, NULL); // wait for main thread
#endif /* __NETWARE__ */
-#if defined(__NETWARE__) || (defined(USE_ONE_SIGNAL_HAND) && !defined(__WIN__) && !defined(OS2))
my_thread_end();
-#endif
-
- pthread_exit(0); /* purecov: deadcode */
+ pthread_exit(0);
+ /* purecov: end */
#endif /* EMBEDDED_LIBRARY */
RETURN_FROM_KILL_SERVER;
@@ -1090,11 +1092,15 @@ pthread_handler_t kill_server_thread(void *arg __attribute__((unused)))
{
my_thread_init(); // Initialize new thread
kill_server(0);
- my_thread_end(); // Normally never reached
+ /* purecov: begin deadcode */
+ my_thread_end();
+ pthread_exit(0);
return 0;
+ /* purecov: end */
}
#endif
+
extern "C" sig_handler print_signal_warning(int sig)
{
if (global_system_variables.log_warnings)
@@ -5943,8 +5949,9 @@ The minimum value for this variable is 4096.",
{"max_heap_table_size", OPT_MAX_HEP_TABLE_SIZE,
"Don't allow creation of heap tables bigger than this.",
(gptr*) &global_system_variables.max_heap_table_size,
- (gptr*) &max_system_variables.max_heap_table_size, 0, GET_ULONG,
- REQUIRED_ARG, 16*1024*1024L, 16384, ~0L, MALLOC_OVERHEAD, 1024, 0},
+ (gptr*) &max_system_variables.max_heap_table_size, 0, GET_ULL,
+ REQUIRED_ARG, 16*1024*1024L, 16384, MAX_MEM_TABLE_SIZE,
+ MALLOC_OVERHEAD, 1024, 0},
{"max_join_size", OPT_MAX_JOIN_SIZE,
"Joins that are probably going to read more than max_join_size records return an error.",
(gptr*) &global_system_variables.max_join_size,
@@ -6230,8 +6237,8 @@ The minimum value for this variable is 4096.",
{"tmp_table_size", OPT_TMP_TABLE_SIZE,
"If an in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM table.",
(gptr*) &global_system_variables.tmp_table_size,
- (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULONG,
- REQUIRED_ARG, 16*1024*1024L, 1024, ~0L, 0, 1, 0}, /* See max_heap_table_size . */
+ (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULL,
+ REQUIRED_ARG, 16*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0},
{"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
"Allocation block size for transactions to be stored in binary log",
(gptr*) &global_system_variables.trans_alloc_block_size,
@@ -7131,7 +7138,6 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
switch(optid) {
case '#':
#ifndef DBUG_OFF
- DBUG_SET(argument ? argument : default_dbug_option);
DBUG_SET_INITIAL(argument ? argument : default_dbug_option);
#endif
opt_endinfo=1; /* unireg: memory allocation */
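
Worth spelling out: the MAX_MEM_TABLE_SIZE macro introduced above ties the max_heap_table_size / tmp_table_size ceiling to pointer width. A hedged restatement of what the two branches evaluate to:

/* SIZEOF_CHARP == 4 (32-bit build):  ~(ulong) 0     == 0xFFFFFFFF          (4 GiB - 1)
   SIZEOF_CHARP == 8 (64-bit build):  ~(ulonglong) 0 == 0xFFFFFFFFFFFFFFFF  (2^64 - 1)
   The two options now use GET_ULL so the 64-bit ceiling fits in the option value. */
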
diff --git a/sql/net_serv.cc b/sql/net_serv.cc
index ec4c4675e76..6f8993f584d 100644
--- a/sql/net_serv.cc
+++ b/sql/net_serv.cc
@@ -266,6 +266,7 @@ static int net_data_is_ready(my_socket sd)
SYNOPSIS
net_clear()
net NET handler
+ clear_buffer If <> 0, then clear all data from communication buffer
DESCRIPTION
Read from socket until there is nothing more to read. Discard
@@ -280,48 +281,51 @@ static int net_data_is_ready(my_socket sd)
*/
-void net_clear(NET *net)
+void net_clear(NET *net, my_bool clear_buffer)
{
int count, ready;
DBUG_ENTER("net_clear");
#if !defined(EMBEDDED_LIBRARY)
- while((ready= net_data_is_ready(net->vio->sd)) > 0)
+ if (clear_buffer)
{
- /* The socket is ready */
- if ((count= vio_read(net->vio, (char*) (net->buff),
- (uint32) net->max_packet)) > 0)
+ while ((ready= net_data_is_ready(net->vio->sd)) > 0)
{
- DBUG_PRINT("info",("skipped %d bytes from file: %s",
- count, vio_description(net->vio)));
+ /* The socket is ready */
+ if ((count= vio_read(net->vio, (char*) (net->buff),
+ (uint32) net->max_packet)) > 0)
+ {
+ DBUG_PRINT("info",("skipped %d bytes from file: %s",
+ count, vio_description(net->vio)));
#ifdef EXTRA_DEBUG
- fprintf(stderr,"skipped %d bytes from file: %s\n",
- count, vio_description(net->vio));
+ fprintf(stderr,"Error: net_clear() skipped %d bytes from file: %s\n",
+ count, vio_description(net->vio));
#endif
+ }
+ else
+ {
+ DBUG_PRINT("info",("socket ready but only EOF to read - disconnected"));
+ net->error= 2;
+ break;
+ }
}
- else
- {
- DBUG_PRINT("info",("socket ready but only EOF to read - disconnected"));
- net->error= 2;
- break;
- }
- }
#ifdef NET_DATA_IS_READY_CAN_RETURN_MINUS_ONE
- /* 'net_data_is_ready' returned "don't know" */
- if (ready == -1)
- {
- /* Read unblocking to clear net */
- my_bool old_mode;
- if (!vio_blocking(net->vio, FALSE, &old_mode))
+ /* 'net_data_is_ready' returned "don't know" */
+ if (ready == -1)
{
- while ((count= vio_read(net->vio, (char*) (net->buff),
- (uint32) net->max_packet)) > 0)
- DBUG_PRINT("info",("skipped %d bytes from file: %s",
- count, vio_description(net->vio)));
- vio_blocking(net->vio, TRUE, &old_mode);
+ /* Read unblocking to clear net */
+ my_bool old_mode;
+ if (!vio_blocking(net->vio, FALSE, &old_mode))
+ {
+ while ((count= vio_read(net->vio, (char*) (net->buff),
+ (uint32) net->max_packet)) > 0)
+ DBUG_PRINT("info",("skipped %d bytes from file: %s",
+ count, vio_description(net->vio)));
+ vio_blocking(net->vio, TRUE, &old_mode);
+ }
}
+#endif /* NET_DATA_IS_READY_CAN_RETURN_MINUS_ONE */
}
-#endif
-#endif
+#endif /* EMBEDDED_LIBRARY */
net->pkt_nr=net->compress_pkt_nr=0; /* Ready for new command */
net->write_pos=net->buff;
DBUG_VOID_RETURN;
@@ -894,7 +898,7 @@ my_real_read(NET *net, ulong *complen)
(int) net->buff[net->where_b + 3],
net->pkt_nr));
#ifdef EXTRA_DEBUG
- fprintf(stderr,"Packets out of order (Found: %d, expected %d)\n",
+ fprintf(stderr,"Error: Packets out of order (Found: %d, expected %d)\n",
(int) net->buff[net->where_b + 3],
(uint) (uchar) net->pkt_nr);
#endif
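
Stripped of the re-indentation noise, the reshaped net_clear() boils down to the following control flow (a simplified sketch of the function as changed above; error paths and the non-blocking fallback are trimmed):

void net_clear(NET *net, my_bool clear_buffer)
{
  if (clear_buffer)
  {
    /* Drain whatever the peer already sent so the next command starts
       on a clean packet boundary; EOF on a ready socket means the
       client disconnected. */
    int count;
    while (net_data_is_ready(net->vio->sd) > 0)
    {
      if ((count= vio_read(net->vio, (char*) net->buff,
                           (uint32) net->max_packet)) <= 0)
      {
        net->error= 2;
        break;
      }
    }
  }
  net->pkt_nr= net->compress_pkt_nr= 0;       /* ready for new command */
  net->write_pos= net->buff;
}
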
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index b4927bdb5b4..fa575e73c39 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -540,7 +540,8 @@ class PARAM : public RANGE_OPT_PARAM
{
public:
KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */
- uint baseflag, max_key_part, range_count;
+ longlong baseflag;
+ uint max_key_part, range_count;
char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH],
@@ -1142,7 +1143,6 @@ int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler)
{
handler *save_file= file, *org_file;
THD *thd;
- MY_BITMAP *bitmap;
DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan");
in_ror_merged_scan= 1;
@@ -2048,7 +2048,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
/* set up parameter that is passed to all functions */
param.thd= thd;
- param.baseflag=head->file->ha_table_flags();
+ param.baseflag= head->file->ha_table_flags();
param.prev_tables=prev_tables | const_tables;
param.read_tables=read_tables;
param.current_table= head->map;
@@ -2101,7 +2101,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
key_parts->null_bit= key_part_info->null_bit;
key_parts->image_type =
(key_info->flags & HA_SPATIAL) ? Field::itMBR : Field::itRAW;
- key_parts->flag= key_part_info->key_part_flag;
+ /* Only HA_PART_KEY_SEG is used */
+ key_parts->flag= (uint8) key_part_info->key_part_flag;
}
param.real_keynr[param.keys++]=idx;
}
@@ -2508,7 +2509,6 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond)
prune_param.key= prune_param.range_param.key_parts;
SEL_TREE *tree;
- SEL_ARG *arg;
int res;
tree= get_mm_tree(range_par, pprune_cond);
@@ -3223,12 +3223,12 @@ static bool create_partition_index_description(PART_PRUNE_PARAM *ppar)
{
key_part->key= 0;
key_part->part= part;
- key_part->length= (*field)->pack_length_in_rec();
+ key_part->length= (uint16) (*field)->pack_length_in_rec();
/*
psergey-todo: check yet again if this is correct for tricky field types,
e.g. see "Fix a fatal error in decimal key handling" in open_binary_frm()
*/
- key_part->store_length= (*field)->pack_length();
+ key_part->store_length= (uint16) (*field)->pack_length();
if ((*field)->real_maybe_null())
key_part->store_length+= HA_KEY_NULL_LENGTH;
if ((*field)->type() == FIELD_TYPE_BLOB ||
@@ -7652,7 +7652,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
key_part->length= key_info->key_part[part].length;
key_part->store_length= key_info->key_part[part].store_length;
key_part->null_bit= key_info->key_part[part].null_bit;
- key_part->flag= key_info->key_part[part].key_part_flag;
+ key_part->flag= (uint8) key_info->key_part[part].key_part_flag;
}
if (insert_dynamic(&quick->ranges,(gptr)&range))
goto err;
diff --git a/sql/partition_info.cc b/sql/partition_info.cc
index c3e67752396..9d5b6d0494a 100644
--- a/sql/partition_info.cc
+++ b/sql/partition_info.cc
@@ -248,7 +248,6 @@ bool partition_info::set_up_default_subpartitions(handler *file,
HA_CREATE_INFO *info)
{
uint i, j;
- char *default_name, *name_ptr;
bool result= TRUE;
partition_element *part_elem;
List_iterator<partition_element> part_it(partitions);
@@ -664,7 +663,8 @@ bool partition_info::check_list_constants()
qsort((void*)list_array, no_list_values, sizeof(LIST_PART_ENTRY),
&list_part_cmp);
- i= prev_value= 0; //prev_value initialised to quiet compiler
+ i= 0;
+ LINT_INIT(prev_value);
do
{
DBUG_ASSERT(i < no_list_values);
@@ -956,7 +956,6 @@ bool partition_info::set_up_charset_field_preps()
while ((field= *(ptr++)))
{
unsigned j= 0;
- Field *part_field;
CHARSET_INFO *cs;
char *field_buf;
LINT_INIT(field_buf);
@@ -985,7 +984,6 @@ bool partition_info::set_up_charset_field_preps()
}
if (tot_fields)
{
- Field *part_field, *subpart_field;
uint j,k,l;
size= tot_fields*sizeof(char**);
diff --git a/sql/rpl_injector.h b/sql/rpl_injector.h
index 48df30e8ac8..17251f54746 100644
--- a/sql/rpl_injector.h
+++ b/sql/rpl_injector.h
@@ -27,7 +27,7 @@
/* Forward declarations */
class handler;
class MYSQL_BIN_LOG;
-class st_table;
+struct st_table;
typedef st_table TABLE;
diff --git a/sql/set_var.cc b/sql/set_var.cc
index daca62400e2..2fe839189a0 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -298,7 +298,7 @@ sys_var_thd_ulong sys_max_delayed_threads("max_delayed_threads",
fix_max_connections);
sys_var_thd_ulong sys_max_error_count("max_error_count",
&SV::max_error_count);
-sys_var_thd_ulong sys_max_heap_table_size("max_heap_table_size",
+sys_var_thd_ulonglong sys_max_heap_table_size("max_heap_table_size",
&SV::max_heap_table_size);
sys_var_thd_ulong sys_pseudo_thread_id("pseudo_thread_id",
&SV::pseudo_thread_id,
@@ -472,7 +472,7 @@ sys_var_thd_enum sys_tx_isolation("tx_isolation",
&tx_isolation_typelib,
fix_tx_isolation,
check_tx_isolation);
-sys_var_thd_ulong sys_tmp_table_size("tmp_table_size",
+sys_var_thd_ulonglong sys_tmp_table_size("tmp_table_size",
&SV::tmp_table_size);
sys_var_bool_ptr sys_timed_mutexes("timed_mutexes",
&timed_mutexes);
@@ -3039,7 +3039,7 @@ static bool set_option_autocommit(THD *thd, set_var *var)
{
/* The test is negative as the flag we use is NOT autocommit */
- ulong org_options=thd->options;
+ ulonglong org_options= thd->options;
if (var->save_result.ulong_value != 0)
thd->options&= ~((sys_var_thd_bit*) var->var)->bit_flag;
@@ -3051,15 +3051,16 @@ static bool set_option_autocommit(THD *thd, set_var *var)
if ((org_options & OPTION_NOT_AUTOCOMMIT))
{
/* We changed to auto_commit mode */
- thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE |
- OPTION_KEEP_LOG);
+ thd->options&= ~(ulonglong) (OPTION_BEGIN |
+ OPTION_STATUS_NO_TRANS_UPDATE |
+ OPTION_KEEP_LOG);
thd->server_status|= SERVER_STATUS_AUTOCOMMIT;
if (ha_commit(thd))
return 1;
}
else
{
- thd->options&= ~(ulong) (OPTION_STATUS_NO_TRANS_UPDATE);
+ thd->options&= ~(ulonglong) (OPTION_STATUS_NO_TRANS_UPDATE);
thd->server_status&= ~SERVER_STATUS_AUTOCOMMIT;
}
}
@@ -3613,7 +3614,8 @@ bool sys_var_thd_table_type::update(THD *thd, set_var *var)
pointer to string with sql_mode representation
*/
-byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, ulong val,
+byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd,
+ ulong val,
ulong *len)
{
char buff[256];
@@ -3641,8 +3643,8 @@ byte *sys_var_thd_sql_mode::symbolic_mode_representation(THD *thd, ulong val,
byte *sys_var_thd_sql_mode::value_ptr(THD *thd, enum_var_type type,
LEX_STRING *base)
{
- ulong val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
- thd->variables.*offset);
+ ulonglong val= ((type == OPT_GLOBAL) ? global_system_variables.*offset :
+ thd->variables.*offset);
ulong length_unused;
return symbolic_mode_representation(thd, val, &length_unused);
}
diff --git a/sql/slave.cc b/sql/slave.cc
index 4c5f0fc4764..67e8ba20c4f 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -913,7 +913,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
TABLE_LIST tables;
int error= 1;
handler *file;
- ulong save_options;
+ ulonglong save_options;
NET *net= &mysql->net;
DBUG_ENTER("create_table_from_dump");
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index 622d9efdde0..c0c778c0e8d 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -456,10 +456,14 @@ sp_head::operator delete(void *ptr, size_t size)
sp_head::sp_head()
:Query_arena(&main_mem_root, INITIALIZED_FOR_SP),
m_flags(0), m_recursion_level(0), m_next_cached_sp(0),
- m_first_instance(this), m_first_free_instance(this), m_last_cached_sp(this),
m_cont_level(0)
{
const LEX_STRING str_reset= { NULL, 0 };
+
+ m_first_instance= this;
+ m_first_free_instance= this;
+ m_last_cached_sp= this;
+
m_return_field_def.charset = NULL;
/*
FIXME: the only use case when name is NULL is events, and it should
@@ -1675,7 +1679,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
Item_null *null_item= new Item_null();
if (!null_item ||
- nctx->set_variable(thd, i, (struct Item **)&null_item))
+ nctx->set_variable(thd, i, (Item **)&null_item))
{
err_status= TRUE;
break;
@@ -2853,7 +2857,7 @@ void
sp_instr_freturn::print(String *str)
{
/* freturn type expr... */
- if (str->reserve(UINT_MAX+8+32)) // Add some for the expr. too
+ if (str->reserve(1024+8+32)) // Add some for the expr. too
return;
str->qs_append(STRING_WITH_LEN("freturn "));
str->qs_append((uint)m_type);
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index afd287630ad..db6baac8681 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -1251,7 +1251,7 @@ void close_temporary_tables(THD *thd)
/* We always quote db,table names though it is slight overkill */
if (found_user_tables &&
- !(was_quote_show= (thd->options & OPTION_QUOTE_SHOW_CREATE)))
+ !(was_quote_show= test(thd->options & OPTION_QUOTE_SHOW_CREATE)))
{
thd->options |= OPTION_QUOTE_SHOW_CREATE;
}
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index a6e15c55641..1b9af26530e 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -1043,19 +1043,18 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
while (sql[i]=='(')
i++;
-
/*
Test if the query is a SELECT
(pre-space is removed in dispatch_command).
First '/' looks like comment before command it is not
- frequently appeared in real lihe, consequently we can
+ frequently appeared in real life, consequently we can
check all such queries, too.
*/
if ((my_toupper(system_charset_info, sql[i]) != 'S' ||
my_toupper(system_charset_info, sql[i + 1]) != 'E' ||
my_toupper(system_charset_info, sql[i + 2]) != 'L') &&
- sql[0] != '/')
+ sql[i] != '/')
{
DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached"));
goto err;
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 218c56959a3..166b078ce62 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -183,6 +183,8 @@ struct system_variables
{
ulonglong myisam_max_extra_sort_file_size;
ulonglong myisam_max_sort_file_size;
+ ulonglong max_heap_table_size;
+ ulonglong tmp_table_size;
ha_rows select_limit;
ha_rows max_join_size;
ulong auto_increment_increment, auto_increment_offset;
@@ -191,7 +193,6 @@ struct system_variables
ulong long_query_time;
ulong max_allowed_packet;
ulong max_error_count;
- ulong max_heap_table_size;
ulong max_length_for_sort_data;
ulong max_sort_length;
ulong max_tmp_tables;
@@ -215,7 +216,6 @@ struct system_variables
ulong div_precincrement;
ulong sortbuff_size;
handlerton *table_type;
- ulong tmp_table_size;
ulong tx_isolation;
ulong completion_type;
/* Determines which non-standard SQL behaviour should be enabled */
@@ -2060,7 +2060,8 @@ class user_var_entry
class Unique :public Sql_alloc
{
DYNAMIC_ARRAY file_ptrs;
- ulong max_elements, max_in_memory_size;
+ ulong max_elements;
+ ulonglong max_in_memory_size;
IO_CACHE file;
TREE tree;
byte *record_pointers;
@@ -2070,7 +2071,7 @@ class Unique :public Sql_alloc
public:
ulong elements;
Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
- uint size_arg, ulong max_in_memory_size_arg);
+ uint size_arg, ulonglong max_in_memory_size_arg);
~Unique();
ulong elements_in_tree() { return tree.elements_in_tree; }
inline bool unique_add(void *ptr)
@@ -2084,13 +2085,13 @@ public:
bool get(TABLE *table);
static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
- ulong max_in_memory_size);
+ ulonglong max_in_memory_size);
inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size,
- ulong max_in_memory_size)
+ ulonglong max_in_memory_size)
{
- register ulong max_elems_in_tree=
+ register ulonglong max_elems_in_tree=
(1 + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
- return sizeof(uint)*(1 + nkeys/max_elems_in_tree);
+ return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
}
void reset();
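
The Unique cost-buffer arithmetic widened above works out like this in a worked example (values assumed purely for illustration; sizeof(uint) taken as 4):

/* Assume: max_in_memory_size = 1 MiB, and
   ALIGN_SIZE(sizeof(TREE_ELEMENT) + key_size) == 32, nkeys = 10,000,000. */
ulonglong max_elems_in_tree= 1 + (1024ULL * 1024) / 32;              /* 32769 */
int buff_size= (int) (sizeof(uint) * (1 + 10000000ULL / max_elems_in_tree));
/* buff_size == 4 * (1 + 305) == 1224 bytes */
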
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 0c154069bd6..4f9e19732fd 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -1302,7 +1302,6 @@ err:
bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
{
- int path_length;
LEX_STRING db_name;
bool system_db= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 5da2a7660a4..df24dad2d4c 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -975,7 +975,7 @@ end:
trunc_by_del:
/* Probably InnoDB table */
- ulong save_options= thd->options;
+ ulonglong save_options= thd->options;
table_list->lock_type= TL_WRITE;
thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT);
ha_enable_transaction(thd, FALSE);
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index dcb4152f64f..191fc60dfd5 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -2042,7 +2042,9 @@ err:
*/
ha_rollback_stmt(thd);
+#ifndef __WIN__
end:
+#endif
/*
di should be unlinked from the thread handler list and have no active
clients
@@ -2698,7 +2700,7 @@ void select_insert::send_error(uint errcode,const char *err)
bool select_insert::send_eof()
{
- int error,error2;
+ int error;
bool const trans_table= table->file->has_transactions();
ulonglong id;
DBUG_ENTER("select_insert::send_eof");
@@ -2750,9 +2752,9 @@ bool select_insert::send_eof()
*/
if (trans_table || thd->current_stmt_binlog_row_based)
{
- int const error2= ha_autocommit_or_rollback(thd, error);
+ int error2= ha_autocommit_or_rollback(thd, error);
if (error2 && !error)
- error=error2;
+ error= error2;
}
table->file->ha_release_auto_increment();
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 3166928420a..4af767b25db 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -423,7 +423,7 @@ protected:
TABLE *table; /* temporary table using for appending UNION results */
select_result *result;
- ulong found_rows_for_union;
+ ulonglong found_rows_for_union;
bool res;
public:
bool prepared, // prepare phase already performed for UNION (unit)
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index f0aa90fa84f..401285c2f8b 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -2093,7 +2093,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
#ifdef __WIN__
sleep(1); // must wait after eof()
#endif
- send_eof(thd); // This is for 'quit request'
+ /*
+ The client is next going to send a COM_QUIT request (as part of
+ mysql_close()). Make the life simpler for the client by sending
+ the response for the coming COM_QUIT in advance
+ */
+ send_eof(thd);
close_connection(thd, 0, 1);
close_thread_tables(thd); // Free before kill
kill_mysql();
@@ -5191,7 +5196,6 @@ end_with_restore_list:
break;
}
-end:
thd->proc_info="query end";
/*
diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc
index 266a5bad34d..dcc35293b84 100644
--- a/sql/sql_partition.cc
+++ b/sql/sql_partition.cc
@@ -886,7 +886,6 @@ int check_signed_flag(partition_info *part_info)
bool fix_fields_part_func(THD *thd, Item* func_expr, TABLE *table,
bool is_sub_part, bool is_field_to_be_setup)
{
- MEM_ROOT new_mem_root;
partition_info *part_info= table->part_info;
uint dir_length, home_dir_length;
bool result= TRUE;
@@ -2002,7 +2001,7 @@ char *generate_partition_syntax(partition_info *part_info,
bool use_sql_alloc,
bool show_partition_options)
{
- uint i,j, tot_no_parts, no_subparts, no_parts;
+ uint i,j, tot_no_parts, no_subparts;
partition_element *part_elem;
partition_element *save_part_elem= NULL;
ulonglong buffer_length;
@@ -2302,10 +2301,13 @@ static uint32 get_part_id_hash(uint no_parts,
Item *part_expr,
longlong *func_value)
{
+ longlong int_hash_id;
DBUG_ENTER("get_part_id_hash");
+
*func_value= part_val_int(part_expr);
- longlong int_hash_id= *func_value % no_parts;
- DBUG_RETURN(int_hash_id < 0 ? -int_hash_id : int_hash_id);
+ int_hash_id= *func_value % no_parts;
+
+ DBUG_RETURN(int_hash_id < 0 ? (uint32) -int_hash_id : (uint32) int_hash_id);
}
@@ -2358,7 +2360,7 @@ static uint32 get_part_id_key(Field **field_array,
{
DBUG_ENTER("get_part_id_key");
*func_value= calculate_key_value(field_array);
- DBUG_RETURN(*func_value % no_parts);
+ DBUG_RETURN((uint32) (*func_value % no_parts));
}
@@ -3936,7 +3938,7 @@ static int fast_end_partition(THD *thd, ulonglong copied,
(ulong) (copied + deleted),
(ulong) deleted,
(ulong) 0);
- send_ok(thd,copied+deleted,0L,tmp_name);
+ send_ok(thd, (ha_rows) (copied+deleted),0L,tmp_name);
DBUG_RETURN(FALSE);
}
table->file->print_error(error, MYF(0));
@@ -4024,7 +4026,6 @@ static bool check_native_partitioned(HA_CREATE_INFO *create_info,bool *ret_val,
handlerton *engine_type= create_info->db_type;
handlerton *old_engine_type= engine_type;
uint i= 0;
- handler *file;
uint no_parts= part_info->partitions.elements;
DBUG_ENTER("check_native_partitioned");
@@ -5476,7 +5477,6 @@ static void set_part_info_exec_log_entry(partition_info *part_info,
static bool write_log_drop_shadow_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
{
- DDL_LOG_ENTRY ddl_log_entry;
partition_info *part_info= lpt->part_info;
DDL_LOG_MEMORY_ENTRY *log_entry;
DDL_LOG_MEMORY_ENTRY *exec_log_entry= NULL;
@@ -5521,7 +5521,6 @@ error:
static bool write_log_rename_frm(ALTER_PARTITION_PARAM_TYPE *lpt)
{
- DDL_LOG_ENTRY ddl_log_entry;
partition_info *part_info= lpt->part_info;
DDL_LOG_MEMORY_ENTRY *log_entry;
DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
@@ -5574,7 +5573,6 @@ error:
static bool write_log_drop_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
{
- DDL_LOG_ENTRY ddl_log_entry;
partition_info *part_info= lpt->part_info;
DDL_LOG_MEMORY_ENTRY *log_entry;
DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
@@ -5688,7 +5686,6 @@ error:
static bool write_log_final_change_partition(ALTER_PARTITION_PARAM_TYPE *lpt)
{
- DDL_LOG_ENTRY ddl_log_entry;
partition_info *part_info= lpt->part_info;
DDL_LOG_MEMORY_ENTRY *log_entry;
DDL_LOG_MEMORY_ENTRY *exec_log_entry= part_info->exec_log_entry;
@@ -5746,7 +5743,6 @@ static void write_log_completed(ALTER_PARTITION_PARAM_TYPE *lpt,
{
partition_info *part_info= lpt->part_info;
uint count_loop= 0;
- bool not_success;
DDL_LOG_MEMORY_ENTRY *log_entry= part_info->exec_log_entry;
DBUG_ENTER("write_log_completed");
@@ -7055,7 +7051,6 @@ static uint32 get_next_partition_via_walking(PARTITION_ITERATOR *part_iter)
static uint32 get_next_subpartition_via_walking(PARTITION_ITERATOR *part_iter)
{
- uint32 part_id;
Field *field= part_iter->part_info->subpart_field_array[0];
if (part_iter->field_vals.cur == part_iter->field_vals.end)
{
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 8982db9e6b3..8cd4c661fb8 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -734,7 +734,7 @@ void plugin_load(void)
TABLE_LIST tables;
TABLE *table;
READ_RECORD read_record_info;
- int error, i;
+ int error;
MEM_ROOT mem;
THD *new_thd;
DBUG_ENTER("plugin_load");
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 0c6a5fe5846..6f810f13d9c 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -2920,7 +2920,6 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
{
Statement stmt_backup;
Query_arena *old_stmt_arena;
- Item *old_free_list;
bool error= TRUE;
statistic_increment(thd->status_var.com_stmt_execute, &LOCK_status);
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index cd2c3c348d4..c37227e7d28 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -35,7 +35,7 @@ static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list);
bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list, bool silent)
{
bool error= 1;
- TABLE_LIST *ren_table= 0, *new_table;
+ TABLE_LIST *ren_table= 0;
int to_table;
char *rename_log_table[2]= {NULL, NULL};
int disable_logs= 0;
@@ -353,7 +353,7 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
static TABLE_LIST *
rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error)
{
- TABLE_LIST *ren_table,*new_table, *tmp_table;
+ TABLE_LIST *ren_table, *new_table;
DBUG_ENTER("rename_tables");
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 9a4e93dfb94..9a64055f2e3 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -82,7 +82,7 @@ static store_key *get_store_key(THD *thd,
static bool make_simple_join(JOIN *join,TABLE *tmp_table);
static void make_outerjoin_info(JOIN *join);
static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *item);
-static void make_join_readinfo(JOIN *join,uint options);
+static void make_join_readinfo(JOIN *join, ulonglong options);
static bool only_eq_ref_tables(JOIN *join, ORDER *order, table_map tables);
static void update_depend_map(JOIN *join);
static void update_depend_map(JOIN *join, ORDER *order);
@@ -90,7 +90,7 @@ static ORDER *remove_const(JOIN *join,ORDER *first_order,COND *cond,
bool change_list, bool *simple_order);
static int return_zero_rows(JOIN *join, select_result *res,TABLE_LIST *tables,
List<Item> &fields, bool send_row,
- uint select_options, const char *info,
+ ulonglong select_options, const char *info,
Item *having);
static COND *build_equal_items(THD *thd, COND *cond,
COND_EQUAL *inherited,
@@ -114,7 +114,7 @@ static bool resolve_nested_join (TABLE_LIST *table);
static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item);
static bool open_tmp_table(TABLE *table);
static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
- ulong options);
+ ulonglong options);
static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table,
Procedure *proc);
@@ -1991,7 +1991,7 @@ bool
mysql_select(THD *thd, Item ***rref_pointer_array,
TABLE_LIST *tables, uint wild_num, List<Item> &fields,
COND *conds, uint og_num, ORDER *order, ORDER *group,
- Item *having, ORDER *proc_param, ulong select_options,
+ Item *having, ORDER *proc_param, ulonglong select_options,
select_result *result, SELECT_LEX_UNIT *unit,
SELECT_LEX *select_lex)
{
@@ -4206,7 +4206,7 @@ choose_plan(JOIN *join, table_map join_tables)
{
uint search_depth= join->thd->variables.optimizer_search_depth;
uint prune_level= join->thd->variables.optimizer_prune_level;
- bool straight_join= join->select_options & SELECT_STRAIGHT_JOIN;
+ bool straight_join= test(join->select_options & SELECT_STRAIGHT_JOIN);
DBUG_ENTER("choose_plan");
join->cur_embedding_map= 0;
@@ -4808,8 +4808,6 @@ static void
find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
double read_time)
{
- ha_rows rec;
- double tmp;
THD *thd= join->thd;
if (!rest_tables)
{
@@ -5941,7 +5939,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
}
static void
-make_join_readinfo(JOIN *join, uint options)
+make_join_readinfo(JOIN *join, ulonglong options)
{
uint i;
bool statistics= test(!(join->select_options & SELECT_DESCRIBE));
@@ -6598,7 +6596,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
static int
return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
- List<Item> &fields, bool send_row, uint select_options,
+ List<Item> &fields, bool send_row, ulonglong select_options,
const char *info, Item *having)
{
DBUG_ENTER("return_zero_rows");
@@ -7149,7 +7147,6 @@ static COND *build_equal_items_for_cond(COND *cond,
Item_equal *item_equal;
uint members;
COND_EQUAL cond_equal;
- COND *new_cond;
cond_equal.upper_levels= inherited;
if (cond->type() == Item::COND_ITEM)
@@ -9518,13 +9515,14 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
param->recinfo=recinfo;
store_record(table,s->default_values); // Make empty default record
- if (thd->variables.tmp_table_size == ~(ulong) 0) // No limit
+ if (thd->variables.tmp_table_size == ~ (ulonglong) 0) // No limit
share->max_rows= ~(ha_rows) 0;
else
- share->max_rows= (((share->db_type == heap_hton) ?
- min(thd->variables.tmp_table_size,
- thd->variables.max_heap_table_size) :
- thd->variables.tmp_table_size)/ share->reclength);
+ share->max_rows= (ha_rows) (((share->db_type == heap_hton) ?
+ min(thd->variables.tmp_table_size,
+ thd->variables.max_heap_table_size) :
+ thd->variables.tmp_table_size) /
+ share->reclength);
set_if_bigger(share->max_rows,1); // For dummy start options
/*
Push the LIMIT clause to the temporary table creation, so that we
@@ -9836,7 +9834,7 @@ static bool open_tmp_table(TABLE *table)
static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
- ulong options)
+ ulonglong options)
{
int error;
MI_KEYDEF keydef;
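
The create_tmp_table() row-limit computation rewritten above is simple in practice; a worked example with assumed settings:

/* Assume a HEAP temporary table with tmp_table_size = 16 MiB,
   max_heap_table_size = 16 MiB and reclength = 128 bytes. */
ha_rows max_rows= (ha_rows) (min(16ULL*1024*1024, 16ULL*1024*1024) / 128);
/* max_rows == 131072; when tmp_table_size == ~(ulonglong) 0 the limit
   becomes ~(ha_rows) 0 instead, i.e. no limit. */
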
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 227cc37cdaf..d2939ca7caa 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -696,7 +696,6 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
HA_CREATE_INFO *create_info)
{
Security_context *sctx= thd->security_ctx;
- int length;
char buff[2048];
String buffer(buff, sizeof(buff), system_charset_info);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -1028,7 +1027,7 @@ int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
HA_CREATE_INFO *create_info_arg)
{
List<Item> field_list;
- char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end, uname[NAME_LEN*3+1];
+ char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end;
const char *alias;
String type(tmp, sizeof(tmp), system_charset_info);
Field **ptr,*field;
@@ -2752,7 +2751,6 @@ int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond)
INDEX_FIELD_VALUES idx_field_vals;
List<char> files;
char *file_name;
- uint length;
bool with_i_schema;
HA_CREATE_INFO create;
TABLE *table= tables->table;
@@ -3379,7 +3377,7 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
restore_record(table, s->default_values);
if (!wild || !wild[0] || !wild_compare(sp_name.ptr(), wild, 0))
{
- int enum_idx= proc_table->field[5]->val_int();
+ int enum_idx= (int) proc_table->field[5]->val_int();
table->field[3]->store(sp_name.ptr(), sp_name.length(), cs);
get_field(thd->mem_root, proc_table->field[3], &tmp_string);
table->field[0]->store(tmp_string.ptr(), tmp_string.length(), cs);
@@ -3992,7 +3990,6 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
char buff[61];
String tmp_res(buff, sizeof(buff), cs);
String tmp_str;
- TIME time;
TABLE *show_table= tables->table;
handler *file;
#ifdef WITH_PARTITION_STORAGE_ENGINE
@@ -4017,7 +4014,6 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
List_iterator<partition_element> part_it(part_info->partitions);
uint part_pos= 0, part_id= 0;
uint no_parts= part_info->no_parts;
- handler *part_file;
restore_record(table, s->default_values);
table->field[1]->store(base_name, strlen(base_name), cs);
@@ -4099,8 +4095,6 @@ static int get_schema_partitions_record(THD *thd, struct st_table_list *tables,
while ((part_elem= part_it++))
{
-
-
table->field[3]->store(part_elem->partition_name,
strlen(part_elem->partition_name), cs);
table->field[3]->set_notnull();
@@ -5026,7 +5020,6 @@ static my_bool run_hton_fill_schema_files(THD *thd, st_plugin_int *plugin,
int fill_schema_files(THD *thd, TABLE_LIST *tables, COND *cond)
{
- int i;
TABLE *table= tables->table;
DBUG_ENTER("fill_schema_files");
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 547095d191f..1924f25a76a 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -2011,7 +2011,7 @@ void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval,
int prepare_create_field(create_field *sql_field,
uint *blob_columns,
int *timestamps, int *timestamps_with_niladic,
- uint table_flags)
+ longlong table_flags)
{
DBUG_ENTER("prepare_field");
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index e835de4dedd..8ca72eab8bf 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -362,7 +362,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
TABLE *table= tables->table;
char file_buff[FN_REFLEN], trigname_buff[FN_REFLEN];
LEX_STRING file, trigname_file;
- LEX_STRING *trg_def, *name;
+ LEX_STRING *trg_def;
LEX_STRING definer_user;
LEX_STRING definer_host;
ulonglong *trg_sql_mode;
@@ -877,7 +877,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
DBUG_RETURN(1);
List_iterator_fast<LEX_STRING> it(triggers->definitions_list);
- LEX_STRING *trg_create_str, *trg_name_str;
+ LEX_STRING *trg_create_str;
ulonglong *trg_sql_mode;
if (triggers->definition_modes_list.is_empty() &&
@@ -994,7 +994,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
goto err_with_lex_cleanup;
}
- lex.sphead->set_info(0, 0, &lex.sp_chistics, *trg_sql_mode);
+ lex.sphead->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode);
triggers->bodies[lex.trg_chistics.event]
[lex.trg_chistics.action_time]= lex.sphead;
@@ -1335,7 +1335,6 @@ Table_triggers_list::change_table_name_in_triggers(THD *thd,
{
char path_buff[FN_REFLEN];
LEX_STRING *def, *on_table_name, new_def;
- ulonglong *sql_mode;
ulong save_sql_mode= thd->variables.sql_mode;
List_iterator_fast<LEX_STRING> it_def(definitions_list);
List_iterator_fast<LEX_STRING> it_on_table_name(on_table_names_list);
@@ -1349,7 +1348,7 @@ Table_triggers_list::change_table_name_in_triggers(THD *thd,
while ((def= it_def++))
{
on_table_name= it_on_table_name++;
- thd->variables.sql_mode= *(it_mode++);
+ thd->variables.sql_mode= (ulong) *(it_mode++);
/* Construct CREATE TRIGGER statement with new table name. */
buff.length(0);
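
In the trigger hunks the per-trigger sql_mode values travel as ulonglong list elements but land in thd->variables.sql_mode, which is an ulong here (see the save_sql_mode declaration above), so the narrowing is spelled out with a cast. A hedged sketch of a more defensive way to write such a narrowing in new code (the patch itself just casts; the helper name and the assert are illustrative only):

    #include <cassert>

    typedef unsigned long      ulong_;
    typedef unsigned long long ulonglong_;

    /* Narrow a stored 64-bit sql_mode into the session-variable width,
       asserting (in debug builds) that nothing is actually being dropped. */
    static ulong_ narrow_sql_mode(ulonglong_ stored_mode)
    {
      assert(stored_mode <= (ulonglong_) (ulong_) -1);
      return (ulong_) stored_mode;
    }

    int main()
    {
      ulong_ session_mode= narrow_sql_mode(0x400000ULL);   /* invented mode bits */
      (void) session_mode;
      return 0;
    }
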
diff --git a/sql/sql_union.cc b/sql/sql_union.cc
index 722a99eb2d1..0715a0c4296 100644
--- a/sql/sql_union.cc
+++ b/sql/sql_union.cc
@@ -433,7 +433,9 @@ bool st_select_lex_unit::exec()
}
/* re-enabling indexes for next subselect iteration */
if (union_distinct && table->file->enable_indexes(HA_KEY_SWITCH_ALL))
+ {
DBUG_ASSERT(0);
+ }
}
for (SELECT_LEX *sl= select_cursor; sl; sl= sl->next_select())
{
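
The sql_union.cc change only adds braces around the DBUG_ASSERT(0) that sits under an if, most likely to keep the statement well-formed and warning-free in builds where the assert macro expands to little or nothing. A tiny self-contained sketch of the pattern (macro and stub are stand-ins, not the server's definitions):

    #include <cassert>

    #ifndef MY_DBUG_OFF
    #define MY_DBUG_ASSERT(A) assert(A)        /* debug build: a real assert */
    #else
    #define MY_DBUG_ASSERT(A)                  /* release build: expands to nothing */
    #endif

    /* Stand-in for table->file->enable_indexes(); returns "no error" here. */
    static int enable_indexes_stub() { return 0; }

    int main()
    {
      /* Without braces a release build can end up with "if (...) ;", which
         draws an empty-body warning; the braced form compiles quietly
         either way. */
      if (enable_indexes_stub())
      {
        MY_DBUG_ASSERT(0);
      }
      return 0;
    }
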
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index 2575f17d256..a379ea66db6 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -118,7 +118,7 @@ int mysql_update(THD *thd,
enum enum_duplicates handle_duplicates, bool ignore)
{
bool using_limit= limit != HA_POS_ERROR;
- bool safe_update= thd->options & OPTION_SAFE_UPDATES;
+ bool safe_update= test(thd->options & OPTION_SAFE_UPDATES);
bool used_key_is_modified, transactional_table, will_batch;
bool can_compare_record;
int res;
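
In mysql_update() the OPTION_SAFE_UPDATES check is wrapped in test(), the server's small ((a) ? 1 : 0) style helper, so the flag test is normalized to 0/1 before the assignment instead of relying on an implicit conversion that some compilers warn about. A standalone illustration of the difference, using an invented option bit and a char-sized bool for the demo:

    #include <cstdio>

    #define test_(a) ((a) ? 1 : 0)               /* same idea as the server's test() */

    typedef char my_bool_;                        /* bool-sized storage, as my_bool is */
    static const unsigned long long FAKE_OPTION= 1ULL << 35;   /* invented option bit */

    int main()
    {
      unsigned long long options= FAKE_OPTION;
      my_bool_ direct=     (my_bool_) (options & FAKE_OPTION); /* high bit lost in the char: 0 */
      my_bool_ normalized= test_(options & FAKE_OPTION);       /* normalized first: 1 */
      printf("direct=%d normalized=%d\n", direct, normalized);
      return 0;
    }
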
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index 98226c1651b..7a8a85dceed 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -197,7 +197,7 @@ fill_defined_view_parts (THD *thd, TABLE_LIST *view)
lex->definer= &view->definer;
}
if (lex->create_view_algorithm == VIEW_ALGORITHM_UNDEFINED)
- lex->create_view_algorithm= decoy.algorithm;
+ lex->create_view_algorithm= (uint8) decoy.algorithm;
if (lex->create_view_suid == VIEW_SUID_DEFAULT)
lex->create_view_suid= decoy.view_suid ?
VIEW_SUID_DEFINER : VIEW_SUID_INVOKER;
@@ -675,7 +675,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view,
String str(buff,(uint32) sizeof(buff), system_charset_info);
char md5[MD5_BUFF_LENGTH];
bool can_be_merged;
- char dir_buff[FN_REFLEN], file_buff[FN_REFLEN], path_buff[FN_REFLEN];
+ char dir_buff[FN_REFLEN], path_buff[FN_REFLEN];
const uchar *endp;
LEX_STRING dir, file, path;
DBUG_ENTER("mysql_register_view");
@@ -1341,7 +1341,6 @@ bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode)
{
char path[FN_REFLEN];
TABLE_LIST *view;
- frm_type_enum type;
String non_existant_views;
char *wrong_object_db= NULL, *wrong_object_name= NULL;
bool error= FALSE;
@@ -1509,7 +1508,6 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
TABLE *table;
Field_translator *trans, *end_of_trans;
KEY *key_info, *key_info_end;
- uint i;
DBUG_ENTER("check_key_in_view");
/*
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index ec3094ca721..fcac7eb8e05 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1948,7 +1948,6 @@ sp_decl:
uint num_vars= pctx->context_var_count();
enum enum_field_types var_type= (enum enum_field_types) $4;
Item *dflt_value_item= $5;
- create_field *create_field_op;
if (!dflt_value_item)
{
@@ -3144,7 +3143,7 @@ size_number:
real_ulong_num { $$= $1;}
| IDENT
{
- ulonglong number, test_number;
+ ulonglong number;
uint text_shift_number= 0;
longlong prefix_number;
char *start_ptr= $1.str;
@@ -3653,11 +3652,9 @@ part_bit_expr:
bit_expr
{
Item *part_expr= $1;
- bool not_corr_func;
int part_expression_ok= 1;
LEX *lex= Lex;
THD *thd= YYTHD;
- longlong item_value;
Name_resolution_context *context= &lex->current_select->context;
TABLE_LIST *save_list= context->table_list;
const char *save_where= thd->where;
@@ -3798,11 +3795,11 @@ opt_part_option:
lex->part_info->default_engine_type= $4;
}
| NODEGROUP_SYM opt_equal real_ulong_num
- { Lex->part_info->curr_part_elem->nodegroup_id= $3; }
+ { Lex->part_info->curr_part_elem->nodegroup_id= (uint16) $3; }
| MAX_ROWS opt_equal real_ulonglong_num
- { Lex->part_info->curr_part_elem->part_max_rows= $3; }
+ { Lex->part_info->curr_part_elem->part_max_rows= (ha_rows) $3; }
| MIN_ROWS opt_equal real_ulonglong_num
- { Lex->part_info->curr_part_elem->part_min_rows= $3; }
+ { Lex->part_info->curr_part_elem->part_min_rows= (ha_rows) $3; }
| DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
{ Lex->part_info->curr_part_elem->data_file_name= $4.str; }
| INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys
diff --git a/sql/table.cc b/sql/table.cc
index 4b72cd3cb31..cacfdbd1c11 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -1031,7 +1031,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
{
uint primary_key=(uint) (find_type((char*) primary_key_name,
&share->keynames, 3) - 1);
- uint ha_option= handler_file->ha_table_flags();
+ longlong ha_option= handler_file->ha_table_flags();
keyinfo= share->key_info;
key_part= keyinfo->key_part;
@@ -1078,6 +1078,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head,
goto err;
}
field= key_part->field= share->field[key_part->fieldnr-1];
+ key_part->type= field->key_type();
if (field->null_ptr)
{
key_part->null_offset=(uint) ((byte*) field->null_ptr -
@@ -2078,7 +2079,6 @@ File create_frm(THD *thd, const char *name, const char *db,
HA_CREATE_INFO *create_info, uint keys)
{
register File file;
- uint key_length;
ulong length;
char fill[IO_SIZE];
int create_flags= O_RDWR | O_TRUNC;
diff --git a/sql/table.h b/sql/table.h
index 55f889f42b9..13666c82f4b 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -116,6 +116,7 @@ class Table_triggers_list;
typedef struct st_table_share
{
+ st_table_share() {} /* Remove gcc warning */
/* hash of field names (contains pointers to elements of field array) */
HASH name_hash; /* hash of field names */
MEM_ROOT mem_root;
diff --git a/sql/tztime.cc b/sql/tztime.cc
index 6acf17520d9..c44a907c07b 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1743,9 +1743,9 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
tz_leapcnt++;
DBUG_PRINT("info",
- ("time_zone_leap_second table: tz_leapcnt:%u tt_time: %lu offset: %ld",
- tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
- tz_lsis[tz_leapcnt-1].ls_corr));
+ ("time_zone_leap_second table: tz_leapcnt: %u tt_time: %lu offset: %ld",
+ tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
+ tz_lsis[tz_leapcnt-1].ls_corr));
res= table->file->index_next(table->record[0]);
}
@@ -2057,7 +2057,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
tz_info->timecnt++;
DBUG_PRINT("info",
- ("time_zone_transition table: tz_id: %u tt_time:%lu tt_id: %u",
+ ("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u",
tzid, (ulong) ttime, ttid));
res= table->file->index_next_same(table->record[0],
diff --git a/sql/uniques.cc b/sql/uniques.cc
index ad074f8b2b0..c7bdbdeb207 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -55,18 +55,19 @@ int unique_write_to_ptrs(gptr key, element_count count, Unique *unique)
}
Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
- uint size_arg, ulong max_in_memory_size_arg)
+ uint size_arg, ulonglong max_in_memory_size_arg)
:max_in_memory_size(max_in_memory_size_arg), size(size_arg), elements(0)
{
my_b_clear(&file);
- init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL,
- comp_func_fixed_arg);
+ init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, 0,
+ NULL, comp_func_fixed_arg);
/* If the following fails the next add will also fail */
my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
/*
If you change the following, change it in get_max_elements function, too.
*/
- max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size);
+ max_elements= (ulong) (max_in_memory_size /
+ ALIGN_SIZE(sizeof(TREE_ELEMENT)+size));
VOID(open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
MYF(MY_WME)));
}
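
With max_in_memory_size now an ulonglong, the constructor's capacity math divides the 64-bit budget by the aligned per-element footprint and only then narrows the count back to ulong. A runnable sketch of the same arithmetic with invented sizes (align_size() mimics ALIGN_SIZE(), and the element overhead stands in for sizeof(TREE_ELEMENT)):

    #include <cstddef>
    #include <cstdio>

    /* Round x up to an 8-byte boundary, in the spirit of ALIGN_SIZE(). */
    static size_t align_size(size_t x)
    {
      const size_t a= 8;
      return (x + a - 1) & ~(a - 1);
    }

    int main()
    {
      unsigned long long max_in_memory_size= 8ULL * 1024 * 1024; /* e.g. an 8MB budget */
      size_t tree_element_overhead= 24;      /* invented stand-in for sizeof(TREE_ELEMENT) */
      size_t key_size= 8;                    /* one unique key */

      unsigned long max_elements=
        (unsigned long) (max_in_memory_size /
                         align_size(tree_element_overhead + key_size));

      printf("max_elements=%lu\n", max_elements);  /* 8MB / 32 bytes = 262144 */
      return 0;
    }
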
@@ -260,15 +261,15 @@ static double get_merge_many_buffs_cost(uint *buffer,
*/
double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size,
- ulong max_in_memory_size)
+ ulonglong max_in_memory_size)
{
ulong max_elements_in_tree;
ulong last_tree_elems;
int n_full_trees; /* number of trees in unique - 1 */
double result;
- max_elements_in_tree=
- max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size);
+ max_elements_in_tree= ((ulong) max_in_memory_size /
+ ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
n_full_trees= nkeys / max_elements_in_tree;
last_tree_elems= nkeys % max_elements_in_tree;
@@ -386,9 +387,11 @@ C_MODE_END
/*
DESCRIPTION
+
Function is very similar to merge_buffers, but instead of writing sorted
unique keys to the output file, it invokes walk_action for each key.
This saves I/O if you need to pass through all unique keys only once.
+
SYNOPSIS
merge_walk()
All params are 'IN' (but see comment for begin, end):
@@ -416,7 +419,7 @@ C_MODE_END
<> 0 error
*/
-static bool merge_walk(uchar *merge_buffer, uint merge_buffer_size,
+static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size,
uint key_length, BUFFPEK *begin, BUFFPEK *end,
tree_walk_action walk_action, void *walk_action_arg,
qsort_cmp2 compare, void *compare_arg,
@@ -425,14 +428,15 @@ static bool merge_walk(uchar *merge_buffer, uint merge_buffer_size,
BUFFPEK_COMPARE_CONTEXT compare_context = { compare, compare_arg };
QUEUE queue;
if (end <= begin ||
- merge_buffer_size < key_length * (end - begin + 1) ||
- init_queue(&queue, end - begin, offsetof(BUFFPEK, key), 0,
+ merge_buffer_size < (ulong) (key_length * (end - begin + 1)) ||
+ init_queue(&queue, (uint) (end - begin), offsetof(BUFFPEK, key), 0,
buffpek_compare, &compare_context))
return 1;
/* we need space for one key when a piece of merge buffer is re-read */
merge_buffer_size-= key_length;
uchar *save_key_buff= merge_buffer + merge_buffer_size;
- uint max_key_count_per_piece= merge_buffer_size/(end-begin)/key_length;
+ uint max_key_count_per_piece= (uint) (merge_buffer_size/(end-begin) /
+ key_length);
/* if piece_size is aligned reuse_freed_buffer will always hit */
uint piece_size= max_key_count_per_piece * key_length;
uint bytes_read; /* to hold return value of read_to_buffer */
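
The merge_walk() hunk above keeps the same buffer-splitting scheme but makes the integer conversions explicit now that the buffer size arrives as ulong: one key slot is reserved for re-reads, the rest is divided evenly across the on-disk pieces, and each share is rounded down to whole keys. A compact sketch with invented numbers:

    #include <cstdio>

    int main()
    {
      unsigned long merge_buffer_size= 1024 * 1024;  /* invented 1MB merge buffer */
      unsigned int  key_length= 16;
      long          n_pieces= 7;                     /* plays the role of end - begin */

      merge_buffer_size-= key_length;                /* reserve space for one re-read key */
      unsigned int max_key_count_per_piece=
        (unsigned int) (merge_buffer_size / n_pieces / key_length);
      unsigned int piece_size= max_key_count_per_piece * key_length;

      printf("keys per piece=%u  piece size=%u bytes\n",
             max_key_count_per_piece, piece_size);
      return 0;
    }
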
@@ -548,6 +552,9 @@ end:
bool Unique::walk(tree_walk_action action, void *walk_action_arg)
{
+ int res;
+ uchar *merge_buffer;
+
if (elements == 0) /* the whole tree is in memory */
return tree_walk(&tree, action, walk_action_arg, left_root_right);
@@ -556,15 +563,14 @@ bool Unique::walk(tree_walk_action action, void *walk_action_arg)
return 1;
if (flush_io_cache(&file) || reinit_io_cache(&file, READ_CACHE, 0L, 0, 0))
return 1;
- uchar *merge_buffer= (uchar *) my_malloc(max_in_memory_size, MYF(0));
- if (merge_buffer == 0)
+ if (!(merge_buffer= (uchar *) my_malloc((ulong) max_in_memory_size, MYF(0))))
return 1;
- int res= merge_walk(merge_buffer, max_in_memory_size, size,
- (BUFFPEK *) file_ptrs.buffer,
- (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements,
- action, walk_action_arg,
- tree.compare, tree.custom_arg, &file);
- x_free(merge_buffer);
+ res= merge_walk(merge_buffer, (ulong) max_in_memory_size, size,
+ (BUFFPEK *) file_ptrs.buffer,
+ (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements,
+ action, walk_action_arg,
+ tree.compare, tree.custom_arg, &file);
+ my_free((char*) merge_buffer, MYF(0));
return res;
}
@@ -615,7 +621,7 @@ bool Unique::get(TABLE *table)
sort_param.sort_form=table;
sort_param.rec_length= sort_param.sort_length= sort_param.ref_length=
size;
- sort_param.keys= max_in_memory_size / sort_param.sort_length;
+ sort_param.keys= (uint) (max_in_memory_size / sort_param.sort_length);
sort_param.not_killable=1;
if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) *