author     unknown <msvensson@neptunus.(none)>   2006-12-04 19:28:38 +0100
committer  unknown <msvensson@neptunus.(none)>   2006-12-04 19:28:38 +0100
commit     2e5a6f9eaabebedcf10f189427fb56577fc9780c (patch)
tree       4709d201720e18f147c369d52ae2cde47bec6b24 /sql
parent     9f1fa169c59c29fa1d9c877efd14ffac5ba87448 (diff)
parent     85b1701ab7292eeee4ce4b253909ae68345be43c (diff)
download   mariadb-git-2e5a6f9eaabebedcf10f189427fb56577fc9780c.tar.gz
Merge neptunus.(none):/home/msvensson/mysql/mysql-5.0
into neptunus.(none):/home/msvensson/mysql/mysql-5.0-maint

BitKeeper/etc/ignore: auto-union
Docs/Makefile.am: Auto merged
Makefile.am: Auto merged
client/mysql.cc: Auto merged
client/mysqltest.c: Auto merged
include/Makefile.am: Auto merged
myisam/myisampack.c: Auto merged
mysql-test/lib/mtr_io.pl: Auto merged
mysql-test/lib/mtr_process.pl: Auto merged
mysql-test/mysql-test-run.pl: Auto merged
mysql-test/r/view_grant.result: Auto merged
mysql-test/t/view_grant.test: Auto merged
sql/handler.cc: Auto merged
sql/item.cc: Auto merged
sql/item_func.cc: Auto merged
sql/item_func.h: Auto merged
sql/item_timefunc.cc: Auto merged
sql/mysql_priv.h: Auto merged
sql/mysqld.cc: Auto merged
sql/sp.cc: Auto merged
sql/sql_base.cc: Auto merged
sql-common/my_time.c: Auto merged
sql/sql_handler.cc: Auto merged
extra/yassl/taocrypt/include/algebra.hpp: Manual merge with import of upstream yaSSL
Diffstat (limited to 'sql')
-rw-r--r--  sql/Makefile.am       |   7
-rw-r--r--  sql/field.cc          |  86
-rw-r--r--  sql/gen_lex_hash.cc   |   5
-rw-r--r--  sql/ha_archive.cc     |  19
-rw-r--r--  sql/ha_federated.cc   |   6
-rw-r--r--  sql/ha_heap.cc        |   2
-rw-r--r--  sql/ha_innodb.cc      |   1
-rw-r--r--  sql/ha_ndbcluster.cc  |   4
-rw-r--r--  sql/handler.cc        |   7
-rw-r--r--  sql/item.cc           |  28
-rw-r--r--  sql/item.h            |   6
-rw-r--r--  sql/item_cmpfunc.cc   |   1
-rw-r--r--  sql/item_func.cc      |  27
-rw-r--r--  sql/item_func.h       |   4
-rw-r--r--  sql/item_strfunc.cc   |  54
-rw-r--r--  sql/item_strfunc.h    |   6
-rw-r--r--  sql/item_subselect.h  |   2
-rw-r--r--  sql/item_sum.cc       |  22
-rw-r--r--  sql/item_sum.h        |   4
-rw-r--r--  sql/item_timefunc.cc  |   3
-rw-r--r--  sql/log.cc            |   8
-rw-r--r--  sql/mysql_priv.h      |   2
-rw-r--r--  sql/mysqld.cc         |  19
-rw-r--r--  sql/opt_range.cc      |  10
-rw-r--r--  sql/password.c        |   4
-rw-r--r--  sql/set_var.cc        |  21
-rw-r--r--  sql/slave.cc          |   2
-rw-r--r--  sql/sp.cc             |  15
-rw-r--r--  sql/sp_cache.cc       |   1
-rw-r--r--  sql/sp_head.cc        |   9
-rw-r--r--  sql/sql_acl.cc        |   2
-rw-r--r--  sql/sql_base.cc       |   8
-rw-r--r--  sql/sql_cache.cc      |  57
-rw-r--r--  sql/sql_class.h       |  17
-rw-r--r--  sql/sql_db.cc         |   2
-rw-r--r--  sql/sql_delete.cc     |   2
-rw-r--r--  sql/sql_handler.cc    |   1
-rw-r--r--  sql/sql_lex.h         |   2
-rw-r--r--  sql/sql_prepare.cc    |   1
-rw-r--r--  sql/sql_rename.cc     |   2
-rw-r--r--  sql/sql_select.cc     |  30
-rw-r--r--  sql/sql_show.cc       |   6
-rw-r--r--  sql/sql_string.h      |   6
-rw-r--r--  sql/sql_trigger.cc    | 113
-rw-r--r--  sql/sql_update.cc     |   2
-rw-r--r--  sql/sql_view.cc       |   3
-rw-r--r--  sql/sql_yacc.yy       |   8
-rw-r--r--  sql/table.cc          |  14
-rw-r--r--  sql/tztime.cc         |  10
-rw-r--r--  sql/uniques.cc        |  46
50 files changed, 444 insertions, 273 deletions
diff --git a/sql/Makefile.am b/sql/Makefile.am
index 6c685ba67c6..cbb87f16d80 100644
--- a/sql/Makefile.am
+++ b/sql/Makefile.am
@@ -148,12 +148,15 @@ mysql_tzinfo_to_sql.o: $(mysql_tzinfo_to_sql_SOURCES)
sql_yacc.cc: sql_yacc.yy
sql_yacc.h: sql_yacc.yy
+# Be careful here, note that we use VPATH and might or might not have
+# a pregenerated "sql_yacc.cc" in $(srcdir) or one we just built in
+# $(builddir). And it has to work if $(srcdir) == $(builddir).
sql_yacc.o: sql_yacc.cc sql_yacc.h $(HEADERS)
- @SED@ -e 's/__attribute__ ((__unused__))//' sql_yacc.cc > sql_yacc.cc-new
+ @SED@ -e 's/__attribute__ ((__unused__))//' $< > sql_yacc.cc-new
@MV@ sql_yacc.cc-new sql_yacc.cc
@echo "Note: The following compile may take a long time."
@echo "If it fails, re-run configure with --with-low-memory"
- $(CXXCOMPILE) $(LM_CFLAGS) -c $<
+ $(CXXCOMPILE) $(LM_CFLAGS) -c sql_yacc.cc
# This generates lex_hash.h
# NOTE Built sources should depend on their sources not the tool
diff --git a/sql/field.cc b/sql/field.cc
index 1cfd0843179..4fe533c3398 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -47,6 +47,8 @@ uchar Field_null::null[1]={1};
const char field_separator=',';
#define DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE 320
+#define LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE 128
+#define DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE 128
#define BLOB_PACK_LENGTH_TO_MAX_LENGH(arg) \
((ulong) ((LL(1) << min(arg, 4) * 8) - LL(1)))
@@ -4813,7 +4815,7 @@ int Field_time::store_time(TIME *ltime, timestamp_type type)
(ltime->minute * 100 + ltime->second);
if (ltime->neg)
tmp= -tmp;
- return Field_time::store((longlong) tmp);
+ return Field_time::store((longlong) tmp, FALSE);
}
@@ -5409,11 +5411,11 @@ int Field_newdate::store_time(TIME *ltime,timestamp_type type)
if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME)
{
tmp=ltime->year*16*32+ltime->month*32+ltime->day;
- if ((my_bool)check_date(ltime, tmp,
- (TIME_FUZZY_DATE |
- (current_thd->variables.sql_mode &
- (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))), &error))
+ if (check_date(ltime, tmp != 0,
+ (TIME_FUZZY_DATE |
+ (current_thd->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES))), &error))
{
char buff[12];
String str(buff, sizeof(buff), &my_charset_latin1);
@@ -5633,11 +5635,11 @@ int Field_datetime::store_time(TIME *ltime,timestamp_type type)
{
tmp=((ltime->year*10000L+ltime->month*100+ltime->day)*LL(1000000)+
(ltime->hour*10000L+ltime->minute*100+ltime->second));
- if ((my_bool)check_date(ltime, tmp,
- (TIME_FUZZY_DATE |
- (current_thd->variables.sql_mode &
- (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
- MODE_INVALID_DATES))), &error))
+ if (check_date(ltime, tmp != 0,
+ (TIME_FUZZY_DATE |
+ (current_thd->variables.sql_mode &
+ (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE |
+ MODE_INVALID_DATES))), &error))
{
char buff[19];
String str(buff, sizeof(buff), &my_charset_latin1);
@@ -6056,19 +6058,49 @@ int Field_longstr::store_decimal(const my_decimal *d)
double Field_string::val_real(void)
{
- int not_used;
- char *end_not_used;
+ int error;
+ char *end;
CHARSET_INFO *cs= charset();
- return my_strntod(cs,ptr,field_length,&end_not_used,&not_used);
+ double result;
+
+ result= my_strntod(cs,ptr,field_length,&end,&error);
+ if (!table->in_use->no_errors &&
+ (error || (field_length != (uint32)(end - ptr) &&
+ !check_if_only_end_space(cs, end, ptr + field_length))))
+ {
+ char buf[DOUBLE_TO_STRING_CONVERSION_BUFFER_SIZE];
+ String tmp(buf, sizeof(buf), cs);
+ tmp.copy(ptr, field_length, cs);
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_TRUNCATED_WRONG_VALUE,
+ ER(ER_TRUNCATED_WRONG_VALUE),
+ "DOUBLE", tmp.c_ptr());
+ }
+ return result;
}
longlong Field_string::val_int(void)
{
- int not_used;
- char *end_not_used;
- CHARSET_INFO *cs=charset();
- return my_strntoll(cs,ptr,field_length,10,&end_not_used,&not_used);
+ int error;
+ char *end;
+ CHARSET_INFO *cs= charset();
+ longlong result;
+
+ result= my_strntoll(cs,ptr,field_length,10,&end,&error);
+ if (!table->in_use->no_errors &&
+ (error || (field_length != (uint32)(end - ptr) &&
+ !check_if_only_end_space(cs, end, ptr + field_length))))
+ {
+ char buf[LONGLONG_TO_STRING_CONVERSION_BUFFER_SIZE];
+ String tmp(buf, sizeof(buf), cs);
+ tmp.copy(ptr, field_length, cs);
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_TRUNCATED_WRONG_VALUE,
+ ER(ER_TRUNCATED_WRONG_VALUE),
+ "INTEGER", tmp.c_ptr());
+ }
+ return result;
}
@@ -6085,8 +6117,20 @@ String *Field_string::val_str(String *val_buffer __attribute__((unused)),
my_decimal *Field_string::val_decimal(my_decimal *decimal_value)
{
- str2my_decimal(E_DEC_FATAL_ERROR, ptr, field_length, charset(),
+ int err= str2my_decimal(E_DEC_FATAL_ERROR, ptr, field_length, charset(),
decimal_value);
+ if (!table->in_use->no_errors && err)
+ {
+ char buf[DECIMAL_TO_STRING_CONVERSION_BUFFER_SIZE];
+ CHARSET_INFO *cs= charset();
+ String tmp(buf, sizeof(buf), cs);
+ tmp.copy(ptr, field_length, cs);
+ push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
+ ER_TRUNCATED_WRONG_VALUE,
+ ER(ER_TRUNCATED_WRONG_VALUE),
+ "DECIMAL", tmp.c_ptr());
+ }
+
return decimal_value;
}
@@ -8076,7 +8120,7 @@ int Field_bit::store_decimal(const my_decimal *val)
{
int err= 0;
longlong i= convert_decimal2longlong(val, 1, &err);
- return test(err | store(i));
+ return test(err | store(i, TRUE));
}
@@ -8229,7 +8273,7 @@ Field_bit_as_char::Field_bit_as_char(char *ptr_arg, uint32 len_arg,
int Field_bit_as_char::store(const char *from, uint length, CHARSET_INFO *cs)
{
int delta;
- uchar bits= field_length & 7;
+ uchar bits= (uchar) (field_length & 7);
for (; length && !*from; from++, length--); // skip left 0's
delta= bytes_in_rec - length;
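The val_real()/val_int()/val_decimal() hunks above share one pattern: run the charset-aware conversion, then warn only when the unconsumed tail holds more than trailing spaces (the check_if_only_end_space() test). The following is a minimal standalone sketch of that pattern, using standard-library calls instead of my_strntod() and assuming a NUL-terminated buffer; it is an illustration, not the server code.

#include <cstdio>
#include <cstdlib>

/* True when every character in [str, end) is a space, i.e. the conversion
   consumed everything that mattered. */
static bool only_end_space(const char *str, const char *end)
{
  while (str < end && *str == ' ')
    str++;
  return str == end;
}

/* Convert and warn only if real data (not just padding) was ignored. */
static double val_real_with_warning(const char *ptr, size_t field_length)
{
  char *end;
  double result= strtod(ptr, &end);
  if ((size_t) (end - ptr) != field_length &&
      !only_end_space(end, ptr + field_length))
    fprintf(stderr, "Truncated incorrect DOUBLE value: '%s'\n", ptr);
  return result;
}

int main()
{
  printf("%f\n", val_real_with_warning("12.5   ", 7));  /* only padding: silent */
  printf("%f\n", val_real_with_warning("12.5abc", 7));  /* real data lost: warns */
  return 0;
}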
diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc
index 5a8bd48d699..2674b2e65f7 100644
--- a/sql/gen_lex_hash.cc
+++ b/sql/gen_lex_hash.cc
@@ -206,9 +206,10 @@ void insert_symbols()
void insert_sql_functions()
{
- size_t i= 0;
+ int i= 0;
SYMBOL *cur;
- for (cur= sql_functions; i<array_elements(sql_functions); cur++, i++){
+ for (cur= sql_functions; i < (int) array_elements(sql_functions); cur++, i++)
+ {
hash_lex_struct *root=
get_hash_struct_by_len(&root_by_len,cur->length,&max_len);
insert_into_hash(root,cur->name,0,-i-1,1);
diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc
index 113008c4885..e3f979952e0 100644
--- a/sql/ha_archive.cc
+++ b/sql/ha_archive.cc
@@ -211,7 +211,7 @@ bool archive_db_init()
max_zfile_size= INT_MAX16;
break;
case 8:
- max_zfile_size= LONGLONG_MAX;
+ max_zfile_size= (z_off_t) LONGLONG_MAX;
break;
case 4:
default:
@@ -327,8 +327,8 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows)
DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0]));
DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1]));
- DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lld", *rows));
- DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lld", check_point));
+ DBUG_PRINT("ha_archive::read_meta_file", ("Rows %lu", (ulong) *rows));
+ DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %lu", (ulong) check_point));
DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)meta_buffer[18]));
if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) ||
@@ -359,8 +359,8 @@ int ha_archive::write_meta_file(File meta_file, ha_rows rows, bool dirty)
*(meta_buffer + 18)= (uchar)dirty;
DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", (uint)ARCHIVE_CHECK_HEADER));
DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", (uint)ARCHIVE_VERSION));
- DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", (ulonglong)rows));
- DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", check_point));
+ DBUG_PRINT("ha_archive::write_meta_file", ("Rows %lu", (ulong)rows));
+ DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %lu", (ulong) check_point));
DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty));
VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0)));
@@ -495,7 +495,7 @@ int ha_archive::init_archive_writer()
}
share->archive_write_open= TRUE;
info(HA_STATUS_TIME);
- share->approx_file_size= data_file_length;
+ share->approx_file_size= (ulong) data_file_length;
DBUG_RETURN(0);
}
@@ -676,7 +676,7 @@ int ha_archive::real_write_row(byte *buf, gzFile writer)
if (share->approx_file_size > max_zfile_size - total_row_length)
{
info(HA_STATUS_TIME);
- share->approx_file_size= data_file_length;
+ share->approx_file_size= (ulong) data_file_length;
if (share->approx_file_size > max_zfile_size - total_row_length)
DBUG_RETURN(HA_ERR_RECORD_FILE_FULL);
}
@@ -783,7 +783,7 @@ int ha_archive::rnd_init(bool scan)
if (scan)
{
scan_rows= share->rows_recorded;
- DBUG_PRINT("info", ("archive will retrieve %llu rows", scan_rows));
+ DBUG_PRINT("info", ("archive will retrieve %lu rows", (ulong) scan_rows));
records= 0;
/*
@@ -1019,7 +1019,8 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
share->rows_recorded++;
}
}
- DBUG_PRINT("info", ("recovered %llu archive rows", share->rows_recorded));
+ DBUG_PRINT("info", ("recovered %lu archive rows",
+ (ulong) share->rows_recorded));
my_free((char*)buf, MYF(0));
if (rc && rc != HA_ERR_END_OF_FILE)
diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc
index 6328803c743..9abfcdc61c6 100644
--- a/sql/ha_federated.cc
+++ b/sql/ha_federated.cc
@@ -1249,7 +1249,6 @@ bool ha_federated::create_where_from_key(String *to,
if (tmp.append(FEDERATED_CLOSEPAREN))
DBUG_RETURN(1);
-next_loop:
if (store_length >= length)
break;
DBUG_PRINT("info", ("remainder %d", remainder));
@@ -1914,8 +1913,8 @@ int ha_federated::delete_row(const byte *buf)
{
DBUG_RETURN(stash_remote_error());
}
- deleted+= mysql->affected_rows;
- records-= mysql->affected_rows;
+ deleted+= (ha_rows) mysql->affected_rows;
+ records-= (ha_rows) mysql->affected_rows;
DBUG_PRINT("info",
("rows deleted %ld rows deleted for all time %ld",
(long) mysql->affected_rows, (long) deleted));
@@ -2270,7 +2269,6 @@ int ha_federated::rnd_next(byte *buf)
int ha_federated::read_next(byte *buf, MYSQL_RES *result)
{
int retval;
- my_ulonglong num_rows;
MYSQL_ROW row;
DBUG_ENTER("ha_federated::read_next");
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 34ef888a029..3cf7593bd8a 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -631,7 +631,7 @@ int ha_heap::create(const char *name, TABLE *table_arg,
}
mem_per_row+= MY_ALIGN(share->reclength + 1, sizeof(char*));
max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size /
- mem_per_row);
+ (ulonglong) mem_per_row);
if (table_arg->found_next_number_field)
{
keydef[share->next_number_index].flag|= HA_AUTO_KEY;
diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc
index a1619d8e1a1..5548cb66e7f 100644
--- a/sql/ha_innodb.cc
+++ b/sql/ha_innodb.cc
@@ -2739,7 +2739,6 @@ ha_innobase::store_key_val_for_row(
CHARSET_INFO* cs;
ulint key_len;
- ulint len;
ulint true_len;
int error=0;
ulint blob_len;
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 739fae79565..2ef16ddacbf 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -822,8 +822,8 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob,
{
char *buf= m_blobs_buffer + offset;
uint32 len= 0xffffffff; // Max uint32
- DBUG_PRINT("value", ("read blob ptr=%x len=%u",
- (UintPtr)buf, (uint)blob_len));
+ DBUG_PRINT("value", ("read blob ptr: 0x%lx len: %u",
+ (long)buf, (uint)blob_len));
if (ndb_blob->readData(buf, len) != 0)
DBUG_RETURN(-1);
DBUG_ASSERT(len == blob_len);
diff --git a/sql/handler.cc b/sql/handler.cc
index 93442b4f852..65ed7b43855 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -474,7 +474,6 @@ int ha_init()
{
int error= 0;
handlerton **types;
- show_table_alias_st *table_alias;
total_ha= savepoint_alloc_size= 0;
if (ha_init_errors())
@@ -885,8 +884,8 @@ int ha_commit_or_rollback_by_xid(XID *xid, bool commit)
if ((*types)->state == SHOW_OPTION_YES && (*types)->recover)
{
if ((*(commit ? (*types)->commit_by_xid :
- (*types)->rollback_by_xid))(xid));
- res= 0;
+ (*types)->rollback_by_xid))(xid))
+ res= 0;
}
}
return res;
@@ -1779,7 +1778,7 @@ void handler::print_error(int error, myf errflag)
{
/* Key is unknown */
str.copy("", 0, system_charset_info);
- key_nr= -1;
+ key_nr= (uint) -1;
}
else
{
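The ha_commit_or_rollback_by_xid() hunk above is a stray-semicolon fix: in the old code the if() controlled an empty statement, so res= 0 ran unconditionally. A tiny self-contained reminder of that pitfall (unrelated to the server's types):

#include <cstdio>

int main()
{
  int recovered= 0;
  int res= 1;

  if (recovered);        /* empty statement: the next line is NOT the branch */
    res= 0;              /* always executed */
  printf("buggy form: res=%d\n", res);     /* prints 0 */

  res= 1;
  if (recovered)
    res= 0;              /* only when recovered != 0 */
  printf("fixed form: res=%d\n", res);     /* prints 1 */
  return 0;
}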
diff --git a/sql/item.cc b/sql/item.cc
index e9cd19c0d31..d14956dee84 100644
--- a/sql/item.cc
+++ b/sql/item.cc
@@ -276,7 +276,6 @@ my_decimal *Item::val_decimal_from_date(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
TIME ltime;
- longlong date;
if (get_date(&ltime, TIME_FUZZY_DATE))
{
my_decimal_set_zero(decimal_value);
@@ -290,7 +289,6 @@ my_decimal *Item::val_decimal_from_time(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
TIME ltime;
- longlong date;
if (get_time(&ltime))
{
my_decimal_set_zero(decimal_value);
@@ -2175,12 +2173,6 @@ void Item_string::print(String *str)
}
-inline bool check_if_only_end_space(CHARSET_INFO *cs, char *str, char *end)
-{
- return str+ cs->cset->scan(cs, str, end, MY_SEQ_SPACES) == end;
-}
-
-
double Item_string::val_real()
{
DBUG_ASSERT(fixed == 1);
@@ -4764,6 +4756,22 @@ bool Item_field::send(Protocol *protocol, String *buffer)
}
+void Item_field::update_null_value()
+{
+ /*
+ need to set no_errors to prevent warnings about type conversion
+ popping up.
+ */
+ THD *thd= field->table->in_use;
+ int no_errors;
+
+ no_errors= thd->no_errors;
+ thd->no_errors= 1;
+ Item::update_null_value();
+ thd->no_errors= no_errors;
+}
+
+
Item_ref::Item_ref(Name_resolution_context *context_arg,
Item **item, const char *table_name_arg,
const char *field_name_arg)
@@ -5647,7 +5655,7 @@ void Item_trigger_field::set_required_privilege(bool rw)
}
-bool Item_trigger_field::set_value(THD *thd, sp_rcontext */*ctx*/, Item **it)
+bool Item_trigger_field::set_value(THD *thd, sp_rcontext * /*ctx*/, Item **it)
{
Item *item= sp_prepare_func_item(thd, it);
@@ -6121,7 +6129,7 @@ bool Item_cache_row::null_inside()
}
else
{
- values[i]->val_int();
+ values[i]->update_null_value();
if (values[i]->null_value)
return 1;
}
diff --git a/sql/item.h b/sql/item.h
index 63d89113ec1..e0451febb1e 100644
--- a/sql/item.h
+++ b/sql/item.h
@@ -704,6 +704,11 @@ public:
virtual bool is_null() { return 0; }
/*
+ Make sure the null_value member has a correct value.
+ */
+ virtual void update_null_value () { (void) val_int(); }
+
+ /*
Inform the item that there will be no distinction between its result
being FALSE or NULL.
@@ -1270,6 +1275,7 @@ public:
bool get_date_result(TIME *ltime,uint fuzzydate);
bool get_time(TIME *ltime);
bool is_null() { return field->is_null(); }
+ void update_null_value();
Item *get_tmp_table_item(THD *thd);
bool collect_item_field_processor(byte * arg);
bool find_item_in_field_list_processor(byte *arg);
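Taken together, the item.cc and item.h hunks above add a virtual update_null_value() whose default simply evaluates the item, while the Item_field override mutes conversion warnings around that evaluation. A reduced sketch of the same shape; the class names and the no_errors handling here are placeholders for illustration, not the server's types.

#include <cstdio>

struct Session { int no_errors; };
static Session session= { 0 };

struct ItemBase
{
  bool null_value;
  ItemBase() : null_value(false) {}
  virtual ~ItemBase() {}
  virtual long long val_int()= 0;
  /* Default: evaluating the item refreshes null_value as a side effect. */
  virtual void update_null_value() { (void) val_int(); }
};

struct ItemField : ItemBase
{
  long long val_int()
  {
    if (!session.no_errors)
      fprintf(stderr, "warning: converting field value to INTEGER\n");
    null_value= false;
    return 0;
  }
  /* Override: suppress conversion warnings while the value is probed. */
  void update_null_value()
  {
    int saved= session.no_errors;
    session.no_errors= 1;
    ItemBase::update_null_value();
    session.no_errors= saved;
  }
};

int main()
{
  ItemField f;
  f.update_null_value();                  /* evaluates silently */
  printf("null_value: %d\n", (int) f.null_value);
  return 0;
}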
diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc
index bc31a7203a4..29fa049b6c4 100644
--- a/sql/item_cmpfunc.cc
+++ b/sql/item_cmpfunc.cc
@@ -2497,7 +2497,6 @@ void Item_func_in::fix_length_and_dec()
if (cmp_type == STRING_RESULT)
in_item->cmp_charset= cmp_collation.collation;
}
- maybe_null= args[0]->maybe_null;
max_length= 1;
}
diff --git a/sql/item_func.cc b/sql/item_func.cc
index 96b3cc3da98..e5ac133e35e 100644
--- a/sql/item_func.cc
+++ b/sql/item_func.cc
@@ -900,7 +900,8 @@ void Item_func_signed::print(String *str)
longlong Item_func_signed::val_int_from_str(int *error)
{
- char buff[MAX_FIELD_WIDTH], *end;
+ char buff[MAX_FIELD_WIDTH], *end, *start;
+ uint32 length;
String tmp(buff,sizeof(buff), &my_charset_bin), *res;
longlong value;
@@ -916,13 +917,21 @@ longlong Item_func_signed::val_int_from_str(int *error)
return 0;
}
null_value= 0;
- end= (char*) res->ptr()+ res->length();
- value= my_strtoll10(res->ptr(), &end, error);
- if (*error > 0 || end != res->ptr()+ res->length())
+ start= (char *)res->ptr();
+ length= res->length();
+
+ end= start + length;
+ value= my_strtoll10(start, &end, error);
+ if (*error > 0 || end != start+ length)
+ {
+ char err_buff[128];
+ String err_tmp(err_buff,(uint32) sizeof(err_buff), system_charset_info);
+ err_tmp.copy(start, length, system_charset_info);
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_TRUNCATED_WRONG_VALUE,
ER(ER_TRUNCATED_WRONG_VALUE), "INTEGER",
- res->c_ptr());
+ err_tmp.c_ptr());
+ }
return value;
}
@@ -2337,7 +2346,7 @@ longlong Item_func_locate::val_int()
return 0;
/* start is now sufficiently valid to pass to charpos function */
- start= a->charpos(start);
+ start= a->charpos((int) start);
if (start + b->length() > a->length())
return 0;
@@ -2347,7 +2356,8 @@ longlong Item_func_locate::val_int()
return start + 1;
if (!cmp_collation.collation->coll->instr(cmp_collation.collation,
- a->ptr()+start, a->length()-start,
+ a->ptr()+start,
+ (uint) (a->length()-start),
b->ptr(), b->length(),
&match, 1))
return 0;
@@ -4300,7 +4310,7 @@ bool Item_func_get_user_var::eq(const Item *item, bool binary_cmp) const
bool Item_func_get_user_var::set_value(THD *thd,
- sp_rcontext */*ctx*/, Item **it)
+ sp_rcontext * /*ctx*/, Item **it)
{
Item_func_set_user_var *suv= new Item_func_set_user_var(get_name(), *it);
/*
@@ -4891,6 +4901,7 @@ Item_func_sp::cleanup()
result_field= NULL;
}
m_sp= NULL;
+ dummy_table->s= NULL;
Item_func::cleanup();
}
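In the val_int_from_str() hunk above, the warning text is built from a bounded local copy (err_tmp) instead of res->c_ptr(), presumably because the source String can sit directly on a buffer that is not NUL-terminated. A plain-C-string sketch of that defensive copy:

#include <cstdio>
#include <cstring>

/* Emit a warning for a length-counted buffer that may lack a trailing NUL;
   the 128-byte size mirrors err_buff in the hunk above. */
static void warn_truncated_integer(const char *start, size_t length)
{
  char err_buff[128];
  size_t n= length < sizeof(err_buff) - 1 ? length : sizeof(err_buff) - 1;
  memcpy(err_buff, start, n);
  err_buff[n]= '\0';
  fprintf(stderr, "Truncated incorrect INTEGER value: '%s'\n", err_buff);
}

int main()
{
  const char raw[4]= { '1', '2', 'x', '!' };   /* not NUL-terminated */
  warn_truncated_integer(raw, sizeof(raw));
  return 0;
}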
diff --git a/sql/item_func.h b/sql/item_func.h
index 4379c542e63..92bc870d509 100644
--- a/sql/item_func.h
+++ b/sql/item_func.h
@@ -157,7 +157,7 @@ public:
return (null_value=args[0]->get_time(ltime));
}
bool is_null() {
- (void) val_int(); /* Discard result. It sets null_value as side-effect. */
+ update_null_value();
return null_value;
}
void signal_divide_by_null();
@@ -241,7 +241,7 @@ public:
virtual double real_op()= 0;
virtual my_decimal *decimal_op(my_decimal *)= 0;
virtual String *str_op(String *)= 0;
- bool is_null() { (void) val_real(); return null_value; }
+ bool is_null() { update_null_value(); return null_value; }
};
/* function where type of result detected by first argument */
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index c2f16ffac10..0b48a00012a 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -80,6 +80,20 @@ String *Item_str_func::check_well_formed_result(String *str)
}
+bool Item_str_func::fix_fields(THD *thd, Item **ref)
+{
+ bool res= Item_func::fix_fields(thd, ref);
+ /*
+ In Item_str_func::check_well_formed_result() we may set null_value
+ flag on the same condition as in test() below.
+ */
+ maybe_null= (maybe_null ||
+ test(thd->variables.sql_mode &
+ (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES)));
+ return res;
+}
+
+
my_decimal *Item_str_func::val_decimal(my_decimal *decimal_value)
{
DBUG_ASSERT(fixed == 1);
@@ -969,8 +983,8 @@ String *Item_func_insert::val_str(String *str)
length= res->length() + 1;
/* start and length are now sufficiently valid to pass to charpos function */
- start= res->charpos(start);
- length= res->charpos(length, start);
+ start= res->charpos((int) start);
+ length= res->charpos((int) length, (uint32) start);
/* Re-testing with corrected params */
if (start > res->length() + 1)
@@ -978,8 +992,8 @@ String *Item_func_insert::val_str(String *str)
if (length > res->length() - start)
length= res->length() - start;
- if (res->length() - length + res2->length() >
- current_thd->variables.max_allowed_packet)
+ if ((ulonglong) (res->length() - length + res2->length()) >
+ (ulonglong) current_thd->variables.max_allowed_packet)
{
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
@@ -988,7 +1002,7 @@ String *Item_func_insert::val_str(String *str)
goto null;
}
res=copy_if_not_alloced(str,res,res->length());
- res->replace(start,length,*res2);
+ res->replace((uint32) start,(uint32) length,*res2);
return res;
null:
null_value=1;
@@ -1064,7 +1078,7 @@ String *Item_func_left::val_str(String *str)
return &my_empty_string;
if ((res->length() <= (ulonglong) length) ||
- (res->length() <= (char_pos= res->charpos(length))))
+ (res->length() <= (char_pos= res->charpos((int) length))))
return res;
tmp_value.set(*res, 0, char_pos);
@@ -1156,17 +1170,17 @@ String *Item_func_substr::val_str(String *str)
return &my_empty_string;
start= ((start < 0) ? res->numchars() + start : start - 1);
- start= res->charpos(start);
+ start= res->charpos((int) start);
if ((start < 0) || ((uint) start + 1 > res->length()))
return &my_empty_string;
- length= res->charpos(length, start);
+ length= res->charpos((int) length, (uint32) start);
tmp_length= res->length() - start;
length= min(length, tmp_length);
- if (!start && res->length() == (ulonglong) length)
+ if (!start && (longlong) res->length() == length)
return res;
- tmp_value.set(*res, (ulonglong) start, (ulonglong) length);
+ tmp_value.set(*res, (uint32) start, (uint32) length);
return &tmp_value;
}
@@ -2214,7 +2228,7 @@ String *Item_func_repeat::val_str(String *str)
char *to;
/* must be longlong to avoid truncation */
longlong tmp_count= args[1]->val_int();
- long count= tmp_count;
+ long count= (long) tmp_count;
String *res= args[0]->val_str(str);
/* Assumes that the maximum length of a String is < INT_MAX32. */
@@ -2316,7 +2330,7 @@ String *Item_func_rpad::val_str(String *str)
if (count <= (res_char_length= res->numchars()))
{ // String to pad is big enough
- res->length(res->charpos(count)); // Shorten result if longer
+ res->length(res->charpos((int) count)); // Shorten result if longer
return (res);
}
pad_char_length= rpad->numchars();
@@ -2333,7 +2347,7 @@ String *Item_func_rpad::val_str(String *str)
if (args[2]->null_value || !pad_char_length)
goto err;
res_byte_length= res->length(); /* Must be done before alloc_buffer */
- if (!(res= alloc_buffer(res,str,&tmp_value,byte_count)))
+ if (!(res= alloc_buffer(res,str,&tmp_value, (ulong) byte_count)))
goto err;
to= (char*) res->ptr()+res_byte_length;
@@ -2347,7 +2361,7 @@ String *Item_func_rpad::val_str(String *str)
}
if (count)
{
- pad_byte_length= rpad->charpos(count);
+ pad_byte_length= rpad->charpos((int) count);
memcpy(to,ptr_pad,(size_t) pad_byte_length);
to+= pad_byte_length;
}
@@ -2419,14 +2433,14 @@ String *Item_func_lpad::val_str(String *str)
if (count <= res_char_length)
{
- res->length(res->charpos(count));
+ res->length(res->charpos((int) count));
return res;
}
pad_char_length= pad->numchars();
byte_count= count * collation.collation->mbmaxlen;
- if (byte_count > current_thd->variables.max_allowed_packet)
+ if ((ulonglong) byte_count > current_thd->variables.max_allowed_packet)
{
push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_WARN_ALLOWED_PACKET_OVERFLOWED,
@@ -2435,7 +2449,8 @@ String *Item_func_lpad::val_str(String *str)
goto err;
}
- if (args[2]->null_value || !pad_char_length || str->alloc(byte_count))
+ if (args[2]->null_value || !pad_char_length ||
+ str->alloc((uint32) byte_count))
goto err;
str->length(0);
@@ -2447,7 +2462,7 @@ String *Item_func_lpad::val_str(String *str)
count-= pad_char_length;
}
if (count > 0)
- str->append(pad->ptr(), pad->charpos(count), collation.collation);
+ str->append(pad->ptr(), pad->charpos((int) count), collation.collation);
str->append(*res);
null_value= 0;
@@ -2777,7 +2792,7 @@ String *Item_load_file::val_str(String *str)
tmp_value.length(stat_info.st_size);
my_close(file, MYF(0));
null_value = 0;
- return &tmp_value;
+ DBUG_RETURN(&tmp_value);
err:
null_value = 1;
@@ -3267,4 +3282,3 @@ String *Item_func_uuid::val_str(String *str)
strmov(s+18, clock_seq_and_node_str);
return str;
}
-
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 528180b803d..fd2aaf19675 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -37,6 +37,7 @@ public:
enum Item_result result_type () const { return STRING_RESULT; }
void left_right_max_length();
String *check_well_formed_result(String *str);
+ bool fix_fields(THD *thd, Item **ref);
};
class Item_func_md5 :public Item_str_func
@@ -525,9 +526,8 @@ public:
{ collation.set(cs); }
String *val_str(String *);
void fix_length_and_dec()
- {
- maybe_null=0;
- max_length=arg_count * collation.collation->mbmaxlen;
+ {
+ max_length= arg_count * collation.collation->mbmaxlen;
}
const char *func_name() const { return "char"; }
};
diff --git a/sql/item_subselect.h b/sql/item_subselect.h
index f1be99353cc..35ded79b75d 100644
--- a/sql/item_subselect.h
+++ b/sql/item_subselect.h
@@ -91,7 +91,7 @@ public:
enum Type type() const;
bool is_null()
{
- val_int();
+ update_null_value();
return null_value;
}
bool fix_fields(THD *thd, Item **ref);
diff --git a/sql/item_sum.cc b/sql/item_sum.cc
index c2219aafd03..7b296019709 100644
--- a/sql/item_sum.cc
+++ b/sql/item_sum.cc
@@ -322,9 +322,13 @@ void Item_sum::make_field(Send_field *tmp_field)
if (args[0]->type() == Item::FIELD_ITEM && keep_field_type())
{
((Item_field*) args[0])->field->make_field(tmp_field);
- tmp_field->db_name=(char*)"";
- tmp_field->org_table_name=tmp_field->table_name=(char*)"";
- tmp_field->org_col_name=tmp_field->col_name=name;
+ /* For expressions only col_name should be non-empty string. */
+ char *empty_string= (char*)"";
+ tmp_field->db_name= empty_string;
+ tmp_field->org_table_name= empty_string;
+ tmp_field->table_name= empty_string;
+ tmp_field->org_col_name= empty_string;
+ tmp_field->col_name= name;
if (maybe_null)
tmp_field->flags&= ~NOT_NULL_FLAG;
}
@@ -1050,7 +1054,7 @@ bool Item_sum_count::add()
count++;
else
{
- (void) args[0]->val_int();
+ args[0]->update_null_value();
if (!args[0]->null_value)
count++;
}
@@ -1957,7 +1961,7 @@ void Item_sum_count::reset_field()
nr=1;
else
{
- (void) args[0]->val_int();
+ args[0]->update_null_value();
if (!args[0]->null_value)
nr=1;
}
@@ -2067,7 +2071,7 @@ void Item_sum_count::update_field()
nr++;
else
{
- (void) args[0]->val_int();
+ args[0]->update_null_value();
if (!args[0]->null_value)
nr++;
}
@@ -2547,7 +2551,7 @@ bool Item_sum_count_distinct::setup(THD *thd)
return TRUE; // End of memory
if (item->const_item())
{
- (void) item->val_int();
+ item->update_null_value();
if (item->null_value)
always_null=1;
}
@@ -3409,8 +3413,8 @@ bool Item_func_group_concat::setup(THD *thd)
duplicate values (according to the syntax of this function). If there
is no DISTINCT or ORDER BY clauses, we don't create this tree.
*/
- init_tree(tree, min(thd->variables.max_heap_table_size,
- thd->variables.sortbuff_size/16), 0,
+ init_tree(tree, (uint) min(thd->variables.max_heap_table_size,
+ thd->variables.sortbuff_size/16), 0,
tree_key_length, compare_key, 0, NULL, (void*) this);
}
diff --git a/sql/item_sum.h b/sql/item_sum.h
index c11ef7e548a..ad05b3c9d12 100644
--- a/sql/item_sum.h
+++ b/sql/item_sum.h
@@ -618,7 +618,7 @@ public:
double val_real();
longlong val_int();
my_decimal *val_decimal(my_decimal *);
- bool is_null() { (void) val_int(); return null_value; }
+ bool is_null() { update_null_value(); return null_value; }
String *val_str(String*);
enum_field_types field_type() const
{
@@ -685,7 +685,7 @@ public:
{ /* can't be fix_fields()ed */ return (longlong) rint(val_real()); }
String *val_str(String*);
my_decimal *val_decimal(my_decimal *);
- bool is_null() { (void) val_int(); return null_value; }
+ bool is_null() { update_null_value(); return null_value; }
enum_field_types field_type() const
{
return hybrid_type == DECIMAL_RESULT ?
diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc
index 26a6b3f2009..8504434aed5 100644
--- a/sql/item_timefunc.cc
+++ b/sql/item_timefunc.cc
@@ -110,7 +110,6 @@ static bool make_datetime_with_warn(date_time_format_types format, TIME *ltime,
String *str)
{
int warning= 0;
- bool rc;
if (make_datetime(format, ltime, str))
return 1;
@@ -1646,7 +1645,7 @@ double Item_func_sysdate_local::val_real()
{
DBUG_ASSERT(fixed == 1);
store_now_in_TIME(&ltime);
- return (longlong) TIME_to_ulonglong_datetime(&ltime);
+ return (double) TIME_to_ulonglong_datetime(&ltime);
}
diff --git a/sql/log.cc b/sql/log.cc
index 960fc4f60c2..71bdecdc536 100644
--- a/sql/log.cc
+++ b/sql/log.cc
@@ -1676,14 +1676,14 @@ bool MYSQL_LOG::write(Log_event *event_info)
}
trans_log->end_of_file= max_binlog_cache_size;
trans_register_ha(thd,
- thd->options & (OPTION_NOT_AUTOCOMMIT |
- OPTION_BEGIN),
+ test(thd->options & (OPTION_NOT_AUTOCOMMIT |
+ OPTION_BEGIN)),
&binlog_hton);
}
else if (!my_b_tell(trans_log))
trans_register_ha(thd,
- thd->options & (OPTION_NOT_AUTOCOMMIT |
- OPTION_BEGIN),
+ test(thd->options & (OPTION_NOT_AUTOCOMMIT |
+ OPTION_BEGIN)),
&binlog_hton);
file= trans_log;
}
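Here and in several later hunks (sql_base.cc, sql_select.cc) the changeset wraps thd->options & OPTION_... in test(). With options widened to ulonglong elsewhere in this merge, handing the raw mask to a narrower flag risks dropping exactly the bit that was tested, so the mask is normalised to 0/1 first. A sketch of the hazard, assuming a char-sized flag purely for illustration:

#include <cstdio>

#define test(a) ((a) ? 1 : 0)   /* same spirit as the server's test() macro */

int main()
{
  unsigned long long options= 1ULL << 40;       /* a high OPTION_* style bit */

  unsigned char raw= (unsigned char) (options & (1ULL << 40));            /* 0 */
  unsigned char normalised= (unsigned char) test(options & (1ULL << 40)); /* 1 */

  printf("raw narrowing: %d, with test(): %d\n", (int) raw, (int) normalised);
  return 0;
}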
diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h
index edf88eb4f6d..55b182049b6 100644
--- a/sql/mysql_priv.h
+++ b/sql/mysql_priv.h
@@ -745,7 +745,7 @@ bool handle_select(THD *thd, LEX *lex, select_result *result,
bool mysql_select(THD *thd, Item ***rref_pointer_array,
TABLE_LIST *tables, uint wild_num, List<Item> &list,
COND *conds, uint og_num, ORDER *order, ORDER *group,
- Item *having, ORDER *proc_param, ulong select_type,
+ Item *having, ORDER *proc_param, ulonglong select_type,
select_result *result, SELECT_LEX_UNIT *unit,
SELECT_LEX *select_lex);
void free_underlaid_joins(THD *thd, SELECT_LEX *select);
diff --git a/sql/mysqld.cc b/sql/mysqld.cc
index 6cfa7e205d1..9411ba2ef12 100644
--- a/sql/mysqld.cc
+++ b/sql/mysqld.cc
@@ -70,6 +70,12 @@
#define IF_PURIFY(A,B) (B)
#endif
+#if SIZEOF_CHARP == 4
+#define MAX_MEM_TABLE_SIZE ~(ulong) 0
+#else
+#define MAX_MEM_TABLE_SIZE ~(ulonglong) 0
+#endif
+
/* stack traces are only supported on linux intel */
#if defined(__linux__) && defined(__i386__) && defined(USE_PSTACK)
#define HAVE_STACK_TRACE_ON_SEGV
@@ -1532,7 +1538,7 @@ static void network_init(void)
if (strlen(mysqld_unix_port) > (sizeof(UNIXaddr.sun_path) - 1))
{
sql_print_error("The socket file path is too long (> %u): %s",
- sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
+ (uint) sizeof(UNIXaddr.sun_path) - 1, mysqld_unix_port);
unireg_abort(1);
}
if ((unix_sock= socket(AF_UNIX, SOCK_STREAM, 0)) < 0)
@@ -5729,8 +5735,9 @@ The minimum value for this variable is 4096.",
{"max_heap_table_size", OPT_MAX_HEP_TABLE_SIZE,
"Don't allow creation of heap tables bigger than this.",
(gptr*) &global_system_variables.max_heap_table_size,
- (gptr*) &max_system_variables.max_heap_table_size, 0, GET_ULONG,
- REQUIRED_ARG, 16*1024*1024L, 16384, ~0L, MALLOC_OVERHEAD, 1024, 0},
+ (gptr*) &max_system_variables.max_heap_table_size, 0, GET_ULL,
+ REQUIRED_ARG, 16*1024*1024L, 16384, MAX_MEM_TABLE_SIZE,
+ MALLOC_OVERHEAD, 1024, 0},
{"max_join_size", OPT_MAX_JOIN_SIZE,
"Joins that are probably going to read more than max_join_size records return an error.",
(gptr*) &global_system_variables.max_join_size,
@@ -6005,8 +6012,8 @@ The minimum value for this variable is 4096.",
{"tmp_table_size", OPT_TMP_TABLE_SIZE,
"If an in-memory temporary table exceeds this size, MySQL will automatically convert it to an on-disk MyISAM table.",
(gptr*) &global_system_variables.tmp_table_size,
- (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULONG,
- REQUIRED_ARG, 32*1024*1024L, 1024, ~0L, 0, 1, 0},
+ (gptr*) &max_system_variables.tmp_table_size, 0, GET_ULL,
+ REQUIRED_ARG, 32*1024*1024L, 1024, MAX_MEM_TABLE_SIZE, 0, 1, 0},
{"transaction_alloc_block_size", OPT_TRANS_ALLOC_BLOCK_SIZE,
"Allocation block size for transactions to be stored in binary log",
(gptr*) &global_system_variables.trans_alloc_block_size,
@@ -6189,6 +6196,7 @@ struct show_var_st status_vars[]= {
{"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST},
{"Open_tables", (char*) 0, SHOW_OPENTABLES},
{"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), SHOW_LONG_STATUS},
+ {"Prepared_stmt_count", (char*) &prepared_stmt_count, SHOW_LONG_CONST},
#ifdef HAVE_QUERY_CACHE
{"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_CONST},
{"Qcache_free_memory", (char*) &query_cache.free_memory, SHOW_LONG_CONST},
@@ -6358,6 +6366,7 @@ static void mysql_init_variables(void)
binlog_cache_use= binlog_cache_disk_use= 0;
max_used_connections= slow_launch_threads = 0;
mysqld_user= mysqld_chroot= opt_init_file= opt_bin_logname = 0;
+ prepared_stmt_count= 0;
errmesg= 0;
mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS;
bzero((gptr) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list));
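MAX_MEM_TABLE_SIZE above caps the now 64-bit max_heap_table_size and tmp_table_size at what the build can address: the full ulong range on 32-bit pointers, the full ulonglong range otherwise. The same compile-time choice, keyed on sizeof(void *) instead of the server's SIZEOF_CHARP, looks like this:

#include <cstdio>

/* On a 32-bit build ~(unsigned long) 0 is 0xFFFFFFFF; on 64-bit builds the
   second branch is taken and the cap covers the whole 64-bit range. */
static const unsigned long long MAX_MEM_TABLE_SIZE=
    (sizeof(void *) == 4) ? (unsigned long long) ~(unsigned long) 0
                          : ~(unsigned long long) 0;

int main()
{
  printf("in-memory table size cap: %llu\n", MAX_MEM_TABLE_SIZE);
  return 0;
}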
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index ef755d868d9..23a82c6eda7 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -1924,7 +1924,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
key_parts->null_bit= key_part_info->null_bit;
key_parts->image_type =
(key_info->flags & HA_SPATIAL) ? Field::itMBR : Field::itRAW;
- key_parts->flag= key_part_info->key_part_flag;
+ key_parts->flag= (uint8) key_part_info->key_part_flag;
}
param.real_keynr[param.keys++]=idx;
}
@@ -6240,7 +6240,7 @@ QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
key_part->length= key_info->key_part[part].length;
key_part->store_length= key_info->key_part[part].store_length;
key_part->null_bit= key_info->key_part[part].null_bit;
- key_part->flag= key_info->key_part[part].key_part_flag;
+ key_part->flag= (uint8) key_info->key_part[part].key_part_flag;
}
if (insert_dynamic(&quick->ranges,(gptr)&range))
goto err;
@@ -7445,7 +7445,7 @@ static TRP_GROUP_MIN_MAX *
get_best_group_min_max(PARAM *param, SEL_TREE *tree)
{
THD *thd= param->thd;
- JOIN *join= thd->lex->select_lex.join;
+ JOIN *join= thd->lex->current_select->join;
TABLE *table= param->table;
bool have_min= FALSE; /* TRUE if there is a MIN function. */
bool have_max= FALSE; /* TRUE if there is a MAX function. */
@@ -7466,7 +7466,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
DBUG_ENTER("get_best_group_min_max");
/* Perform few 'cheap' tests whether this access method is applicable. */
- if (!join || (thd->lex->sql_command != SQLCOM_SELECT))
+ if (!join)
DBUG_RETURN(NULL); /* This is not a select statement. */
if ((join->tables != 1) || /* The query must reference one table. */
((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. */
@@ -8316,7 +8316,7 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
DBUG_ENTER("TRP_GROUP_MIN_MAX::make_quick");
quick= new QUICK_GROUP_MIN_MAX_SELECT(param->table,
- param->thd->lex->select_lex.join,
+ param->thd->lex->current_select->join,
have_min, have_max, min_max_arg_part,
group_prefix_len, used_key_parts,
index_info, index, read_cost, records,
diff --git a/sql/password.c b/sql/password.c
index 506e1aa36a2..594096b6ec9 100644
--- a/sql/password.c
+++ b/sql/password.c
@@ -406,7 +406,7 @@ make_scrambled_password(char *to, const char *password)
mysql_sha1_result(&sha1_context, hash_stage2);
/* convert hash_stage2 to hex string */
*to++= PVERSION41_CHAR;
- octet2hex(to, hash_stage2, SHA1_HASH_SIZE);
+ octet2hex(to, (char*) hash_stage2, SHA1_HASH_SIZE);
}
@@ -520,5 +520,5 @@ void get_salt_from_password(uint8 *hash_stage2, const char *password)
void make_password_from_salt(char *to, const uint8 *hash_stage2)
{
*to++= PVERSION41_CHAR;
- octet2hex(to, hash_stage2, SHA1_HASH_SIZE);
+ octet2hex(to, (char*) hash_stage2, SHA1_HASH_SIZE);
}
diff --git a/sql/set_var.cc b/sql/set_var.cc
index 55c62a9a5a5..23afef742a6 100644
--- a/sql/set_var.cc
+++ b/sql/set_var.cc
@@ -121,7 +121,6 @@ static KEY_CACHE *create_key_cache(const char *name, uint length);
void fix_sql_mode_var(THD *thd, enum_var_type type);
static byte *get_error_count(THD *thd);
static byte *get_warning_count(THD *thd);
-static byte *get_prepared_stmt_count(THD *thd);
static byte *get_have_innodb(THD *thd);
static byte *get_tmpdir(THD *thd);
@@ -248,7 +247,7 @@ sys_var_thd_ulong sys_max_delayed_threads("max_delayed_threads",
fix_max_connections);
sys_var_thd_ulong sys_max_error_count("max_error_count",
&SV::max_error_count);
-sys_var_thd_ulong sys_max_heap_table_size("max_heap_table_size",
+sys_var_thd_ulonglong sys_max_heap_table_size("max_heap_table_size",
&SV::max_heap_table_size);
sys_var_thd_ulong sys_pseudo_thread_id("pseudo_thread_id",
&SV::pseudo_thread_id,
@@ -415,7 +414,7 @@ sys_var_thd_enum sys_tx_isolation("tx_isolation",
&SV::tx_isolation,
&tx_isolation_typelib,
fix_tx_isolation);
-sys_var_thd_ulong sys_tmp_table_size("tmp_table_size",
+sys_var_thd_ulonglong sys_tmp_table_size("tmp_table_size",
&SV::tmp_table_size);
sys_var_bool_ptr sys_timed_mutexes("timed_mutexes",
&timed_mutexes);
@@ -567,9 +566,6 @@ static sys_var_readonly sys_warning_count("warning_count",
OPT_SESSION,
SHOW_LONG,
get_warning_count);
-static sys_var_readonly sys_prepared_stmt_count("prepared_stmt_count",
- OPT_GLOBAL, SHOW_LONG,
- get_prepared_stmt_count);
/* alias for last_insert_id() to be compatible with Sybase */
#ifdef HAVE_REPLICATION
@@ -701,7 +697,6 @@ sys_var *sys_variables[]=
&sys_optimizer_prune_level,
&sys_optimizer_search_depth,
&sys_preload_buff_size,
- &sys_prepared_stmt_count,
&sys_pseudo_thread_id,
&sys_query_alloc_block_size,
&sys_query_cache_size,
@@ -1008,7 +1003,6 @@ struct show_var_st init_vars[]= {
{"pid_file", (char*) pidfile_name, SHOW_CHAR},
{"port", (char*) &mysqld_port, SHOW_INT},
{sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS},
- {sys_prepared_stmt_count.name, (char*) &sys_prepared_stmt_count, SHOW_SYS},
{"protocol_version", (char*) &protocol_version, SHOW_INT},
{sys_query_alloc_block_size.name, (char*) &sys_query_alloc_block_size,
SHOW_SYS},
@@ -2836,7 +2830,7 @@ static bool set_option_autocommit(THD *thd, set_var *var)
{
/* The test is negative as the flag we use is NOT autocommit */
- ulong org_options=thd->options;
+ ulonglong org_options= thd->options;
if (var->save_result.ulong_value != 0)
thd->options&= ~((sys_var_thd_bit*) var->var)->bit_flag;
@@ -2946,15 +2940,6 @@ static byte *get_have_innodb(THD *thd)
}
-static byte *get_prepared_stmt_count(THD *thd)
-{
- pthread_mutex_lock(&LOCK_prepared_stmt_count);
- thd->sys_var_tmp.ulong_value= prepared_stmt_count;
- pthread_mutex_unlock(&LOCK_prepared_stmt_count);
- return (byte*) &thd->sys_var_tmp.ulong_value;
-}
-
-
/*
Get the tmpdir that was specified or chosen by default
diff --git a/sql/slave.cc b/sql/slave.cc
index b5c1fff4222..d0396444ace 100644
--- a/sql/slave.cc
+++ b/sql/slave.cc
@@ -1513,7 +1513,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db,
TABLE_LIST tables;
int error= 1;
handler *file;
- ulong save_options;
+ ulonglong save_options;
NET *net= &mysql->net;
DBUG_ENTER("create_table_from_dump");
diff --git a/sql/sp.cc b/sql/sp.cc
index f7c086061d3..188db4546c9 100644
--- a/sql/sp.cc
+++ b/sql/sp.cc
@@ -532,17 +532,17 @@ db_create_routine(THD *thd, int type, sp_head *sp)
table->field[MYSQL_PROC_FIELD_NAME]->
store(sp->m_name.str, sp->m_name.length, system_charset_info);
table->field[MYSQL_PROC_FIELD_TYPE]->
- store((longlong)type);
+ store((longlong)type, 1);
table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME]->
store(sp->m_name.str, sp->m_name.length, system_charset_info);
if (sp->m_chistics->daccess != SP_DEFAULT_ACCESS)
table->field[MYSQL_PROC_FIELD_ACCESS]->
- store((longlong)sp->m_chistics->daccess);
+ store((longlong)sp->m_chistics->daccess, 1);
table->field[MYSQL_PROC_FIELD_DETERMINISTIC]->
- store((longlong)(sp->m_chistics->detistic ? 1 : 2));
+ store((longlong)(sp->m_chistics->detistic ? 1 : 2), 1);
if (sp->m_chistics->suid != SP_IS_DEFAULT_SUID)
table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]->
- store((longlong)sp->m_chistics->suid);
+ store((longlong)sp->m_chistics->suid, 1);
table->field[MYSQL_PROC_FIELD_PARAM_LIST]->
store(sp->m_params.str, sp->m_params.length, system_charset_info);
if (sp->m_type == TYPE_ENUM_FUNCTION)
@@ -559,7 +559,7 @@ db_create_routine(THD *thd, int type, sp_head *sp)
((Field_timestamp *)table->field[MYSQL_PROC_FIELD_CREATED])->set_time();
((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time();
table->field[MYSQL_PROC_FIELD_SQL_MODE]->
- store((longlong)thd->variables.sql_mode);
+ store((longlong)thd->variables.sql_mode, 1);
if (sp->m_chistics->comment.str)
table->field[MYSQL_PROC_FIELD_COMMENT]->
store(sp->m_chistics->comment.str, sp->m_chistics->comment.length,
@@ -663,7 +663,6 @@ db_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics)
{
TABLE *table;
int ret;
- bool opened;
DBUG_ENTER("db_update_routine");
DBUG_PRINT("enter", ("type: %d name: %.*s",
type, name->m_name.length, name->m_name.str));
@@ -677,10 +676,10 @@ db_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics)
((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time();
if (chistics->suid != SP_IS_DEFAULT_SUID)
table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]->
- store((longlong)chistics->suid);
+ store((longlong)chistics->suid, 1);
if (chistics->daccess != SP_DEFAULT_ACCESS)
table->field[MYSQL_PROC_FIELD_ACCESS]->
- store((longlong)chistics->daccess);
+ store((longlong)chistics->daccess, 1);
if (chistics->comment.str)
table->field[MYSQL_PROC_FIELD_COMMENT]->store(chistics->comment.str,
chistics->comment.length,
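The store() calls above pick up a second argument because the integer Field::store() now carries a flag saying whether the longlong should be read as unsigned. A toy free-function illustration of what the flag changes (not the server's Field API):

#include <cstdio>

/* The same 64-bit pattern prints as signed or unsigned depending on the
   flag, which is what the extra 1/TRUE arguments above select. */
static void store(long long nr, bool unsigned_val)
{
  if (unsigned_val)
    printf("stored as unsigned: %llu\n", (unsigned long long) nr);
  else
    printf("stored as signed:   %lld\n", nr);
}

int main()
{
  store(-1, false);   /* -1 */
  store(-1, true);    /* 18446744073709551615 */
  return 0;
}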
diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc
index fea6a67f32c..f5912caddaf 100644
--- a/sql/sp_cache.cc
+++ b/sql/sp_cache.cc
@@ -124,7 +124,6 @@ void sp_cache_clear(sp_cache **cp)
void sp_cache_insert(sp_cache **cp, sp_head *sp)
{
sp_cache *c;
- ulong v;
if (!(c= *cp))
{
diff --git a/sql/sp_head.cc b/sql/sp_head.cc
index a06bfe28a6f..152bc87aead 100644
--- a/sql/sp_head.cc
+++ b/sql/sp_head.cc
@@ -444,9 +444,12 @@ sp_head::operator delete(void *ptr, size_t size)
sp_head::sp_head()
:Query_arena(&main_mem_root, INITIALIZED_FOR_SP),
m_flags(0), m_recursion_level(0), m_next_cached_sp(0),
- m_first_instance(this), m_first_free_instance(this), m_last_cached_sp(this),
m_cont_level(0)
{
+ m_first_instance= this;
+ m_first_free_instance= this;
+ m_last_cached_sp= this;
+
m_return_field_def.charset = NULL;
extern byte *
@@ -1648,7 +1651,7 @@ sp_head::execute_procedure(THD *thd, List<Item> *args)
Item_null *null_item= new Item_null();
if (!null_item ||
- nctx->set_variable(thd, i, (struct Item **)&null_item))
+ nctx->set_variable(thd, i, (Item **)&null_item))
{
err_status= TRUE;
break;
@@ -2789,7 +2792,7 @@ void
sp_instr_freturn::print(String *str)
{
/* freturn type expr... */
- if (str->reserve(UINT_MAX+8+32)) // Add some for the expr. too
+ if (str->reserve(1024+8+32)) // Add some for the expr. too
return;
str->qs_append(STRING_WITH_LEN("freturn "));
str->qs_append((uint)m_type);
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index d91da405c36..ab39cb250f6 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -1898,7 +1898,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo,
table->field[next_field+2]->store((longlong) mqh.conn_per_hour, TRUE);
if (table->s->fields >= 36 &&
(mqh.specified_limits & USER_RESOURCES::USER_CONNECTIONS))
- table->field[next_field+3]->store((longlong) mqh.user_conn);
+ table->field[next_field+3]->store((longlong) mqh.user_conn, TRUE);
mqh_used= mqh_used || mqh.questions || mqh.updates || mqh.conn_per_hour;
}
if (old_row_exists)
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index a01ffe3ce43..0a7abc87061 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -694,7 +694,7 @@ void close_temporary_tables(THD *thd)
/* We always quote db,table names though it is slight overkill */
if (found_user_tables &&
- !(was_quote_show= (thd->options & OPTION_QUOTE_SHOW_CREATE)))
+ !(was_quote_show= test(thd->options & OPTION_QUOTE_SHOW_CREATE)))
{
thd->options |= OPTION_QUOTE_SHOW_CREATE;
}
@@ -3316,6 +3316,12 @@ find_field_in_tables(THD *thd, Item_ident *item,
{
if (found == WRONG_GRANT)
return (Field*) 0;
+
+ /*
+ Only views fields should be marked as dependent, not an underlying
+ fields.
+ */
+ if (!table_ref->belong_to_view)
{
SELECT_LEX *current_sel= thd->lex->current_select;
SELECT_LEX *last_select= table_ref->select_lex;
diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc
index 5902374dff0..2c77e0ef230 100644
--- a/sql/sql_cache.cc
+++ b/sql/sql_cache.cc
@@ -741,7 +741,11 @@ void query_cache_end_of_result(THD *thd)
header->query()));
query_cache.wreck(__LINE__, "");
- BLOCK_UNLOCK_WR(query_block);
+ /*
+ We do not need call of BLOCK_UNLOCK_WR(query_block); here because
+ query_cache.wreck() switched query cache off but left content
+ untouched for investigation (it is debugging method).
+ */
goto end;
}
#endif
@@ -902,7 +906,7 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
if (thd->db_length)
{
memcpy(thd->query+thd->query_length+1, thd->db, thd->db_length);
- DBUG_PRINT("qcache", ("database : %s length %u",
+ DBUG_PRINT("qcache", ("database: %s length: %u",
thd->db, thd->db_length));
}
else
@@ -1048,7 +1052,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
(pre-space is removed in dispatch_command)
First '/' looks like comment before command it is not
- frequently appeared in real lihe, consequently we can
+ frequently appeared in real life, consequently we can
check all such queries, too.
*/
if ((my_toupper(system_charset_info, sql[i]) != 'S' ||
@@ -1077,7 +1081,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length)
if (thd->db_length)
{
memcpy(sql+query_length+1, thd->db, thd->db_length);
- DBUG_PRINT("qcache", ("database: '%s' length %u",
+ DBUG_PRINT("qcache", ("database: '%s' length: %u",
thd->db, thd->db_length));
}
else
@@ -1230,9 +1234,9 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
if (engine_data != table->engine_data())
{
DBUG_PRINT("qcache",
- ("Handler require invalidation queries of %s.%s %lld-%lld",
- table_list.db, table_list.alias,
- engine_data, table->engine_data()));
+ ("Handler require invalidation queries of %s.%s %lu-%lu",
+ table_list.db, table_list.alias,
+ (ulong) engine_data, (ulong) table->engine_data()));
invalidate_table((byte *) table->db(), table->key_length());
}
else
@@ -1253,10 +1257,10 @@ sql mode: 0x%lx, sort len: %lu, conncat len: %lu",
#ifndef EMBEDDED_LIBRARY
do
{
- DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %u)",
+ DBUG_PRINT("qcache", ("Results (len: %lu used: %lu headers: %lu)",
result_block->length, result_block->used,
- result_block->headers_len()+
- ALIGN_SIZE(sizeof(Query_cache_result))));
+ (ulong) (result_block->headers_len()+
+ ALIGN_SIZE(sizeof(Query_cache_result)))));
Query_cache_result *result = result_block->result();
if (net_real_write(&thd->net, result->data(),
@@ -1338,7 +1342,7 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used)
for (; tables_used; tables_used= tables_used->next)
{
invalidate_table((byte*) tables_used->key, tables_used->key_length);
- DBUG_PRINT("qcache", (" db %s, table %s", tables_used->key,
+ DBUG_PRINT("qcache", ("db: %s table: %s", tables_used->key,
tables_used->key+
strlen(tables_used->key)+1));
}
@@ -2349,7 +2353,7 @@ Query_cache::register_tables_from_list(TABLE_LIST *tables_used,
{
char key[MAX_DBKEY_LENGTH];
uint key_length;
- DBUG_PRINT("qcache", ("view %s, db %s",
+ DBUG_PRINT("qcache", ("view: %s db: %s",
tables_used->view_name.str,
tables_used->view_db.str));
key_length= (uint) (strmov(strmov(key, tables_used->view_db.str) + 1,
@@ -2470,11 +2474,11 @@ Query_cache::insert_table(uint key_len, char *key,
table_block->table()->engine_data() != engine_data)
{
DBUG_PRINT("qcache",
- ("Handler require invalidation queries of %s.%s %lld-%lld",
+ ("Handler require invalidation queries of %s.%s %lu-%lu",
table_block->table()->db(),
table_block->table()->table(),
- engine_data,
- table_block->table()->engine_data()));
+ (ulong) engine_data,
+ (ulong) table_block->table()->engine_data()));
/*
as far as we delete all queries with this table, table block will be
deleted, too
@@ -2972,7 +2976,7 @@ static TABLE_COUNTER_TYPE process_and_count_tables(TABLE_LIST *tables_used,
table_count++;
if (tables_used->view)
{
- DBUG_PRINT("qcache", ("view %s, db %s",
+ DBUG_PRINT("qcache", ("view: %s db: %s",
tables_used->view_name.str,
tables_used->view_db.str));
*tables_type|= HA_CACHE_TBL_NONTRANSACT;
@@ -3038,7 +3042,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
lex->safe_to_cache_query)
{
DBUG_PRINT("qcache", ("options: %lx %lx type: %u",
- OPTION_TO_QUERY_CACHE,
+ (long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
(int) thd->variables.query_cache_type));
@@ -3058,7 +3062,7 @@ Query_cache::is_cacheable(THD *thd, uint32 query_len, char *query, LEX *lex,
DBUG_PRINT("qcache",
("not interesting query: %d or not cacheable, options %lx %lx type: %u",
(int) lex->sql_command,
- OPTION_TO_QUERY_CACHE,
+ (long) OPTION_TO_QUERY_CACHE,
(long) lex->select_lex.options,
(int) thd->variables.query_cache_type));
DBUG_RETURN(0);
@@ -3522,7 +3526,7 @@ uint Query_cache::filename_2_table_key (char *key, const char *path,
#if defined(DBUG_OFF) && !defined(USE_QUERY_CACHE_INTEGRITY_CHECK)
-void wreck(uint line, const char *message) {}
+void wreck(uint line, const char *message) { query_cache_size = 0; }
void bins_dump() {}
void cache_dump() {}
void queries_dump() {}
@@ -3534,6 +3538,17 @@ my_bool in_blocks(Query_cache_block * point) { return 0; }
#else
+
+/*
+ Debug method which switch query cache off but left content for
+ investigation.
+
+ SYNOPSIS
+ Query_cache::wreck()
+ line line of the wreck() call
+ message message for logging
+*/
+
void Query_cache::wreck(uint line, const char *message)
{
THD *thd=current_thd;
@@ -3757,8 +3772,8 @@ my_bool Query_cache::check_integrity(bool locked)
(((long)first_block) % (long)ALIGN_SIZE(1)))
{
DBUG_PRINT("error",
- ("block 0x%lx do not aligned by %d", (ulong) block,
- ALIGN_SIZE(1)));
+ ("block 0x%lx do not aligned by %d", (long) block,
+ (int) ALIGN_SIZE(1)));
result = 1;
}
// Check memory allocation
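Most of the DBUG_PRINT() changes in this file (and in ha_archive.cc earlier) swap %lld/%llu for %lu plus an explicit (ulong) cast; the long-long length modifier was not portable to every platform the server targeted at the time, and a specifier that disagrees with its argument is undefined behaviour. A minimal sketch of the pattern:

#include <cstdio>

int main()
{
  unsigned long long engine_data= 0x1122334455667788ULL;

  /* printf("%lu\n", engine_data);                mismatched: undefined behaviour */
  printf("%lu\n", (unsigned long) engine_data);   /* explicit cast, well defined  */
  return 0;
}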
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 41845dc5c76..9ad67524998 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -495,6 +495,8 @@ struct system_variables
{
ulonglong myisam_max_extra_sort_file_size;
ulonglong myisam_max_sort_file_size;
+ ulonglong max_heap_table_size;
+ ulonglong tmp_table_size;
ha_rows select_limit;
ha_rows max_join_size;
ulong auto_increment_increment, auto_increment_offset;
@@ -503,7 +505,6 @@ struct system_variables
ulong long_query_time;
ulong max_allowed_packet;
ulong max_error_count;
- ulong max_heap_table_size;
ulong max_length_for_sort_data;
ulong max_sort_length;
ulong max_tmp_tables;
@@ -527,7 +528,6 @@ struct system_variables
ulong div_precincrement;
ulong sortbuff_size;
ulong table_type;
- ulong tmp_table_size;
ulong tx_isolation;
ulong completion_type;
/* Determines which non-standard SQL behaviour should be enabled */
@@ -2074,7 +2074,8 @@ class user_var_entry
class Unique :public Sql_alloc
{
DYNAMIC_ARRAY file_ptrs;
- ulong max_elements, max_in_memory_size;
+ ulong max_elements;
+ ulonglong max_in_memory_size;
IO_CACHE file;
TREE tree;
byte *record_pointers;
@@ -2084,7 +2085,7 @@ class Unique :public Sql_alloc
public:
ulong elements;
Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg,
- uint size_arg, ulong max_in_memory_size_arg);
+ uint size_arg, ulonglong max_in_memory_size_arg);
~Unique();
ulong elements_in_tree() { return tree.elements_in_tree; }
inline bool unique_add(void *ptr)
@@ -2098,13 +2099,13 @@ public:
bool get(TABLE *table);
static double get_use_cost(uint *buffer, uint nkeys, uint key_size,
- ulong max_in_memory_size);
+ ulonglong max_in_memory_size);
inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size,
- ulong max_in_memory_size)
+ ulonglong max_in_memory_size)
{
- register ulong max_elems_in_tree=
+ register ulonglong max_elems_in_tree=
(1 + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
- return sizeof(uint)*(1 + nkeys/max_elems_in_tree);
+ return (int) (sizeof(uint)*(1 + nkeys/max_elems_in_tree));
}
void reset();
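The Unique changes above widen max_in_memory_size to ulonglong, so the elements-per-tree estimate in get_cost_calc_buff_size() is computed in 64 bits and only the final byte count is narrowed back to int. A worked version of that arithmetic with made-up sizes (the 32-byte aligned element size is an assumption):

#include <cstdio>

int main()
{
  /* A 4GB in-memory size no longer fits in a 32-bit ulong, hence ulonglong. */
  unsigned long long max_in_memory_size= 4ULL * 1024 * 1024 * 1024;
  unsigned elem_size= 32;   /* stand-in for ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size) */
  unsigned long nkeys= 100000000UL;

  unsigned long long max_elems_in_tree= 1 + max_in_memory_size / elem_size;
  int buff_size= (int) (sizeof(unsigned) * (1 + nkeys / max_elems_in_tree));

  printf("elements per in-memory tree: %llu, cost buffer: %d bytes\n",
         max_elems_in_tree, buff_size);
  return 0;
}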
diff --git a/sql/sql_db.cc b/sql/sql_db.cc
index 43d09d288e5..1dd9406109c 100644
--- a/sql/sql_db.cc
+++ b/sql/sql_db.cc
@@ -1172,7 +1172,7 @@ err:
bool mysql_change_db(THD *thd, const char *name, bool no_access_check)
{
- int path_length, db_length;
+ int db_length;
char *db_name;
bool system_db= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc
index 38c12562fe3..b665113dd18 100644
--- a/sql/sql_delete.cc
+++ b/sql/sql_delete.cc
@@ -902,7 +902,7 @@ end:
trunc_by_del:
/* Probably InnoDB table */
- ulong save_options= thd->options;
+ ulonglong save_options= thd->options;
table_list->lock_type= TL_WRITE;
thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_NOT_AUTOCOMMIT);
ha_enable_transaction(thd, FALSE);
diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc
index 77c7bf137fb..4c0916c389f 100644
--- a/sql/sql_handler.cc
+++ b/sql/sql_handler.cc
@@ -338,7 +338,6 @@ bool mysql_ha_read(THD *thd, TABLE_LIST *tables,
ha_rows select_limit_cnt, ha_rows offset_limit_cnt)
{
TABLE_LIST *hash_tables;
- TABLE **table_ptr;
TABLE *table;
MYSQL_LOCK *lock;
List<Item> list;
diff --git a/sql/sql_lex.h b/sql/sql_lex.h
index 5f968252cc3..be12467d097 100644
--- a/sql/sql_lex.h
+++ b/sql/sql_lex.h
@@ -397,7 +397,7 @@ protected:
TABLE *table; /* temporary table using for appending UNION results */
select_result *result;
- ulong found_rows_for_union;
+ ulonglong found_rows_for_union;
bool res;
public:
bool prepared, // prepare phase already performed for UNION (unit)
diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc
index 1e7601c0951..8c0235e9768 100644
--- a/sql/sql_prepare.cc
+++ b/sql/sql_prepare.cc
@@ -2879,7 +2879,6 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor)
{
Statement stmt_backup;
Query_arena *old_stmt_arena;
- Item *old_free_list;
bool error= TRUE;
statistic_increment(thd->status_var.com_stmt_execute, &LOCK_status);
diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc
index c87a8696bbc..8beb9839177 100644
--- a/sql/sql_rename.cc
+++ b/sql/sql_rename.cc
@@ -257,7 +257,7 @@ do_rename(THD *thd, TABLE_LIST *ren_table, char *new_db, char *new_table_name,
static TABLE_LIST *
rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error)
{
- TABLE_LIST *ren_table,*new_table, *tmp_table;
+ TABLE_LIST *ren_table, *new_table;
DBUG_ENTER("rename_tables");
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index bcd7e8c4a9d..3d2f46a9982 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -82,7 +82,7 @@ static store_key *get_store_key(THD *thd,
static bool make_simple_join(JOIN *join,TABLE *tmp_table);
static void make_outerjoin_info(JOIN *join);
static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *item);
-static void make_join_readinfo(JOIN *join,uint options);
+static void make_join_readinfo(JOIN *join, ulonglong options);
static bool only_eq_ref_tables(JOIN *join, ORDER *order, table_map tables);
static void update_depend_map(JOIN *join);
static void update_depend_map(JOIN *join, ORDER *order);
@@ -90,7 +90,7 @@ static ORDER *remove_const(JOIN *join,ORDER *first_order,COND *cond,
bool change_list, bool *simple_order);
static int return_zero_rows(JOIN *join, select_result *res,TABLE_LIST *tables,
List<Item> &fields, bool send_row,
- uint select_options, const char *info,
+ ulonglong select_options, const char *info,
Item *having);
static COND *build_equal_items(THD *thd, COND *cond,
COND_EQUAL *inherited,
@@ -114,7 +114,7 @@ static bool resolve_nested_join (TABLE_LIST *table);
static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item);
static bool open_tmp_table(TABLE *table);
static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
- ulong options);
+ ulonglong options);
static int do_select(JOIN *join,List<Item> *fields,TABLE *tmp_table,
Procedure *proc);
@@ -1974,7 +1974,7 @@ bool
mysql_select(THD *thd, Item ***rref_pointer_array,
TABLE_LIST *tables, uint wild_num, List<Item> &fields,
COND *conds, uint og_num, ORDER *order, ORDER *group,
- Item *having, ORDER *proc_param, ulong select_options,
+ Item *having, ORDER *proc_param, ulonglong select_options,
select_result *result, SELECT_LEX_UNIT *unit,
SELECT_LEX *select_lex)
{
@@ -4123,7 +4123,7 @@ choose_plan(JOIN *join, table_map join_tables)
{
uint search_depth= join->thd->variables.optimizer_search_depth;
uint prune_level= join->thd->variables.optimizer_prune_level;
- bool straight_join= join->select_options & SELECT_STRAIGHT_JOIN;
+ bool straight_join= test(join->select_options & SELECT_STRAIGHT_JOIN);
DBUG_ENTER("choose_plan");
join->cur_embedding_map= 0;
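
The test() change above normalizes the now 64-bit select_options mask to 0/1 before it is stored in a bool-sized variable. A self-contained illustration of the truncation that such normalization avoids, assuming a hypothetical flag bit above bit 31 and a char-sized boolean (neither name comes from the patch):

#include <cstdint>
#include <cstdio>

// Hypothetical: a flag above bit 31 and a char-sized boolean standing in for
// a my_bool-style typedef.
typedef unsigned char small_bool;
static const uint64_t SOME_HIGH_FLAG = 1ULL << 40;

int main() {
  uint64_t options = SOME_HIGH_FLAG;

  // Plain assignment keeps only the low 8 bits of the masked value, i.e. 0.
  small_bool truncated = (small_bool)(options & SOME_HIGH_FLAG);

  // Normalizing to 0/1 first, as a test()-style macro does, keeps the flag.
  small_bool normalized = (options & SOME_HIGH_FLAG) ? 1 : 0;

  std::printf("truncated=%d normalized=%d\n", (int)truncated, (int)normalized);
  return 0;
}
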
@@ -4725,8 +4725,6 @@ static void
find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
double read_time)
{
- ha_rows rec;
- double tmp;
THD *thd= join->thd;
if (!rest_tables)
{
@@ -5780,7 +5778,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
}
static void
-make_join_readinfo(JOIN *join, uint options)
+make_join_readinfo(JOIN *join, ulonglong options)
{
uint i;
@@ -6435,7 +6433,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond,
static int
return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
- List<Item> &fields, bool send_row, uint select_options,
+ List<Item> &fields, bool send_row, ulonglong select_options,
const char *info, Item *having)
{
DBUG_ENTER("return_zero_rows");
@@ -6986,7 +6984,6 @@ static COND *build_equal_items_for_cond(COND *cond,
Item_equal *item_equal;
uint members;
COND_EQUAL cond_equal;
- COND *new_cond;
cond_equal.upper_levels= inherited;
if (cond->type() == Item::COND_ITEM)
@@ -9320,13 +9317,14 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
param->recinfo=recinfo;
store_record(table,s->default_values); // Make empty default record
- if (thd->variables.tmp_table_size == ~(ulong) 0) // No limit
+ if (thd->variables.tmp_table_size == ~ (ulonglong) 0) // No limit
table->s->max_rows= ~(ha_rows) 0;
else
- table->s->max_rows= (((table->s->db_type == DB_TYPE_HEAP) ?
- min(thd->variables.tmp_table_size,
- thd->variables.max_heap_table_size) :
- thd->variables.tmp_table_size)/ table->s->reclength);
+ table->s->max_rows= (ha_rows) (((table->s->db_type == DB_TYPE_HEAP) ?
+ min(thd->variables.tmp_table_size,
+ thd->variables.max_heap_table_size) :
+ thd->variables.tmp_table_size)/
+ table->s->reclength);
set_if_bigger(table->s->max_rows,1); // For dummy start options
/*
Push the LIMIT clause to the temporary table creation, so that we
@@ -9628,7 +9626,7 @@ static bool open_tmp_table(TABLE *table)
static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
- ulong options)
+ ulonglong options)
{
int error;
MI_KEYDEF keydef;
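
The create_tmp_table() hunk above computes max_rows in 64 bits with an explicit ha_rows cast: HEAP tables are capped by the smaller of tmp_table_size and max_heap_table_size, anything else by tmp_table_size alone, and ~0 acts as a no-limit sentinel. A rough standalone sketch of that decision, with invented limits and record length:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical session limits and row size.
  const uint64_t tmp_table_size      = 32ULL * 1024 * 1024;
  const uint64_t max_heap_table_size = 16ULL * 1024 * 1024;
  const uint64_t reclength           = 128;
  const bool     is_heap_table       = true;

  const uint64_t no_limit = ~(uint64_t)0;   // "no limit" sentinel

  uint64_t max_rows;
  if (tmp_table_size == no_limit) {
    max_rows = ~(uint64_t)0;                // unlimited rows
  } else {
    uint64_t budget = is_heap_table
        ? std::min(tmp_table_size, max_heap_table_size)
        : tmp_table_size;
    max_rows = budget / reclength;
  }
  if (max_rows < 1)                         // always allow at least one row
    max_rows = 1;

  std::printf("max_rows=%llu\n", (unsigned long long)max_rows);
  return 0;
}
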
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 1d524418480..ee310ea6fe4 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -464,7 +464,6 @@ bool mysqld_show_create_db(THD *thd, char *dbname,
HA_CREATE_INFO *create_info)
{
Security_context *sctx= thd->security_ctx;
- int length;
char buff[2048];
String buffer(buff, sizeof(buff), system_charset_info);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
@@ -2367,7 +2366,6 @@ int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond)
INDEX_FIELD_VALUES idx_field_vals;
List<char> files;
char *file_name;
- uint length;
bool with_i_schema;
HA_CREATE_INFO create;
TABLE *table= tables->table;
@@ -2939,7 +2937,7 @@ bool store_schema_proc(THD *thd, TABLE *table, TABLE *proc_table,
restore_record(table, s->default_values);
if (!wild || !wild[0] || !wild_compare(sp_name.ptr(), wild, 0))
{
- int enum_idx= proc_table->field[5]->val_int();
+ int enum_idx= (int) proc_table->field[5]->val_int();
table->field[3]->store(sp_name.ptr(), sp_name.length(), cs);
get_field(thd->mem_root, proc_table->field[3], &tmp_string);
table->field[0]->store(tmp_string.ptr(), tmp_string.length(), cs);
@@ -3105,7 +3103,7 @@ static int get_schema_stat_record(THD *thd, struct st_table_list *tables,
show_table->field[key_part->fieldnr-1]->key_length()))
{
table->field[10]->store((longlong) key_part->length /
- key_part->field->charset()->mbmaxlen);
+ key_part->field->charset()->mbmaxlen, 1);
table->field[10]->set_notnull();
}
uint flags= key_part->field ? key_part->field->flags : 0;
diff --git a/sql/sql_string.h b/sql/sql_string.h
index 09b8478adf8..2be2cca5427 100644
--- a/sql/sql_string.h
+++ b/sql/sql_string.h
@@ -363,3 +363,9 @@ public:
return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length);
}
};
+
+static inline bool check_if_only_end_space(CHARSET_INFO *cs, char *str,
+ char *end)
+{
+ return str+ cs->cset->scan(cs, str, end, MY_SEQ_SPACES) == end;
+}
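
The new check_if_only_end_space() helper reports whether everything from str up to end consists of space characters in the given charset, by delegating to cs->cset->scan() with MY_SEQ_SPACES. A charset-unaware approximation, only to show the intent rather than the actual implementation:

#include <cstdio>

// Rough stand-in: true if [str, end) contains nothing but ASCII spaces.
static bool only_end_space(const char *str, const char *end) {
  while (str < end && *str == ' ')
    ++str;
  return str == end;
}

int main() {
  const char buf[] = "abc   ";
  const char *end = buf + sizeof(buf) - 1;            // exclude trailing '\0'
  std::printf("%d\n", only_end_space(buf + 3, end));  // 1: only spaces remain
  std::printf("%d\n", only_end_space(buf, end));      // 0: 'a' is not a space
  return 0;
}
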
diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc
index 95734d31411..3569733d064 100644
--- a/sql/sql_trigger.cc
+++ b/sql/sql_trigger.cc
@@ -107,7 +107,9 @@ const LEX_STRING trg_event_type_names[]=
};
-static TABLE_LIST *add_table_for_trigger(THD *thd, sp_name *trig);
+static int
+add_table_for_trigger(THD *thd, sp_name *trig, bool if_exists,
+ TABLE_LIST ** table);
class Handle_old_incorrect_sql_modes_hook: public Unknown_key_hook
{
@@ -156,6 +158,13 @@ private:
*/
bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
{
+ /*
+ FIXME: The code below takes too many different paths depending on the
+ 'create' flag, so that the justification for a single function
+ 'mysql_create_or_drop_trigger', compared to two separate functions
+ 'mysql_create_trigger' and 'mysql_drop_trigger', is not apparent.
+ This is a good candidate for a minor refactoring.
+ */
TABLE *table;
bool result= TRUE;
String stmt_query;
@@ -181,10 +190,6 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
DBUG_RETURN(TRUE);
}
- if (!create &&
- !(tables= add_table_for_trigger(thd, thd->lex->spname)))
- DBUG_RETURN(TRUE);
-
/*
We don't allow creating triggers on tables in the 'mysql' schema
*/
@@ -194,9 +199,6 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
DBUG_RETURN(TRUE);
}
- /* We should have only one table in table list. */
- DBUG_ASSERT(tables->next_global == 0);
-
/*
TODO: We should check if user has TRIGGER privilege for table here.
Now we just require SUPER privilege for creating/dropping because
@@ -211,7 +213,7 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
DROP for example) so we do the check for privileges. For now there is
already a stronger test right above; but when this stronger test will
be removed, the test below will hold. Because triggers have the same
- nature as functions regarding binlogging: their body is implicitely
+ nature as functions regarding binlogging: their body is implicitly
binlogged, so they share the same danger, so trust_function_creators
applies to them too.
*/
@@ -222,24 +224,52 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
DBUG_RETURN(TRUE);
}
- /* We do not allow creation of triggers on temporary tables. */
- if (create && find_temporary_table(thd, tables->db, tables->table_name))
- {
- my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias);
- DBUG_RETURN(TRUE);
- }
-
/*
We don't want perform our operations while global read lock is held
- so we have to wait until its end and then prevent it from occuring
+ so we have to wait until its end and then prevent it from occurring
again until we are done. (Acquiring LOCK_open is not enough because
- global read lock is held without helding LOCK_open).
+ global read lock is held without holding LOCK_open).
*/
if (wait_if_global_read_lock(thd, 0, 1))
DBUG_RETURN(TRUE);
VOID(pthread_mutex_lock(&LOCK_open));
+ if (!create)
+ {
+ bool if_exists= thd->lex->drop_if_exists;
+
+ if (add_table_for_trigger(thd, thd->lex->spname, if_exists, & tables))
+ goto end;
+
+ if (!tables)
+ {
+ DBUG_ASSERT(if_exists);
+ /*
+ Since the trigger does not exist, there is no associated table,
+ and therefore:
+ - no TRIGGER privileges to check,
+ - no trigger to drop,
+ - no table to lock/modify,
+ so the drop statement is successful.
+ */
+ result= FALSE;
+ /* Still, we need to log the query ... */
+ stmt_query.append(thd->query, thd->query_length);
+ goto end;
+ }
+ }
+
+ /* We should have only one table in table list. */
+ DBUG_ASSERT(tables->next_global == 0);
+
+ /* We do not allow creation of triggers on temporary tables. */
+ if (create && find_temporary_table(thd, tables->db, tables->table_name))
+ {
+ my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias);
+ goto end;
+ }
+
if (lock_table_names(thd, tables))
goto end;
@@ -326,7 +356,7 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables,
char dir_buff[FN_REFLEN], file_buff[FN_REFLEN], trigname_buff[FN_REFLEN],
trigname_path[FN_REFLEN];
LEX_STRING dir, file, trigname_file;
- LEX_STRING *trg_def, *name;
+ LEX_STRING *trg_def;
LEX_STRING definer_user;
LEX_STRING definer_host;
ulonglong *trg_sql_mode;
@@ -849,7 +879,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
DBUG_RETURN(1);
List_iterator_fast<LEX_STRING> it(triggers->definitions_list);
- LEX_STRING *trg_create_str, *trg_name_str;
+ LEX_STRING *trg_create_str;
ulonglong *trg_sql_mode;
if (triggers->definition_modes_list.is_empty() &&
@@ -966,7 +996,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db,
goto err_with_lex_cleanup;
}
- lex.sphead->set_info(0, 0, &lex.sp_chistics, *trg_sql_mode);
+ lex.sphead->set_info(0, 0, &lex.sp_chistics, (ulong) *trg_sql_mode);
triggers->bodies[lex.trg_chistics.event]
[lex.trg_chistics.action_time]= lex.sphead;
@@ -1145,13 +1175,17 @@ bool Table_triggers_list::get_trigger_info(THD *thd, trg_event_type event,
add_table_for_trigger()
thd - current thread context
trig - identifier for trigger
+ if_exists - if TRUE, a missing trigger is reported with a warning, not an error
+ table - pointer to the TABLE_LIST object for the trigger's table (output)
RETURN VALUE
- 0 - error
- # - pointer to TABLE_LIST object for the table
+ 0 Success
+ 1 Error
*/
-static TABLE_LIST *add_table_for_trigger(THD *thd, sp_name *trig)
+static int
+add_table_for_trigger(THD *thd, sp_name *trig, bool if_exists,
+ TABLE_LIST **table)
{
LEX *lex= thd->lex;
char path_buff[FN_REFLEN];
@@ -1162,6 +1196,7 @@ static TABLE_LIST *add_table_for_trigger(THD *thd, sp_name *trig)
path_buff, &trigname.trigger_table);
DBUG_ENTER("add_table_for_trigger");
+ DBUG_ASSERT(table != NULL);
strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", trig->m_db.str, "/",
trig->m_name.str, trigname_file_ext, NullS);
@@ -1170,30 +1205,45 @@ static TABLE_LIST *add_table_for_trigger(THD *thd, sp_name *trig)
if (access(path_buff, F_OK))
{
+ if (if_exists)
+ {
+ push_warning_printf(thd,
+ MYSQL_ERROR::WARN_LEVEL_NOTE,
+ ER_TRG_DOES_NOT_EXIST,
+ ER(ER_TRG_DOES_NOT_EXIST));
+ *table= NULL;
+ DBUG_RETURN(0);
+ }
+
my_error(ER_TRG_DOES_NOT_EXIST, MYF(0));
- DBUG_RETURN(0);
+ DBUG_RETURN(1);
}
if (!(parser= sql_parse_prepare(&path, thd->mem_root, 1)))
- DBUG_RETURN(0);
+ DBUG_RETURN(1);
if (!is_equal(&trigname_file_type, parser->type()))
{
my_error(ER_WRONG_OBJECT, MYF(0), trig->m_name.str, trigname_file_ext+1,
"TRIGGERNAME");
- DBUG_RETURN(0);
+ DBUG_RETURN(1);
}
if (parser->parse((gptr)&trigname, thd->mem_root,
trigname_file_parameters, 1,
&trigger_table_hook))
- DBUG_RETURN(0);
+ DBUG_RETURN(1);
/* We need to reset statement table list to be PS/SP friendly. */
lex->query_tables= 0;
lex->query_tables_last= &lex->query_tables;
- DBUG_RETURN(sp_add_to_query_tables(thd, lex, trig->m_db.str,
- trigname.trigger_table.str, TL_IGNORE));
+ *table= sp_add_to_query_tables(thd, lex, trig->m_db.str,
+ trigname.trigger_table.str, TL_IGNORE);
+
+ if (! *table)
+ DBUG_RETURN(1);
+
+ DBUG_RETURN(0);
}
@@ -1287,7 +1337,6 @@ Table_triggers_list::change_table_name_in_triggers(THD *thd,
{
char path_buff[FN_REFLEN];
LEX_STRING *def, *on_table_name, new_def;
- ulonglong *sql_mode;
ulong save_sql_mode= thd->variables.sql_mode;
List_iterator_fast<LEX_STRING> it_def(definitions_list);
List_iterator_fast<LEX_STRING> it_on_table_name(on_table_names_list);
@@ -1301,7 +1350,7 @@ Table_triggers_list::change_table_name_in_triggers(THD *thd,
while ((def= it_def++))
{
on_table_name= it_on_table_name++;
- thd->variables.sql_mode= *(it_mode++);
+ thd->variables.sql_mode= (ulong) *(it_mode++);
/* Construct CREATE TRIGGER statement with new table name. */
buff.length(0);
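
The reworked add_table_for_trigger() distinguishes three outcomes: hard error, success with a table, and success with no table when the trigger is missing but IF EXISTS was given (a note is pushed and the statement is still written to stmt_query for logging). A schematic caller with placeholder types, only to show how that contract is meant to be consumed; none of these names belong to the server:

#include <cstdio>

// Placeholder for the real TABLE_LIST, just so the sketch compiles.
struct FakeTableList { const char *name; };

// Mirrors the new contract: 0 on success, 1 on error; *table is NULL when the
// trigger does not exist and if_exists is true.
static int fake_add_table_for_trigger(bool trigger_exists, bool if_exists,
                                      FakeTableList **table) {
  static FakeTableList t = { "t1" };
  if (!trigger_exists) {
    if (if_exists) {
      std::puts("note: trigger does not exist");  // warning, not an error
      *table = nullptr;
      return 0;
    }
    std::puts("error: trigger does not exist");
    return 1;
  }
  *table = &t;
  return 0;
}

int main() {
  FakeTableList *tables = nullptr;
  // DROP TRIGGER IF EXISTS on a missing trigger: succeed, log, do nothing else.
  if (fake_add_table_for_trigger(false, true, &tables))
    return 1;                 // hard error path
  if (!tables) {
    std::puts("nothing to drop; statement still succeeds and is logged");
    return 0;
  }
  std::puts("would lock the table and drop the trigger here");
  return 0;
}
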
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index dabda39d6b7..3b6aa5f1aa2 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -118,7 +118,7 @@ int mysql_update(THD *thd,
enum enum_duplicates handle_duplicates, bool ignore)
{
bool using_limit= limit != HA_POS_ERROR;
- bool safe_update= thd->options & OPTION_SAFE_UPDATES;
+ bool safe_update= test(thd->options & OPTION_SAFE_UPDATES);
bool used_key_is_modified, transactional_table;
bool can_compare_record;
int res;
diff --git a/sql/sql_view.cc b/sql/sql_view.cc
index c0cdaf59712..53844eb0fd2 100644
--- a/sql/sql_view.cc
+++ b/sql/sql_view.cc
@@ -197,7 +197,7 @@ fill_defined_view_parts (THD *thd, TABLE_LIST *view)
lex->definer= &view->definer;
}
if (lex->create_view_algorithm == VIEW_ALGORITHM_UNDEFINED)
- lex->create_view_algorithm= decoy.algorithm;
+ lex->create_view_algorithm= (uint8) decoy.algorithm;
if (lex->create_view_suid == VIEW_SUID_DEFAULT)
lex->create_view_suid= decoy.view_suid ?
VIEW_SUID_DEFINER : VIEW_SUID_INVOKER;
@@ -1477,7 +1477,6 @@ bool check_key_in_view(THD *thd, TABLE_LIST *view)
TABLE *table;
Field_translator *trans, *end_of_trans;
KEY *key_info, *key_info_end;
- uint i;
DBUG_ENTER("check_key_in_view");
/*
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 676f3f0e6ab..a362d1ce4ea 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -1626,7 +1626,6 @@ sp_decl:
uint num_vars= pctx->context_var_count();
enum enum_field_types var_type= (enum enum_field_types) $4;
Item *dflt_value_item= $5;
- create_field *create_field_op;
if (!dflt_value_item)
{
@@ -6120,11 +6119,12 @@ drop:
lex->sql_command= SQLCOM_DROP_VIEW;
lex->drop_if_exists= $3;
}
- | DROP TRIGGER_SYM sp_name
+ | DROP TRIGGER_SYM if_exists sp_name
{
LEX *lex= Lex;
lex->sql_command= SQLCOM_DROP_TRIGGER;
- lex->spname= $3;
+ lex->drop_if_exists= $3;
+ lex->spname= $4;
}
;
@@ -7053,6 +7053,8 @@ load_data_lock:
Ignore this option in SP to avoid problem with query cache
*/
if (Lex->sphead != 0)
+ $$= YYTHD->update_lock_default;
+ else
#endif
$$= TL_WRITE_CONCURRENT_INSERT;
}
diff --git a/sql/table.cc b/sql/table.cc
index e63db72a02d..1305ed9cc24 100644
--- a/sql/table.cc
+++ b/sql/table.cc
@@ -730,6 +730,7 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat,
if (key_part->fieldnr)
{ // Should always be true !
Field *field=key_part->field=outparam->field[key_part->fieldnr-1];
+ key_part->type= field->key_type();
if (field->null_ptr)
{
key_part->null_offset=(uint) ((byte*) field->null_ptr -
@@ -2458,7 +2459,18 @@ bool st_table_list::prepare_view_securety_context(THD *thd)
}
else
{
- my_error(ER_NO_SUCH_USER, MYF(0), definer.user.str, definer.host.str);
+ if (thd->security_ctx->master_access & SUPER_ACL)
+ {
+ my_error(ER_NO_SUCH_USER, MYF(0), definer.user.str, definer.host.str);
+
+ }
+ else
+ {
+ my_error(ER_ACCESS_DENIED_ERROR, MYF(0),
+ thd->security_ctx->priv_user,
+ thd->security_ctx->priv_host,
+ (thd->password ? ER(ER_YES) : ER(ER_NO)));
+ }
DBUG_RETURN(TRUE);
}
}
diff --git a/sql/tztime.cc b/sql/tztime.cc
index fe23954bbb2..4becf4a9fcc 100644
--- a/sql/tztime.cc
+++ b/sql/tztime.cc
@@ -1729,9 +1729,9 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap)
tz_leapcnt++;
DBUG_PRINT("info",
- ("time_zone_leap_second table: tz_leapcnt=%u tt_time=%lld offset=%ld",
- tz_leapcnt, (longlong)tz_lsis[tz_leapcnt-1].ls_trans,
- tz_lsis[tz_leapcnt-1].ls_corr));
+ ("time_zone_leap_second table: tz_leapcnt: %u tt_time: %lu offset=%ld",
+ tz_leapcnt, (ulong) tz_lsis[tz_leapcnt-1].ls_trans,
+ tz_lsis[tz_leapcnt-1].ls_corr));
res= table->file->index_next(table->record[0]);
}
@@ -2041,8 +2041,8 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables)
tz_info->timecnt++;
DBUG_PRINT("info",
- ("time_zone_transition table: tz_id=%u tt_time=%lld tt_id=%u",
- tzid, (longlong)ttime, ttid));
+ ("time_zone_transition table: tz_id: %u tt_time: %lu tt_id: %u",
+ tzid, (ulong) ttime, ttid));
res= table->file->index_next_same(table->record[0],
(byte*)table->field[0]->ptr, 4);
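
The DBUG_PRINT changes above replace %lld with a cast to ulong plus %lu, keeping the format specifier and the promoted argument in sync on platforms whose printf may not understand %lld. The same pattern in isolation, with invented values:

#include <cstdio>
#include <ctime>

int main() {
  time_t trans_time = 1133123456;   // hypothetical transition time
  long   correction = 23;           // hypothetical leap-second correction

  // Cast to a type that matches the specifier exactly, as the patch does,
  // instead of relying on %lld being available everywhere.
  std::printf("tt_time: %lu offset=%ld\n",
              (unsigned long)trans_time, correction);
  return 0;
}
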
diff --git a/sql/uniques.cc b/sql/uniques.cc
index ad074f8b2b0..c7bdbdeb207 100644
--- a/sql/uniques.cc
+++ b/sql/uniques.cc
@@ -55,18 +55,19 @@ int unique_write_to_ptrs(gptr key, element_count count, Unique *unique)
}
Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg,
- uint size_arg, ulong max_in_memory_size_arg)
+ uint size_arg, ulonglong max_in_memory_size_arg)
:max_in_memory_size(max_in_memory_size_arg), size(size_arg), elements(0)
{
my_b_clear(&file);
- init_tree(&tree, max_in_memory_size / 16, 0, size, comp_func, 0, NULL,
- comp_func_fixed_arg);
+ init_tree(&tree, (ulong) (max_in_memory_size / 16), 0, size, comp_func, 0,
+ NULL, comp_func_fixed_arg);
/* If the following fails, the next add will also fail */
my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16);
/*
If you change the following, change it in get_max_elements function, too.
*/
- max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size);
+ max_elements= (ulong) (max_in_memory_size /
+ ALIGN_SIZE(sizeof(TREE_ELEMENT)+size));
VOID(open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE,
MYF(MY_WME)));
}
@@ -260,15 +261,15 @@ static double get_merge_many_buffs_cost(uint *buffer,
*/
double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size,
- ulong max_in_memory_size)
+ ulonglong max_in_memory_size)
{
ulong max_elements_in_tree;
ulong last_tree_elems;
int n_full_trees; /* number of trees in unique - 1 */
double result;
- max_elements_in_tree=
- max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size);
+ max_elements_in_tree= ((ulong) max_in_memory_size /
+ ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size));
n_full_trees= nkeys / max_elements_in_tree;
last_tree_elems= nkeys % max_elements_in_tree;
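
get_use_cost() starts by splitting the nkeys keys into a number of full in-memory trees plus a partially filled last tree. A tiny worked version of that split, with an invented memory budget and per-element footprint:

#include <cstdint>
#include <cstdio>

int main() {
  // Invented inputs: memory budget, a stand-in for
  // ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size), and the total key count.
  const uint64_t      max_in_memory_size = 1ULL * 1024 * 1024;
  const unsigned      elem_footprint     = 40;
  const unsigned long nkeys              = 100000;

  unsigned long max_elements_in_tree =
      (unsigned long)(max_in_memory_size / elem_footprint);

  // Trees that fill up and are flushed, plus what the last tree holds.
  int           n_full_trees    = (int)(nkeys / max_elements_in_tree);
  unsigned long last_tree_elems = nkeys % max_elements_in_tree;

  std::printf("full trees=%d, last tree holds %lu keys\n",
              n_full_trees, last_tree_elems);
  return 0;
}
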
@@ -386,9 +387,11 @@ C_MODE_END
/*
DESCRIPTION
+
Function is very similar to merge_buffers, but instead of writing sorted
unique keys to the output file, it invokes walk_action for each key.
This saves I/O if you need to pass through all unique keys only once.
+
SYNOPSIS
merge_walk()
All params are 'IN' (but see comment for begin, end):
@@ -416,7 +419,7 @@ C_MODE_END
<> 0 error
*/
-static bool merge_walk(uchar *merge_buffer, uint merge_buffer_size,
+static bool merge_walk(uchar *merge_buffer, ulong merge_buffer_size,
uint key_length, BUFFPEK *begin, BUFFPEK *end,
tree_walk_action walk_action, void *walk_action_arg,
qsort_cmp2 compare, void *compare_arg,
@@ -425,14 +428,15 @@ static bool merge_walk(uchar *merge_buffer, uint merge_buffer_size,
BUFFPEK_COMPARE_CONTEXT compare_context = { compare, compare_arg };
QUEUE queue;
if (end <= begin ||
- merge_buffer_size < key_length * (end - begin + 1) ||
- init_queue(&queue, end - begin, offsetof(BUFFPEK, key), 0,
+ merge_buffer_size < (ulong) (key_length * (end - begin + 1)) ||
+ init_queue(&queue, (uint) (end - begin), offsetof(BUFFPEK, key), 0,
buffpek_compare, &compare_context))
return 1;
/* we need space for one key when a piece of merge buffer is re-read */
merge_buffer_size-= key_length;
uchar *save_key_buff= merge_buffer + merge_buffer_size;
- uint max_key_count_per_piece= merge_buffer_size/(end-begin)/key_length;
+ uint max_key_count_per_piece= (uint) (merge_buffer_size/(end-begin) /
+ key_length);
/* if piece_size is aligned reuse_freed_buffer will always hit */
uint piece_size= max_key_count_per_piece * key_length;
uint bytes_read; /* to hold return value of read_to_buffer */
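
merge_walk() sets one key's worth of space aside at the end of the buffer for re-reads and divides the rest evenly across the on-disk chunks, rounded down to whole keys. The same arithmetic as a standalone sketch, with an invented buffer size, key width, and chunk count:

#include <cstdio>

int main() {
  // Invented inputs: buffer handed to merge_walk, key width, chunk count
  // (end - begin in the real code).
  unsigned long  merge_buffer_size = 1UL << 20;   // 1 MiB
  const unsigned key_length        = 24;
  const unsigned n_chunks          = 7;

  // The buffer must hold at least one key per chunk plus one spare key.
  if (merge_buffer_size < (unsigned long)key_length * (n_chunks + 1))
    return 1;

  // Reserve the spare key, then split the remainder per chunk.
  merge_buffer_size -= key_length;
  unsigned max_key_count_per_piece =
      (unsigned)(merge_buffer_size / n_chunks / key_length);
  unsigned piece_size = max_key_count_per_piece * key_length;

  std::printf("keys per piece=%u, piece=%u bytes\n",
              max_key_count_per_piece, piece_size);
  return 0;
}
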
@@ -548,6 +552,9 @@ end:
bool Unique::walk(tree_walk_action action, void *walk_action_arg)
{
+ int res;
+ uchar *merge_buffer;
+
if (elements == 0) /* the whole tree is in memory */
return tree_walk(&tree, action, walk_action_arg, left_root_right);
@@ -556,15 +563,14 @@ bool Unique::walk(tree_walk_action action, void *walk_action_arg)
return 1;
if (flush_io_cache(&file) || reinit_io_cache(&file, READ_CACHE, 0L, 0, 0))
return 1;
- uchar *merge_buffer= (uchar *) my_malloc(max_in_memory_size, MYF(0));
- if (merge_buffer == 0)
+ if (!(merge_buffer= (uchar *) my_malloc((ulong) max_in_memory_size, MYF(0))))
return 1;
- int res= merge_walk(merge_buffer, max_in_memory_size, size,
- (BUFFPEK *) file_ptrs.buffer,
- (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements,
- action, walk_action_arg,
- tree.compare, tree.custom_arg, &file);
- x_free(merge_buffer);
+ res= merge_walk(merge_buffer, (ulong) max_in_memory_size, size,
+ (BUFFPEK *) file_ptrs.buffer,
+ (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements,
+ action, walk_action_arg,
+ tree.compare, tree.custom_arg, &file);
+ my_free((char*) merge_buffer, MYF(0));
return res;
}
@@ -615,7 +621,7 @@ bool Unique::get(TABLE *table)
sort_param.sort_form=table;
sort_param.rec_length= sort_param.sort_length= sort_param.ref_length=
size;
- sort_param.keys= max_in_memory_size / sort_param.sort_length;
+ sort_param.keys= (uint) (max_in_memory_size / sort_param.sort_length);
sort_param.not_killable=1;
if (!(sort_buffer=(uchar*) my_malloc((sort_param.keys+1) *