author    Sergei Golubchik <sergii@pisem.net>    2013-01-28 13:36:05 +0100
committer Sergei Golubchik <sergii@pisem.net>    2013-01-28 13:36:05 +0100
commit    87de27e46b889d86cd872adf8ce613128d379911 (patch)
tree      87f3ec6d65538b9bc199408f58daaa2ec9e7a702 /sql
parent    f08a404a6d87f7c8c7fef1862eaf768cf920677b (diff)
parent    34e84c227f1cb76771eabf229b4cf1b5292eef25 (diff)
download  mariadb-git-87de27e46b889d86cd872adf8ce613128d379911.tar.gz
5.3 merge
Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc         15
-rw-r--r--  sql/field.h           1
-rw-r--r--  sql/filesort.cc      25
-rw-r--r--  sql/item_row.cc       3
-rw-r--r--  sql/item_strfunc.cc  20
-rw-r--r--  sql/opt_range.cc      8
-rw-r--r--  sql/sql_insert.cc     2
-rw-r--r--  sql/sql_select.cc    94
8 files changed, 86 insertions(+), 82 deletions(-)
diff --git a/sql/field.cc b/sql/field.cc
index a3d3d951887..4667702c145 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -1119,6 +1119,21 @@ bool Field::type_can_have_key_part(enum enum_field_types type)
}
+void Field::make_sort_key(uchar *buff,uint length)
+{
+ if (maybe_null())
+ {
+ if (is_null())
+ {
+ bzero(buff, length + 1);
+ return;
+ }
+ *buff++= 1;
+ }
+ sort_string(buff, length);
+}
+
+
/**
Numeric fields base class constructor.
*/
diff --git a/sql/field.h b/sql/field.h
index f22bab0409d..a6382026d97 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -410,6 +410,7 @@ public:
return bytes;
}
+ void make_sort_key(uchar *buff, uint length);
virtual void make_field(Send_field *);
virtual void sort_string(uchar *buff,uint length)=0;
virtual bool optimize_range(uint idx, uint part);
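
Note on the field.cc/field.h hunks above: the new Field::make_sort_key() centralizes the NULL handling that callers previously open-coded. A nullable field contributes one extra leading indicator byte (0 for NULL, with a zero-filled body; 1 for a present value, followed by the memcmp-ordered image from sort_string()). A minimal standalone sketch of that key layout, using std::optional and a hypothetical helper name in place of the server's Field API:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <optional>
    #include <string>
    #include <vector>

    // Build a memcmp-ordered key image for a nullable string column.
    // Layout mirrors the patch: [NULL indicator byte][fixed-width sort image].
    std::vector<uint8_t> make_sort_key(const std::optional<std::string> &val,
                                       size_t length)
    {
      std::vector<uint8_t> key(length + 1, 0);      // +1 for the indicator byte
      if (!val)
        return key;                                 // NULL: indicator 0, body all zeros
      key[0]= 1;                                    // value present
      std::memcpy(key.data() + 1, val->data(),
                  std::min(length, val->size()));   // body padded with zeros to 'length'
      return key;
    }

Because the indicator sorts before any value byte, NULLs order first under an ascending memcmp of these keys.
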
diff --git a/sql/filesort.cc b/sql/filesort.cc
index e68fc1dc594..9af57f669e4 100644
--- a/sql/filesort.cc
+++ b/sql/filesort.cc
@@ -789,21 +789,9 @@ static void make_sortkey(register SORTPARAM *param,
bool maybe_null=0;
if ((field=sort_field->field))
{ // Field
- if (field->maybe_null())
- {
- if (field->is_null())
- {
- if (sort_field->reverse)
- bfill(to,sort_field->length+1,(char) 255);
- else
- bzero((char*) to,sort_field->length+1);
- to+= sort_field->length+1;
- continue;
- }
- else
- *to++=1;
- }
- field->sort_string(to, sort_field->length);
+ field->make_sort_key(to, sort_field->length);
+ if ((maybe_null = field->maybe_null()))
+ to++;
}
else
{ // Item
@@ -955,8 +943,11 @@ static void make_sortkey(register SORTPARAM *param,
}
if (sort_field->reverse)
{ /* Revers key */
- if (maybe_null)
- to[-1]= ~to[-1];
+ if (maybe_null && (to[-1]= !to[-1]))
+ {
+ to+= sort_field->length; // don't waste the time reversing all 0's
+ continue;
+ }
length=sort_field->length;
while (length--)
{
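
The filesort.cc hunks build on that layout: for a descending (reverse) sort field the key bytes are inverted so a plain ascending memcmp still yields the right order, and for a NULL value only the indicator byte is flipped (0 to 1) while the zero-filled body is skipped, as the added comment says. A standalone sketch of that idea, with a hypothetical helper rather than the server routine:

    #include <cstdint>
    #include <vector>

    // Turn an ascending memcmp key into a descending one by inverting its bytes.
    // For a NULL key it suffices to flip the indicator and leave the zero body alone.
    void reverse_sort_key(std::vector<uint8_t> &key, bool nullable)
    {
      if (nullable)
      {
        key[0]= !key[0];          // 0 (NULL) <-> 1 (value present)
        if (key[0])               // was NULL: body is all zeros, nothing to invert
          return;
      }
      for (size_t i= nullable ? 1 : 0; i < key.size(); i++)
        key[i]= static_cast<uint8_t>(~key[i]);
    }

Either way NULL keys end up larger than non-NULL keys, so NULLs sort last under DESC, mirroring the ascending case where they sort first.
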
diff --git a/sql/item_row.cc b/sql/item_row.cc
index cfd2f34095c..ee7bd837553 100644
--- a/sql/item_row.cc
+++ b/sql/item_row.cc
@@ -74,7 +74,8 @@ bool Item_row::fix_fields(THD *thd, Item **ref)
Item **arg, **arg_end;
for (arg= items, arg_end= items+arg_count; arg != arg_end ; arg++)
{
- if ((*arg)->fix_fields(thd, arg))
+ if (!(*arg)->fixed &&
+ (*arg)->fix_fields(thd, arg))
return TRUE;
// we can't assign 'item' before, because fix_fields() can change arg
Item *item= *arg;
diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc
index 3fc5e504b85..c0a6120b361 100644
--- a/sql/item_strfunc.cc
+++ b/sql/item_strfunc.cc
@@ -1337,7 +1337,7 @@ void Item_str_func::left_right_max_length()
if (args[1]->const_item())
{
int length= (int) args[1]->val_int();
- if (length <= 0)
+ if (args[1]->null_value || length <= 0)
char_length=0;
else
set_if_smaller(char_length, (uint) length);
@@ -1444,7 +1444,9 @@ void Item_func_substr::fix_length_and_dec()
if (args[1]->const_item())
{
int32 start= (int32) args[1]->val_int();
- if (start < 0)
+ if (args[1]->null_value)
+ max_length= 0;
+ else if (start < 0)
max_length= ((uint)(-start) > max_length) ? 0 : (uint)(-start);
else
max_length-= min((uint)(start - 1), max_length);
@@ -1452,7 +1454,7 @@ void Item_func_substr::fix_length_and_dec()
if (arg_count == 3 && args[2]->const_item())
{
int32 length= (int32) args[2]->val_int();
- if (length <= 0)
+ if (args[2]->null_value || length <= 0)
max_length=0; /* purecov: inspected */
else
set_if_smaller(max_length,(uint) length);
@@ -2650,7 +2652,9 @@ void Item_func_repeat::fix_length_and_dec()
/* Assumes that the maximum length of a String is < INT_MAX32. */
/* Set here so that rest of code sees out-of-bound value as such. */
- if (count > INT_MAX32)
+ if (args[1]->null_value)
+ count= 0;
+ else if (count > INT_MAX32)
count= INT_MAX32;
ulonglong char_length= (ulonglong) args[0]->max_char_length() * count;
@@ -2729,7 +2733,9 @@ void Item_func_rpad::fix_length_and_dec()
DBUG_ASSERT(collation.collation->mbmaxlen > 0);
/* Assumes that the maximum length of a String is < INT_MAX32. */
/* Set here so that rest of code sees out-of-bound value as such. */
- if (char_length > INT_MAX32)
+ if (args[1]->null_value)
+ char_length= 0;
+ else if (char_length > INT_MAX32)
char_length= INT_MAX32;
fix_char_length_ulonglong(char_length);
}
@@ -2833,7 +2839,9 @@ void Item_func_lpad::fix_length_and_dec()
DBUG_ASSERT(collation.collation->mbmaxlen > 0);
/* Assumes that the maximum length of a String is < INT_MAX32. */
/* Set here so that rest of code sees out-of-bound value as such. */
- if (char_length > INT_MAX32)
+ if (args[1]->null_value)
+ char_length= 0;
+ else if (char_length > INT_MAX32)
char_length= INT_MAX32;
fix_char_length_ulonglong(char_length);
}
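
The item_strfunc.cc hunks all apply the same guard: val_int() on a constant argument returns 0 when the argument is SQL NULL and only reports that through the item's null_value flag, so a length estimate must test the flag before trusting the number. A standalone sketch of the pattern, with a hypothetical stand-in for the Item interface:

    #include <cstdint>
    #include <optional>

    // Hypothetical stand-in for the Item API: val_int() collapses NULL to 0 and
    // records the fact in null_value -- exactly the ambiguity the patch guards against.
    struct ConstArg
    {
      std::optional<int64_t> value;
      bool null_value= false;
      int64_t val_int()
      {
        null_value= !value.has_value();
        return value.value_or(0);
      }
    };

    // LEFT()/SUBSTR()-style length estimate: NULL or non-positive length gives 0.
    uint32_t estimated_char_length(ConstArg &len_arg, uint32_t char_length)
    {
      int64_t length= len_arg.val_int();
      if (len_arg.null_value || length <= 0)        // the added guard
        return 0;
      return length < char_length ? static_cast<uint32_t>(length) : char_length;
    }
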
diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index f3dd3b8e2d0..8f094c47f67 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -13382,9 +13382,10 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
*/
if (min_max_arg_part && min_max_arg_part->field->is_null())
{
+ uchar *tmp_key_buff= (uchar*)my_alloca(max_used_key_length);
/* Find the first subsequent record without NULL in the MIN/MAX field. */
- key_copy(tmp_record, record, index_info, max_used_key_length);
- result= file->ha_index_read_map(record, tmp_record,
+ key_copy(tmp_key_buff, record, index_info, max_used_key_length);
+ result= file->ha_index_read_map(record, tmp_key_buff,
make_keypart_map(real_key_parts),
HA_READ_AFTER_KEY);
/*
@@ -13400,10 +13401,11 @@ int QUICK_GROUP_MIN_MAX_SELECT::next_min()
if (!result)
{
if (key_cmp(index_info->key_part, group_prefix, real_prefix_len))
- key_restore(record, tmp_record, index_info, 0);
+ key_restore(record, tmp_key_buff, index_info, 0);
}
else if (result == HA_ERR_KEY_NOT_FOUND || result == HA_ERR_END_OF_FILE)
result= 0; /* There is a result in any case. */
+ my_afree(tmp_key_buff);
}
}
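
The opt_range.cc hunk stops reusing tmp_record (a record buffer) as scratch space and instead takes a short-lived buffer of exactly max_used_key_length bytes with my_alloca(), freeing it with my_afree() after the key has been copied, probed and, if needed, restored. A standalone sketch of that save/probe/restore discipline, with std::vector standing in for my_alloca/my_afree and hypothetical probe/left_group callbacks:

    #include <cstdint>
    #include <cstring>
    #include <functional>
    #include <vector>

    // Save the current key image into a scratch buffer, run a probe that may
    // overwrite the record, and restore from the saved copy if the probe walked
    // out of the group prefix we care about.
    bool probe_with_saved_key(uint8_t *record, size_t key_length,
                              const std::function<bool(const uint8_t *)> &probe,
                              const std::function<bool()> &left_group)
    {
      std::vector<uint8_t> saved_key(key_length);           // scratch, released on return
      std::memcpy(saved_key.data(), record, key_length);    // key_copy() analogue
      bool found= probe(saved_key.data());                  // may clobber 'record'
      if (found && left_group())
        std::memcpy(record, saved_key.data(), key_length);  // key_restore() analogue
      return found;
    }
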
diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc
index d6212ded9ba..fc6151e44d9 100644
--- a/sql/sql_insert.cc
+++ b/sql/sql_insert.cc
@@ -2320,7 +2320,7 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd)
{
my_ptrdiff_t adjust_ptrs;
Field **field,**org_field, *found_next_number_field;
- Field **vfield;
+ Field **UNINIT_VAR(vfield);
TABLE *copy;
TABLE_SHARE *share;
uchar *bitmap;
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 232ace2858c..4fb05fe71c8 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -227,7 +227,7 @@ static int create_sort_index(THD *thd, JOIN *join, ORDER *order,
static int remove_duplicates(JOIN *join,TABLE *entry,List<Item> &fields,
Item *having);
static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field,
- ulong offset,Item *having);
+ Item *having);
static int remove_dup_with_hash_index(THD *thd,TABLE *table,
uint field_count, Field **first_field,
ulong key_length,Item *having);
@@ -19172,19 +19172,24 @@ void JOIN::clean_pre_sort_join_tab()
}
-/*****************************************************************************
- Remove duplicates from tmp table
- This should be recoded to add a unique index to the table and remove
- duplicates
- Table is a locked single thread table
- fields is the number of fields to check (from the end)
-*****************************************************************************/
+/**
+ Compare fields from table->record[0] and table->record[1],
+ possibly skipping few first fields.
+
+ @param table
+ @param ptr field to start the comparison from,
+ somewhere in the table->field[] array
+ @retval 1 different
+ @retval 0 identical
+*/
static bool compare_record(TABLE *table, Field **ptr)
{
for (; *ptr ; ptr++)
{
- if ((*ptr)->cmp_offset(table->s->rec_buff_length))
+ Field *f= *ptr;
+ if (f->is_null() != f->is_null(table->s->rec_buff_length) ||
+ (!f->is_null() && f->cmp_offset(table->s->rec_buff_length)))
return 1;
}
return 0;
@@ -19212,16 +19217,16 @@ static void free_blobs(Field **ptr)
static int
-remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
+remove_duplicates(JOIN *join, TABLE *table, List<Item> &fields, Item *having)
{
int error;
- ulong reclength,offset;
+ ulong keylength= 0;
uint field_count;
THD *thd= join->thd;
DBUG_ENTER("remove_duplicates");
- entry->reginfo.lock_type=TL_WRITE;
+ table->reginfo.lock_type=TL_WRITE;
/* Calculate how many saved fields there is in list */
field_count=0;
@@ -19238,11 +19243,10 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
join->unit->select_limit_cnt= 1; // Only send first row
DBUG_RETURN(0);
}
- Field **first_field=entry->field+entry->s->fields - field_count;
- offset= (field_count ?
- entry->field[entry->s->fields - field_count]->
- offset(entry->record[0]) : 0);
- reclength=entry->s->reclength-offset;
+
+ Field **first_field=table->field+table->s->fields - field_count;
+ for (Field **ptr=first_field; *ptr; ptr++)
+ keylength+= (*ptr)->sort_length() + (*ptr)->maybe_null();
/*
Disable LIMIT ROWS EXAMINED in order to avoid interrupting prematurely
@@ -19251,18 +19255,17 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
thd->lex->limit_rows_examined_cnt= ULONGLONG_MAX;
if (thd->killed == ABORT_QUERY)
thd->reset_killed();
- free_io_cache(entry); // Safety
- entry->file->info(HA_STATUS_VARIABLE);
- if (entry->s->db_type() == heap_hton ||
- (!entry->s->blob_fields &&
- ((ALIGN_SIZE(reclength) + HASH_OVERHEAD) * entry->file->stats.records <
+
+ free_io_cache(table); // Safety
+ table->file->info(HA_STATUS_VARIABLE);
+ if (table->s->db_type() == heap_hton ||
+ (!table->s->blob_fields &&
+ ((ALIGN_SIZE(keylength) + HASH_OVERHEAD) * table->file->stats.records <
thd->variables.sortbuff_size)))
- error=remove_dup_with_hash_index(join->thd, entry,
- field_count, first_field,
- reclength, having);
+ error=remove_dup_with_hash_index(join->thd, table, field_count, first_field,
+ keylength, having);
else
- error=remove_dup_with_compare(join->thd, entry, first_field, offset,
- having);
+ error=remove_dup_with_compare(join->thd, table, first_field, having);
if (join->select_lex != join->select_lex->master_unit()->fake_select_lex)
thd->lex->set_limit_rows_examined();
@@ -19272,18 +19275,13 @@ remove_duplicates(JOIN *join, TABLE *entry,List<Item> &fields, Item *having)
static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
- ulong offset, Item *having)
+ Item *having)
{
handler *file=table->file;
- char *org_record,*new_record;
- uchar *record;
+ uchar *record=table->record[0];
int error;
- ulong reclength= table->s->reclength-offset;
DBUG_ENTER("remove_dup_with_compare");
- org_record=(char*) (record=table->record[0])+offset;
- new_record=(char*) table->record[1]+offset;
-
if (file->ha_rnd_init_with_error(1))
DBUG_RETURN(1);
@@ -19320,7 +19318,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field,
error=0;
goto err;
}
- memcpy(new_record,org_record,reclength);
+ store_record(table,record[1]);
/* Read through rest of file and mark duplicated rows deleted */
bool found=0;
@@ -19379,8 +19377,9 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
int error;
handler *file= table->file;
ulong extra_length= ALIGN_SIZE(key_length)-key_length;
- uint *field_lengths,*field_length;
+ uint *field_lengths, *field_length;
HASH hash;
+ Field **ptr;
DBUG_ENTER("remove_dup_with_hash_index");
if (!my_multi_malloc(MYF(MY_WME),
@@ -19392,21 +19391,8 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
NullS))
DBUG_RETURN(1);
- {
- Field **ptr;
- ulong total_length= 0;
- for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++)
- {
- uint length= (*ptr)->sort_length();
- (*field_length++)= length;
- total_length+= length;
- }
- DBUG_PRINT("info",("field_count: %u key_length: %lu total_length: %lu",
- field_count, key_length, total_length));
- DBUG_ASSERT(total_length <= key_length);
- key_length= total_length;
- extra_length= ALIGN_SIZE(key_length)-key_length;
- }
+ for (ptr= first_field, field_length=field_lengths ; *ptr ; ptr++)
+ (*field_length++)= (*ptr)->sort_length();
if (my_hash_init(&hash, &my_charset_bin, (uint) file->stats.records, 0,
key_length, (my_hash_get_key) 0, 0, 0))
@@ -19446,10 +19432,10 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table,
/* copy fields to key buffer */
org_key_pos= key_pos;
field_length=field_lengths;
- for (Field **ptr= first_field ; *ptr ; ptr++)
+ for (ptr= first_field ; *ptr ; ptr++)
{
- (*ptr)->sort_string(key_pos,*field_length);
- key_pos+= *field_length++;
+ (*ptr)->make_sort_key(key_pos, *field_length);
+ key_pos+= (*ptr)->maybe_null() + *field_length++;
}
/* Check if it exists before */
if (my_hash_search(&hash, org_key_pos, key_length))
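
The sql_select.cc changes switch remove_duplicates() from raw-record comparisons (record offset plus reclength) to keys built with Field::make_sort_key(), sized as sort_length() plus one indicator byte per nullable field, and hashed to detect repeats. A standalone sketch of that "hash the NULL-aware key" idea, with std::unordered_set standing in for the server's HASH:

    #include <cstdint>
    #include <optional>
    #include <string>
    #include <unordered_set>
    #include <vector>

    using Row = std::vector<std::optional<std::string>>;

    // One NULL-aware key per row: indicator byte plus sort image per field.
    // (The server's per-field images are fixed-width via sort_length(); the '\0'
    // separator here is a simplification for variable-length strings.)
    static std::string row_key(const Row &row)
    {
      std::string key;
      for (const auto &field : row)
      {
        key.push_back(field ? '\1' : '\0');
        key+= field.value_or("");
        key.push_back('\0');
      }
      return key;
    }

    // Drop every row whose key has already been seen; returns the number removed.
    size_t remove_duplicates(std::vector<Row> &rows)
    {
      std::unordered_set<std::string> seen;
      size_t removed= 0;
      for (auto it= rows.begin(); it != rows.end(); )
      {
        if (seen.insert(row_key(*it)).second)
          ++it;
        else
        {
          it= rows.erase(it);
          removed++;
        }
      }
      return removed;
    }

Using the indicator byte in the key is what lets NULL and, say, an empty string hash to different values, matching the NULL-aware compare_record() added above.
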