| field | value | date |
|---|---|---|
| author | unknown <monty@hundin.mysql.fi> | 2002-01-16 00:42:52 +0200 |
| committer | unknown <monty@hundin.mysql.fi> | 2002-01-16 00:42:52 +0200 |
| commit | f015cbdc7e3427cf0f77012c4ce89f3cf8d5dd5f (patch) | |
| tree | 890cb295feeab3ebd6413663178be56ec5ed21a2 /sql | |
| parent | 71a5af5b52642cbe64960275f1994f5a57832ff3 (diff) | |
| download | mariadb-git-f015cbdc7e3427cf0f77012c4ce89f3cf8d5dd5f.tar.gz | |
Add support for NULL=NULL in keys (Used in GROUP BY optimization)
Add ISAM to Windows version
Fix of test results
Fixes for NULL keys in HEAP tables.
Docs/manual.texi:
Changelog
heap/hp_open.c:
Add support for NULL=NULL in keys (for GROUP BY)
heap/hp_rkey.c:
Cleanup
heap/hp_write.c:
Cleanup
include/config-win.h:
Add ISAM to Windows version
include/my_base.h:
Add support for NULL=NULL in keys (for GROUP BY)
libmysqld/Makefile.am:
Rename of innobase to innodb
myisam/mi_write.c:
Add support for NULL=NULL in keys (for GROUP BY)
BitKeeper/etc/ignore:
Added libmysqld/ha_innodb.cc to the ignore list
mysql-test/r/group_by.result:
Test of NULL keys in HEAP tables
mysql-test/r/heap.result:
Test of NULL keys in HEAP tables
mysql-test/r/null.result:
Cleanup
mysql-test/r/order_by.result:
Fix for result of new ORDER BY optimization
mysql-test/t/group_by.test:
Test of NULL keys in HEAP tables
mysql-test/t/heap.test:
Test of NULL keys in HEAP tables
mysql-test/t/null.test:
Cleanup
sql/ha_heap.cc:
Add support of NULL keys
sql/item_strfunc.h:
Fix for BINARY and CAST functions
sql/item_timefunc.h:
Fix for BINARY and CAST functions
sql/sql_parse.cc:
Cleanup
sql/sql_select.cc:
Add support for NULL=NULL in keys (for GROUP BY)
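
The central change in this commit is that key definitions built for GROUP BY temporary tables (both HEAP and MyISAM) now carry the HA_NULL_ARE_EQUAL flag, telling the storage engine that two NULL key values compare as equal, so all NULL group values collapse into a single group. The sketch below illustrates that idea at the key-segment level; the struct and function names are simplified stand-ins for illustration, not the real HP_KEYSEG/MI_KEYSEG code from the patch.

```cpp
#include <cstring>

// Hypothetical key segment descriptor; the fields mirror the idea of the
// server's key segments but are simplified for illustration.
struct KeySeg {
  unsigned null_bit;   // bit in the null byte that marks SQL NULL (0 = NOT NULL column)
  unsigned null_pos;   // offset of that null byte inside the row
  unsigned start;      // offset of the column data inside the row
  unsigned length;     // length of the column data
};

// Compare one key segment of two rows.  With nulls_are_equal set (the analogue
// of HA_NULL_ARE_EQUAL), two NULLs compare as equal, so a GROUP BY key built
// over a nullable column collects all NULL rows into a single group.
static int compare_seg(const KeySeg &seg, const unsigned char *a,
                       const unsigned char *b, bool nulls_are_equal)
{
  if (seg.null_bit)                              // nullable column
  {
    bool a_null= (a[seg.null_pos] & seg.null_bit) != 0;
    bool b_null= (b[seg.null_pos] & seg.null_bit) != 0;
    if (a_null || b_null)
    {
      if (a_null && b_null)
        return nulls_are_equal ? 0 : -1;         // NULL == NULL only if the flag is set
      return a_null ? -1 : 1;                    // NULL sorts before non-NULL
    }
  }
  return memcmp(a + seg.start, b + seg.start, seg.length);
}
```

Note that the patch only sets this flag for grouping keys: in the MyISAM temporary-table path the flag is guarded by !using_unique_constraint, so unique constraints keep the usual SQL behaviour in which NULL never equals NULL.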
Diffstat (limited to 'sql')
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | sql/ha_heap.cc | 20 |
| -rw-r--r-- | sql/item_strfunc.h | 3 |
| -rw-r--r-- | sql/item_timefunc.h | 3 |
| -rw-r--r-- | sql/sql_parse.cc | 33 |
| -rw-r--r-- | sql/sql_select.cc | 77 |
5 files changed, 83 insertions, 53 deletions
```diff
diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc
index 518a9c38d82..5f482bca1e8 100644
--- a/sql/ha_heap.cc
+++ b/sql/ha_heap.cc
@@ -33,7 +33,7 @@ const char **ha_heap::bas_ext() const
 
 int ha_heap::open(const char *name, int mode, uint test_if_locked)
 {
-  uint key,part,parts,mem_per_row=0;
+  uint key,parts,mem_per_row=0;
   ulong max_rows;
   HP_KEYDEF *keydef;
   HP_KEYSEG *seg;
@@ -48,24 +48,27 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
   for (key=0 ; key < table->keys ; key++)
   {
     KEY *pos=table->key_info+key;
+    KEY_PART_INFO *key_part= pos->key_part;
+    KEY_PART_INFO *key_part_end= key_part+pos->key_parts;
+
     mem_per_row += (pos->key_length + (sizeof(char*) * 2));
 
     keydef[key].keysegs=(uint) pos->key_parts;
-    keydef[key].flag = (pos->flags & HA_NOSAME);
+    keydef[key].flag = (pos->flags & (HA_NOSAME | HA_NULL_ARE_EQUAL));
     keydef[key].seg=seg;
 
-    for (part=0 ; part < pos->key_parts ; part++)
+    for (; key_part != key_part_end ; key_part++, seg++)
     {
-      uint flag=pos->key_part[part].key_type;
-      Field *field=pos->key_part[part].field;
+      uint flag=key_part->key_type;
+      Field *field=key_part->field;
       if (!f_is_packed(flag) &&
           f_packtype(flag) == (int) FIELD_TYPE_DECIMAL &&
           !(flag & FIELDFLAG_BINARY))
         seg->type= (int) HA_KEYTYPE_TEXT;
       else
         seg->type= (int) HA_KEYTYPE_BINARY;
-      seg->start=(uint) pos->key_part[part].offset;
-      seg->length=(uint) pos->key_part[part].length;
+      seg->start=(uint) key_part->offset;
+      seg->length=(uint) key_part->length;
       if (field->null_ptr)
       {
         seg->null_bit=field->null_bit;
@@ -88,7 +91,8 @@ int ha_heap::open(const char *name, int mode, uint test_if_locked)
                           table->max_rows : max_rows),
                     table->min_rows);
   my_free((gptr) keydef,MYF(0));
-  info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
+  if (file)
+    info(HA_STATUS_NO_LOCK | HA_STATUS_CONST | HA_STATUS_VARIABLE);
   ref_length=sizeof(HEAP_PTR);
   return (!file ? errno : 0);
 }
diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h
index 870edffeb7e..1279a5099d5 100644
--- a/sql/item_strfunc.h
+++ b/sql/item_strfunc.h
@@ -435,7 +435,8 @@ class Item_func_binary :public Item_str_func
 public:
   Item_func_binary(Item *a) :Item_str_func(a) {}
   const char *func_name() const { return "binary"; }
-  String *val_str(String *a) { return (args[0]->val_str(a)); }
+  String *val_str(String *a)
+  { a=args[0]->val_str(a); null_value=args[0]->null_value; return a; }
   void fix_length_and_dec() { binary=1; max_length=args[0]->max_length; }
   void print(String *str) { print_op(str); }
 };
diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h
index c9daa2316e8..32b85e7f028 100644
--- a/sql/item_timefunc.h
+++ b/sql/item_timefunc.h
@@ -418,7 +418,8 @@ class Item_typecast :public Item_str_func
 {
 public:
   Item_typecast(Item *a) :Item_str_func(a) {}
-  String *val_str(String *a) { return (args[0]->val_str(a)); }
+  String *val_str(String *a)
+  { a=args[0]->val_str(a); null_value=args[0]->null_value; return a; }
   void fix_length_and_dec() { max_length=args[0]->max_length; }
   void print(String *str);
 };
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index 2fbdf05e826..63abf757c1e 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -557,9 +557,9 @@ pthread_handler_decl(handle_one_connection,arg)
 
   pthread_detach_this_thread();
 
-#if !defined( __WIN__) && !defined(OS2)  /* Win32 calls this in pthread_create */
-  if (my_thread_init())        // needed to be called first before we call
-                               // DBUG_ macros
+#if !defined( __WIN__) && !defined(OS2)  // Win32 calls this in pthread_create
+  // The following calls needs to be done before we call DBUG_ macros
+  if (my_thread_init())
   {
     close_connection(&thd->net,ER_OUT_OF_RESOURCES);
     statistic_increment(aborted_connects,&LOCK_thread_count);
@@ -568,13 +568,13 @@ pthread_handler_decl(handle_one_connection,arg)
   }
 #endif
 
-  // handle_one_connection() is the only way a thread would start
-  // and would always be on top of the stack
-  // therefore, the thread stack always starts at the address of the first
-  // local variable of handle_one_connection, which is thd
-  // we need to know the start of the stack so that we could check for
-  // stack overruns
-
+  /*
+    handle_one_connection() is the only way a thread would start
+    and would always be on top of the stack, therefore, the thread
+    stack always starts at the address of the first local variable
+    of handle_one_connection, which is thd. We need to know the
+    start of the stack so that we could check for stack overruns.
+  */
   DBUG_PRINT("info", ("handle_one_connection called by thread %d\n",
                       thd->thread_id));
   // now that we've called my_thread_init(), it is safe to call DBUG_*
@@ -634,12 +634,12 @@ pthread_handler_decl(handle_one_connection,arg)
     if (net->error && net->vio != 0)
     {
       if (!thd->killed && opt_warnings)
-        sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
-                       thd->thread_id,(thd->db ? thd->db : "unconnected"),
-                       thd->user ? thd->user : "unauthenticated",
-                       thd->host_or_ip,
-                       (net->last_errno ? ER(net->last_errno) :
-                        ER(ER_UNKNOWN_ERROR)));
+        sql_print_error(ER(ER_NEW_ABORTING_CONNECTION),
+                        thd->thread_id,(thd->db ? thd->db : "unconnected"),
+                        thd->user ? thd->user : "unauthenticated",
+                        thd->host_or_ip,
+                        (net->last_errno ? ER(net->last_errno) :
+                         ER(ER_UNKNOWN_ERROR)));
       send_error(net,net->last_errno,NullS);
       thread_safe_increment(aborted_threads,&LOCK_thread_count);
     }
@@ -1216,7 +1216,6 @@ mysql_execute_command(void)
 #endif
   }
 
-  thread_safe_increment(com_stat[lex->sql_command],&LOCK_thread_count);
   /*
     Skip if we are in the slave thread, some table rules have been given
     and the table list says the query should not be replicated
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 144b76407ab..9cda33d20d0 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -183,7 +183,7 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
              ulong select_options,select_result *result)
 {
   TABLE *tmp_table;
-  int error,tmp;
+  int error, tmp_error, tmp;
   bool need_tmp,hidden_group_fields;
   bool simple_order,simple_group,no_order, skip_sort_order;
   Item::cond_result cond_value;
@@ -678,8 +678,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
 
     /* Copy data to the temporary table */
     thd->proc_info="Copying to tmp table";
-    if (do_select(&join,(List<Item> *) 0,tmp_table,0))
+    if ((tmp_error=do_select(&join,(List<Item> *) 0,tmp_table,0)))
+    {
+      error=tmp_error;
       goto err;                            /* purecov: inspected */
+    }
     if (join.having)
       join.having=having=0;                // Allready done
 
@@ -752,9 +755,11 @@ mysql_select(THD *thd,TABLE_LIST *tables,List<Item> &fields,COND *conds,
        group=0;
       }
       thd->proc_info="Copying to group table";
+      tmp_error= -1;
       if (make_sum_func_list(&join,all_fields) ||
-          do_select(&join,(List<Item> *) 0,tmp_table2,0))
+          (tmp_error=do_select(&join,(List<Item> *) 0,tmp_table2,0)))
       {
+        error=tmp_error;
        free_tmp_table(thd,tmp_table2);
        goto err;                           /* purecov: inspected */
       }
@@ -3736,14 +3741,16 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
       if (maybe_null)
       {
        /*
-         To be able to group on NULL, we move the null bit to be
-         just before the column.
-         The null byte is updated by 'end_update()'
+         To be able to group on NULL, we reserve place in group_buff
+         for the NULL flag just before the column.
+         The field data is after this flag.
+         The NULL flag is updated by 'end_update()' and 'end_write()'
        */
-       key_part_info->null_bit=1;
-       key_part_info->null_offset= key_part_info->offset-1;
-       group->field->move_field((char*) group_buff+1, (uchar*) group_buff,
-                                1);
+       keyinfo->flags|= HA_NULL_ARE_EQUAL;  // def. that NULL == NULL
+       key_part_info->null_bit=field->null_bit;
+       key_part_info->null_offset= (uint) (field->null_ptr -
+                                           (uchar*) table->record[0]);
+       group->field->move_field((char*) ++group->buff);
       }
       else
        group->field->move_field((char*) group_buff);
@@ -3899,10 +3906,10 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
     for (uint i=0; i < keyinfo->key_parts ; i++,seg++)
     {
       Field *field=keyinfo->key_part[i].field;
-      seg->flag=0;
-      seg->language=MY_CHARSET_CURRENT;
-      seg->length=keyinfo->key_part[i].length;
-      seg->start=keyinfo->key_part[i].offset;
+      seg->flag= 0;
+      seg->language= MY_CHARSET_CURRENT;
+      seg->length= keyinfo->key_part[i].length;
+      seg->start= keyinfo->key_part[i].offset;
       if (field->flags & BLOB_FLAG)
       {
        seg->type=
@@ -3923,11 +3930,17 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param,
            keyinfo->key_part[i].length > 4)
          seg->flag|=HA_SPACE_PACK;
       }
-      if (using_unique_constraint &&
-         !(field->flags & NOT_NULL_FLAG))
+      if (!(field->flags & NOT_NULL_FLAG))
       {
        seg->null_bit= field->null_bit;
        seg->null_pos= (uint) (field->null_ptr - (uchar*) table->record[0]);
+       /*
+         We are using a GROUP BY on something that contains NULL
+         In this case we have to tell MyISAM that two NULL should
+         on INSERT be compared as equal
+       */
+       if (!using_unique_constraint)
+         keydef.flag|= HA_NULL_ARE_EQUAL;
       }
     }
   }
@@ -4065,9 +4078,12 @@ bool create_myisam_from_heap(TABLE *table, TMP_TABLE_PARAM *param, int error,
 }
 
 
-/*****************************************************************************
-** Make a join of all tables and write it on socket or to table
-*****************************************************************************/
+/****************************************************************************
+  Make a join of all tables and write it on socket or to table
+  Return:  0 if ok
+           1 if error is sent
+          -1 if error should be sent
+****************************************************************************/
 
 static int
 do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
@@ -4144,15 +4160,21 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
     if (error == -3)
       error=0;                              /* select_limit used */
   }
+
+  /* Return 1 if error is sent;  -1 if error should be sent */
   if (error < 0)
-    join->result->send_error(0,NullS);      /* purecov: inspected */
+  {
+    join->result->send_error(0,NullS);      /* purecov: inspected */
+    error=1;                                // Error sent
+  }
   else
   {
-    if (!table)                             // If sending data to client
+    error=0;
+    if (!table)                             // If sending data to client
     {
       join_free(join);                      // Unlock all cursors
       if (join->result->send_eof())
-        error= -1;
+        error= 1;                           // Don't send error
     }
     DBUG_PRINT("info",("%ld records output",join->send_records));
   }
@@ -4169,10 +4191,10 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
        my_errno=tmp;
       error= -1;
     }
-    if (error != old_error)
+    if (error == -1)
       table->file->print_error(my_errno,MYF(0));
   }
-  DBUG_RETURN(error < 0);
+  DBUG_RETURN(error);
 }
 
 
@@ -4926,6 +4948,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
     copy_fields(&join->tmp_table_param);
     copy_funcs(join->tmp_table_param.funcs);
 
+#ifdef TO_BE_DELETED
     if (!table->uniques)                    // If not unique handling
     {
       /* Copy null values from group to row */
@@ -4936,10 +4959,11 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
        if (item->maybe_null)
        {
          Field *field=item->tmp_table_field();
-         field->ptr[-1]= (byte) (field->is_null() ? 0 : 1);
+         field->ptr[-1]= (byte) (field->is_null() ? 1 : 0);
        }
       }
     }
+#endif
     if (!join->having || join->having->val_int())
     {
       join->found_records++;
@@ -4994,8 +5018,9 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)),
   {
     Item *item= *group->item;
     item->save_org_in_field(group->field);
+    /* Store in the used key if the field was 0 */
     if (item->maybe_null)
-      group->buff[0]=item->null_value ? 0: 1;  // Save reversed value
+      group->buff[-1]=item->null_value ? 1 : 0;
   }
   // table->file->index_init(0);
   if (!table->file->index_read(table->record[1],
```
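
The item_strfunc.h and item_timefunc.h hunks fix a subtle bug in the BINARY and CAST wrappers: they returned the argument's string value but never copied the argument's null_value flag, so wrapping a NULL expression made it look non-NULL to the caller. Below is a minimal sketch of that pattern using simplified stand-in classes rather than the real Item/String hierarchy from sql/item.h.

```cpp
#include <iostream>
#include <string>
#include <utility>

// Simplified stand-ins for the server's Item/String machinery; these classes
// are illustrative only, not the real server definitions.
struct Item {
  bool null_value= false;                    // set by val_str() when the value is SQL NULL
  virtual std::string *val_str(std::string *buf)= 0;
  virtual ~Item() {}
};

// A literal that may be SQL NULL.
struct Item_literal : Item {
  bool is_null;
  std::string value;
  Item_literal(bool n, std::string v) : is_null(n), value(std::move(v)) {}
  std::string *val_str(std::string *buf) override
  {
    null_value= is_null;
    if (is_null)
      return nullptr;
    *buf= value;
    return buf;
  }
};

// A pass-through function such as BINARY or CAST returns its argument's value
// unchanged.  The commit's fix corresponds to the marked line: the wrapper must
// also copy the argument's null flag, not just forward the string pointer.
struct Item_passthrough : Item {
  Item *arg;
  explicit Item_passthrough(Item *a) : arg(a) {}
  std::string *val_str(std::string *buf) override
  {
    std::string *res= arg->val_str(buf);
    null_value= arg->null_value;             // propagate NULL-ness (the actual fix)
    return res;
  }
};

int main()
{
  std::string buf;
  Item_literal null_arg(true, "");
  Item_passthrough wrapped(&null_arg);
  wrapped.val_str(&buf);
  std::cout << (wrapped.null_value ? "NULL" : buf) << "\n";  // prints NULL
  return 0;
}
```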
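
The sql_select.cc hunks also document a return-code convention for do_select(): 0 for success, 1 when an error has already been sent to the client, and -1 when the caller still has to send it; mysql_select() now keeps the callee's code in tmp_error so its error path can honour that distinction. The following self-contained sketch shows the convention with made-up function names, not the real server entry points.

```cpp
#include <cstdio>

// run_select_step() stands in for a callee that follows the convention:
//   0 -> ok, 1 -> error already sent to the client, -1 -> error must still be sent.
static int run_select_step(bool fail, bool already_reported)
{
  if (!fail)
    return 0;
  return already_reported ? 1 : -1;
}

static void report_error(const char *msg)
{
  std::fprintf(stderr, "ERROR: %s\n", msg);   // stand-in for sending the error to the client
}

// The caller mirrors what mysql_select() does with tmp_error: remember the
// code and only report the error itself when the callee has not done so (-1).
static int run_query(bool fail, bool already_reported)
{
  int tmp_error= run_select_step(fail, already_reported);
  if (tmp_error == -1)
    report_error("copying to tmp table failed");  // not sent yet, send it here
  return tmp_error ? 1 : 0;                       // either way the client has seen the error
}

int main()
{
  return run_query(false, false);                 // usage example: successful path returns 0
}
```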