author     unknown <heikki@donna.mysql.fi>   2001-08-09 20:41:20 +0300
committer  unknown <heikki@donna.mysql.fi>   2001-08-09 20:41:20 +0300
commit     5ea83eed346740875aa8d9544c111c4c3abb0136 (patch)
tree       0d7f628600c54442074d394e119cdeef58bca998 /sql
parent     83fbd8d8db6939775bd0b13e9ef2aa40fb0acbea (diff)
download   mariadb-git-5ea83eed346740875aa8d9544c111c4c3abb0136.tar.gz
btr0cur.c Improve range size estimate for big ranges
ha_innobase.cc Fix Sort aborted bug
sql/ha_innobase.cc:
Fix Sort aborted bug
innobase/btr/btr0cur.c:
Improve range size estimate for big ranges
BitKeeper/etc/logging_ok:
Logging to logging@openlogging.org accepted
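The ha_innobase.cc side of the fix adds a handler method that returns a guaranteed upper bound on the table's row count, derived from the size of the clustered index. The sketch below restates that arithmetic as a standalone function; the struct and names are hypothetical stand-ins for InnoDB's dictionary statistics (not the actual handler API), and 16 KB is assumed as the default UNIV_PAGE_SIZE of that era.

```cpp
#include <cstdint>

// Hypothetical stand-in for the statistics kept in InnoDB's dict_table_t
// (refreshed by dict_update_statistics() in the real code).
struct TableStats {
	uint64_t clustered_index_pages;	// cf. ib_table->stat_clustered_index_size
};

static const uint64_t kPageSize = 16 * 1024;		// default UNIV_PAGE_SIZE, 16 KB
static const uint64_t kMinClusteredRecBytes = 20;	// minimum clustered index record size

// Upper bound on the number of rows: every row occupies at least 20 bytes in
// the clustered index, so index_bytes / 20 cannot undercount. The +1000 slack
// mirrors the patch and keeps the bound safe for very small tables.
uint64_t estimate_rows_upper_bound(const TableStats& stats)
{
	uint64_t data_file_length = stats.clustered_index_pages * kPageSize;
	return 1000 + data_file_length / kMinClusteredRecBytes;
}
```

Because the estimate only ever overshoots, a consumer that sizes its buffers from it cannot come up short, which is the property the "Sort aborted" fix relies on.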
Diffstat (limited to 'sql')
-rw-r--r--   sql/ha_innobase.cc   41
1 files changed, 36 insertions, 5 deletions
diff --git a/sql/ha_innobase.cc b/sql/ha_innobase.cc
index 8ea700de789..7bd71363915 100644
--- a/sql/ha_innobase.cc
+++ b/sql/ha_innobase.cc
@@ -822,11 +822,11 @@ ha_innobase::open(
 
 	if (NULL == (ib_table = dict_table_get(norm_name, NULL))) {
 
-	        fprintf(stderr, "\
-Cannot find table %s from the internal data dictionary\n\
-of InnoDB though the .frm file for the table exists. Maybe you have deleted\n\
-and created again an InnoDB database but forgotten to delete the\n\
-corresponding .frm files of old InnoDB tables?\n",
+	        fprintf(stderr,
+"Cannot find table %s from the internal data dictionary\n"
+"of InnoDB though the .frm file for the table exists. Maybe you have deleted\n"
+"and created again an InnoDB database but forgotten to delete the\n"
+"corresponding .frm files of old InnoDB tables?\n",
 			norm_name);
 
 	        free_share(share);
@@ -2660,6 +2660,37 @@ ha_innobase::records_in_range(
 }
 
 /*************************************************************************
+Gives an UPPER BOUND to the number of rows in a table. This is used in
+filesort.cc and the upper bound must hold. TODO: Since the number of
+rows in a table may change after this function is called, we still may
+get a 'Sort aborted' error in filesort.cc of MySQL. The ultimate fix is to
+improve the algorithm of filesort.cc. */
+
+ha_rows
+ha_innobase::estimate_number_of_rows(void)
+/*======================================*/
+			/* out: upper bound of rows, currently 32-bit int
+			or uint */
+{
+	row_prebuilt_t*	prebuilt = (row_prebuilt_t*) innobase_prebuilt;
+	dict_table_t*	ib_table;
+
+	DBUG_ENTER("info");
+
+	ib_table = prebuilt->table;
+
+	dict_update_statistics(ib_table);
+
+	data_file_length = ((ulonglong)
+				ib_table->stat_clustered_index_size)
+					* UNIV_PAGE_SIZE;
+
+	/* The minimum clustered index record size is 20 bytes */
+
+	return((ha_rows) (1000 + data_file_length / 20));
+}
+
+/*************************************************************************
 How many seeks it will take to read through the table. This is to be
 comparable to the number returned by records_in_range so that we can decide
 if we should scan the table or use keys. */
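Two separate things happen in this diff. The first hunk only reworks the error message: the backslash line continuations inside one long string literal are replaced with adjacent string literals, which the compiler concatenates into the same message. The second hunk adds estimate_number_of_rows(), whose contract is that the returned value may overestimate but must never undercount the rows a full scan can produce. The sketch below shows, under purely hypothetical names (this is not MySQL's filesort.cc), how a sort routine can rely on such an upper bound to size its slot table once and never run out of room mid-scan.

```cpp
#include <cstdint>
#include <vector>

// Hypothetical sort-position record; a real filesort keeps packed sort keys.
struct SortPos {
	uint64_t row_id;
};

// A fixed-size slot table sized from the handler's upper bound. If the bound
// truly holds, append() can never fail mid-scan, which is exactly the
// "Sort aborted" failure mode the commit works around.
class SortSlots {
public:
	explicit SortSlots(uint64_t rows_upper_bound)
		: slots_(rows_upper_bound) {}

	bool append(uint64_t row_id)
	{
		if (used_ == slots_.size())
			return false;	/* would surface as "Sort aborted" */
		slots_[used_++].row_id = row_id;
		return true;
	}

private:
	std::vector<SortPos>	slots_;
	uint64_t		used_ = 0;
};
```

As the TODO in the new comment concedes, rows inserted after estimate_number_of_rows() is called can still exceed the bound, so the error is made rare rather than impossible; the commit defers the complete fix to filesort's own algorithm.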