author    | bell@sanja.is.com.ua | 2004-09-25 18:37:28 +0300
committer | bell@sanja.is.com.ua | 2004-09-25 18:37:28 +0300
commit    | 6df58cc97e6aecd341b8dfcb82b696fd5123f90a (patch)
tree      | 1328afc6e2b10d48dd499927063824ec875201a9 /sql
parent    | cd32040889b9bc9f6437588a3a8bf3878c021b1b (diff)
parent    | ef7bdbf4b8efd5d1d293e801939937a26fef6292 (diff)
download  | mariadb-git-6df58cc97e6aecd341b8dfcb82b696fd5123f90a.tar.gz
Merge sanja.is.com.ua:/home/bell/mysql/bk/mysql-4.1
into sanja.is.com.ua:/home/bell/mysql/bk/work-4.1
Diffstat (limited to 'sql')
-rw-r--r-- | sql/examples/ha_archive.cc | 69
-rw-r--r-- | sql/examples/ha_archive.h  |  2
-rw-r--r-- | sql/field.cc               |  3
-rw-r--r-- | sql/ha_ndbcluster.cc       | 82
-rw-r--r-- | sql/ha_ndbcluster.h        |  3
-rw-r--r-- | sql/item.cc                |  2
-rw-r--r-- | sql/item_cmpfunc.cc        | 26
-rw-r--r-- | sql/item_geofunc.cc        | 33
-rw-r--r-- | sql/item_geofunc.h         | 62
-rw-r--r-- | sql/log_event.cc           | 10
-rw-r--r-- | sql/opt_range.cc           |  8
-rw-r--r-- | sql/sql_class.cc           | 70
-rw-r--r-- | sql/sql_class.h            | 29
-rw-r--r-- | sql/sql_show.cc            |  2
14 files changed, 272 insertions, 129 deletions
diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc
index c004330932c..e71ae05734a 100644
--- a/sql/examples/ha_archive.cc
+++ b/sql/examples/ha_archive.cc
@@ -70,7 +70,6 @@
   Allow users to set compression level.
   Add truncate table command.
   Implement versioning, should be easy.
-  Implement optimize so we can fix broken tables.
   Allow for errors, find a way to mark bad rows.
   See if during an optimize you can make the table smaller.
   Talk to the gzip guys, come up with a writable format so that updates are doable
@@ -88,6 +87,7 @@ static int archive_init= 0;

 /* The file extension */
 #define ARZ ".ARZ"
+#define ARN ".ARN"

 /*
   Used for hash table that tracks open tables.
@@ -117,7 +117,7 @@ static ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table)
   if (!archive_init)
   {
     VOID(pthread_mutex_init(&archive_mutex,MY_MUTEX_INIT_FAST));
-    if (!hash_init(&archive_open_tables,system_charset_info,32,0,0,
+    if (hash_init(&archive_open_tables,system_charset_info,32,0,0,
                    (hash_get_key) archive_get_key,0,0))
     {
       pthread_mutex_unlock(&LOCK_mysql_create_db);
@@ -205,7 +205,7 @@ static int free_share(ARCHIVE_SHARE *share)
   We just implement one additional file extension.
 */
 const char **ha_archive::bas_ext() const
-{ static const char *ext[]= { ARZ, NullS }; return ext; }
+{ static const char *ext[]= { ARZ, ARN, NullS }; return ext; }

 /*
@@ -322,6 +322,11 @@ err:
 /*
   Look at ha_archive::open() for an explanation of the row format.
   Here we just write out the row.
+
+  Wondering about start_bulk_insert()? We don't implement it for
+  archive since it optimizes for lots of writes. The only save
+  for implementing start_bulk_insert() is that we could skip
+  setting dirty to true each time.
 */
 int ha_archive::write_row(byte * buf)
 {
@@ -380,17 +385,7 @@ int ha_archive::rnd_init(bool scan)
   pthread_mutex_lock(&share->mutex);
   if (share->dirty == TRUE)
   {
-/* I was having problems with OSX, but it worked for 10.3 so I am wrapping this with and ifdef */
-#ifdef BROKEN_GZFLUSH
-    gzclose(share->archive_write);
-    if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
-    {
-      pthread_mutex_unlock(&share->mutex);
-      DBUG_RETURN(errno ? errno : -1);
-    }
-#else
     gzflush(share->archive_write, Z_SYNC_FLUSH);
-#endif
     share->dirty= FALSE;
   }
   pthread_mutex_unlock(&share->mutex);
@@ -504,6 +499,54 @@ int ha_archive::rnd_pos(byte * buf, byte *pos)
   DBUG_RETURN(get_row(buf));
 }

+/*
+  The table can become fragmented if data was inserted, read, and then
+  inserted again. What we do is open up the file and recompress it completely.
+*/
+int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt)
+{
+  DBUG_ENTER("ha_archive::optimize");
+  int read; // Bytes read, gzread() returns int
+  gzFile reader, writer;
+  char block[IO_SIZE];
+  char writer_filename[FN_REFLEN];
+
+  /* Lets create a file to contain the new data */
+  fn_format(writer_filename,share->table_name,"",ARN, MY_REPLACE_EXT|MY_UNPACK_FILENAME);
+
+  /* Closing will cause all data waiting to be flushed, to be flushed */
+  gzclose(share->archive_write);
+
+  if ((reader= gzopen(share->data_file_name, "rb")) == NULL)
+    DBUG_RETURN(-1);
+
+  if ((writer= gzopen(writer_filename, "wb")) == NULL)
+  {
+    gzclose(reader);
+    DBUG_RETURN(-1);
+  }
+
+  while (read= gzread(reader, block, IO_SIZE))
+    gzwrite(writer, block, read);
+
+  gzclose(reader);
+  gzclose(writer);
+
+  my_rename(writer_filename,share->data_file_name,MYF(0));
+
+  /*
+    We reopen the file in case some IO is waiting to go through.
+    In theory the table is closed right after this operation,
+    but it is possible for IO to still happen.
+    I may be being a bit too paranoid right here.
+  */
+  if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL)
+    DBUG_RETURN(errno ? errno : -1);
+  share->dirty= FALSE;

+  DBUG_RETURN(0);
+}
+
 /******************************************************************************

  Everything below here is default, please look at ha_example.cc for
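The new optimize() is a streaming recompress-and-swap: read the whole archive back through zlib, write a fresh compressed copy, and rename it over the original. A minimal standalone sketch of the same pattern follows, using only public zlib calls; the buffer size, file names, and error handling are illustrative, not MySQL's:

// Standalone sketch of the recompress-and-swap pattern used by
// ha_archive::optimize() above, built on public zlib calls only.
// BLOCK_SIZE and the temp-file naming are illustrative.
#include <zlib.h>
#include <cstdio>

static const unsigned BLOCK_SIZE= 4096;

int recompress_file(const char *data_file, const char *temp_file)
{
  gzFile reader= gzopen(data_file, "rb");  // decompresses on read
  if (reader == NULL)
    return -1;
  gzFile writer= gzopen(temp_file, "wb");  // recompresses on write
  if (writer == NULL)
  {
    gzclose(reader);
    return -1;
  }
  char block[BLOCK_SIZE];
  int bytes;
  // Stream plain blocks across; the writer produces one fresh,
  // defragmented gzip stream regardless of how torn the input was.
  while ((bytes= gzread(reader, block, BLOCK_SIZE)) > 0)
    gzwrite(writer, block, bytes);
  gzclose(reader);
  gzclose(writer);
  // Swap the rebuilt file over the original, as my_rename() does above.
  return rename(temp_file, data_file) == 0 ? 0 : -1;
}

The rename-over swap is what keeps the window of inconsistency small: readers see either the complete old file or the complete new one.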
diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h
index f08353a5d6c..cf7becc5bc0 100644
--- a/sql/examples/ha_archive.h
+++ b/sql/examples/ha_archive.h
@@ -112,7 +112,7 @@ public:
   int external_lock(THD *thd, int lock_type);
   ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key);
   int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info);
-
+  int optimize(THD* thd, HA_CHECK_OPT* check_opt);
   THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
                              enum thr_lock_type lock_type);
 };
diff --git a/sql/field.cc b/sql/field.cc
index dbdd44633f0..5dd9886f82c 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -4300,8 +4300,9 @@ int Field_str::store(double nr)
   uint length;
   bool use_scientific_notation= TRUE;
   use_scientific_notation= TRUE;
-if (field_length < 32 && fabs(nr) < log_10[field_length]-1)
+  if (field_length < 32 && fabs(nr) < log_10[field_length]-1)
     use_scientific_notation= FALSE;
+
   length= (uint) my_sprintf(buff, (buff, "%-.*g",
                                    (use_scientific_notation ?
                                     max(0, (int)field_length-5) :
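The field.cc hunk is mostly an indentation fix, but the logic it touches is worth spelling out: fixed notation is used only when the value fits within the column width; otherwise %g falls back to scientific notation, with roughly five characters reserved for the exponent, sign, and dot. A sketch of that decision under simplified assumptions (a short stand-in for MySQL's log_10[] table, and a cutoff of 10 instead of 32):

// Sketch of the notation decision in Field_str::store(double): use
// fixed notation only when all digits fit the column, else let %g
// produce scientific notation and reserve ~5 chars for "e+NN" etc.
// log10_tab and the cutoff of 10 are simplified stand-ins for
// MySQL's log_10[] and the 32-character limit in the hunk above.
#include <cstdio>
#include <cmath>
#include <algorithm>

int format_double_sketch(char *buff, size_t buff_size,
                         double nr, int field_length)
{
  static const double log10_tab[10]=
    { 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9 };
  bool use_scientific= true;
  if (field_length < 10 && fabs(nr) < log10_tab[field_length] - 1)
    use_scientific= false;  // value fits without an exponent
  int precision= use_scientific ? std::max(0, field_length - 5)
                                : field_length;
  return snprintf(buff, buff_size, "%-.*g", precision, nr);
}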
diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index da32476ae74..4e474568671 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -122,6 +122,8 @@ static const err_code_mapping err_map[]=
   { 827, HA_ERR_RECORD_FILE_FULL },
   { 832, HA_ERR_RECORD_FILE_FULL },

+  { 0, 1 },
+
   { -1, -1 }
 };
@@ -173,7 +175,7 @@ void ha_ndbcluster::records_update()
   DBUG_PRINT("info", ("id=%d, no_uncommitted_rows_count=%d",
                       ((const NDBTAB *)m_table)->getTableId(),
                       info->no_uncommitted_rows_count));
-  if (info->records == ~(ha_rows)0)
+  //  if (info->records == ~(ha_rows)0)
   {
     Uint64 rows;
     if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
@@ -246,8 +248,6 @@ int ha_ndbcluster::ndb_err(NdbConnection *trans)
 {
   int res;
   const NdbError err= trans->getNdbError();
-  if (!err.code)
-    return 0;                   // Don't log things to DBUG log if no error
   DBUG_ENTER("ndb_err");
   ERR_PRINT(err);
@@ -283,10 +283,11 @@ bool ha_ndbcluster::get_error_message(int error,
   DBUG_ENTER("ha_ndbcluster::get_error_message");
   DBUG_PRINT("enter", ("error: %d", error));

-  if (!m_ndb)
+  Ndb *ndb= ((Thd_ndb*)current_thd->transaction.thd_ndb)->ndb;
+  if (!ndb)
     DBUG_RETURN(false);

-  const NdbError err= m_ndb->getNdbError(error);
+  const NdbError err= ndb->getNdbError(error);
   bool temporary= err.status==NdbError::TemporaryError;
   buf->set(err.message, strlen(err.message), &my_charset_bin);
   DBUG_PRINT("exit", ("message: %s, temporary: %d", buf->ptr(), temporary));
@@ -516,7 +517,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob)
 */
 int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
-                                 uint fieldnr)
+                                 uint fieldnr, byte* buf)
 {
   DBUG_ENTER("get_ndb_value");
   DBUG_PRINT("enter", ("fieldnr: %d flags: %o", fieldnr,
@@ -524,12 +525,15 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field,
   if (field != NULL)
   {
+    DBUG_ASSERT(buf);
     if (ndb_supported_type(field->type()))
     {
       DBUG_ASSERT(field->ptr != NULL);
       if (! (field->flags & BLOB_FLAG))
-      {
-        m_value[fieldnr].rec= ndb_op->getValue(fieldnr, field->ptr);
+      {
+        byte *field_buf= buf + (field->ptr - table->record[0]);
+        m_value[fieldnr].rec= ndb_op->getValue(fieldnr,
+                                               field_buf);
         DBUG_RETURN(m_value[fieldnr].rec == NULL);
       }
@@ -603,7 +607,7 @@ int ha_ndbcluster::get_metadata(const char *path)
   DBUG_ENTER("get_metadata");
   DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, path));

-  if (!(tab= dict->getTable(m_tabname, &m_table_info)))
+  if (!(tab= dict->getTable(m_tabname)))
     ERR_RETURN(dict->getNdbError());
   DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
@@ -651,8 +655,8 @@ int ha_ndbcluster::get_metadata(const char *path)
   if (error)
     DBUG_RETURN(error);

-  // All checks OK, lets use the table
-  m_table= (void*)tab;
+  m_table= NULL;
+  m_table_info= NULL;

   DBUG_RETURN(build_index_list(table, ILBP_OPEN));
 }
@@ -767,6 +771,7 @@ void ha_ndbcluster::release_metadata()
   DBUG_PRINT("enter", ("m_tabname: %s", m_tabname));

   m_table= NULL;
+  m_table_info= NULL;

   // Release index list
   for (i= 0; i < MAX_KEY; i++)
@@ -947,7 +952,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
       ERR_RETURN(trans->getNdbError());

     // Read key at the same time, for future reference
-    if (get_ndb_value(op, NULL, no_fields))
+    if (get_ndb_value(op, NULL, no_fields, NULL))
       ERR_RETURN(trans->getNdbError());
   }
   else
@@ -964,7 +969,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf)
     if ((thd->query_id == field->query_id) ||
         retrieve_all_fields)
     {
-      if (get_ndb_value(op, field, i))
+      if (get_ndb_value(op, field, i, buf))
        ERR_RETURN(trans->getNdbError());
     }
     else
@@ -1018,7 +1023,7 @@ int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data)
     if (!(field->flags & PRI_KEY_FLAG) &&
        (thd->query_id != field->query_id))
     {
-      if (get_ndb_value(op, field, i))
+      if (get_ndb_value(op, field, i, new_data))
        ERR_RETURN(trans->getNdbError());
     }
   }
@@ -1081,7 +1086,7 @@ int ha_ndbcluster::unique_index_read(const byte *key,
     if ((thd->query_id == field->query_id) ||
         (field->flags & PRI_KEY_FLAG))
     {
-      if (get_ndb_value(op, field, i))
+      if (get_ndb_value(op, field, i, buf))
         ERR_RETURN(op->getNdbError());
     }
     else
@@ -1480,7 +1485,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
        (field->flags & PRI_KEY_FLAG) ||
        retrieve_all_fields)
     {
-      if (get_ndb_value(op, field, i))
+      if (get_ndb_value(op, field, i, buf))
        ERR_RETURN(op->getNdbError());
     }
     else
@@ -1499,7 +1504,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op)
     if (!tab->getColumn(hidden_no))
       DBUG_RETURN(1);
 #endif
-    if (get_ndb_value(op, NULL, hidden_no))
+    if (get_ndb_value(op, NULL, hidden_no, NULL))
       ERR_RETURN(op->getNdbError());
   }
@@ -1521,6 +1526,11 @@ int ha_ndbcluster::write_row(byte *record)
   NdbOperation *op;
   int res;
   DBUG_ENTER("write_row");
+
+  if(m_ignore_dup_key_not_supported)
+  {
+    DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+  }

   statistic_increment(ha_write_count,&LOCK_status);
   if (table->timestamp_default_now)
@@ -2385,7 +2395,17 @@ void ha_ndbcluster::info(uint flag)
   if (flag & HA_STATUS_VARIABLE)
   {
     DBUG_PRINT("info", ("HA_STATUS_VARIABLE"));
-    records_update();
+    if (m_table_info)
+    {
+      records_update();
+    }
+    else
+    {
+      Uint64 rows;
+      if(ndb_get_table_statistics(m_ndb, m_tabname, &rows, 0) == 0){
+        records= rows;
+      }
+    }
   }
   if (flag & HA_STATUS_ERRKEY)
   {
@@ -2479,14 +2499,20 @@ int ha_ndbcluster::extra(enum ha_extra_function operation)
     break;
   case HA_EXTRA_IGNORE_DUP_KEY:       /* Dup keys don't rollback everything*/
     DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY"));
-
-    DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
-    m_use_write= TRUE;
+    if (current_thd->lex->sql_command == SQLCOM_REPLACE)
+    {
+      DBUG_PRINT("info", ("Turning ON use of write instead of insert"));
+      m_use_write= TRUE;
+    } else
+    {
+      m_ignore_dup_key_not_supported= TRUE;
+    }
     break;
   case HA_EXTRA_NO_IGNORE_DUP_KEY:
     DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY"));
     DBUG_PRINT("info", ("Turning OFF use of write instead of insert"));
     m_use_write= false;
+    m_ignore_dup_key_not_supported= false;
     break;
   case HA_EXTRA_RETRIEVE_ALL_COLS:    /* Retrieve all columns, not just those
                                          where field->query_id is the same as
@@ -2766,6 +2792,16 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
         // Start of transaction
         retrieve_all_fields= FALSE;
         ops_pending= 0;
+        {
+          NDBDICT *dict= m_ndb->getDictionary();
+          const NDBTAB *tab;
+          void *tab_info;
+          if (!(tab= dict->getTable(m_tabname, &tab_info)))
+            ERR_RETURN(dict->getNdbError());
+          DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion()));
+          m_table= (void *)tab;
+          m_table_info= tab_info;
+        }
         no_uncommitted_rows_init(thd);
       }
       else
@@ -2788,6 +2824,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type)
         thd->transaction.stmt.ndb_tid= 0;
       }
     }
+    m_table= NULL;
+    m_table_info= NULL;
     if (m_active_trans)
       DBUG_PRINT("warning", ("m_active_trans != NULL"));
     if (m_active_cursor)
@@ -3273,6 +3311,7 @@ int ha_ndbcluster::alter_table_name(const char *from, const char *to)
     ERR_RETURN(dict->getNdbError());

   m_table= NULL;
+  m_table_info= NULL;

   DBUG_RETURN(0);
 }
@@ -3364,6 +3403,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg):
                 HA_NO_PREFIX_CHAR_KEYS),
   m_share(0),
   m_use_write(false),
+  m_ignore_dup_key_not_supported(false),
   retrieve_all_fields(FALSE),
   rows_to_insert(1),
   rows_inserted(0),
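Threaded through most of the hunks above is one idea: get_ndb_value() used to tell NDB to deliver column values at field->ptr, which always points into table->record[0]; now the caller passes the actual destination row buffer, and the column address is recomputed at the same byte offset inside it. A sketch of just that pointer translation (names are stand-ins):

// Sketch of the offset translation added to get_ndb_value(): a
// Field's ptr points into table->record[0]; the same column inside
// any other row buffer lives at the identical byte offset.
#include <cstddef>

typedef unsigned char uchar_t;  // stand-in for MySQL's byte

uchar_t *field_in_buf(const uchar_t *record0,   // table->record[0]
                      const uchar_t *field_ptr, // field->ptr, inside record0
                      uchar_t *buf)             // actual destination row
{
  ptrdiff_t offset= field_ptr - record0;  // column's offset within a row
  return buf + offset;                    // same column inside buf
}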
DBUG_PRINT("info", ("HA_EXTRA_IGNORE_DUP_KEY")); - - DBUG_PRINT("info", ("Turning ON use of write instead of insert")); - m_use_write= TRUE; + if (current_thd->lex->sql_command == SQLCOM_REPLACE) + { + DBUG_PRINT("info", ("Turning ON use of write instead of insert")); + m_use_write= TRUE; + } else + { + m_ignore_dup_key_not_supported= TRUE; + } break; case HA_EXTRA_NO_IGNORE_DUP_KEY: DBUG_PRINT("info", ("HA_EXTRA_NO_IGNORE_DUP_KEY")); DBUG_PRINT("info", ("Turning OFF use of write instead of insert")); m_use_write= false; + m_ignore_dup_key_not_supported= false; break; case HA_EXTRA_RETRIEVE_ALL_COLS: /* Retrieve all columns, not just those where field->query_id is the same as @@ -2766,6 +2792,16 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) // Start of transaction retrieve_all_fields= FALSE; ops_pending= 0; + { + NDBDICT *dict= m_ndb->getDictionary(); + const NDBTAB *tab; + void *tab_info; + if (!(tab= dict->getTable(m_tabname, &tab_info))) + ERR_RETURN(dict->getNdbError()); + DBUG_PRINT("info", ("Table schema version: %d", tab->getObjectVersion())); + m_table= (void *)tab; + m_table_info= tab_info; + } no_uncommitted_rows_init(thd); } else @@ -2788,6 +2824,8 @@ int ha_ndbcluster::external_lock(THD *thd, int lock_type) thd->transaction.stmt.ndb_tid= 0; } } + m_table= NULL; + m_table_info= NULL; if (m_active_trans) DBUG_PRINT("warning", ("m_active_trans != NULL")); if (m_active_cursor) @@ -3273,6 +3311,7 @@ int ha_ndbcluster::alter_table_name(const char *from, const char *to) ERR_RETURN(dict->getNdbError()); m_table= NULL; + m_table_info= NULL; DBUG_RETURN(0); } @@ -3364,6 +3403,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): HA_NO_PREFIX_CHAR_KEYS), m_share(0), m_use_write(false), + m_ignore_dup_key_not_supported(false), retrieve_all_fields(FALSE), rows_to_insert(1), rows_inserted(0), diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index a25d3e18310..c0ef172413f 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -211,7 +211,7 @@ class ha_ndbcluster: public handler int set_ndb_key(NdbOperation*, Field *field, uint fieldnr, const byte* field_ptr); int set_ndb_value(NdbOperation*, Field *field, uint fieldnr); - int get_ndb_value(NdbOperation*, Field *field, uint fieldnr); + int get_ndb_value(NdbOperation*, Field *field, uint fieldnr, byte*); friend int g_get_ndb_blobs_value(NdbBlob *ndb_blob, void *arg); int get_ndb_blobs_value(NdbBlob *last_ndb_blob); int set_primary_key(NdbOperation *op, const byte *key); @@ -245,6 +245,7 @@ class ha_ndbcluster: public handler typedef union { NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue; NdbValue m_value[NDB_MAX_ATTRIBUTES_IN_TABLE]; bool m_use_write; + bool m_ignore_dup_key_not_supported; bool retrieve_all_fields; ha_rows rows_to_insert; ha_rows rows_inserted; diff --git a/sql/item.cc b/sql/item.cc index ea095df3aaa..4226f58e9a6 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -891,7 +891,7 @@ int Item_param::save_in_field(Field *field, bool no_conversions) return field->store(str_value.ptr(), str_value.length(), str_value.charset()); case NULL_VALUE: - return set_field_to_null(field); + return set_field_to_null_with_conversions(field, no_conversions); case NO_VALUE: default: DBUG_ASSERT(0); diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 85b22d1eddd..8950ad0c594 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -188,17 +188,27 @@ void Item_bool_func2::fix_length_and_dec() { uint strong= 0; uint weak= 0; + uint32 dummy_offset; DTCollation coll; if (args[0]->result_type() == STRING_RESULT 
&& args[1]->result_type() == STRING_RESULT && - !my_charset_same(args[0]->collation.collation, - args[1]->collation.collation) && + String::needs_conversion(0, args[0]->collation.collation, + args[1]->collation.collation, + &dummy_offset) && !coll.set(args[0]->collation, args[1]->collation, TRUE)) { Item* conv= 0; + THD *thd= current_thd; + Item_arena *arena= thd->current_arena, backup; strong= coll.strong; weak= strong ? 0 : 1; + /* + In case we're in statement prepare, create conversion item + in its memory: it will be reused on each execute. + */ + if (arena->is_stmt_prepare()) + thd->set_n_backup_item_arena(arena, &backup); if (args[weak]->type() == STRING_ITEM) { String tmp, cstr; @@ -211,21 +221,13 @@ void Item_bool_func2::fix_length_and_dec() } else { - THD *thd= current_thd; - /* - In case we're in statement prepare, create conversion item - in its memory: it will be reused on each execute. - */ - Item_arena *arena= thd->current_arena, backup; - if (arena->is_stmt_prepare()) - thd->set_n_backup_item_arena(arena, &backup); conv= new Item_func_conv_charset(args[weak], args[strong]->collation.collation); - if (arena->is_stmt_prepare()) - thd->restore_backup_item_arena(arena, &backup); conv->collation.set(args[weak]->collation.derivation); conv->fix_fields(thd, 0, &conv); } + if (arena->is_stmt_prepare()) + thd->restore_backup_item_arena(arena, &backup); args[weak]= conv ? conv : args[weak]; } } diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 9d58cc37c2a..935925c1e83 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -27,6 +27,13 @@ #include "sql_acl.h" #include <m_ctype.h> +void Item_geometry_func::fix_length_and_dec() +{ + collation.set(&my_charset_bin); + decimals=0; + max_length=MAX_BLOB_WIDTH; +} + String *Item_func_geometry_from_text::val_str(String *str) { @@ -44,6 +51,7 @@ String *Item_func_geometry_from_text::val_str(String *str) if ((arg_count == 2) && !args[1]->null_value) srid= (uint32)args[1]->val_int(); + str->set_charset(&my_charset_bin); if (str->reserve(SRID_SIZE, 512)) return 0; str->length(0); @@ -54,12 +62,6 @@ String *Item_func_geometry_from_text::val_str(String *str) } -void Item_func_geometry_from_text::fix_length_and_dec() -{ - max_length=MAX_BLOB_WIDTH; -} - - String *Item_func_geometry_from_wkb::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -71,6 +73,7 @@ String *Item_func_geometry_from_wkb::val_str(String *str) if ((arg_count == 2) && !args[1]->null_value) srid= (uint32)args[1]->val_int(); + str->set_charset(&my_charset_bin); if (str->reserve(SRID_SIZE, 512)) return 0; str->length(0); @@ -84,12 +87,6 @@ String *Item_func_geometry_from_wkb::val_str(String *str) } -void Item_func_geometry_from_wkb::fix_length_and_dec() -{ - max_length=MAX_BLOB_WIDTH; -} - - String *Item_func_as_wkt::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -138,12 +135,6 @@ String *Item_func_as_wkb::val_str(String *str) } -void Item_func_as_wkb::fix_length_and_dec() -{ - max_length= MAX_BLOB_WIDTH; -} - - String *Item_func_geometry_type::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -180,6 +171,7 @@ String *Item_func_envelope::val_str(String *str) return 0; srid= uint4korr(swkb->ptr()); + str->set_charset(&my_charset_bin); str->length(0); if (str->reserve(SRID_SIZE, 512)) return 0; @@ -202,6 +194,7 @@ String *Item_func_centroid::val_str(String *str) swkb->length() - SRID_SIZE)))) return 0; + str->set_charset(&my_charset_bin); if (str->reserve(SRID_SIZE, 512)) return 0; str->length(0); @@ -232,6 +225,7 @@ String 
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index a1f36130152..79e4f804a04 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -23,24 +23,33 @@
 #pragma interface                       /* gcc class implementation */
 #endif

-class Item_func_geometry_from_text: public Item_str_func
+class Item_geometry_func: public Item_str_func
 {
 public:
-  Item_func_geometry_from_text(Item *a) :Item_str_func(a) {}
-  Item_func_geometry_from_text(Item *a, Item *srid) :Item_str_func(a, srid) {}
+  Item_geometry_func() :Item_str_func() {}
+  Item_geometry_func(Item *a) :Item_str_func(a) {}
+  Item_geometry_func(Item *a,Item *b) :Item_str_func(a,b) {}
+  Item_geometry_func(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {}
+  Item_geometry_func(List<Item> &list) :Item_str_func(list) {}
+  void fix_length_and_dec();
+};
+
+class Item_func_geometry_from_text: public Item_geometry_func
+{
+public:
+  Item_func_geometry_from_text(Item *a) :Item_geometry_func(a) {}
+  Item_func_geometry_from_text(Item *a, Item *srid) :Item_geometry_func(a, srid) {}
   const char *func_name() const { return "geometryfromtext"; }
   String *val_str(String *);
-  void fix_length_and_dec();
 };

-class Item_func_geometry_from_wkb: public Item_str_func
+class Item_func_geometry_from_wkb: public Item_geometry_func
 {
 public:
-  Item_func_geometry_from_wkb(Item *a): Item_str_func(a) {}
-  Item_func_geometry_from_wkb(Item *a, Item *srid): Item_str_func(a, srid) {}
+  Item_func_geometry_from_wkb(Item *a): Item_geometry_func(a) {}
+  Item_func_geometry_from_wkb(Item *a, Item *srid): Item_geometry_func(a, srid) {}
   const char *func_name() const { return "geometryfromwkb"; }
   String *val_str(String *);
-  void fix_length_and_dec();
 };

 class Item_func_as_wkt: public Item_str_func
@@ -52,13 +61,12 @@ public:
   void fix_length_and_dec();
 };

-class Item_func_as_wkb: public Item_str_func
+class Item_func_as_wkb: public Item_geometry_func
 {
 public:
-  Item_func_as_wkb(Item *a): Item_str_func(a) {}
+  Item_func_as_wkb(Item *a): Item_geometry_func(a) {}
   const char *func_name() const { return "aswkb"; }
   String *val_str(String *);
-  void fix_length_and_dec();
 };

 class Item_func_geometry_type: public Item_str_func
@@ -73,40 +81,37 @@ public:
   };
 };

-class Item_func_centroid: public Item_str_func
+class Item_func_centroid: public Item_geometry_func
 {
 public:
-  Item_func_centroid(Item *a): Item_str_func(a) {}
+  Item_func_centroid(Item *a): Item_geometry_func(a) {}
   const char *func_name() const { return "centroid"; }
   String *val_str(String *);
-  void fix_length_and_dec(){max_length=MAX_BLOB_WIDTH;}
 };

-class Item_func_envelope: public Item_str_func
+class Item_func_envelope: public Item_geometry_func
 {
 public:
-  Item_func_envelope(Item *a): Item_str_func(a) {}
+  Item_func_envelope(Item *a): Item_geometry_func(a) {}
   const char *func_name() const { return "envelope"; }
   String *val_str(String *);
-  void fix_length_and_dec(){max_length=MAX_BLOB_WIDTH;}
 };

-class Item_func_point: public Item_str_func
+class Item_func_point: public Item_geometry_func
 {
 public:
-  Item_func_point(Item *a, Item *b): Item_str_func(a, b) {}
-  Item_func_point(Item *a, Item *b, Item *srid): Item_str_func(a, b, srid) {}
+  Item_func_point(Item *a, Item *b): Item_geometry_func(a, b) {}
+  Item_func_point(Item *a, Item *b, Item *srid): Item_geometry_func(a, b, srid) {}
   const char *func_name() const { return "point"; }
   String *val_str(String *);
-  void fix_length_and_dec(){max_length=MAX_BLOB_WIDTH;}
 };

-class Item_func_spatial_decomp: public Item_str_func
+class Item_func_spatial_decomp: public Item_geometry_func
 {
   enum Functype decomp_func;
 public:
   Item_func_spatial_decomp(Item *a, Item_func::Functype ft) :
-       Item_str_func(a) { decomp_func = ft; }
+       Item_geometry_func(a) { decomp_func = ft; }
   const char *func_name() const
   {
     switch (decomp_func)
@@ -123,15 +128,14 @@ public:
     }
   }
   String *val_str(String *);
-  void fix_length_and_dec(){max_length=MAX_BLOB_WIDTH;}
 };

-class Item_func_spatial_decomp_n: public Item_str_func
+class Item_func_spatial_decomp_n: public Item_geometry_func
 {
   enum Functype decomp_func_n;
 public:
   Item_func_spatial_decomp_n(Item *a, Item *b, Item_func::Functype ft):
-       Item_str_func(a, b) { decomp_func_n = ft; }
+       Item_geometry_func(a, b) { decomp_func_n = ft; }
   const char *func_name() const
   {
     switch (decomp_func_n)
@@ -148,10 +152,9 @@ public:
     }
   }
   String *val_str(String *);
-  void fix_length_and_dec(){max_length=MAX_BLOB_WIDTH;}
 };

-class Item_func_spatial_collection: public Item_str_func
+class Item_func_spatial_collection: public Item_geometry_func
 {
   String tmp_value;
   enum Geometry::wkbType coll_type;
@@ -159,13 +162,12 @@ class Item_func_spatial_collection: public Item_str_func
 public:
   Item_func_spatial_collection(
      List<Item> &list, enum Geometry::wkbType ct, enum Geometry::wkbType it):
-       Item_str_func(list)
+       Item_geometry_func(list)
   {
     coll_type=ct;
     item_type=it;
   }
   String *val_str(String *);
-  void fix_length_and_dec(){max_length=MAX_BLOB_WIDTH;}
   const char *func_name() const { return "multipoint"; }
 };
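The header refactor above is a classic pull-up: several identical fix_length_and_dec() bodies collapse into one Item_geometry_func base, which also fixes the result charset in a single place by tagging every geometry value as binary. A minimal illustration of the shape of that refactor (all names suffixed _sketch are stand-ins, not the real MySQL classes):

// Minimal illustration of the pull-up refactor: one shared
// fix_length_and_dec() in an intermediate base class replaces the
// copies previously repeated in every geometry Item.
struct Item_str_func_sketch
{
  unsigned max_length;
  bool binary_result;
  Item_str_func_sketch() : max_length(0), binary_result(false) {}
  virtual void fix_length_and_dec()= 0;
  virtual ~Item_str_func_sketch() {}
};

struct Item_geometry_func_sketch : Item_str_func_sketch
{
  // The one shared implementation: every geometry value is binary
  // and BLOB-sized, so set it here instead of in each subclass.
  void fix_length_and_dec()
  {
    binary_result= true;   // stands in for collation.set(&my_charset_bin)
    max_length= 16777216;  // stands in for MAX_BLOB_WIDTH
  }
};

// Subclasses inherit the behaviour and keep only what is genuinely
// their own (val_str(), func_name(), ...).
struct Item_func_centroid_sketch : Item_geometry_func_sketch {};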
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 1f30e932c01..326f2fc5c59 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1091,7 +1091,15 @@ end:
   VOID(pthread_mutex_unlock(&LOCK_thread_count));
   close_thread_tables(thd);
   free_root(&thd->mem_root,MYF(MY_KEEP_PREALLOC));
-  return (thd->query_error ? thd->query_error : Log_event::exec_event(rli));
+  /*
+    If there was an error we stop. Otherwise we increment positions. Note that
+    we will not increment group* positions if we are just after a SET
+    ONE_SHOT, because SET ONE_SHOT should not be separated from its following
+    updating query.
+  */
+  return (thd->query_error ? thd->query_error :
+          (thd->one_shot_set ? (rli->inc_event_relay_log_pos(get_event_len()),0) :
+           Log_event::exec_event(rli)));
 }
 #endif
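The rewritten return leans on the C++ comma operator: (rli->inc_event_relay_log_pos(get_event_len()), 0) performs the position bump for its side effect and then yields 0, i.e. success without the full group-position update. A self-contained illustration of that idiom (helper names here are invented):

// "(f(), 0)" runs f() for its side effect, then evaluates to 0.
#include <cassert>

static int relay_pos= 0;
static void inc_event_relay_log_pos() { ++relay_pos; }

int exec_event_status(bool query_error, bool one_shot_set)
{
  return query_error ? 1                                // stop on error
       : one_shot_set ? (inc_event_relay_log_pos(), 0)  // bump event pos only
                      : 0;                              // normal group update
}

int main()
{
  assert(exec_event_status(false, true) == 0 && relay_pos == 1);
  assert(exec_event_status(true, false) == 1 && relay_pos == 1);
  return 0;
}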
(rli->inc_event_relay_log_pos(get_event_len()),0) : + Log_event::exec_event(rli))); } #endif diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 27e8e9c11e7..9c5b0235767 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -2554,7 +2554,8 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) { - QUICK_SELECT *quick=new QUICK_SELECT(thd, table, ref->key, 1); + MEM_ROOT *old_root= my_pthread_getspecific_ptr(MEM_ROOT*, THR_MALLOC); + QUICK_SELECT *quick= new QUICK_SELECT(thd, table, ref->key); KEY *key_info = &table->key_info[ref->key]; KEY_PART *key_part; QUICK_RANGE *range; @@ -2566,7 +2567,7 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) { if (thd->is_fatal_error) goto err; // out of memory - return quick; // empty range + goto ok; // empty range } if (!(range= new QUICK_RANGE())) @@ -2613,9 +2614,12 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) goto err; } +ok: + my_pthread_setspecific_ptr(THR_MALLOC, old_root); return quick; err: + my_pthread_setspecific_ptr(THR_MALLOC, old_root); delete quick; return 0; } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 16c0c206df3..6cf01896b03 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -221,7 +221,6 @@ THD::THD() init(); /* Initialize sub structures */ - clear_alloc_root(&transaction.mem_root); init_alloc_root(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE); user_connect=(USER_CONN *)0; hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, @@ -258,6 +257,7 @@ THD::THD() transaction.trans_log.end_of_file= max_binlog_cache_size; } #endif + init_alloc_root(&transaction.mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0); { ulong tmp=sql_rnd_with_mutex(); randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id); @@ -303,12 +303,12 @@ void THD::init(void) void THD::init_for_queries() { ha_enable_transaction(this,TRUE); - init_sql_alloc(&mem_root, - variables.query_alloc_block_size, - variables.query_prealloc_size); - init_sql_alloc(&transaction.mem_root, - variables.trans_alloc_block_size, - variables.trans_prealloc_size); + + reset_root_defaults(&mem_root, variables.query_alloc_block_size, + variables.query_prealloc_size); + reset_root_defaults(&transaction.mem_root, + variables.trans_alloc_block_size, + variables.trans_prealloc_size); } @@ -328,6 +328,7 @@ void THD::change_user(void) cleanup(); cleanup_done= 0; init(); + stmt_map.reset(); hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, (hash_free_key) free_user_var, 0); @@ -1331,6 +1332,17 @@ void select_dumpvar::cleanup() } +/* + Create arena for already constructed THD. + + SYNOPSYS + Item_arena() + thd - thread for which arena is created + + DESCRIPTION + Create arena for already existing THD using its variables as parameters + for memory root initialization. +*/ Item_arena::Item_arena(THD* thd) :free_list(0), state(INITIALIZED) @@ -1341,24 +1353,31 @@ Item_arena::Item_arena(THD* thd) } -/* This constructor is called when Item_arena is a subobject of THD */ +/* + Create arena and optionally initialize memory root. -Item_arena::Item_arena() - :free_list(0), - state(CONVENTIONAL_EXECUTION) -{ - clear_alloc_root(&mem_root); -} + SYNOPSYS + Item_arena() + init_mem_root - whenever we need to initialize memory root + DESCRIPTION + Create arena and optionally initialize memory root with minimal + possible parameters. 
diff --git a/sql/sql_class.cc b/sql/sql_class.cc
index 16c0c206df3..6cf01896b03 100644
--- a/sql/sql_class.cc
+++ b/sql/sql_class.cc
@@ -221,7 +221,6 @@ THD::THD()
   init();
   /* Initialize sub structures */
-  clear_alloc_root(&transaction.mem_root);
   init_alloc_root(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE);
   user_connect=(USER_CONN *)0;
   hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0,
@@ -258,6 +257,7 @@ THD::THD()
     transaction.trans_log.end_of_file= max_binlog_cache_size;
   }
 #endif
+  init_alloc_root(&transaction.mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
   {
     ulong tmp=sql_rnd_with_mutex();
     randominit(&rand, tmp + (ulong) &rand, tmp + (ulong) ::query_id);
@@ -303,12 +303,12 @@ void THD::init(void)
 void THD::init_for_queries()
 {
   ha_enable_transaction(this,TRUE);
-  init_sql_alloc(&mem_root,
-                 variables.query_alloc_block_size,
-                 variables.query_prealloc_size);
-  init_sql_alloc(&transaction.mem_root,
-                 variables.trans_alloc_block_size,
-                 variables.trans_prealloc_size);
+
+  reset_root_defaults(&mem_root, variables.query_alloc_block_size,
+                      variables.query_prealloc_size);
+  reset_root_defaults(&transaction.mem_root,
+                      variables.trans_alloc_block_size,
+                      variables.trans_prealloc_size);
 }
@@ -328,6 +328,7 @@ void THD::change_user(void)
   cleanup();
   cleanup_done= 0;
   init();
+  stmt_map.reset();
   hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0,
             (hash_get_key) get_var_key,
             (hash_free_key) free_user_var, 0);
@@ -1331,6 +1332,17 @@ void select_dumpvar::cleanup()
 }

+/*
+  Create arena for already constructed THD.
+
+  SYNOPSYS
+    Item_arena()
+      thd - thread for which arena is created
+
+  DESCRIPTION
+    Create arena for already existing THD using its variables as parameters
+    for memory root initialization.
+*/
 Item_arena::Item_arena(THD* thd)
   :free_list(0),
    state(INITIALIZED)
@@ -1341,24 +1353,31 @@ Item_arena::Item_arena(THD* thd)
 }

-/* This constructor is called when Item_arena is a subobject of THD */
+/*
+  Create arena and optionally initialize memory root.

-Item_arena::Item_arena()
-  :free_list(0),
-   state(CONVENTIONAL_EXECUTION)
-{
-  clear_alloc_root(&mem_root);
-}
+  SYNOPSYS
+    Item_arena()
+      init_mem_root - whenever we need to initialize memory root

+  DESCRIPTION
+    Create arena and optionally initialize memory root with minimal
+    possible parameters.

+  NOTE
+    We use this constructor when arena is part of THD, but reinitialize
+    its memory root in THD::init_for_queries() before execution of real
+    statements.
+*/
 Item_arena::Item_arena(bool init_mem_root)
   :free_list(0),
-   state(INITIALIZED)
+   state(CONVENTIONAL_EXECUTION)
 {
   if (init_mem_root)
-    clear_alloc_root(&mem_root);
+    init_alloc_root(&mem_root, ALLOC_ROOT_MIN_BLOCK_SIZE, 0);
 }

+
 Item_arena::Type Item_arena::type() const
 {
   DBUG_ASSERT("Item_arena::type()" == "abstract");
@@ -1366,10 +1385,6 @@ Item_arena::Type Item_arena::type() const
 }

-Item_arena::~Item_arena()
-{}
-
-
 /*
   Statement functions
 */
@@ -1393,7 +1408,8 @@ Statement::Statement(THD *thd)
 */

 Statement::Statement()
-  :id(0),
+  :Item_arena((bool)TRUE),
+   id(0),
    set_query_id(1),
    allow_sum_func(0),                        /* initialized later */
    lex(&main_lex),
@@ -1461,8 +1477,16 @@ void Item_arena::restore_backup_item_arena(Item_arena *set, Item_arena *backup)
 {
   set->set_item_arena(this);
   set_item_arena(backup);
-  // reset backup mem_root to avoid its freeing
-  init_alloc_root(&backup->mem_root, 0, 0);
+#ifdef NOT_NEEDED_NOW
+  /*
+    Reset backup mem_root to avoid its freeing.
+    Since Item_arena's mem_root is freed only when it is part of Statement
+    we need this only if we use some Statement's arena as backup storage.
+    But we do this only with THD::stmt_backup and this Statement is specially
+    handled in this respect. So this code is not really needed now.
+  */
+  clear_alloc_root(&backup->mem_root);
+#endif
 }

 void Item_arena::set_item_arena(Item_arena *set)
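The sql_class.cc changes above replace "cleared" (unusable) memory roots with a two-phase setup: every arena root is initialized with minimal parameters at construction time, and retuned to the session's block sizes in THD::init_for_queries() once variables are known. A sketch of that two-phase idea; RootSketch and both functions are stand-ins for MySQL's MEM_ROOT, init_alloc_root(), and reset_root_defaults():

// Two-phase root setup: construct with a small but valid block
// size, retune later without touching already-allocated blocks.
#include <cstddef>

struct RootSketch
{
  size_t block_size;   // size of future allocation blocks
  size_t prealloc;     // bytes kept preallocated across resets
};

// Phase 1, in the constructor: always leaves a usable root behind,
// unlike the old clear_alloc_root() which left it empty/unusable.
void init_root_sketch(RootSketch *r, size_t min_block_size)
{
  r->block_size= min_block_size;
  r->prealloc= 0;
}

// Phase 2, in init_for_queries(): only future allocations are
// affected, so it is safe even if the root already served requests.
void retune_root_sketch(RootSketch *r, size_t block, size_t prealloc)
{
  r->block_size= block;
  r->prealloc= prealloc;
}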
diff --git a/sql/sql_class.h b/sql/sql_class.h
index 387bba43cad..68d187168d3 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -441,11 +441,23 @@ public:
     STATEMENT, PREPARED_STATEMENT, STORED_PROCEDURE
   };

+  /*
+    This constructor is used only when Item_arena is created as
+    backup storage for another instance of Item_arena.
+  */
+  Item_arena() {};
+  /*
+    Create arena for already constructed THD using its variables as
+    parameters for memory root initialization.
+  */
   Item_arena(THD *thd);
-  Item_arena();
+  /*
+    Create arena and optionally init memory root with minimal values.
+    Particularly used if Item_arena is part of Statement.
+  */
   Item_arena(bool init_mem_root);
   virtual Type type() const;
-  virtual ~Item_arena();
+  virtual ~Item_arena() {};

   inline bool is_stmt_prepare() const { return (int)state < (int)PREPARED; }
   inline bool is_first_stmt_execute() const { return state == PREPARED; }
@@ -566,7 +578,7 @@ public:
    assignment in Statement::Statement)
    Non-empty statement names are unique too: attempt to insert a new statement
    with duplicate name causes older statement to be deleted
-
+
    Statements are auto-deleted when they are removed from the map and when the
    map is deleted.
 */
@@ -575,7 +587,7 @@ class Statement_map
 {
 public:
   Statement_map();
-
+
   int insert(Statement *statement);

   Statement *find_by_name(LEX_STRING *name)
@@ -608,11 +620,18 @@ public:
     }
     hash_delete(&st_hash, (byte *) statement);
   }
+  /* Erase all statements (calls Statement destructor) */
+  void reset()
+  {
+    hash_reset(&names_hash);
+    hash_reset(&st_hash);
+    last_found_statement= 0;
+  }
   ~Statement_map()
   {
-    hash_free(&st_hash);
     hash_free(&names_hash);
+    hash_free(&st_hash);
   }
 private:
   HASH st_hash;
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 57c5f01d0bf..fbb45ce2484 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1265,6 +1265,8 @@ store_create_info(THD *thd, TABLE *table, String *packet)
     // check for surprises from the previous call to Field::sql_type()
     if (type.ptr() != tmp)
       type.set(tmp, sizeof(tmp), system_charset_info);
+    else
+      type.set_charset(system_charset_info);

     field->sql_type(type);
     packet->append(type.ptr(), type.length(), system_charset_info);