Diffstat (limited to 'sql')
36 files changed, 1040 insertions, 464 deletions
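One of the larger pieces in this changeset is the CHECK ... FOR UPGRADE support added to the handler layer (sql/handler.h and sql/handler.cc below): handler::ha_check() now consults check_old_types() and check_for_upgrade() before calling the engine's own check(). The following is a minimal, self-contained sketch of that dispatch order, assuming reduced stand-ins for the real classes; the admin status codes and method names mirror the diff, but the TableShare/Handler scaffolding and the main() driver are invented for illustration only.

/*
  Hypothetical sketch of the CHECK ... FOR UPGRADE dispatch added to
  handler::ha_check() in this changeset. The admin codes and method names
  mirror sql/handler.{h,cc}; TableShare, Handler and main() are invented
  scaffolding, not MySQL's real API.
*/
#include <cstdio>

/* Admin result codes, as in sql/handler.h after this patch */
#define HA_ADMIN_OK              0
#define HA_ADMIN_NEEDS_UPGRADE -10
#define HA_ADMIN_NEEDS_ALTER   -11
#define HA_ADMIN_NEEDS_CHECK   -12

struct TableShare
{
  unsigned long mysql_version;   /* 0 means an old, pre-versioned .frm */
  bool has_old_decimal;          /* stand-in for the FIELD_TYPE_NEWDECIMAL scan */
  bool has_blob_in_key;          /* stand-in for the blob-in-key scan */
};

struct Handler
{
  TableShare *share;

  /* mirrors handler::check_old_types(): old .frm + old DECIMAL => needs ALTER */
  int check_old_types() const
  {
    if (share->mysql_version == 0 && share->has_old_decimal)
      return HA_ADMIN_NEEDS_ALTER;
    return 0;
  }

  /* mirrors handler::ha_check_for_upgrade(): old .frm + blob in key => recheck */
  int check_for_upgrade() const
  {
    if (share->mysql_version == 0 && share->has_blob_in_key)
      return HA_ADMIN_NEEDS_CHECK;
    return 0;
  }

  /*
    mirrors the dispatch order in handler::ha_check(); the engine's own
    check() and update_frm_version() are stubbed out as "OK"
  */
  int ha_check(bool for_upgrade, unsigned long server_version) const
  {
    int error;
    if (share->mysql_version >= server_version && for_upgrade)
      return HA_ADMIN_OK;                    /* current table, nothing to do */
    if (share->mysql_version < server_version)
    {
      if ((error= check_old_types()))
        return error;
      error= check_for_upgrade();
      if (error && error != HA_ADMIN_NEEDS_CHECK)
        return error;
      if (!error && for_upgrade)
        return HA_ADMIN_OK;                  /* old table, nothing suspicious */
    }
    return HA_ADMIN_OK;                      /* engine-level check() stub */
  }
};

int main()
{
  TableShare old_decimal= { 0, true, false };
  TableShare blob_in_key= { 0, false, true };
  Handler h1= { &old_decimal };
  Handler h2= { &blob_in_key };
  std::printf("old DECIMAL table: %d\n", h1.ha_check(true, 50106));
  std::printf("blob-in-key table: %d\n", h2.ha_check(true, 50106));
  return 0;
}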
diff --git a/sql/event.cc b/sql/event.cc index d9e71a263b8..3fa41996995 100644 --- a/sql/event.cc +++ b/sql/event.cc @@ -457,11 +457,7 @@ common_1_lev_code: buf->append(tmp_buff, (uint) (end- tmp_buff)); if (close_quote) buf->append('\''); - - buf->append(' '); - LEX_STRING *ival= &interval_type_to_name[interval]; - buf->append(ival->str, ival->length); - + return 0; } @@ -627,10 +623,18 @@ evex_fill_row(THD *thd, TABLE *table, event_timed *et, my_bool is_update) table->field[EVEX_FIELD_STATUS]->store((longlong)et->status); + /* + Change the SQL_MODE only if body was present in an ALTER EVENT and of course + always during CREATE EVENT. + */ if (et->body.str) + { + table->field[EVEX_FIELD_SQL_MODE]->store((longlong)thd->variables.sql_mode); + if (table->field[field_num= EVEX_FIELD_BODY]-> store(et->body.str, et->body.length, system_charset_info)) goto trunc_err; + } if (et->starts.year) { @@ -1092,6 +1096,7 @@ evex_remove_from_cache(LEX_STRING *db, LEX_STRING *name, bool use_lock, { //ToDo : Add definer to the tuple (db, name) to become triple uint i; + int ret= 0; DBUG_ENTER("evex_remove_from_cache"); /* @@ -1126,6 +1131,7 @@ evex_remove_from_cache(LEX_STRING *db, LEX_STRING *name, bool use_lock, DBUG_PRINT("evex_remove_from_cache", ("delete from queue")); evex_queue_delete_element(&EVEX_EQ_NAME, i); // ok, we have cleaned + ret= 0; goto done; } } @@ -1134,7 +1140,7 @@ done: if (use_lock) VOID(pthread_mutex_unlock(&LOCK_event_arrays)); - DBUG_RETURN(0); + DBUG_RETURN(ret); } @@ -1378,7 +1384,7 @@ evex_show_create_event(THD *thd, sp_name *spn, LEX_STRING definer) char show_str_buf[768]; String show_str(show_str_buf, sizeof(show_str_buf), system_charset_info); List<Item> field_list; - const char *sql_mode_str; + byte *sql_mode_str; ulong sql_mode_len=0; show_str.length(0); diff --git a/sql/event.h b/sql/event.h index 6df6267abc4..4adacdd4e3a 100644 --- a/sql/event.h +++ b/sql/event.h @@ -112,7 +112,6 @@ public: enum enum_event_status status; sp_head *sphead; ulong sql_mode; - const uchar *body_begin; bool dropped; diff --git a/sql/event_executor.cc b/sql/event_executor.cc index 9483c2ab165..b6c6981fd7d 100644 --- a/sql/event_executor.cc +++ b/sql/event_executor.cc @@ -268,7 +268,7 @@ init_event_thread(THD* thd) thd->client_capabilities= 0; thd->security_ctx->master_access= 0; thd->security_ctx->db_access= 0; - thd->security_ctx->host= (char*)my_localhost; + thd->security_ctx->host_or_ip= (char*)my_localhost; my_net_init(&thd->net, 0); thd->net.read_timeout = slave_net_timeout; thd->slave_thread= 0; diff --git a/sql/event_timed.cc b/sql/event_timed.cc index cb0c0167a54..a1ec7f6b718 100644 --- a/sql/event_timed.cc +++ b/sql/event_timed.cc @@ -45,6 +45,8 @@ event_timed::init() definer_user.str= definer_host.str= 0; definer_user.length= definer_host.length= 0; + sql_mode= 0; + DBUG_VOID_RETURN; } @@ -109,17 +111,21 @@ void event_timed::init_body(THD *thd) { DBUG_ENTER("event_timed::init_body"); - MEM_ROOT *root= thd->mem_root; + DBUG_PRINT("info", ("body=[%s] body_begin=0x%ld end=0x%ld", body_begin, + body_begin, thd->lex->ptr)); body.length= thd->lex->ptr - body_begin; // Trim nuls at the end while (body.length && body_begin[body.length-1] == '\0') body.length--; - //the first is always space which I cannot skip in the parser - DBUG_ASSERT(*body_begin == ' '); - body.length--; - body.str= strmake_root(root, (char *)body_begin + 1, body.length); + /* the first is always whitespace which I cannot skip in the parser */ + while (my_isspace(thd->variables.character_set_client, *body_begin)) + { + 
++body_begin; + --body.length; + } + body.str= strmake_root(thd->mem_root, (char *)body_begin, body.length); DBUG_VOID_RETURN; } @@ -579,6 +585,9 @@ event_timed::load_from_row(MEM_ROOT *mem_root, TABLE *table) et->comment.length= strlen(et->comment.str); else et->comment.length= 0; + + + et->sql_mode= (ulong) table->field[EVEX_FIELD_SQL_MODE]->val_int(); DBUG_RETURN(0); error: @@ -985,6 +994,7 @@ done: DBUG_RETURN(ret); } +extern LEX_STRING interval_type_to_name[]; /* Get SHOW CREATE EVENT as string @@ -1025,6 +1035,9 @@ event_timed::get_create_event(THD *thd, String *buf) { buf->append(STRING_WITH_LEN("EVERY ")); buf->append(expr_buf); + buf->append(' '); + LEX_STRING *ival= &interval_type_to_name[interval]; + buf->append(ival->str, ival->length); } else { @@ -1228,6 +1241,7 @@ event_timed::compile(THD *thd, MEM_ROOT *mem_root) char *old_query; uint old_query_len; st_sp_chistics *p; + ulong old_sql_mode= thd->variables.sql_mode; char create_buf[2048]; String show_create(create_buf, sizeof(create_buf), system_charset_info); CHARSET_INFO *old_character_set_client, @@ -1247,6 +1261,8 @@ event_timed::compile(THD *thd, MEM_ROOT *mem_root) thd->update_charset(); DBUG_ENTER("event_timed::compile"); + DBUG_PRINT("info",("old_sql_mode=%d new_sql_mode=%d",old_sql_mode, sql_mode)); + thd->variables.sql_mode= this->sql_mode; /* Change the memory root for the execution time */ if (mem_root) { @@ -1298,7 +1314,7 @@ event_timed::compile(THD *thd, MEM_ROOT *mem_root) TODO: Handle sql_mode!! */ sphead->set_definer(definer.str, definer.length); - sphead->set_info(0, 0, &lex.sp_chistics, 0/*sql_mode*/); + sphead->set_info(0, 0, &lex.sp_chistics, sql_mode); sphead->optimize(); ret= 0; done: @@ -1312,6 +1328,7 @@ done: thd->query_length= old_query_len; thd->db= old_db; + thd->variables.sql_mode= old_sql_mode; thd->variables.character_set_client= old_character_set_client; thd->variables.character_set_results= old_character_set_results; thd->variables.collation_connection= old_collation_connection; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index cd37e830952..642199890e6 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4430,29 +4430,24 @@ int ha_ndbcluster::create(const char *name, Always create an event for the table, as other mysql servers expect it to be there. */ - if (ndbcluster_create_event(ndb, t, event_name.c_ptr(), share) < 0) + if (!ndbcluster_create_event(ndb, t, event_name.c_ptr(), share, + share && do_event_op /* push warning */)) { - /* this is only a serious error if the binlog is on */ - if (share && do_event_op) - { - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - "Creating event for logging table failed. " - "See error log for details."); - } - break; - } - if (ndb_extra_logging) - sql_print_information("NDB Binlog: CREATE TABLE Event: %s", - event_name.c_ptr()); - - if (share && do_event_op && - ndbcluster_create_event_ops(share, t, event_name.c_ptr()) < 0) - { - sql_print_error("NDB Binlog: FAILED CREATE TABLE event operations." - " Event: %s", name); - /* a warning has been issued to the client */ + if (ndb_extra_logging) + sql_print_information("NDB Binlog: CREATE TABLE Event: %s", + event_name.c_ptr()); + if (share && do_event_op && + ndbcluster_create_event_ops(share, t, event_name.c_ptr())) + { + sql_print_error("NDB Binlog: FAILED CREATE TABLE event operations." 
+ " Event: %s", name); + /* a warning has been issued to the client */ + } } + /* + warning has been issued if ndbcluster_create_event failed + and (share && do_event_op) + */ if (share && !do_event_op) share->flags|= NSF_NO_BINLOG; ndbcluster_log_schema_op(current_thd, share, @@ -4793,31 +4788,24 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) ndb_rep_event_name(&event_name, to + sizeof(share_prefix) - 1, 0); const NDBTAB *ndbtab= dict->getTable(new_tabname); - if (ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share) >= 0) + if (!ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share, + share && ndb_binlog_running /* push warning */)) { if (ndb_extra_logging) sql_print_information("NDB Binlog: RENAME Event: %s", event_name.c_ptr()); - if (share && ndb_binlog_running) + if (share && ndb_binlog_running && + ndbcluster_create_event_ops(share, ndbtab, event_name.c_ptr())) { - if (ndbcluster_create_event_ops(share, ndbtab, - event_name.c_ptr()) < 0) - { - sql_print_error("NDB Binlog: FAILED create event operations " - "during RENAME. Event %s", event_name.c_ptr()); - /* a warning has been issued to the client */ - } + sql_print_error("NDB Binlog: FAILED create event operations " + "during RENAME. Event %s", event_name.c_ptr()); + /* a warning has been issued to the client */ } } - else - { - sql_print_error("NDB Binlog: FAILED create event during RENAME. " - "Event: %s", event_name.c_ptr()); - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - "Creating event for logging table failed. " - "See error log for details."); - } + /* + warning has been issued if ndbcluster_create_event failed + and (share && ndb_binlog_running) + */ if (!is_old_table_tmpfile) ndbcluster_log_schema_op(current_thd, share, current_thd->query, current_thd->query_length, @@ -5524,14 +5512,17 @@ int ndbcluster_find_all_files(THD *thd) for (uint i= 0 ; i < list.count ; i++) { NDBDICT::List::Element& elmt= list.elements[i]; - if (IS_TMP_PREFIX(elmt.name)) + int do_handle_table= 0; + if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name)) { DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name)); continue; } DBUG_PRINT("info", ("Found %s.%s in NDB", elmt.database, elmt.name)); - if (!(elmt.state == NDBOBJ::StateBuilding || - elmt.state == NDBOBJ::StateOnline)) + if (elmt.state == NDBOBJ::StateOnline || + elmt.state == NDBOBJ::StateBackup) + do_handle_table= 1; + else if (!(elmt.state == NDBOBJ::StateBuilding)) { sql_print_information("NDB: skipping setup table %s.%s, in state %d", elmt.database, elmt.name, elmt.state); @@ -5543,7 +5534,7 @@ int ndbcluster_find_all_files(THD *thd) if (!(ndbtab= dict->getTable(elmt.name))) { - if (elmt.state == NDBOBJ::StateOnline) + if (do_handle_table) sql_print_error("NDB: failed to setup table %s.%s, error: %d, %s", elmt.database, elmt.name, dict->getNdbError().code, @@ -5659,7 +5650,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, for (i= 0 ; i < list.count ; i++) { NDBDICT::List::Element& elmt= list.elements[i]; - if (IS_TMP_PREFIX(elmt.name)) + if (IS_TMP_PREFIX(elmt.name) || IS_NDB_BLOB_PREFIX(elmt.name)) { DBUG_PRINT("info", ("Skipping %s.%s in NDB", elmt.database, elmt.name)); continue; @@ -5845,7 +5836,7 @@ static bool ndbcluster_init() #ifdef HAVE_NDB_BINLOG ndbcluster_binlog_init_handlerton(); #endif - h.flags= HTON_NO_FLAGS; + h.flags= HTON_TEMPORARY_NOT_SUPPORTED; } // Set connectstring if specified diff --git a/sql/ha_ndbcluster_binlog.cc 
b/sql/ha_ndbcluster_binlog.cc index 52ab3afcba1..69ba0525f2b 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -1865,8 +1865,7 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, /* create the event operations for receiving logging events */ - if (ndbcluster_create_event_ops(share, ndbtab, - event_name.c_ptr()) < 0) + if (ndbcluster_create_event_ops(share, ndbtab, event_name.c_ptr())) { sql_print_error("NDB Binlog:" "FAILED CREATE (DISCOVER) EVENT OPERATIONS Event: %s", @@ -1881,7 +1880,8 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, int ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, - const char *event_name, NDB_SHARE *share) + const char *event_name, NDB_SHARE *share, + int push_warning) { DBUG_ENTER("ndbcluster_create_event"); DBUG_PRINT("info", ("table=%s version=%d event=%s share=%s", @@ -1908,8 +1908,14 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, if (share->flags & NSF_BLOB_FLAG) { sql_print_error("NDB Binlog: logging of table %s " - "with no PK and blob attributes is not supported", + "with BLOB attribute and no PK is not supported", share->key); + if (push_warning) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_ILLEGAL_HA_CREATE_OPTION, ER(ER_ILLEGAL_HA_CREATE_OPTION), + ndbcluster_hton.name, + "Binlog of table with BLOB attribute and no PK"); + share->flags|= NSF_NO_BINLOG; DBUG_RETURN(-1); } @@ -1942,17 +1948,16 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, if (dict->createEvent(my_event)) // Add event to database { -#ifdef NDB_BINLOG_EXTRA_WARNINGS - /* - failed, print a warning - */ - push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, - ER_GET_ERRMSG, ER(ER_GET_ERRMSG), - dict->getNdbError().code, - dict->getNdbError().message, "NDB"); -#endif if (dict->getNdbError().classification != NdbError::SchemaObjectExists) { + /* + failed, print a warning + */ + if (push_warning) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + dict->getNdbError().code, + dict->getNdbError().message, "NDB"); sql_print_error("NDB Binlog: Unable to create event in database. " "Event: %s Error Code: %d Message: %s", event_name, dict->getNdbError().code, dict->getNdbError().message); @@ -1964,6 +1969,11 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, */ if (dict->dropEvent(my_event.getName())) { + if (push_warning) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + dict->getNdbError().code, + dict->getNdbError().message, "NDB"); sql_print_error("NDB Binlog: Unable to create event in database. " " Attempt to correct with drop failed. " "Event: %s Error Code: %d Message: %s", @@ -1978,6 +1988,11 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, */ if (dict->createEvent(my_event)) { + if (push_warning) + push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_GET_ERRMSG, ER(ER_GET_ERRMSG), + dict->getNdbError().code, + dict->getNdbError().message, "NDB"); sql_print_error("NDB Binlog: Unable to create event in database. " " Attempt to correct with drop ok, but create failed. 
" "Event: %s Error Code: %d Message: %s", @@ -2072,7 +2087,19 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, DBUG_RETURN(-1); } - NdbEventOperation *op= ndb->createEventOperation(event_name); + NdbEventOperation* op; + if (do_schema_share) + op= ndb->createEventOperation(event_name); + else + { + // set injector_ndb database/schema from table internal name + int ret= ndb->setDatabaseAndSchemaName(ndbtab); + assert(ret == 0); + op= ndb->createEventOperation(event_name); + // reset to catch errors + ndb->setDatabaseName(""); + ndb->setDatabaseSchemaName(""); + } if (!op) { pthread_mutex_unlock(&injector_mutex); @@ -2089,23 +2116,6 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, if (share->flags & NSF_BLOB_FLAG) op->mergeEvents(true); // currently not inherited from event - if (share->flags & NSF_BLOB_FLAG) - { - /* - * Given servers S1 S2, following results in out-of-date - * event->m_tableImpl and column->m_blobTable. - * - * S1: create table t1(a int primary key); - * S2: drop table t1; - * S1: create table t2(a int primary key, b blob); - * S1: alter table t2 add x int; - * S1: alter table t2 drop x; - * - * TODO fix at right place before we get here - */ - ndb->getDictionary()->fix_blob_events(ndbtab, event_name); - } - int n_columns= ndbtab->getNoOfColumns(); int n_fields= table ? table->s->fields : 0; // XXX ??? for (int j= 0; j < n_columns; j++) @@ -2656,7 +2666,8 @@ pthread_handler_t ndb_binlog_thread_func(void *arg) goto err; } - if (!(ndb= new Ndb(g_ndb_cluster_connection, "")) || + // empty database and schema + if (!(ndb= new Ndb(g_ndb_cluster_connection, "", "")) || ndb->init()) { sql_print_error("NDB Binlog: Getting Ndb object failed"); @@ -2836,10 +2847,41 @@ pthread_handler_t ndb_binlog_thread_func(void *arg) assert(pOp->getGCI() <= ndb_latest_received_binlog_epoch); bzero((char*) &row, sizeof(row)); injector::transaction trans= inj->new_trans(thd); + { // pass table map before epoch + Uint32 iter=0; + const NdbEventOperation* gci_op; + Uint32 event_types; + while ((gci_op=ndb->getGCIEventOperations(&iter, &event_types)) + != NULL) + { + NDB_SHARE* share=(NDB_SHARE*)gci_op->getCustomData(); + DBUG_PRINT("info", ("per gci op %p share %p event types 0x%x", + gci_op, share, event_types)); + // this should not happen + if (share == NULL || share->table == NULL) + { + DBUG_PRINT("info", ("no share or table !")); + continue; + } + TABLE* table=share->table; + const LEX_STRING& name=table->s->table_name; + DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str)); + injector::transaction::table tbl(table, true); + // TODO enable when mats patch pushed + //trans.use_table(::server_id, tbl); + } + } gci= pOp->getGCI(); if (apply_status_share) { TABLE *table= apply_status_share->table; + + const LEX_STRING& name=table->s->table_name; + DBUG_PRINT("info", ("use_table: %.*s", name.length, name.str)); + injector::transaction::table tbl(table, true); + // TODO enable when mats patch pushed + //trans.use_table(::server_id, tbl); + MY_BITMAP b; uint32 bitbuf; DBUG_ASSERT(table->s->fields <= sizeof(bitbuf) * 8); @@ -2882,7 +2924,15 @@ pthread_handler_t ndb_binlog_thread_func(void *arg) (unsigned) NDBEVENT::TE_FIRST_NON_DATA_EVENT) ndb_binlog_thread_handle_data_event(ndb, pOp, row, trans); else + { + // set injector_ndb database/schema from table internal name + int ret= ndb->setDatabaseAndSchemaName(pOp->getEvent()->getTable()); + assert(ret == 0); ndb_binlog_thread_handle_non_data_event(ndb, pOp, row); + // reset to catch errors + 
ndb->setDatabaseName(""); + ndb->setDatabaseSchemaName(""); + } pOp= ndb->nextEvent(); } while (pOp && pOp->getGCI() == gci); diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h index a297f80f6ab..d222a7d848b 100644 --- a/sql/ha_ndbcluster_binlog.h +++ b/sql/ha_ndbcluster_binlog.h @@ -85,7 +85,8 @@ int ndbcluster_create_binlog_setup(Ndb *ndb, const char *key, const char *table_name, my_bool share_may_exist); int ndbcluster_create_event(Ndb *ndb, const NDBTAB *table, - const char *event_name, NDB_SHARE *share); + const char *event_name, NDB_SHARE *share, + int push_warning= 0); int ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, const char *event_name); diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 1897886ce45..5d4d1570044 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1078,9 +1078,9 @@ static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, else if (flag == ANALYZE_PARTS) error= file->analyze(thd, check_opt); else if (flag == CHECK_PARTS) - error= file->check(thd, check_opt); + error= file->ha_check(thd, check_opt); else if (flag == REPAIR_PARTS) - error= file->repair(thd, check_opt); + error= file->ha_repair(thd, check_opt); else { DBUG_ASSERT(FALSE); diff --git a/sql/handler.cc b/sql/handler.cc index 75961104d34..25cec431395 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -359,6 +359,7 @@ static int ha_init_errors(void) SETMSG(HA_ERR_NO_CONNECTION, "Could not connect to storage engine"); SETMSG(HA_ERR_TABLE_DEF_CHANGED, ER(ER_TABLE_DEF_CHANGED)); SETMSG(HA_ERR_FOREIGN_DUPLICATE_KEY, "FK constraint would lead to duplicate key"); + SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER(ER_TABLE_NEEDS_UPGRADE)); /* Register the error messages for use with my_error(). */ return my_error_register(errmsgs, HA_ERR_FIRST, HA_ERR_LAST); @@ -1975,6 +1976,9 @@ void handler::print_error(int error, myf errflag) my_error(ER_DROP_INDEX_FK, MYF(0), ptr); DBUG_VOID_RETURN; } + case HA_ERR_TABLE_NEEDS_UPGRADE: + textno=ER_TABLE_NEEDS_UPGRADE; + break; default: { /* The error was "unknown" to this function. 
@@ -2016,6 +2020,100 @@ bool handler::get_error_message(int error, String* buf) } +int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt) +{ + KEY *keyinfo, *keyend; + KEY_PART_INFO *keypart, *keypartend; + + if (!table->s->mysql_version) + { + /* check for blob-in-key error */ + keyinfo= table->key_info; + keyend= table->key_info + table->s->keys; + for (; keyinfo < keyend; keyinfo++) + { + keypart= keyinfo->key_part; + keypartend= keypart + keyinfo->key_parts; + for (; keypart < keypartend; keypart++) + { + if (!keypart->fieldnr) + continue; + Field *field= table->field[keypart->fieldnr-1]; + if (field->type() == FIELD_TYPE_BLOB) + { + if (check_opt->sql_flags & TT_FOR_UPGRADE) + check_opt->flags= T_MEDIUM; + return HA_ADMIN_NEEDS_CHECK; + } + } + } + } + return check_for_upgrade(check_opt); +} + + +int handler::check_old_types() +{ + Field** field; + + if (!table->s->mysql_version) + { + /* check for bad DECIMAL field */ + for (field= table->field; (*field); field++) + { + if ((*field)->type() == FIELD_TYPE_NEWDECIMAL) + { + return HA_ADMIN_NEEDS_ALTER; + } + } + } + return 0; +} + + +static bool update_frm_version(TABLE *table, bool needs_lock) +{ + char path[FN_REFLEN]; + File file; + int result= 1; + DBUG_ENTER("update_frm_version"); + + if (table->s->mysql_version != MYSQL_VERSION_ID) + DBUG_RETURN(0); + + strxmov(path, table->s->normalized_path.str, reg_ext, NullS); + + if (needs_lock) + pthread_mutex_lock(&LOCK_open); + + if ((file= my_open(path, O_RDWR|O_BINARY, MYF(MY_WME))) >= 0) + { + uchar version[4]; + char *key= table->s->table_cache_key.str; + uint key_length= table->s->table_cache_key.length; + TABLE *entry; + HASH_SEARCH_STATE state; + + int4store(version, MYSQL_VERSION_ID); + + if ((result= my_pwrite(file,(byte*) version,4,51L,MYF_RW))) + goto err; + + for (entry=(TABLE*) hash_first(&open_cache,(byte*) key,key_length, &state); + entry; + entry= (TABLE*) hash_next(&open_cache,(byte*) key,key_length, &state)) + entry->s->mysql_version= MYSQL_VERSION_ID; + } +err: + if (file >= 0) + VOID(my_close(file,MYF(MY_WME))); + if (needs_lock) + pthread_mutex_unlock(&LOCK_open); + DBUG_RETURN(result); +} + + + /* Return key if error because of duplicated keys */ uint handler::get_dup_key(int error) @@ -2093,6 +2191,56 @@ void handler::drop_table(const char *name) /* + Performs checks upon the table. + + SYNOPSIS + check() + thd thread doing CHECK TABLE operation + check_opt options from the parser + + NOTES + + RETURN + HA_ADMIN_OK Successful upgrade + HA_ADMIN_NEEDS_UPGRADE Table has structures requiring upgrade + HA_ADMIN_NEEDS_ALTER Table has structures requiring ALTER TABLE + HA_ADMIN_NOT_IMPLEMENTED +*/ + +int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt) +{ + int error; + + if ((table->s->mysql_version >= MYSQL_VERSION_ID) && + (check_opt->sql_flags & TT_FOR_UPGRADE)) + return 0; + + if (table->s->mysql_version < MYSQL_VERSION_ID) + { + if ((error= check_old_types())) + return error; + error= ha_check_for_upgrade(check_opt); + if (error && (error != HA_ADMIN_NEEDS_CHECK)) + return error; + if (!error && (check_opt->sql_flags & TT_FOR_UPGRADE)) + return 0; + } + if ((error= check(thd, check_opt))) + return error; + return update_frm_version(table, 0); +} + + +int handler::ha_repair(THD* thd, HA_CHECK_OPT* check_opt) +{ + int result; + if ((result= repair(thd, check_opt))) + return result; + return update_frm_version(table, 0); +} + + +/* Tell the storage engine that it is allowed to "disable transaction" in the handler. 
It is a hint that ACID is not required - it is used in NDB for ALTER TABLE, for example, when data are copied to temporary table. diff --git a/sql/handler.h b/sql/handler.h index 37bf5335077..8189973da66 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -43,6 +43,9 @@ #define HA_ADMIN_TRY_ALTER -7 #define HA_ADMIN_WRONG_CHECKSUM -8 #define HA_ADMIN_NOT_BASE_TABLE -9 +#define HA_ADMIN_NEEDS_UPGRADE -10 +#define HA_ADMIN_NEEDS_ALTER -11 +#define HA_ADMIN_NEEDS_CHECK -12 /* Bits in table_flags() to show what database can do */ @@ -601,6 +604,7 @@ struct show_table_alias_st { #define HTON_HIDDEN (1 << 3) //Engine does not appear in lists #define HTON_FLUSH_AFTER_RENAME (1 << 4) #define HTON_NOT_USER_SELECTABLE (1 << 5) +#define HTON_TEMPORARY_NOT_SUPPORTED (1 << 6) //Having temporary tables not supported typedef struct st_thd_trans { @@ -1696,10 +1700,26 @@ public: { return HA_ERR_WRONG_COMMAND; } virtual void update_create_info(HA_CREATE_INFO *create_info) {} +protected: + /* to be implemented in handlers */ /* admin commands - called from mysql_admin_table */ virtual int check(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } + + /* + in these two methods check_opt can be modified + to specify CHECK option to use to call check() + upon the table + */ + virtual int check_for_upgrade(HA_CHECK_OPT *check_opt) + { return 0; } +public: + int ha_check_for_upgrade(HA_CHECK_OPT *check_opt); + int check_old_types(); + /* to be actually called to get 'check()' functionality*/ + int ha_check(THD *thd, HA_CHECK_OPT *check_opt); + virtual int backup(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } /* @@ -1708,8 +1728,11 @@ public: */ virtual int restore(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } +protected: virtual int repair(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } +public: + int ha_repair(THD* thd, HA_CHECK_OPT* check_opt); virtual int optimize(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } virtual int analyze(THD* thd, HA_CHECK_OPT* check_opt) diff --git a/sql/item.cc b/sql/item.cc index a3bfd71c010..9f09a8fa02c 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -3212,6 +3212,252 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) /* + Resolve the name of an outer select column reference. + + SYNOPSIS + Item_field::fix_outer_field() + thd [in] current thread + from_field [in/out] found field reference or (Field*)not_found_field + reference [in/out] view column if this item was resolved to a view column + + DESCRIPTION + The method resolves the column reference represented by 'this' as a column + present in outer selects that contain current select. + + NOTES + This is the inner loop of Item_field::fix_fields: + + for each outer query Q_k beginning from the inner-most one + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q_k; + + if such a column is not found + Search for a column or derived column named col_ref_i + [in table T_j] in the SELECT and GROUP clauses of Q_k. + } + + IMPLEMENTATION + In prepared statements, because of cache, find_field_in_tables() + can resolve fields even if they don't belong to current context. + In this case this method only finds appropriate context and marks + current select as dependent. The found reference of field should be + provided in 'from_field'. + + RETURN + 1 - column succefully resolved and fix_fields() should continue. 
+ 0 - column fully fixed and fix_fields() should return FALSE + -1 - error occured +*/ +int +Item_field::fix_outer_field(THD *thd, Field **from_field, Item **reference) +{ + enum_parsing_place place= NO_MATTER; + bool field_found= (*from_field != not_found_field); + bool upward_lookup= FALSE; + + /* + If there are outer contexts (outer selects, but current select is + not derived table or view) try to resolve this reference in the + outer contexts. + + We treat each subselect as a separate namespace, so that different + subselects may contain columns with the same names. The subselects + are searched starting from the innermost. + */ + Name_resolution_context *last_checked_context= context; + Item **ref= (Item **) not_found_item; + Name_resolution_context *outer_context= context->outer_context; + for (; + outer_context; + outer_context= outer_context->outer_context) + { + SELECT_LEX *select= outer_context->select_lex; + Item_subselect *prev_subselect_item= + last_checked_context->select_lex->master_unit()->item; + last_checked_context= outer_context; + upward_lookup= TRUE; + + place= prev_subselect_item->parsing_place; + /* + If outer_field is set, field was already found by first call + to find_field_in_tables(). Only need to find appropriate context. + */ + if (field_found && outer_context->select_lex != + cached_table->select_lex) + continue; + /* + In case of a view, find_field_in_tables() writes the pointer to + the found view field into '*reference', in other words, it + substitutes this Item_field with the found expression. + */ + if (field_found || (*from_field= find_field_in_tables(thd, this, + outer_context-> + first_name_resolution_table, + outer_context-> + last_name_resolution_table, + reference, + IGNORE_EXCEPT_NON_UNIQUE, + TRUE, TRUE)) != + not_found_field) + { + if (*from_field) + { + if (*from_field != view_ref_found) + { + prev_subselect_item->used_tables_cache|= (*from_field)->table->map; + prev_subselect_item->const_item_cache= 0; + if (thd->lex->in_sum_func && + thd->lex->in_sum_func->nest_level == + thd->lex->current_select->nest_level) + { + Item::Type type= (*reference)->type(); + set_if_bigger(thd->lex->in_sum_func->max_arg_level, + select->nest_level); + set_field(*from_field); + fixed= 1; + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, this, + ((type == REF_ITEM || type == FIELD_ITEM) ? + (Item_ident*) (*reference) : 0)); + return 0; + } + } + else + { + Item::Type type= (*reference)->type(); + prev_subselect_item->used_tables_cache|= + (*reference)->used_tables(); + prev_subselect_item->const_item_cache&= + (*reference)->const_item(); + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, this, + ((type == REF_ITEM || type == FIELD_ITEM) ? + (Item_ident*) (*reference) : + 0)); + /* + A reference to a view field had been found and we + substituted it instead of this Item (find_field_in_tables + does it by assigning the new value to *reference), so now + we can return from this function. + */ + return 0; + } + } + break; + } + + /* Search in SELECT and GROUP lists of the outer select. */ + if (outer_context->resolve_in_select_list) + { + if (!(ref= resolve_ref_in_select_and_group(thd, this, select))) + return -1; /* Some error occurred (e.g. ambiguous names). 
*/ + if (ref != not_found_item) + { + DBUG_ASSERT(*ref && (*ref)->fixed); + prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); + prev_subselect_item->const_item_cache&= (*ref)->const_item(); + break; + } + } + + /* + Reference is not found in this select => this subquery depend on + outer select (or we just trying to find wrong identifier, in this + case it does not matter which used tables bits we set) + */ + prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; + prev_subselect_item->const_item_cache= 0; + } + + DBUG_ASSERT(ref != 0); + if (!*from_field) + return -1; + if (ref == not_found_item && *from_field == not_found_field) + { + if (upward_lookup) + { + // We can't say exactly what absent table or field + my_error(ER_BAD_FIELD_ERROR, MYF(0), full_name(), thd->where); + } + else + { + /* Call find_field_in_tables only to report the error */ + find_field_in_tables(thd, this, + context->first_name_resolution_table, + context->last_name_resolution_table, + reference, REPORT_ALL_ERRORS, + !any_privileges && + TRUE, TRUE); + } + return -1; + } + else if (ref != not_found_item) + { + Item *save; + Item_ref *rf; + + /* Should have been checked in resolve_ref_in_select_and_group(). */ + DBUG_ASSERT(*ref && (*ref)->fixed); + /* + Here, a subset of actions performed by Item_ref::set_properties + is not enough. So we pass ptr to NULL into Item_[direct]_ref + constructor, so no initialization is performed, and call + fix_fields() below. + */ + save= *ref; + *ref= NULL; // Don't call set_properties() + rf= (place == IN_HAVING ? + new Item_ref(context, ref, (char*) table_name, + (char*) field_name) : + new Item_direct_ref(context, ref, (char*) table_name, + (char*) field_name)); + *ref= save; + if (!rf) + return -1; + thd->change_item_tree(reference, rf); + /* + rf is Item_ref => never substitute other items (in this case) + during fix_fields() => we can use rf after fix_fields() + */ + DBUG_ASSERT(!rf->fixed); // Assured by Item_ref() + if (rf->fix_fields(thd, reference) || rf->check_cols(1)) + return -1; + + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, this, + rf); + return 0; + } + else + { + mark_as_dependent(thd, last_checked_context->select_lex, + context->select_lex, + this, this); + if (last_checked_context->select_lex->having_fix_field) + { + Item_ref *rf; + rf= new Item_ref(context, + (cached_table->db[0] ? cached_table->db : 0), + (char*) cached_table->alias, (char*) field_name); + if (!rf) + return -1; + thd->change_item_tree(reference, rf); + /* + rf is Item_ref => never substitute other items (in this case) + during fix_fields() => we can use rf after fix_fields() + */ + DBUG_ASSERT(!rf->fixed); // Assured by Item_ref() + if (rf->fix_fields(thd, reference) || rf->check_cols(1)) + return -1; + return 0; + } + } + return 1; +} + + +/* Resolve the name of a column reference. SYNOPSIS @@ -3258,12 +3504,11 @@ resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) bool Item_field::fix_fields(THD *thd, Item **reference) { - enum_parsing_place place= NO_MATTER; DBUG_ASSERT(fixed == 0); if (!field) // If field is not checked { - bool upward_lookup= FALSE; Field *from_field= (Field *)not_found_field; + bool outer_fixed= false; /* In case of view, find_field_in_tables() write pointer to view field expression to 'reference', i.e. 
it substitute that expression instead @@ -3278,7 +3523,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) TRUE)) == not_found_field) { - + int ret; /* Look up in current select's item_list to find aliased fields */ if (thd->lex->current_select->is_item_list_lookup) { @@ -3293,197 +3538,11 @@ bool Item_field::fix_fields(THD *thd, Item **reference) return 0; } } - - /* - If there are outer contexts (outer selects, but current select is - not derived table or view) try to resolve this reference in the - outer contexts. - - We treat each subselect as a separate namespace, so that different - subselects may contain columns with the same names. The subselects - are searched starting from the innermost. - */ - Name_resolution_context *last_checked_context= context; - Item **ref= (Item **) not_found_item; - Name_resolution_context *outer_context= context->outer_context; - for (; - outer_context; - outer_context= outer_context->outer_context) - { - SELECT_LEX *select= outer_context->select_lex; - Item_subselect *prev_subselect_item= - last_checked_context->select_lex->master_unit()->item; - last_checked_context= outer_context; - upward_lookup= TRUE; - - place= prev_subselect_item->parsing_place; - /* - In case of a view, find_field_in_tables() writes the pointer to - the found view field into '*reference', in other words, it - substitutes this Item_field with the found expression. - */ - if ((from_field= find_field_in_tables(thd, this, - outer_context-> - first_name_resolution_table, - outer_context-> - last_name_resolution_table, - reference, - IGNORE_EXCEPT_NON_UNIQUE, - TRUE, TRUE)) != - not_found_field) - { - if (from_field) - { - if (from_field != view_ref_found) - { - prev_subselect_item->used_tables_cache|= from_field->table->map; - prev_subselect_item->const_item_cache= 0; - if (thd->lex->in_sum_func && - thd->lex->in_sum_func->nest_level == - thd->lex->current_select->nest_level) - { - Item::Type type= (*reference)->type(); - set_if_bigger(thd->lex->in_sum_func->max_arg_level, - select->nest_level); - set_field(from_field); - fixed= 1; - mark_as_dependent(thd, last_checked_context->select_lex, - context->select_lex, this, - ((type == REF_ITEM || type == FIELD_ITEM) ? - (Item_ident*) (*reference) : 0)); - return FALSE; - } - } - else - { - Item::Type type= (*reference)->type(); - prev_subselect_item->used_tables_cache|= - (*reference)->used_tables(); - prev_subselect_item->const_item_cache&= - (*reference)->const_item(); - mark_as_dependent(thd, last_checked_context->select_lex, - context->select_lex, this, - ((type == REF_ITEM || type == FIELD_ITEM) ? - (Item_ident*) (*reference) : - 0)); - /* - A reference to a view field had been found and we - substituted it instead of this Item (find_field_in_tables - does it by assigning the new value to *reference), so now - we can return from this function. - */ - return FALSE; - } - } - break; - } - - /* Search in SELECT and GROUP lists of the outer select. */ - if (outer_context->resolve_in_select_list) - { - if (!(ref= resolve_ref_in_select_and_group(thd, this, select))) - goto error; /* Some error occurred (e.g. ambiguous names). 
*/ - if (ref != not_found_item) - { - DBUG_ASSERT(*ref && (*ref)->fixed); - prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); - prev_subselect_item->const_item_cache&= (*ref)->const_item(); - break; - } - } - - /* - Reference is not found in this select => this subquery depend on - outer select (or we just trying to find wrong identifier, in this - case it does not matter which used tables bits we set) - */ - prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; - prev_subselect_item->const_item_cache= 0; - } - - DBUG_ASSERT(ref != 0); - if (!from_field) - goto error; - if (ref == not_found_item && from_field == not_found_field) - { - if (upward_lookup) - { - // We can't say exactly what absent table or field - my_error(ER_BAD_FIELD_ERROR, MYF(0), full_name(), thd->where); - } - else - { - /* Call find_field_in_tables only to report the error */ - find_field_in_tables(thd, this, - context->first_name_resolution_table, - context->last_name_resolution_table, - reference, REPORT_ALL_ERRORS, - !any_privileges && - TRUE, TRUE); - } - goto error; - } - else if (ref != not_found_item) - { - Item *save; - Item_ref *rf; - - /* Should have been checked in resolve_ref_in_select_and_group(). */ - DBUG_ASSERT(*ref && (*ref)->fixed); - /* - Here, a subset of actions performed by Item_ref::set_properties - is not enough. So we pass ptr to NULL into Item_[direct]_ref - constructor, so no initialization is performed, and call - fix_fields() below. - */ - save= *ref; - *ref= NULL; // Don't call set_properties() - rf= (place == IN_HAVING ? - new Item_ref(context, ref, (char*) table_name, - (char*) field_name) : - new Item_direct_ref(context, ref, (char*) table_name, - (char*) field_name)); - *ref= save; - if (!rf) - goto error; - thd->change_item_tree(reference, rf); - /* - rf is Item_ref => never substitute other items (in this case) - during fix_fields() => we can use rf after fix_fields() - */ - DBUG_ASSERT(!rf->fixed); // Assured by Item_ref() - if (rf->fix_fields(thd, reference) || rf->check_cols(1)) - goto error; - - mark_as_dependent(thd, last_checked_context->select_lex, - context->select_lex, this, - rf); - return FALSE; - } - else - { - mark_as_dependent(thd, last_checked_context->select_lex, - context->select_lex, - this, this); - if (last_checked_context->select_lex->having_fix_field) - { - Item_ref *rf; - rf= new Item_ref(context, - (cached_table->db[0] ? 
cached_table->db : 0), - (char*) cached_table->alias, (char*) field_name); - if (!rf) - goto error; - thd->change_item_tree(reference, rf); - /* - rf is Item_ref => never substitute other items (in this case) - during fix_fields() => we can use rf after fix_fields() - */ - DBUG_ASSERT(!rf->fixed); // Assured by Item_ref() - if (rf->fix_fields(thd, reference) || rf->check_cols(1)) - goto error; - return FALSE; - } - } + if ((ret= fix_outer_field(thd, &from_field, reference)) < 0) + goto error; + else if (!ret) + return FALSE; + outer_fixed= TRUE; } else if (!from_field) goto error; @@ -3503,6 +3562,17 @@ bool Item_field::fix_fields(THD *thd, Item **reference) if (from_field == view_ref_found) return FALSE; + if (!outer_fixed && cached_table && cached_table->select_lex && + context->select_lex && + cached_table->select_lex != context->select_lex) + { + int ret; + if ((ret= fix_outer_field(thd, &from_field, reference)) < 0) + goto error; + else if (!ret) + return FALSE; + } + set_field(from_field); if (thd->lex->in_sum_func && thd->lex->in_sum_func->nest_level == @@ -4655,6 +4725,25 @@ bool Item_ref::fix_fields(THD *thd, Item **reference) } if (from_field != not_found_field) { + if (cached_table && cached_table->select_lex && + outer_context->select_lex && + cached_table->select_lex != outer_context->select_lex) + { + /* + Due to cache, find_field_in_tables() can return field which + doesn't belong to provided outer_context. In this case we have + to find proper field context in order to fix field correcly. + */ + do + { + outer_context= outer_context->outer_context; + select= outer_context->select_lex; + prev_subselect_item= + last_checked_context->select_lex->master_unit()->item; + last_checked_context= outer_context; + } while (outer_context && outer_context->select_lex && + cached_table->select_lex != outer_context->select_lex); + } prev_subselect_item->used_tables_cache|= from_field->table->map; prev_subselect_item->const_item_cache= 0; break; diff --git a/sql/item.h b/sql/item.h index 030b2c40b4a..ae6aaeb82f0 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1205,6 +1205,7 @@ public: inline uint32 max_disp_length() { return field->max_length(); } Item_field *filed_for_view_update() { return this; } Item *safe_charset_converter(CHARSET_INFO *tocs); + int fix_outer_field(THD *thd, Field **field, Item **reference); friend class Item_default_value; friend class Item_insert_value; friend class st_select_lex_unit; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index fe02e7c5b49..a3e47154bc3 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1128,9 +1128,9 @@ void Item_func_substr_index::fix_length_and_dec() String *Item_func_substr_index::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - String *delimeter =args[1]->val_str(&tmp_value); - int32 count = (int32) args[2]->val_int(); + String *res= args[0]->val_str(str); + String *delimiter= args[1]->val_str(&tmp_value); + int32 count= (int32) args[2]->val_int(); uint offset; if (args[0]->null_value || args[1]->null_value || args[2]->null_value) @@ -1139,8 +1139,8 @@ String *Item_func_substr_index::val_str(String *str) return 0; } null_value=0; - uint delimeter_length=delimeter->length(); - if (!res->length() || !delimeter_length || !count) + uint delimiter_length= delimiter->length(); + if (!res->length() || !delimiter_length || !count) return &my_empty_string; // Wrong parameters res->set_charset(collation.collation); @@ -1148,11 +1148,11 @@ String *Item_func_substr_index::val_str(String *str) 
#ifdef USE_MB if (use_mb(res->charset())) { - const char *ptr=res->ptr(); - const char *strend = ptr+res->length(); - const char *end=strend-delimeter_length+1; - const char *search=delimeter->ptr(); - const char *search_end=search+delimeter_length; + const char *ptr= res->ptr(); + const char *strend= ptr+res->length(); + const char *end= strend-delimiter_length+1; + const char *search= delimiter->ptr(); + const char *search_end= search+delimiter_length; int32 n=0,c=count,pass; register uint32 l; for (pass=(count>0);pass<2;++pass) @@ -1167,7 +1167,7 @@ String *Item_func_substr_index::val_str(String *str) if (*i++ != *j++) goto skip; if (pass==0) ++n; else if (!--c) break; - ptr+=delimeter_length; + ptr+= delimiter_length; continue; } skip: @@ -1189,7 +1189,7 @@ String *Item_func_substr_index::val_str(String *str) } else /* return right part */ { - ptr+=delimeter_length; + ptr+= delimiter_length; tmp_value.set(*res,(ulong) (ptr-res->ptr()), (ulong) (strend-ptr)); } } @@ -1200,9 +1200,9 @@ String *Item_func_substr_index::val_str(String *str) { if (count > 0) { // start counting from the beginning - for (offset=0 ;; offset+=delimeter_length) + for (offset=0; ; offset+= delimiter_length) { - if ((int) (offset=res->strstr(*delimeter,offset)) < 0) + if ((int) (offset= res->strstr(*delimiter, offset)) < 0) return res; // Didn't find, return org string if (!--count) { @@ -1223,7 +1223,7 @@ String *Item_func_substr_index::val_str(String *str) address space less than where the found substring is located in res */ - if ((int) (offset=res->strrstr(*delimeter,offset)) < 0) + if ((int) (offset= res->strrstr(*delimiter, offset)) < 0) return res; // Didn't find, return org string /* At this point, we've searched for the substring @@ -1231,13 +1231,19 @@ String *Item_func_substr_index::val_str(String *str) */ if (!++count) { - offset+=delimeter_length; + offset+= delimiter_length; tmp_value.set(*res,offset,res->length()- offset); break; } } } } + /* + We always mark tmp_value as const so that if val_str() is called again + on this object, we don't disrupt the contents of tmp_value when it was + derived from another String. 
+ */ + tmp_value.mark_as_const(); return (&tmp_value); } diff --git a/sql/item_subselect.h b/sql/item_subselect.h index f1c99f74498..a4dac5bda87 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -125,6 +125,7 @@ public: friend class select_subselect; friend class Item_in_optimizer; friend bool Item_field::fix_fields(THD *, Item **); + friend int Item_field::fix_outer_field(THD *, Field **, Item **); friend bool Item_ref::fix_fields(THD *, Item **); friend void mark_select_range_as_dependent(THD*, st_select_lex*, st_select_lex*, diff --git a/sql/item_xmlfunc.cc b/sql/item_xmlfunc.cc index bb5775780fa..d86b6acfc56 100644 --- a/sql/item_xmlfunc.cc +++ b/sql/item_xmlfunc.cc @@ -101,6 +101,7 @@ typedef struct my_xpath_st MY_XPATH_FUNC *func; /* last scanned function creator */ Item *item; /* current expression */ Item *context; /* last scanned context */ + Item *rootelement; /* The root element */ String *context_cache; /* last context provider */ String *pxml; /* Parsed XML, an array of MY_XML_NODE */ CHARSET_INFO *cs; /* character set/collation string comparison */ @@ -1464,6 +1465,8 @@ static int my_xpath_parse_LocationPath(MY_XPATH *xpath) { Item *context= xpath->context; + if (!xpath->context) + xpath->context= xpath->rootelement; int rc= my_xpath_parse_RelativeLocationPath(xpath) || my_xpath_parse_AbsoluteLocationPath(xpath); @@ -1496,7 +1499,7 @@ static int my_xpath_parse_AbsoluteLocationPath(MY_XPATH *xpath) if (!my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH)) return 0; - xpath->context= new Item_nodeset_func_rootelement(xpath->pxml); + xpath->context= xpath->rootelement; if (my_xpath_parse_term(xpath, MY_XPATH_LEX_SLASH)) { @@ -2292,6 +2295,8 @@ my_xpath_parse(MY_XPATH *xpath, const char *str, const char *strend) my_xpath_lex_init(&xpath->prevtok, str, strend); my_xpath_lex_scan(xpath, &xpath->lasttok, str, strend); + xpath->rootelement= new Item_nodeset_func_rootelement(xpath->pxml); + return my_xpath_parse_Expr(xpath) && my_xpath_parse_term(xpath, MY_XPATH_LEX_EOF); diff --git a/sql/lex.h b/sql/lex.h index b52be29457f..574d7036c8a 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -552,6 +552,7 @@ static SYMBOL symbols[] = { { "UNSIGNED", SYM(UNSIGNED)}, { "UNTIL", SYM(UNTIL_SYM)}, { "UPDATE", SYM(UPDATE_SYM)}, + { "UPGRADE", SYM(UPGRADE_SYM)}, { "USAGE", SYM(USAGE)}, { "USE", SYM(USE_SYM)}, { "USER", SYM(USER)}, diff --git a/sql/log.cc b/sql/log.cc index 2b75bda2d70..c554499292c 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -1105,6 +1105,7 @@ static int binlog_commit(THD *thd, bool all) DBUG_RETURN(0); } Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE); + qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE) DBUG_RETURN(binlog_end_trans(thd, trx_data, &qev)); } @@ -1131,6 +1132,7 @@ static int binlog_rollback(THD *thd, bool all) if (unlikely(thd->options & OPTION_STATUS_NO_TRANS_UPDATE)) { Query_log_event qev(thd, STRING_WITH_LEN("ROLLBACK"), TRUE, FALSE); + qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE) error= binlog_end_trans(thd, trx_data, &qev); } else @@ -3037,7 +3039,9 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event) Imagine this is rollback due to net timeout, after all statements of the transaction succeeded. Then we want a zero-error code in BEGIN. In other words, if there was a really serious error code it's already - in the statement's events. 
+ in the statement's events, there is no need to put it also in this + internally generated event, and as this event is generated late it + would lead to false alarms. This is safer than thd->clear_error() against kills at shutdown. */ qinfo.error_code= 0; diff --git a/sql/log_event.cc b/sql/log_event.cc index 944190c6d20..086f3b5503c 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -28,7 +28,6 @@ #endif /* MYSQL_CLIENT */ #include <base64.h> #include <my_bitmap.h> -#include <my_vle.h> #define log_cs &my_charset_latin1 @@ -5134,7 +5133,8 @@ Rows_log_event::Rows_log_event(const char *buf, uint event_len, byte const *const var_start= (const byte *)buf + common_header_len + post_header_len; byte const *const ptr_width= var_start; - byte const *const ptr_after_width= my_vle_decode(&m_width, ptr_width); + uchar *ptr_after_width= (uchar*) ptr_width; + m_width = net_field_length(&ptr_after_width); const uint byte_count= (m_width + 7) / 8; const byte* const ptr_rows_data= var_start + byte_count + 1; @@ -5588,13 +5588,13 @@ bool Rows_log_event::write_data_body(IO_CACHE*file) Note that this should be the number of *bits*, not the number of bytes. */ - byte sbuf[my_vle_sizeof(m_width)]; + byte sbuf[sizeof(m_width)]; my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf; - char *const sbuf_end= (char *const)my_vle_encode(sbuf, sizeof(sbuf), m_width); - DBUG_ASSERT(static_cast<my_size_t>(sbuf_end - (char *const)sbuf) <= sizeof(sbuf)); + char *const sbuf_end= net_store_length(sbuf, (uint) m_width); + DBUG_ASSERT(static_cast<my_size_t>(sbuf_end - (char*) sbuf) <= sizeof(sbuf)); - return (my_b_safe_write(file, sbuf, sbuf_end - (char *const)sbuf) || + return (my_b_safe_write(file, sbuf, sbuf_end - (char*) sbuf) || my_b_safe_write(file, reinterpret_cast<byte*>(m_cols.bitmap), no_bytes_in_map(&m_cols)) || my_b_safe_write(file, m_rows_buf, data_size)); @@ -5720,7 +5720,8 @@ Table_map_log_event::Table_map_log_event(const char *buf, uint event_len, /* Length of table name + counter + terminating null */ byte const* const ptr_colcnt= ptr_tbllen + m_tbllen + 2; - byte const* const ptr_after_colcnt= my_vle_decode(&m_colcnt, ptr_colcnt); + uchar *ptr_after_colcnt= (uchar*) ptr_colcnt; + m_colcnt= net_field_length(&ptr_after_colcnt); DBUG_PRINT("info",("m_dblen=%d off=%d m_tbllen=%d off=%d m_colcnt=%d off=%d", m_dblen, ptr_dblen-(const byte*)vpart, @@ -5989,9 +5990,9 @@ bool Table_map_log_event::write_data_body(IO_CACHE *file) byte const dbuf[]= { m_dblen }; byte const tbuf[]= { m_tbllen }; - byte cbuf[my_vle_sizeof(m_colcnt)]; - byte *const cbuf_end= my_vle_encode(cbuf, sizeof(cbuf), m_colcnt); - DBUG_ASSERT(static_cast<my_size_t>(cbuf_end - cbuf) <= sizeof(cbuf)); + byte cbuf[sizeof(m_colcnt)]; + char *const cbuf_end= net_store_length(cbuf, (uint) m_colcnt); + DBUG_ASSERT(static_cast<my_size_t>(cbuf_end - (char*) cbuf) <= sizeof(cbuf)); return (my_b_safe_write(file, dbuf, sizeof(dbuf)) || my_b_safe_write(file, (const byte*)m_dbnam, m_dblen+1) || diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 70fca42da73..9780b26faad 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -657,7 +657,7 @@ bool table_cache_init(void); void table_cache_free(void); bool table_def_init(void); void table_def_free(void); -void assign_new_table_id(TABLE *table); +void assign_new_table_id(TABLE_SHARE *share); uint cached_open_tables(void); uint cached_table_definitions(void); void kill_mysql(void); diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6e70265c8bc..e1c8ed966ee 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ 
-5228,8 +5228,8 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, */ {"log-bin-trust-function-creators", OPT_LOG_BIN_TRUST_FUNCTION_CREATORS, "If equal to 0 (the default), then when --log-bin is used, creation of " - "a function (a trigger) is allowed only to users having the SUPER privilege " - "and only if this function (trigger) may not break binary logging." + "a stored function (or trigger) is allowed only to users having the SUPER privilege " + "and only if this stored function (trigger) may not break binary logging." #ifdef HAVE_ROW_BASED_REPLICATION " If using --binlog-format=row, the security issues do not exist and the " "binary logging cannot break so this option is automatically set to 1." diff --git a/sql/opt_range.cc b/sql/opt_range.cc index c514326de8d..cb1f1a68763 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1951,9 +1951,12 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, read_time= (double) HA_POS_ERROR; goto free_mem; } - if (tree->type != SEL_TREE::KEY && - tree->type != SEL_TREE::KEY_SMALLER) - goto free_mem; + /* + If the tree can't be used for range scans, proceed anyway, as we + can construct a group-min-max quick select + */ + if (tree->type != SEL_TREE::KEY && tree->type != SEL_TREE::KEY_SMALLER) + tree= NULL; } } diff --git a/sql/set_var.cc b/sql/set_var.cc index c9feea49618..12f3a61aa4e 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -142,6 +142,7 @@ static bool set_log_update(THD *thd, set_var *var); static int check_pseudo_thread_id(THD *thd, set_var *var); static bool set_log_bin(THD *thd, set_var *var); static void fix_low_priority_updates(THD *thd, enum_var_type type); +static int check_tx_isolation(THD *thd, set_var *var); static void fix_tx_isolation(THD *thd, enum_var_type type); static int check_completion_type(THD *thd, set_var *var); static void fix_completion_type(THD *thd, enum_var_type type); @@ -449,7 +450,8 @@ sys_var_long_ptr sys_thread_cache_size("thread_cache_size", sys_var_thd_enum sys_tx_isolation("tx_isolation", &SV::tx_isolation, &tx_isolation_typelib, - fix_tx_isolation); + fix_tx_isolation, + check_tx_isolation); sys_var_thd_ulong sys_tmp_table_size("tmp_table_size", &SV::tmp_table_size); sys_var_bool_ptr sys_timed_mutexes("timed_mutexes", @@ -1127,10 +1129,23 @@ static void fix_max_join_size(THD *thd, enum_var_type type) /* + Can't change the 'next' tx_isolation while we are already in + a transaction +*/ +static int check_tx_isolation(THD *thd, set_var *var) +{ + if (var->type == OPT_DEFAULT && (thd->server_status & SERVER_STATUS_IN_TRANS)) + { + my_error(ER_CANT_CHANGE_TX_ISOLATION, MYF(0)); + return 1; + } + return 0; +} + +/* If one doesn't use the SESSION modifier, the isolation level is only active for the next command */ - static void fix_tx_isolation(THD *thd, enum_var_type type) { if (type == OPT_SESSION) @@ -1606,7 +1621,12 @@ bool sys_var::check_set(THD *thd, set_var *var, TYPELIB *enum_names) else { ulonglong tmp= var->value->val_int(); - if (tmp >= enum_names->count) + /* + For when the enum is made to contain 64 elements, as 1ULL<<64 is + undefined, we guard with a "count<64" test. 
+ */ + if (unlikely((tmp >= ((ULL(1)) << enum_names->count)) && + (enum_names->count < 64))) { llstr(tmp, buff); goto err; diff --git a/sql/set_var.h b/sql/set_var.h index 75c36176f91..0961f6d4325 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -334,19 +334,31 @@ class sys_var_thd_enum :public sys_var_thd protected: ulong SV::*offset; TYPELIB *enum_names; + sys_check_func check_func; public: sys_var_thd_enum(const char *name_arg, ulong SV::*offset_arg, TYPELIB *typelib) - :sys_var_thd(name_arg), offset(offset_arg), enum_names(typelib) + :sys_var_thd(name_arg), offset(offset_arg), enum_names(typelib), + check_func(0) {} sys_var_thd_enum(const char *name_arg, ulong SV::*offset_arg, TYPELIB *typelib, sys_after_update_func func) - :sys_var_thd(name_arg,func), offset(offset_arg), enum_names(typelib) + :sys_var_thd(name_arg,func), offset(offset_arg), enum_names(typelib), + check_func(0) + {} + sys_var_thd_enum(const char *name_arg, ulong SV::*offset_arg, + TYPELIB *typelib, sys_after_update_func func, + sys_check_func check) + :sys_var_thd(name_arg,func), offset(offset_arg), enum_names(typelib), + check_func(check) {} bool check(THD *thd, set_var *var) { - return check_enum(thd, var, enum_names); + int ret= 0; + if (check_func) + ret= (*check_func)(thd, var); + return ret ? ret : check_enum(thd, var, enum_names); } bool update(THD *thd, set_var *var); void set_default(THD *thd, enum_var_type type); diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index dafe653ae1e..35ff2e4222b 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -5804,3 +5804,12 @@ ER_FOREIGN_DUPLICATE_KEY 23000 S1009 eng "Upholding foreign key constraints for table '%.64s', entry '%-.64s', key %d would lead to a duplicate entry" ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE eng "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use scripts/mysql_fix_privilege_tables" +ER_TABLE_NEEDS_UPGRADE + eng "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" to fix it!" +ER_ILLEGAL_HA_CREATE_OPTION + eng "Table storage engine '%-.64s' does not support the create option '%.64s'" +ER_CANT_CHANGE_TX_ISOLATION 25001 + eng "Transaction isolation level can't be changed while a transaction is in progress" +ER_WARN_DEPRECATED_STATEMENT + eng "The '%s' statement is deprecated and will be removed in MySQL %s. Please use client programs (e.g. %s) instead." + diff --git a/sql/slave.cc b/sql/slave.cc index 57fc8534f8f..9429937c303 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1346,7 +1346,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, save_vio = thd->net.vio; thd->net.vio = 0; /* Rebuild the index file from the copied data file (with REPAIR) */ - error=file->repair(thd,&check_opt) != 0; + error=file->ha_repair(thd,&check_opt) != 0; thd->net.vio = save_vio; if (error) my_error(ER_INDEX_REBUILD, MYF(0), tables.table->s->table_name.str); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 301d88c8775..1f29468a61f 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -749,12 +749,6 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b) written into binary log. Instead we catch function calls the statement makes and write it into binary log separately (see #3). 
-  We actually can easily write SELECT statements into the binary log in the
-  right order (we don't have issues with const tables being unlocked early
-  because SELECTs that use FUNCTIONs unlock all tables at once) We don't do
-  it because replication slave thread currently can't execute SELECT
-  statements. Fixing this is on the TODO.
-
   2. PROCEDURE calls
 
   CALL statements are not written into binary log. Instead
@@ -775,7 +769,7 @@ int cmp_splocal_locations(Item_splocal * const *a, Item_splocal * const *b)
   function execution (grep for start_union_events and stop_union_events)
 
   If the answers are No and Yes, we write the function call into the binary
-  log as "DO spfunc(<param1value>, <param2value>, ...)"
+  log as "SELECT spfunc(<param1value>, <param2value>, ...)"
 
   4. Miscellaneous issues.
@@ -1327,7 +1321,7 @@ sp_head::execute_function(THD *thd, Item **argp, uint argcount,
     char buf[256];
     String bufstr(buf, sizeof(buf), &my_charset_bin);
     bufstr.length(0);
-    bufstr.append(STRING_WITH_LEN("DO "));
+    bufstr.append(STRING_WITH_LEN("SELECT "));
     append_identifier(thd, &bufstr, m_name.str, m_name.length);
     bufstr.append('(');
     for (uint i=0; i < argcount; i++)
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index d32c5ea08d4..fc84cb1f22c 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -471,10 +471,10 @@ static my_bool acl_load(THD *thd, TABLE_LIST *tables)
 
     /*
-      if it is pre 5.1.4 privilege table then map CREATE privilege on
+      if it is pre 5.1.6 privilege table then map CREATE privilege on
       CREATE|ALTER|DROP|EXECUTE EVENT
     */
-    if (table->s->fields <= 37 && (user.access & CREATE_ACL))
+    if (table->s->fields <= 37 && (user.access & SUPER_ACL))
       user.access|= EVENT_ACL;
 
     /*
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 98dff78a09a..94960b488f7 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -313,6 +313,22 @@ TABLE_SHARE *get_table_share(THD *thd, TABLE_LIST *table_list, char *key,
     conflicts
   */
   (void) pthread_mutex_lock(&share->mutex);
+
+  /*
+    We assign a new table id under the protection of the LOCK_open and
+    the share's own mutex. We do this instead of creating a new mutex
+    and using it for the sole purpose of serializing accesses to a
+    static variable, we assign the table id here. We assign it to the
+    share before inserting it into the table_def_cache to be really
+    sure that it cannot be read from the cache without having a table
+    id assigned.
+
+    CAVEAT. This means that the table cannot be used for
+    binlogging/replication purposes, unless get_table_share() has been
+    called directly or indirectly.
+  */
+  assign_new_table_id(share);
+
   if (my_hash_insert(&table_def_cache, (byte*) share))
   {
 #ifdef NOT_YET
@@ -2381,43 +2397,50 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name)
 
 /*
-  Function to assign a new table map id to a table.
+  Function to assign a new table map id to a table share.
 
   PARAMETERS
-    table - Pointer to table structure
+    share - Pointer to table share structure
 
   PRE-CONDITION(S)
 
-    table is non-NULL
+    share is non-NULL
     The LOCK_open mutex is locked
+    The share->mutex is locked
 
   POST-CONDITION(S)
 
-    table->s->table_map_id is given a value that with a high certainty
-    is not used by any other table.
+    share->table_map_id is given a value that with a high certainty is
+    not used by any other table (the only case where a table id can be
+    reused is on wrap-around, which means more than 4 billion table
+    shares open at the same time).
 
-    table->s->table_map_id is not ULONG_MAX.
 */
-void assign_new_table_id(TABLE *table)
+void assign_new_table_id(TABLE_SHARE *share)
 {
   static ulong last_table_id= ULONG_MAX;
-  DBUG_ENTER("assign_new_table_id(TABLE*)");
+  DBUG_ENTER("assign_new_table_id");
 
   /* Preconditions */
-  DBUG_ASSERT(table != NULL);
+  DBUG_ASSERT(share != NULL);
   safe_mutex_assert_owner(&LOCK_open);
+  safe_mutex_assert_owner(&share->mutex);
 
   ulong tid= ++last_table_id;                   /* get next id */
-  /* There is one reserved number that cannot be used. */
+  /*
+    There is one reserved number that cannot be used. Remember to
+    change this when 6-byte global table id's are introduced.
+  */
   if (unlikely(tid == ULONG_MAX))
     tid= ++last_table_id;
-  table->s->table_map_id= tid;
+  share->table_map_id= tid;
   DBUG_PRINT("info", ("table_id=%lu", tid));
 
   /* Post conditions */
-  DBUG_ASSERT(table->s->table_map_id != ULONG_MAX);
+  DBUG_ASSERT(share->table_map_id != ULONG_MAX);
 
   DBUG_VOID_RETURN;
 }
@@ -2571,20 +2594,6 @@ retry:
       break;
     }
 
-    /*
-      We assign a new table id under the protection of the LOCK_open
-      mutex. We assign a new table id here instead of inside openfrm()
-      since that function can be used without acquiring any lock (e.g.,
-      inside ha_create_table()). Insted of creatint a new mutex and
-      using it for the sole purpose of serializing accesses to a static
-      variable, we assign the table id here.
-
-      CAVEAT. This means that the table cannot be used for
-      binlogging/replication purposes, unless open_table() has been called
-      directly or indirectly.
-    */
-    assign_new_table_id(entry);
-
     if (Table_triggers_list::check_n_load(thd, share->db.str,
                                           share->table_name.str, entry, 0))
     {
diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc
index b84eb1cfcb8..e4bbcfc5074 100644
--- a/sql/sql_parse.cc
+++ b/sql/sql_parse.cc
@@ -3715,7 +3715,7 @@ end_with_restore_list:
     }
     if (!strip_sp(db) || check_db_name(db))
     {
-      my_error(ER_WRONG_DB_NAME, MYF(0), lex->name);
+      my_error(ER_WRONG_DB_NAME, MYF(0), db);
       break;
     }
     /*
@@ -3727,8 +3727,8 @@ end_with_restore_list:
     */
 #ifdef HAVE_REPLICATION
     if (thd->slave_thread &&
-        (!rpl_filter->db_ok(lex->name) ||
-         !rpl_filter->db_ok_with_wild_table(lex->name)))
+        (!rpl_filter->db_ok(db) ||
+         !rpl_filter->db_ok_with_wild_table(db)))
     {
       my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0));
       break;
diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc
index 199cf4a6264..2046596c428 100644
--- a/sql/sql_plugin.cc
+++ b/sql/sql_plugin.cc
@@ -526,15 +526,8 @@ static int plugin_initialize(struct st_plugin_int *plugin)
   switch (plugin->plugin->type)
   {
   case MYSQL_STORAGE_ENGINE_PLUGIN:
-    if (ha_initialize_handlerton((handlerton*) plugin->plugin->info))
-    {
-      sql_print_error("Plugin '%s' handlerton init returned error.",
-                      plugin->name.str);
-      DBUG_PRINT("warning", ("Plugin '%s' handlerton init returned error.",
-                             plugin->name.str));
-      goto err;
-    }
-    break;
+    sql_print_error("Storage Engine plugins are unsupported in this version.");
+    goto err;
   default:
     break;
   }
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 5d01980025e..7695001cd67 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -364,22 +364,8 @@ JOIN::prepare(Item ***rref_pointer_array,
     select_lex->having_fix_field= 0;
     if (having_fix_rc || thd->net.report_error)
       DBUG_RETURN(-1);                          /* purecov: inspected */
-    if (having->with_sum_func)
-      having->split_sum_func2(thd, ref_pointer_array, all_fields,
-                              &having, TRUE);
     thd->lex->allow_sum_func= save_allow_sum_func;
   }
-  if (select_lex->inner_sum_func_list)
-  {
-    Item_sum *end=select_lex->inner_sum_func_list;
-    Item_sum *item_sum= end;
-    do
-    {
-      item_sum= item_sum->next;
-      item_sum->split_sum_func2(thd, ref_pointer_array,
-                                all_fields, item_sum->ref_by, FALSE);
-    } while (item_sum != end);
-  }
 
   if (!thd->lex->view_prepare_mode)
   {
@@ -397,6 +383,21 @@ JOIN::prepare(Item ***rref_pointer_array,
     }
   }
 
+  if (having && having->with_sum_func)
+    having->split_sum_func2(thd, ref_pointer_array, all_fields,
+                            &having, TRUE);
+  if (select_lex->inner_sum_func_list)
+  {
+    Item_sum *end=select_lex->inner_sum_func_list;
+    Item_sum *item_sum= end;
+    do
+    {
+      item_sum= item_sum->next;
+      item_sum->split_sum_func2(thd, ref_pointer_array,
+                                all_fields, item_sum->ref_by, FALSE);
+    } while (item_sum != end);
+  }
+
   if (setup_ftfuncs(select_lex))    /* should be after having->fix_fields */
     DBUG_RETURN(-1);
diff --git a/sql/sql_show.cc b/sql/sql_show.cc
index 981dd3d4915..5b25141ee28 100644
--- a/sql/sql_show.cc
+++ b/sql/sql_show.cc
@@ -1058,15 +1058,20 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet,
     if (i == primary_key && !strcmp(key_info->name, primary_key_name))
     {
       found_primary=1;
-      packet->append(STRING_WITH_LEN("PRIMARY "));
+      /*
+        No space at end, because a space will be added after where the
+        identifier would go, but that is not added for primary key.
+      */
+      packet->append(STRING_WITH_LEN("PRIMARY KEY"));
     }
     else if (key_info->flags & HA_NOSAME)
-      packet->append(STRING_WITH_LEN("UNIQUE "));
+      packet->append(STRING_WITH_LEN("UNIQUE KEY "));
     else if (key_info->flags & HA_FULLTEXT)
-      packet->append(STRING_WITH_LEN("FULLTEXT "));
+      packet->append(STRING_WITH_LEN("FULLTEXT KEY "));
     else if (key_info->flags & HA_SPATIAL)
-      packet->append(STRING_WITH_LEN("SPATIAL "));
-    packet->append(STRING_WITH_LEN("KEY "));
+      packet->append(STRING_WITH_LEN("SPATIAL KEY "));
+    else
+      packet->append(STRING_WITH_LEN("KEY "));
 
     if (!found_primary)
       append_identifier(thd, packet, key_info->name, strlen(key_info->name));
@@ -1519,6 +1524,119 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose)
   DBUG_VOID_RETURN;
 }
 
+int fill_schema_processlist(THD* thd, TABLE_LIST* tables, COND* cond)
+{
+  TABLE *table= tables->table;
+  CHARSET_INFO *cs= system_charset_info;
+  char *user;
+  bool verbose;
+  ulong max_query_length;
+  time_t now= time(0);
+  DBUG_ENTER("fill_process_list");
+
+  user= thd->security_ctx->master_access & PROCESS_ACL ?
+        NullS : thd->security_ctx->priv_user;
+  verbose= thd->lex->verbose;
+  max_query_length= PROCESS_LIST_WIDTH;
+
+  VOID(pthread_mutex_lock(&LOCK_thread_count));
+
+  if (!thd->killed)
+  {
+    I_List_iterator<THD> it(threads);
+    THD* tmp;
+
+    while ((tmp= it++))
+    {
+      Security_context *tmp_sctx= tmp->security_ctx;
+      struct st_my_thread_var *mysys_var;
+      const char *val;
+
+      if ((!tmp->vio_ok() && !tmp->system_thread) ||
+          (user && (!tmp_sctx->user || strcmp(tmp_sctx->user, user))))
+        continue;
+
+      restore_record(table, s->default_values);
+      /* ID */
+      table->field[0]->store((longlong) tmp->thread_id, TRUE);
+      /* USER */
+      val= tmp_sctx->user ? tmp_sctx->user :
+           (tmp->system_thread ?
"system user" : "unauthenticated user"); + table->field[1]->store(val, strlen(val), cs); + /* HOST */ + if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) && + thd->security_ctx->host_or_ip[0]) + { + char host[LIST_PROCESS_HOST_LEN + 1]; + my_snprintf(host, LIST_PROCESS_HOST_LEN, "%s:%u", + tmp_sctx->host_or_ip, tmp->peer_port); + table->field[2]->store(host, strlen(host), cs); + } + else + table->field[2]->store(tmp_sctx->host_or_ip, + strlen(tmp_sctx->host_or_ip), cs); + /* DB */ + if (tmp->db) + { + table->field[3]->store(tmp->db, strlen(tmp->db), cs); + table->field[3]->set_notnull(); + } + + if ((mysys_var= tmp->mysys_var)) + pthread_mutex_lock(&mysys_var->mutex); + /* COMMAND */ + if ((val= (char *) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0))) + table->field[4]->store(val, strlen(val), cs); + else + table->field[4]->store(command_name[tmp->command].str, + command_name[tmp->command].length, cs); + /* TIME */ + table->field[5]->store((uint32)(tmp->start_time ? + now - tmp->start_time : 0), TRUE); + /* STATE */ +#ifndef EMBEDDED_LIBRARY + val= (char*) (tmp->locked ? "Locked" : + tmp->net.reading_or_writing ? + (tmp->net.reading_or_writing == 2 ? + "Writing to net" : + tmp->command == COM_SLEEP ? "" : + "Reading from net") : + tmp->proc_info ? tmp->proc_info : + tmp->mysys_var && + tmp->mysys_var->current_cond ? + "Waiting on cond" : NullS); +#else + val= (char *) "Writing to net"; +#endif + if (val) + { + table->field[6]->store(val, strlen(val), cs); + table->field[6]->set_notnull(); + } + + if (mysys_var) + pthread_mutex_unlock(&mysys_var->mutex); + + /* INFO */ + if (tmp->query) + { + table->field[7]->store(tmp->query, + min(max_query_length, tmp->query_length), cs); + table->field[7]->set_notnull(); + } + + if (schema_table_store_record(thd, table)) + { + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + DBUG_RETURN(1); + } + } + } + + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + DBUG_RETURN(0); +} + /***************************************************************************** Status functions *****************************************************************************/ @@ -3807,6 +3925,7 @@ static interval_type get_real_interval_type(interval_type i_type) return INTERVAL_SECOND; } +extern LEX_STRING interval_type_to_name[]; static int fill_events_copy_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) @@ -3835,8 +3954,16 @@ fill_events_copy_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) sch_table->field[3]->store(et.definer.str, et.definer.length, scs); sch_table->field[4]->store(et.body.str, et.body.length, scs); - // [9] is SQL_MODE and is NULL for now, will be fixed later - sch_table->field[9]->set_null(); + // [9] is SQL_MODE + { + byte *sql_mode_str; + ulong sql_mode_len=0; + sql_mode_str= + sys_var_thd_sql_mode::symbolic_mode_representation(thd, et.sql_mode, + &sql_mode_len); + sch_table->field[9]->store((const char*)sql_mode_str, sql_mode_len, scs); + } + if (et.expression) { String show_str; @@ -3845,14 +3972,16 @@ fill_events_copy_to_schema_table(THD *thd, TABLE *sch_table, TABLE *event_table) //execute_at sch_table->field[6]->set_null(); //interval_value - sch_table->field[7]->set_notnull(); - sch_table->field[7]->store((longlong) et.expression); //interval_type if (event_reconstruct_interval_expression(&show_str, et.interval, et.expression)) DBUG_RETURN(1); + sch_table->field[7]->set_notnull(); + sch_table->field[7]->store(show_str.c_ptr(), show_str.length(), scs); + + LEX_STRING *ival= &interval_type_to_name[et.interval]; 
     sch_table->field[8]->set_notnull();
-    sch_table->field[8]->store(show_str.c_ptr(), show_str.length(), scs);
+    sch_table->field[8]->store(ival->str, ival->length, scs);
 
     //starts & ends
     sch_table->field[10]->set_notnull();
     sch_table->field[10]->store_time(&et.starts, MYSQL_TIMESTAMP_DATETIME);
@@ -4679,9 +4808,9 @@ ST_FIELD_INFO events_fields_info[]=
   {"EVENT_BODY", 65535, MYSQL_TYPE_STRING, 0, 0, 0},
   {"EVENT_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type"},
   {"EXECUTE_AT", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Execute at"},
-  {"INTERVAL_VALUE", 11, MYSQL_TYPE_LONG, 0, 1, "Interval value"},
+  {"INTERVAL_VALUE", 256, MYSQL_TYPE_STRING, 0, 1, "Interval value"},
   {"INTERVAL_FIELD", 18, MYSQL_TYPE_STRING, 0, 1, "Interval field"},
-  {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 1, 0},
+  {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0},
   {"STARTS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Starts"},
   {"ENDS", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Ends"},
   {"STATUS", 8, MYSQL_TYPE_STRING, 0, 0, "Status"},
@@ -4924,6 +5053,20 @@ ST_FIELD_INFO variables_fields_info[]=
 };
 
+ST_FIELD_INFO processlist_fields_info[]=
+{
+  {"ID", 4, MYSQL_TYPE_LONG, 0, 0, "Id"},
+  {"USER", 16, MYSQL_TYPE_STRING, 0, 0, "User"},
+  {"HOST", LIST_PROCESS_HOST_LEN, MYSQL_TYPE_STRING, 0, 0, "Host"},
+  {"DB", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Db"},
+  {"COMMAND", 16, MYSQL_TYPE_STRING, 0, 0, "Command"},
+  {"TIME", 4, MYSQL_TYPE_LONG, 0, 0, "Time"},
+  {"STATE", 30, MYSQL_TYPE_STRING, 0, 1, "State"},
+  {"INFO", PROCESS_LIST_WIDTH, MYSQL_TYPE_STRING, 0, 1, "Info"},
+  {0, 0, MYSQL_TYPE_STRING, 0, 0, 0}
+};
+
+
 ST_FIELD_INFO plugin_fields_info[]=
 {
   {"PLUGIN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"},
@@ -5014,6 +5157,8 @@ ST_SCHEMA_TABLE schema_tables[]=
    get_all_tables, 0, get_schema_partitions_record, 1, 2, 0},
   {"PLUGINS", plugin_fields_info, create_schema_table,
    fill_plugins, make_old_format, 0, -1, -1, 0},
+  {"PROCESSLIST", processlist_fields_info, create_schema_table,
+   fill_schema_processlist, make_old_format, 0, -1, -1, 0},
   {"ROUTINES", proc_fields_info, create_schema_table,
    fill_schema_proc, make_proc_old_format, 0, -1, -1, 0},
   {"SCHEMATA", schema_fields_info, create_schema_table,
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index face6425b9a..dbed6e82aa3 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -42,7 +42,7 @@ static int copy_data_between_tables(TABLE *from,TABLE *to,
                                     ha_rows *copied,ha_rows *deleted);
 static bool prepare_blob_field(THD *thd, create_field *sql_field);
 static bool check_engine(THD *thd, const char *table_name,
-                         handlerton **new_engine);
+                         HA_CREATE_INFO *create_info);
 static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
                                List<create_field> *fields,
                                List<Key> *keys, bool tmp_table,
@@ -50,11 +50,6 @@ static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info,
                                handler *file, KEY **key_info_buffer,
                                uint *key_count, int select_field_count);
 
-static int mysql_copy_create_lists(List<create_field> *orig_create_list,
-                                   List<Key> *orig_key,
-                                   List<create_field> *new_create_list,
-                                   List<Key> *new_key);
-
 #define MYSQL50_TABLE_NAME_PREFIX "#mysql50#"
 #define MYSQL50_TABLE_NAME_PREFIX_LENGTH 9
 
@@ -164,8 +159,7 @@ uint build_tmptable_filename(THD* thd, char *buff, size_t bufflen)
 */
 
 static int mysql_copy_create_list(List<create_field> *orig_create_list,
-
-                                  List<create_field> *new_create_list)
+                                  List<create_field> *new_create_list)
 {
   List_iterator<create_field> prep_field_it(*orig_create_list);
   create_field *prep_field;
@@ -205,7 +199,7 @@ static int mysql_copy_key_list(List<Key> *orig_key,
 {
   List_iterator<Key> prep_key_it(*orig_key);
   Key *prep_key;
-  DBUG_ENTER("mysql_copy_create_lists");
+  DBUG_ENTER("mysql_copy_key_list");
 
   while ((prep_key= prep_key_it++))
   {
@@ -217,7 +211,8 @@ static int mysql_copy_key_list(List<Key> *orig_key,
     while ((prep_col= prep_col_it++))
     {
       key_part_spec *prep_key_part;
-      if (prep_key_part= new key_part_spec(*prep_col))
+
+      if (!(prep_key_part= new key_part_spec(*prep_col)))
       {
         mem_alloc_error(sizeof(key_part_spec));
         DBUG_RETURN(TRUE);
@@ -228,11 +223,11 @@ static int mysql_copy_key_list(List<Key> *orig_key,
         DBUG_RETURN(TRUE);
       }
     }
-    if ((temp_key= new Key(prep_key->type, prep_key->name,
-                           prep_key->algorithm,
-                           prep_key->generated,
-                           prep_columns,
-                           prep_key->parser_name)))
+    if (!(temp_key= new Key(prep_key->type, prep_key->name,
+                            prep_key->algorithm,
+                            prep_key->generated,
+                            prep_columns,
+                            prep_key->parser_name)))
     {
       mem_alloc_error(sizeof(Key));
       DBUG_RETURN(TRUE);
@@ -2021,7 +2016,7 @@ bool mysql_create_table_internal(THD *thd,
              MYF(0));
     DBUG_RETURN(TRUE);
   }
-  if (check_engine(thd, table_name, &create_info->db_type))
+  if (check_engine(thd, table_name, create_info))
     DBUG_RETURN(TRUE);
   db_options= create_info->table_options;
   if (create_info->row_type == ROW_TYPE_DYNAMIC)
@@ -2148,22 +2143,6 @@ bool mysql_create_table_internal(THD *thd,
   }
 #endif
 
-#ifdef NOT_USED
-  /*
-    if there is a technical reason for a handler not to have support
-    for temp. tables this code can be re-enabled.
-    Otherwise, if a handler author has a wish to prohibit usage of
-    temporary tables for his handler he should implement a check in
-    ::create() method
-  */
-  if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) &&
-      (file->table_flags() & HA_NO_TEMP_TABLES))
-  {
-    my_error(ER_ILLEGAL_HA, MYF(0), table_name);
-    goto err;
-  }
-#endif
-
   set_table_default_charset(thd, create_info, (char*) db);
 
   if (mysql_prepare_table(thd, create_info, &fields,
@@ -2994,7 +2973,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
       open_for_modify= 0;
     }
 
-    if (table->table->s->crashed && operator_func == &handler::check)
+    if (table->table->s->crashed && operator_func == &handler::ha_check)
     {
       protocol->prepare_for_resend();
       protocol->store(table_name, system_charset_info);
@@ -3006,6 +2985,21 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables,
       goto err;
     }
 
+    if (operator_func == &handler::ha_repair)
+    {
+      if ((table->table->file->check_old_types() == HA_ADMIN_NEEDS_ALTER) ||
+          (table->table->file->ha_check_for_upgrade(check_opt) ==
+           HA_ADMIN_NEEDS_ALTER))
+      {
+        close_thread_tables(thd);
+        tmp_disable_binlog(thd); // binlogging is done by caller if wanted
+        result_code= mysql_recreate_table(thd, table, 0);
+        reenable_binlog(thd);
+        goto send_result;
+      }
+
+    }
+
    result_code = (table->table->file->*operator_func)(thd, check_opt);
 
 send_result:
@@ -3132,6 +3126,19 @@ send_result_message:
       break;
     }
 
+    case HA_ADMIN_NEEDS_UPGRADE:
+    case HA_ADMIN_NEEDS_ALTER:
+    {
+      char buf[ERRMSGSIZE];
+      uint length;
+
+      protocol->store(STRING_WITH_LEN("error"), system_charset_info);
+      length=my_snprintf(buf, ERRMSGSIZE, ER(ER_TABLE_NEEDS_UPGRADE), table->table_name);
+      protocol->store(buf, length, system_charset_info);
+      fatal_error=1;
+      break;
+    }
+
     default:				// Probably HA_ADMIN_INTERNAL_ERROR
       {
         char buf[ERRMSGSIZE+20];
@@ -3202,7 +3209,7 @@ bool mysql_repair_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt)
                                 test(check_opt->sql_flags & TT_USEFRM),
                                 HA_OPEN_FOR_REPAIR,
                                 &prepare_for_repair,
-                                &handler::repair, 0));
+                                &handler::ha_repair, 0));
 }
 
@@ -3575,7 +3582,7 @@ bool
 mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt)
   DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, "check", lock_type,
                                 0, HA_OPEN_FOR_REPAIR, 0, 0,
-                                &handler::check, &view_checksum));
+                                &handler::ha_check, &view_checksum));
 }
 
@@ -3984,7 +3991,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
     DBUG_RETURN(TRUE);
   }
 #endif
-  if (check_engine(thd, new_name, &create_info->db_type))
+  if (check_engine(thd, new_name, create_info))
     DBUG_RETURN(TRUE);
   new_db_type= create_info->db_type;
   if (create_info->row_type == ROW_TYPE_NOT_USED)
@@ -5372,7 +5379,8 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
 	  for (uint i= 0; i < t->s->fields; i++ )
 	  {
 	    Field *f= t->field[i];
-	    if (f->type() == FIELD_TYPE_BLOB)
+	    if ((f->type() == FIELD_TYPE_BLOB) ||
+                (f->type() == MYSQL_TYPE_VARCHAR))
 	    {
 	      String tmp;
 	      f->val_str(&tmp);
@@ -5408,8 +5416,9 @@ bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt)
 }
 
 static bool check_engine(THD *thd, const char *table_name,
-                         handlerton **new_engine)
+                         HA_CREATE_INFO *create_info)
 {
+  handlerton **new_engine= &create_info->db_type;
   handlerton *req_engine= *new_engine;
   bool no_substitution=
         test(thd->variables.sql_mode & MODE_NO_ENGINE_SUBSTITUTION);
@@ -5425,5 +5434,16 @@ static bool check_engine(THD *thd, const char *table_name,
                        ha_resolve_storage_engine_name(*new_engine),
                        table_name);
   }
+  if (create_info->options & HA_LEX_CREATE_TMP_TABLE &&
+      ha_check_storage_engine_flag(*new_engine, HTON_TEMPORARY_NOT_SUPPORTED))
+  {
+    if (create_info->used_fields & HA_CREATE_USED_ENGINE)
+    {
+      my_error(ER_ILLEGAL_HA_CREATE_OPTION, MYF(0), (*new_engine)->name, "TEMPORARY");
+      *new_engine= 0;
+      return TRUE;
+    }
+    *new_engine= &myisam_hton;
+  }
   return FALSE;
 }
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 125d306e218..2c7f17a7a99 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -665,6 +665,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize);
 %token  UNSIGNED
 %token  UNTIL_SYM
 %token  UPDATE_SYM
+%token  UPGRADE_SYM
 %token  USAGE
 %token  USER
 %token  USE_FRM
@@ -5349,6 +5350,11 @@ restore:
 	RESTORE_SYM table_or_tables
 	{
 	   Lex->sql_command = SQLCOM_RESTORE_TABLE;
+	   push_warning_printf(((THD *)yythd), MYSQL_ERROR::WARN_LEVEL_WARN,
+	                       ER_WARN_DEPRECATED_STATEMENT,
+	                       ER(ER_WARN_DEPRECATED_STATEMENT),
+	                       "RESTORE TABLE", "5.2",
+	                       "mysqldump, mysql, MySQL Administrator");
 	}
 	table_list FROM TEXT_STRING_sys
         {
@@ -5359,6 +5365,11 @@ backup:
 	BACKUP_SYM table_or_tables
 	{
 	   Lex->sql_command = SQLCOM_BACKUP_TABLE;
+	   push_warning_printf(((THD *)yythd), MYSQL_ERROR::WARN_LEVEL_WARN,
+	                       ER_WARN_DEPRECATED_STATEMENT,
+	                       ER(ER_WARN_DEPRECATED_STATEMENT),
+	                       "BACKUP TABLE", "5.2",
+	                       "mysqldump, mysql, MySQL Administrator");
 	}
 	table_list TO_SYM TEXT_STRING_sys
         {
@@ -5456,7 +5467,8 @@ mi_check_type:
 	| FAST_SYM { Lex->check_opt.flags|= T_FAST; }
 	| MEDIUM_SYM { Lex->check_opt.flags|= T_MEDIUM; }
 	| EXTENDED_SYM { Lex->check_opt.flags|= T_EXTEND; }
-	| CHANGED { Lex->check_opt.flags|= T_CHECK_ONLY_CHANGED; };
+	| CHANGED { Lex->check_opt.flags|= T_CHECK_ONLY_CHANGED; }
+	| FOR_SYM UPGRADE_SYM { Lex->check_opt.sql_flags|= TT_FOR_UPGRADE; };
 
 optimize:
 	OPTIMIZE opt_no_write_to_binlog table_or_tables
@@ -8664,7 +8676,12 @@ load:   LOAD DATA_SYM
 	LOAD TABLE_SYM table_ident FROM MASTER_SYM
         {
 	  LEX *lex=Lex;
-	  if (lex->sphead)
+	  push_warning_printf(((THD *)yythd), MYSQL_ERROR::WARN_LEVEL_WARN,
+	                      ER_WARN_DEPRECATED_STATEMENT,
+	                      ER(ER_WARN_DEPRECATED_STATEMENT),
+	                      "LOAD TABLE FROM MASTER", "5.2",
MySQL Administrator"); + if (lex->sphead) { my_error(ER_SP_BADSTATEMENT, MYF(0), "LOAD TABLE"); YYABORT; @@ -9783,8 +9800,7 @@ sys_option_value: | option_type TRANSACTION_SYM ISOLATION LEVEL_SYM isolation_types { LEX *lex=Lex; - if ($1) - lex->option_type= $1; + lex->option_type= $1; lex->var_list.push_back(new set_var(lex->option_type, find_sys_var("tx_isolation"), &null_lex_str, diff --git a/sql/table.cc b/sql/table.cc index 7c266243d29..3e766fe6c0f 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -130,6 +130,24 @@ TABLE_SHARE *alloc_table_share(TABLE_LIST *table_list, char *key, share->version= refresh_version; share->flush_version= flush_version; +#ifdef HAVE_ROW_BASED_REPLICATION + /* + This constant is used to mark that no table map version has been + assigned. No arithmetic is done on the value: it will be + overwritten with a value taken from MYSQL_BIN_LOG. + */ + share->table_map_version= ~(ulonglong)0; + + /* + Since alloc_table_share() can be called without any locking (for + example, ha_create_table... functions), we do not assign a table + map id here. Instead we assign a value that is not used + elsewhere, and then assign a table map id inside open_table() + under the protection of the LOCK_open mutex. + */ + share->table_map_id= ULONG_MAX; +#endif + memcpy((char*) &share->mem_root, (char*) &mem_root, sizeof(mem_root)); pthread_mutex_init(&share->mutex, MY_MUTEX_INIT_FAST); pthread_cond_init(&share->cond, NULL); @@ -180,6 +198,15 @@ void init_tmp_table_share(TABLE_SHARE *share, const char *key, share->path.length= share->normalized_path.length= strlen(path); share->frm_version= FRM_VER_TRUE_VARCHAR; +#ifdef HAVE_ROW_BASED_REPLICATION + /* + Temporary tables are not replicated, but we set up these fields + anyway to be able to catch errors. + */ + share->table_map_version= ~(ulonglong)0; + share->table_map_id= ULONG_MAX; +#endif + DBUG_VOID_RETURN; } @@ -371,6 +398,7 @@ err_not_open: share->error= error; open_table_error(share, error, (share->open_errno= my_errno), 0); } + DBUG_RETURN(error); } @@ -1503,24 +1531,6 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, *root_ptr= old_root; thd->status_var.opened_tables++; -#ifdef HAVE_REPLICATION - - /* - This constant is used to mark that no table map version has been - assigned. No arithmetic is done on the value: it will be - overwritten with a value taken from MYSQL_BIN_LOG. - */ - share->table_map_version= ~(ulonglong)0; - - /* - Since openfrm() can be called without any locking (for example, - ha_create_table... functions), we do not assign a table map id - here. Instead we assign a value that is not used elsewhere, and - then assign a table map id inside open_table() under the - protection of the LOCK_open mutex. - */ - share->table_map_id= ULONG_MAX; -#endif DBUG_RETURN (0); diff --git a/sql/table.h b/sql/table.h index c0e0961f467..b0e0177048d 100644 --- a/sql/table.h +++ b/sql/table.h @@ -344,6 +344,7 @@ enum enum_schema_tables SCH_OPEN_TABLES, SCH_PARTITIONS, SCH_PLUGINS, + SCH_PROCESSLIST, SCH_PROCEDURES, SCH_SCHEMATA, SCH_SCHEMA_PRIVILEGES, |