Diffstat (limited to 'sql')
167 files changed, 52631 insertions, 18676 deletions
diff --git a/sql/Makefile.am b/sql/Makefile.am index 19bdf8055f3..8ff55898ba4 100644 --- a/sql/Makefile.am +++ b/sql/Makefile.am @@ -22,15 +22,14 @@ MYSQLBASEdir= $(prefix) INCLUDES = @MT_INCLUDES@ @ZLIB_INCLUDES@ \ @bdb_includes@ @innodb_includes@ @ndbcluster_includes@ \ -I$(top_srcdir)/include -I$(top_srcdir)/regex \ - -I$(srcdir) $(openssl_includes) + -I$(srcdir) $(openssl_includes) -I$(top_srcdir)/extra WRAPLIBS= @WRAPLIBS@ SUBDIRS = share libexec_PROGRAMS = mysqld noinst_PROGRAMS = gen_lex_hash bin_PROGRAMS = mysql_tzinfo_to_sql gen_lex_hash_LDFLAGS = @NOINST_LDFLAGS@ -LDADD = @isam_libs@ \ - $(top_builddir)/myisam/libmyisam.a \ +LDADD = $(top_builddir)/myisam/libmyisam.a \ $(top_builddir)/myisammrg/libmyisammrg.a \ $(top_builddir)/heap/libheap.a \ $(top_builddir)/vio/libvio.a \ @@ -51,7 +50,7 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ procedure.h sql_class.h sql_lex.h sql_list.h \ sql_manager.h sql_map.h sql_string.h unireg.h \ field.h handler.h mysqld_suffix.h \ - ha_isammrg.h ha_isam.h ha_myisammrg.h\ + ha_myisammrg.h\ ha_heap.h ha_myisam.h ha_berkeley.h ha_innodb.h \ ha_ndbcluster.h opt_range.h protocol.h \ sql_select.h structs.h table.h sql_udf.h hash_filo.h\ @@ -59,8 +58,13 @@ noinst_HEADERS = item.h item_func.h item_sum.h item_cmpfunc.h \ log_event.h sql_repl.h slave.h \ stacktrace.h sql_sort.h sql_cache.h set_var.h \ spatial.h gstream.h client_settings.h tzfile.h \ - tztime.h examples/ha_example.h examples/ha_archive.h \ - examples/ha_tina.h + tztime.h \ + sp_head.h sp_pcontext.h sp_rcontext.h sp.h sp_cache.h \ + parse_file.h sql_view.h sql_trigger.h \ + examples/ha_example.h examples/ha_archive.h \ + examples/ha_tina.h \ + ha_federated.h + mysqld_SOURCES = sql_lex.cc sql_handler.cc \ item.cc item_sum.cc item_buff.cc item_func.cc \ item_cmpfunc.cc item_strfunc.cc item_timefunc.cc \ @@ -82,17 +86,21 @@ mysqld_SOURCES = sql_lex.cc sql_handler.cc \ records.cc filesort.cc handler.cc \ ha_heap.cc ha_myisam.cc ha_myisammrg.cc \ ha_berkeley.cc ha_innodb.cc \ - ha_isam.cc ha_isammrg.cc ha_ndbcluster.cc \ + ha_ndbcluster.cc \ sql_db.cc sql_table.cc sql_rename.cc sql_crypt.cc \ sql_load.cc mf_iocache.cc field_conv.cc sql_show.cc \ sql_udf.cc sql_analyse.cc sql_analyse.h sql_cache.cc \ slave.cc sql_repl.cc sql_union.cc sql_derived.cc \ client.c sql_client.cc mini_client_errors.c pack.c\ stacktrace.c repl_failsafe.h repl_failsafe.cc \ + sql_olap.cc sql_view.cc \ gstream.cc spatial.cc sql_help.cc protocol_cursor.cc \ tztime.cc my_time.c \ + sp_head.cc sp_pcontext.cc sp_rcontext.cc sp.cc \ + sp_cache.cc parse_file.cc sql_trigger.cc \ examples/ha_example.cc examples/ha_archive.cc \ - examples/ha_tina.cc + examples/ha_tina.cc \ + ha_federated.cc gen_lex_hash_SOURCES = gen_lex_hash.cc gen_lex_hash_LDADD = $(LDADD) $(CXXLDFLAGS) diff --git a/sql/derror.cc b/sql/derror.cc index 09f43d20044..4690e76d0e3 100644 --- a/sql/derror.cc +++ b/sql/derror.cc @@ -24,15 +24,43 @@ static bool read_texts(const char *file_name,const char ***point, uint error_messages); static void init_myfunc_errs(void); - /* Read messages from errorfile */ +/* + Read messages from errorfile. + + SYNOPSIS + init_errmessage() + + DESCRIPTION + This function can be called multiple times to reload the messages. + + RETURN + FALSE OK + TRUE Error +*/ bool init_errmessage(void) { + const char **errmsgs; DBUG_ENTER("init_errmessage"); - if (read_texts(ERRMSG_FILE,&my_errmsg[ERRMAPP],ER_ERROR_MESSAGES)) + /* + Get a pointer to the old error messages pointer array. + read_texts() tries to free it. 
+ */ + errmsgs= my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST); + + /* Read messages from file. */ + if (read_texts(ERRMSG_FILE, &errmsgs, ER_ERROR_LAST - ER_ERROR_FIRST + 1)) DBUG_RETURN(TRUE); - errmesg=my_errmsg[ERRMAPP]; /* Init global variabel */ + + /* Register messages for use with my_error(). */ + if (my_error_register(errmsgs, ER_ERROR_FIRST, ER_ERROR_LAST)) + { + x_free((gptr) errmsgs); + DBUG_RETURN(TRUE); + } + + errmesg= errmsgs; /* Init global variabel */ init_myfunc_errs(); /* Init myfunc messages */ DBUG_RETURN(FALSE); } @@ -148,20 +176,20 @@ static void init_myfunc_errs() init_glob_errs(); /* Initiate english errors */ if (!(specialflag & SPECIAL_ENGLISH)) { - globerrs[EE_FILENOTFOUND % ERRMOD] = ER(ER_FILE_NOT_FOUND); - globerrs[EE_CANTCREATEFILE % ERRMOD]= ER(ER_CANT_CREATE_FILE); - globerrs[EE_READ % ERRMOD] = ER(ER_ERROR_ON_READ); - globerrs[EE_WRITE % ERRMOD] = ER(ER_ERROR_ON_WRITE); - globerrs[EE_BADCLOSE % ERRMOD] = ER(ER_ERROR_ON_CLOSE); - globerrs[EE_OUTOFMEMORY % ERRMOD] = ER(ER_OUTOFMEMORY); - globerrs[EE_DELETE % ERRMOD] = ER(ER_CANT_DELETE_FILE); - globerrs[EE_LINK % ERRMOD] = ER(ER_ERROR_ON_RENAME); - globerrs[EE_EOFERR % ERRMOD] = ER(ER_UNEXPECTED_EOF); - globerrs[EE_CANTLOCK % ERRMOD] = ER(ER_CANT_LOCK); - globerrs[EE_DIR % ERRMOD] = ER(ER_CANT_READ_DIR); - globerrs[EE_STAT % ERRMOD] = ER(ER_CANT_GET_STAT); - globerrs[EE_GETWD % ERRMOD] = ER(ER_CANT_GET_WD); - globerrs[EE_SETWD % ERRMOD] = ER(ER_CANT_SET_WD); - globerrs[EE_DISK_FULL % ERRMOD] = ER(ER_DISK_FULL); + EE(EE_FILENOTFOUND) = ER(ER_FILE_NOT_FOUND); + EE(EE_CANTCREATEFILE) = ER(ER_CANT_CREATE_FILE); + EE(EE_READ) = ER(ER_ERROR_ON_READ); + EE(EE_WRITE) = ER(ER_ERROR_ON_WRITE); + EE(EE_BADCLOSE) = ER(ER_ERROR_ON_CLOSE); + EE(EE_OUTOFMEMORY) = ER(ER_OUTOFMEMORY); + EE(EE_DELETE) = ER(ER_CANT_DELETE_FILE); + EE(EE_LINK) = ER(ER_ERROR_ON_RENAME); + EE(EE_EOFERR) = ER(ER_UNEXPECTED_EOF); + EE(EE_CANTLOCK) = ER(ER_CANT_LOCK); + EE(EE_DIR) = ER(ER_CANT_READ_DIR); + EE(EE_STAT) = ER(ER_CANT_GET_STAT); + EE(EE_GETWD) = ER(ER_CANT_GET_WD); + EE(EE_SETWD) = ER(ER_CANT_SET_WD); + EE(EE_DISK_FULL) = ER(ER_DISK_FULL); } } diff --git a/sql/examples/ha_archive.cc b/sql/examples/ha_archive.cc index 771bf91d118..f79ca903e7a 100644 --- a/sql/examples/ha_archive.cc +++ b/sql/examples/ha_archive.cc @@ -22,6 +22,7 @@ #ifdef HAVE_ARCHIVE_DB #include "ha_archive.h" +#include <my_dir.h> /* First, if you want to understand storage engines you should look at @@ -227,8 +228,7 @@ int ha_archive::read_meta_file(File meta_file, ulonglong *rows) /* This method writes out the header of a meta file and returns whether or not it was successful. By setting dirty you say whether or not the file represents the actual state of the data file. - Upon ::open() we set to dirty, and upon ::close() we set to clean. If we determine during - a read that the file was dirty we will force a rebuild of this file. + Upon ::open() we set to dirty, and upon ::close() we set to clean. 
*/ int ha_archive::write_meta_file(File meta_file, ulonglong rows, bool dirty) { @@ -305,6 +305,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table) share->use_count= 0; share->table_name_length= length; share->table_name= tmp_name; + share->crashed= FALSE; fn_format(share->data_file_name,table_name,"",ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME); fn_format(meta_file_name,table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME); strmov(share->table_name,table_name); @@ -315,24 +316,15 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table) if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1) goto error; - if (read_meta_file(share->meta_file, &share->rows_recorded)) - { - /* - The problem here is that for some reason, probably a crash, the meta - file has been corrupted. So what do we do? Well we try to rebuild it - ourself. Once that happens, we reread it, but if that fails we just - call it quits and return an error. - */ - if (rebuild_meta_file(share->table_name, share->meta_file)) - goto error; - if (read_meta_file(share->meta_file, &share->rows_recorded)) - goto error; - } /* After we read, we set the file to dirty. When we close, we will do the - opposite. + opposite. If the meta file will not open we assume it is crashed and + leave it up to the user to fix. */ - (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); + if (read_meta_file(share->meta_file, &share->rows_recorded)) + share->crashed= TRUE; + else + (void)write_meta_file(share->meta_file, share->rows_recorded, TRUE); /* It is expensive to open and close the data files and since you can't have a gzip file that can be both read and written we keep a writer open @@ -408,7 +400,7 @@ int ha_archive::open(const char *name, int mode, uint test_if_locked) DBUG_ENTER("ha_archive::open"); if (!(share= get_share(name, table))) - DBUG_RETURN(1); + DBUG_RETURN(-1); thr_lock_data_init(&share->lock,&lock,NULL); if ((archive= gzopen(share->data_file_name, "rb")) == NULL) @@ -531,13 +523,18 @@ int ha_archive::write_row(byte * buf) Field_blob **field; DBUG_ENTER("ha_archive::write_row"); - statistic_increment(ha_write_count,&LOCK_status); + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); pthread_mutex_lock(&share->mutex); written= gzwrite(share->archive_write, buf, table->reclength); - DBUG_PRINT("ha_archive::get_row", ("Wrote %d bytes expected %d", written, table->reclength)); - share->dirty= TRUE; + DBUG_PRINT("ha_archive::write_row", ("Wrote %d bytes expected %d", written, table->reclength)); + if (!delayed_insert || !bulk_insert) + share->dirty= TRUE; + if (written != table->reclength) goto error; /* @@ -577,6 +574,9 @@ int ha_archive::rnd_init(bool scan) { DBUG_ENTER("ha_archive::rnd_init"); int read; // gzread() returns int, and we use this to check the header + + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); /* We rewind the file so that we can read from the beginning if scan */ if (scan) @@ -629,9 +629,12 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf) if (read == 0) DBUG_RETURN(HA_ERR_END_OF_FILE); - /* If the record is the wrong size, the file is probably damaged */ + /* + If the record is the wrong size, the file is probably damaged, unless + we are dealing with a delayed insert or a bulk insert. 
+ */ if ((ulong) read != table->reclength) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(HA_ERR_END_OF_FILE); /* Calculate blob length, we use this for our buffer */ for (field=table->blob_field; *field ; field++) @@ -649,7 +652,7 @@ int ha_archive::get_row(gzFile file_to_read, byte *buf) { read= gzread(file_to_read, last, size); if ((size_t) read != size) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + DBUG_RETURN(HA_ERR_END_OF_FILE); (*field)->set_ptr(size, last); last += size; } @@ -668,11 +671,15 @@ int ha_archive::rnd_next(byte *buf) int rc; DBUG_ENTER("ha_archive::rnd_next"); + if (share->crashed) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + if (!scan_rows) DBUG_RETURN(HA_ERR_END_OF_FILE); scan_rows--; - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); current_position= gztell(archive); rc= get_row(archive, buf); @@ -708,7 +715,8 @@ void ha_archive::position(const byte *record) int ha_archive::rnd_pos(byte * buf, byte *pos) { DBUG_ENTER("ha_archive::rnd_pos"); - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); current_position= ha_get_ptr(pos, ref_length); z_off_t seek= gzseek(archive, current_position, SEEK_SET); @@ -716,22 +724,23 @@ int ha_archive::rnd_pos(byte * buf, byte *pos) } /* - This method rebuilds the meta file. It does this by walking the datafile and + This method repairs the meta file. It does this by walking the datafile and rewriting the meta file. */ -int ha_archive::rebuild_meta_file(char *table_name, File meta_file) +int ha_archive::repair(THD* thd, HA_CHECK_OPT* check_opt) { int rc; byte *buf; ulonglong rows_recorded= 0; - gzFile rebuild_file; /* Archive file we are working with */ + gzFile rebuild_file; // Archive file we are working with + File meta_file; // Meta file we use char data_file_name[FN_REFLEN]; - DBUG_ENTER("ha_archive::rebuild_meta_file"); + DBUG_ENTER("ha_archive::repair"); /* Open up the meta file to recreate it. */ - fn_format(data_file_name, table_name, "", ARZ, + fn_format(data_file_name, share->table_name, "", ARZ, MY_REPLACE_EXT|MY_UNPACK_FILENAME); if ((rebuild_file= gzopen(data_file_name, "rb")) == NULL) DBUG_RETURN(errno ? 
errno : -1); @@ -761,11 +770,18 @@ int ha_archive::rebuild_meta_file(char *table_name, File meta_file) */ if (rc == HA_ERR_END_OF_FILE) { - (void)write_meta_file(meta_file, rows_recorded, FALSE); + fn_format(data_file_name,share->table_name,"",ARM,MY_REPLACE_EXT|MY_UNPACK_FILENAME); + if ((meta_file= my_open(data_file_name, O_RDWR, MYF(0))) == -1) + { + rc= HA_ERR_CRASHED_ON_USAGE; + goto error; + } + (void)write_meta_file(meta_file, rows_recorded, TRUE); rc= 0; } my_free((gptr) buf, MYF(0)); + share->crashed= FALSE; error: gzclose(rebuild_file); @@ -784,13 +800,14 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) char block[IO_SIZE]; char writer_filename[FN_REFLEN]; + /* Closing will cause all data waiting to be flushed */ + gzclose(share->archive_write); + share->archive_write= NULL; + /* Lets create a file to contain the new data */ fn_format(writer_filename, share->table_name, "", ARN, MY_REPLACE_EXT|MY_UNPACK_FILENAME); - /* Closing will cause all data waiting to be flushed, to be flushed */ - gzclose(share->archive_write); - if ((reader= gzopen(share->data_file_name, "rb")) == NULL) DBUG_RETURN(-1); @@ -808,29 +825,10 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) my_rename(writer_filename,share->data_file_name,MYF(0)); - /* - We reopen the file in case some IO is waiting to go through. - In theory the table is closed right after this operation, - but it is possible for IO to still happen. - I may be being a bit too paranoid right here. - */ - if ((share->archive_write= gzopen(share->data_file_name, "ab")) == NULL) - DBUG_RETURN(errno ? errno : -1); - share->dirty= FALSE; - DBUG_RETURN(0); } -/* - No transactions yet, so this is pretty dull. -*/ -int ha_archive::external_lock(THD *thd, int lock_type) -{ - DBUG_ENTER("ha_archive::external_lock"); - DBUG_RETURN(0); -} - /* Below is an example of how to setup row level locking. */ @@ -838,6 +836,11 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { + if (lock_type == TL_WRITE_DELAYED) + delayed_insert= TRUE; + else + delayed_insert= FALSE; + if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { /* @@ -872,97 +875,61 @@ THR_LOCK_DATA **ha_archive::store_lock(THD *thd, } -/****************************************************************************** - - Everything below here is default, please look at ha_example.cc for - descriptions. 
- - ******************************************************************************/ - -int ha_archive::update_row(const byte * old_data, byte * new_data) -{ - - DBUG_ENTER("ha_archive::update_row"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::delete_row(const byte * buf) -{ - DBUG_ENTER("ha_archive::delete_row"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_read(byte * buf, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) -{ - DBUG_ENTER("ha_archive::index_read"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) -{ - DBUG_ENTER("ha_archive::index_read_idx"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - - -int ha_archive::index_next(byte * buf) -{ - DBUG_ENTER("ha_archive::index_next"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_prev(byte * buf) -{ - DBUG_ENTER("ha_archive::index_prev"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_first(byte * buf) -{ - DBUG_ENTER("ha_archive::index_first"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - -int ha_archive::index_last(byte * buf) -{ - DBUG_ENTER("ha_archive::index_last"); - DBUG_RETURN(HA_ERR_WRONG_COMMAND); -} - - +/* + Hints for optimizer, see ha_tina for more information +*/ void ha_archive::info(uint flag) { DBUG_ENTER("ha_archive::info"); - - /* This is a lie, but you don't want the optimizer to see zero or 1 */ + /* + This should be an accurate number now, though bulk and delayed inserts can + cause the number to be inaccurate. + */ records= share->rows_recorded; deleted= 0; + /* Costs quite a bit more to get all information */ + if (flag & HA_STATUS_TIME) + { + MY_STAT file_stat; // Stat information for the data file + + VOID(my_stat(share->data_file_name, &file_stat, MYF(MY_WME))); + + mean_rec_length= table->reclength + buffer.alloced_length(); + data_file_length= file_stat.st_size; + create_time= file_stat.st_ctime; + update_time= file_stat.st_mtime; + max_data_file_length= share->rows_recorded * mean_rec_length; + } + delete_length= 0; + index_file_length=0; DBUG_VOID_RETURN; } -int ha_archive::extra(enum ha_extra_function operation) -{ - DBUG_ENTER("ha_archive::extra"); - DBUG_RETURN(0); -} -int ha_archive::reset(void) +/* + This method tells us that a bulk insert operation is about to occur. We set + a flag which will keep write_row from saying that its data is dirty. This in + turn will keep selects from causing a sync to occur. + Basically, yet another optimizations to keep compression working well. +*/ +void ha_archive::start_bulk_insert(ha_rows rows) { - DBUG_ENTER("ha_archive::reset"); - DBUG_RETURN(0); + DBUG_ENTER("ha_archive::start_bulk_insert"); + bulk_insert= TRUE; + DBUG_VOID_RETURN; } -ha_rows ha_archive::records_in_range(uint inx, key_range *min_key, - key_range *max_key) + +/* + Other side of start_bulk_insert, is end_bulk_insert. Here we turn off the bulk insert + flag, and set the share dirty so that the next select will call sync for us. 
+*/ +int ha_archive::end_bulk_insert() { - DBUG_ENTER("ha_archive::records_in_range "); - DBUG_RETURN(records); // HA_ERR_WRONG_COMMAND + DBUG_ENTER("ha_archive::end_bulk_insert"); + bulk_insert= FALSE; + share->dirty= TRUE; + DBUG_RETURN(0); } #endif /* HAVE_ARCHIVE_DB */ diff --git a/sql/examples/ha_archive.h b/sql/examples/ha_archive.h index b619de5f6c1..07bc7baa400 100644 --- a/sql/examples/ha_archive.h +++ b/sql/examples/ha_archive.h @@ -32,10 +32,11 @@ typedef struct st_archive_share { uint table_name_length,use_count; pthread_mutex_t mutex; THR_LOCK lock; - File meta_file; /* Meta file we use */ - gzFile archive_write; /* Archive file we are working with */ - bool dirty; /* Flag for if a flush should occur */ - ulonglong rows_recorded; /* Number of rows in tables */ + File meta_file; /* Meta file we use */ + gzFile archive_write; /* Archive file we are working with */ + bool dirty; /* Flag for if a flush should occur */ + bool crashed; /* Meta file is crashed */ + ulonglong rows_recorded; /* Number of rows in tables */ } ARCHIVE_SHARE; /* @@ -53,9 +54,11 @@ class ha_archive: public handler byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */ String buffer; /* Buffer used for blob storage */ ulonglong scan_rows; /* Number of rows left in scan */ + bool delayed_insert; /* If the insert is delayed */ + bool bulk_insert; /* If we are performing a bulk insert */ public: - ha_archive(TABLE *table): handler(table) + ha_archive(TABLE *table): handler(table), delayed_insert(0), bulk_insert(0) { /* Set our original buffer from pre-allocated memory */ buffer.set(byte_buffer, IO_SIZE, system_charset_info); @@ -72,37 +75,15 @@ public: ulong table_flags() const { return (HA_REC_NOT_IN_SEQ | HA_NOT_EXACT_COUNT | HA_NO_AUTO_INCREMENT | - HA_FILE_BASED); + HA_FILE_BASED | HA_CAN_INSERT_DELAYED); } ulong index_flags(uint idx, uint part, bool all_parts) const { return 0; } - /* - Have to put something here, there is no real limit as far as - archive is concerned. - */ - uint max_supported_record_length() const { return UINT_MAX; } - /* - Called in test_quick_select to determine if indexes should be used. 
- */ - virtual double scan_time() { return (double) (records) / 20.0+10; } - /* The next method will never be called */ - virtual double read_time(uint index, uint ranges, ha_rows rows) - { return (double) rows / 20.0+1; } int open(const char *name, int mode, uint test_if_locked); int close(void); int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); int rnd_init(bool scan=1); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); @@ -111,17 +92,16 @@ public: int write_meta_file(File meta_file, ulonglong rows, bool dirty); ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table); int free_share(ARCHIVE_SHARE *share); - int rebuild_meta_file(char *table_name, File meta_file); + bool auto_repair() const { return 1; } // For the moment we just do this int read_data_header(gzFile file_to_read); int write_data_header(gzFile file_to_write); void position(const byte *record); void info(uint); - int extra(enum ha_extra_function operation); - int reset(void); - int external_lock(THD *thd, int lock_type); - ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); int optimize(THD* thd, HA_CHECK_OPT* check_opt); + int repair(THD* thd, HA_CHECK_OPT* check_opt); + void start_bulk_insert(ha_rows rows); + int end_bulk_insert(); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); }; diff --git a/sql/examples/ha_tina.cc b/sql/examples/ha_tina.cc index 0345a170c3c..39573a8f54c 100644 --- a/sql/examples/ha_tina.cc +++ b/sql/examples/ha_tina.cc @@ -426,7 +426,7 @@ int ha_tina::write_row(byte * buf) int size; DBUG_ENTER("ha_tina::write_row"); - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); @@ -462,7 +462,8 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) int size; DBUG_ENTER("ha_tina::update_row"); - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -489,7 +490,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) int ha_tina::delete_row(const byte * buf) { DBUG_ENTER("ha_tina::delete_row"); - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); if (chain_append()) DBUG_RETURN(-1); @@ -629,7 +630,8 @@ int ha_tina::rnd_next(byte *buf) { DBUG_ENTER("ha_tina::rnd_next"); - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); current_position= next_position; if (!share->mapped_file) @@ -666,7 +668,8 @@ void ha_tina::position(const byte *record) int ha_tina::rnd_pos(byte * buf, byte *pos) { DBUG_ENTER("ha_tina::rnd_pos"); - statistic_increment(ha_read_rnd_count,&LOCK_status); + 
statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); current_position= ha_get_ptr(pos,ref_length); DBUG_RETURN(find_current_row(buf)); } diff --git a/sql/field.cc b/sql/field.cc index b27a319b00e..a270f102cd5 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -83,32 +83,48 @@ void Field_num::prepend_zeros(String *value) RETURN 0 ok - 1 error + 1 error. A warning is pushed if field_name != 0 */ -bool test_if_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs) +bool Field::check_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs) { + const char *end; if (str == int_end) - return 0; // Empty string - const char *end=str+length; + { + char buff[128]; + String tmp(buff,(uint32) sizeof(buff), system_charset_info); + tmp.copy(str, length, system_charset_info); + push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_TRUNCATED_WRONG_VALUE_FOR_FIELD, + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + "integer", tmp.c_ptr(), field_name, + (ulong) table->in_use->row_count); + return 1; // Empty string + } + end= str+length; if ((str= int_end) == end) - return 1; // All digits was used + return 0; // ok; All digits was used /* Allow end .0000 */ if (*str == '.') { - for (str++ ; str != end && *str == '0'; str++) ; + for (str++ ; str != end && *str == '0'; str++) + ; } /* Allow end space */ - for (str++ ; str != end ; str++) + for ( ; str != end ; str++) { if (!my_isspace(cs,*str)) - return 0; + { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + return 1; + } } - return 1; + return 0; } + #ifdef NOT_USED static bool test_if_real(const char *str,int length, CHARSET_INFO *cs) { @@ -313,6 +329,28 @@ bool Field::field_cast_compatible(Field::field_cast_enum type) } +/* + Interpret field value as an integer but return the result as a string. + + This is used for printing bit_fields as numbers while debugging +*/ + +String *Field::val_int_as_str(String *val_buffer, my_bool unsigned_flag) +{ + CHARSET_INFO *cs= &my_charset_bin; + uint length= 21; + longlong value= val_int(); + if (val_buffer->alloc(length)) + return 0; + length= (uint) (*cs->cset->longlong10_to_str)(cs, (char*) val_buffer->ptr(), + length, + unsigned_flag ? 10 : -10, + value); + val_buffer->length(length); + return val_buffer; +} + + /**************************************************************************** ** Functions for the base classes ** This is an unpacked number. 
@@ -413,8 +451,9 @@ uint Field::fill_cache_field(CACHE_FIELD *copy) copy->length-=table->blob_ptr_size; return copy->length; } - else if (!zero_pack() && (type() == FIELD_TYPE_STRING && copy->length > 4 || - type() == FIELD_TYPE_VAR_STRING)) + else if (!zero_pack() && + (type() == MYSQL_TYPE_STRING && copy->length >= 4 && + copy->length < 256)) copy->strip=1; /* Remove end space */ else copy->strip=0; @@ -450,11 +489,11 @@ bool Field::get_time(TIME *ltime) Needs to be changed if/when we want to support different time formats */ -void Field::store_time(TIME *ltime,timestamp_type type) +int Field::store_time(TIME *ltime, timestamp_type type) { char buff[MAX_DATE_STRING_REP_LENGTH]; uint length= (uint) my_TIME_to_str(ltime, buff); - store(buff, length, &my_charset_bin); + return store(buff, length, &my_charset_bin); } @@ -463,6 +502,42 @@ bool Field::optimize_range(uint idx, uint part) return test(table->file->index_flags(idx, part, 1) & HA_READ_RANGE); } + +Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table) +{ + Field *tmp; + if (!(tmp= (Field*) memdup_root(root,(char*) this,size_of()))) + return 0; + + if (tmp->table->maybe_null) + tmp->flags&= ~NOT_NULL_FLAG; + tmp->table= new_table; + tmp->key_start.init(0); + tmp->part_of_key.init(0); + tmp->part_of_sortkey.init(0); + tmp->unireg_check=Field::NONE; + tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | + ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG); + tmp->reset_fields(); + return tmp; +} + + +Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field *tmp; + if ((tmp= new_field(root, new_table))) + { + tmp->ptr= new_ptr; + tmp->null_ptr= new_null_ptr; + tmp->null_bit= new_null_bit; + } + return tmp; +} + + /**************************************************************************** Field_null, a field that always return NULL ****************************************************************************/ @@ -872,7 +947,13 @@ int Field_decimal::store(const char *from, uint len, CHARSET_INFO *cs) if (tmp_char != '0') // Losing a non zero digit ? 
{ if (!is_cuted_fields_incr) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + { + /* + This is a note, not a warning, as we don't want to abort + when we cut decimals in strict mode + */ + set_warning(MYSQL_ERROR::WARN_LEVEL_NOTE, ER_WARN_DATA_TRUNCATED, 1); + } return 0; } continue; @@ -1099,11 +1180,8 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } else { @@ -1119,11 +1197,8 @@ int Field_tiny::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } ptr[0]= (char) tmp; return error; @@ -1166,7 +1241,7 @@ int Field_tiny::store(double nr) error= 1; } else - *ptr=(char) nr; + *ptr=(char) (int) nr; } return error; } @@ -1297,17 +1372,14 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (tmp > (uint16) ~0) + else if (tmp > UINT_MAX16) { - tmp=(uint16) ~0; + tmp=UINT_MAX16; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } else { @@ -1323,11 +1395,8 @@ int Field_short::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -1354,9 +1423,9 @@ int Field_short::store(double nr) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr > (double) (uint16) ~0) + else if (nr > (double) UINT_MAX16) { - res=(int16) (uint16) ~0; + res=(int16) UINT_MAX16; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } @@ -1378,7 +1447,7 @@ int Field_short::store(double nr) error= 1; } else - res=(int16) nr; + res=(int16) (int) nr; } #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) @@ -1403,9 +1472,9 @@ int Field_short::store(longlong nr) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr > (longlong) (uint16) ~0) + else if (nr > (longlong) UINT_MAX16) { - res=(int16) (uint16) ~0; + res=(int16) UINT_MAX16; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } @@ -1578,11 +1647,8 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && 
!test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } else { @@ -1598,11 +1664,8 @@ int Field_medium::store(const char *from,uint len,CHARSET_INFO *cs) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + else if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) error= 1; - } } int3store(ptr,tmp); @@ -1778,71 +1841,71 @@ void Field_medium::sql_type(String &res) const int Field_long::store(const char *from,uint len,CHARSET_INFO *cs) { - long tmp; - int error= 0; + longlong tmp; + long store_tmp; + int error; char *end; tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); len-= tmp; from+= tmp; - my_errno=0; - if (unsigned_flag) + end= (char*) from+len; + tmp= my_strtoll10(from, &end, &error); + + if (error != MY_ERRNO_EDOM) { - if (!len || *from == '-') + if (unsigned_flag) { - tmp=0; // Set negative to 0 - my_errno=ERANGE; - error= 1; + if (error < 0) + { + error= 1; + tmp= 0; + } + else if ((ulonglong) tmp > (ulonglong) UINT_MAX32) + { + tmp= UINT_MAX32; + error= 1; + } + else + error= 0; } else - tmp=(long) my_strntoul(cs,from,len,10,&end,&error); - } - else - tmp=my_strntol(cs,from,len,10,&end,&error); - if (error || - (from+len != end && table->in_use->count_cuted_fields && - !test_if_int(from,len,end,cs))) - error= 1; -#if SIZEOF_LONG > 4 - if (unsigned_flag) - { - if ((ulong) tmp > UINT_MAX32) - { - tmp= UINT_MAX32; - error= 1; - my_errno=ERANGE; - } - } - else - { - if (tmp > INT_MAX32) - { - tmp= INT_MAX32; - error= 1; - my_errno=ERANGE; - } - else if (tmp < INT_MIN32) { - tmp= INT_MIN32; - error= 1; - my_errno=ERANGE; + if (error < 0) + { + error= 0; + if (tmp < INT_MIN32) + { + tmp= INT_MIN32; + error= 1; + } + } + else if (tmp > INT_MAX32) + { + tmp= INT_MAX32; + error= 1; + } } } -#endif if (error) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); error= 1; + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); } + else if (from+len != end && table->in_use->count_cuted_fields && + check_int(from,len,end,cs)) + error= 1; + + store_tmp= (long) tmp; #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { - int4store(ptr,tmp); + int4store(ptr, store_tmp); } else #endif - longstore(ptr,tmp); + longstore(ptr, store_tmp); return error; } @@ -1857,7 +1920,6 @@ int Field_long::store(double nr) if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) UINT_MAX32) @@ -1874,18 +1936,19 @@ int Field_long::store(double nr) if (nr < (double) INT_MIN32) { res=(int32) INT_MIN32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (double) INT_MAX32) { res=(int32) INT_MAX32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else - res=(int32) nr; + res=(int32) (longlong) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -1902,26 +1965,18 @@ int Field_long::store(longlong nr) { int error= 0; int32 res; - - /* - This assert has nothing to do with this method per se, it was put here - only because it is one of the best places for catching places there 
its - condition is broken. - */ - DBUG_ASSERT(table->in_use == current_thd); + DBUG_ASSERT(table->in_use == current_thd); // General safety if (unsigned_flag) { if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr >= (LL(1) << 32)) { res=(int32) (uint32) ~0L; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -1932,18 +1987,19 @@ int Field_long::store(longlong nr) if (nr < (longlong) INT_MIN32) { res=(int32) INT_MIN32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else if (nr > (longlong) INT_MAX32) { res=(int32) INT_MAX32; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else res=(int32) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2078,17 +2134,15 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) longlong tmp; int error= 0; char *end; - + tmp= cs->cset->scan(cs, from, from+len, MY_SEQ_SPACES); len-= (uint)tmp; from+= tmp; - my_errno=0; if (unsigned_flag) { if (!len || *from == '-') { tmp=0; // Set negative to 0 - my_errno= ERANGE; error= 1; } else @@ -2096,13 +2150,14 @@ int Field_longlong::store(const char *from,uint len,CHARSET_INFO *cs) } else tmp=my_strntoll(cs,from,len,10,&end,&error); - if (error || - (from+len != end && table->in_use->count_cuted_fields && - !test_if_int(from,len,end,cs))) + if (error) { - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } + else if (from+len != end && table->in_use->count_cuted_fields && + check_int(from,len,end,cs)) + error= 1; #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2119,19 +2174,18 @@ int Field_longlong::store(double nr) { int error= 0; longlong res; - nr=rint(nr); + + nr= rint(nr); if (unsigned_flag) { if (nr < 0) { res=0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } - else if (nr >= (double) ~ (ulonglong) 0) + else if (nr >= (double) ULONGLONG_MAX) { res= ~(longlong) 0; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); error= 1; } else @@ -2141,19 +2195,20 @@ int Field_longlong::store(double nr) { if (nr <= (double) LONGLONG_MIN) { - res=(longlong) LONGLONG_MIN; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; + res= LONGLONG_MIN; + error= (nr < (double) LONGLONG_MIN); } else if (nr >= (double) (ulonglong) LONGLONG_MAX) { - res=(longlong) LONGLONG_MAX; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); - error= 1; + res= LONGLONG_MAX; + error= (nr > (double) LONGLONG_MAX); } else res=(longlong) nr; } + if (error) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + #ifdef WORDS_BIGENDIAN if (table->db_low_byte_first) { @@ -2319,10 +2374,13 @@ int Field_float::store(const char *from,uint len,CHARSET_INFO *cs) int error; char *end; double nr= my_strntod(cs,(char*) from,len,&end,&error); - if (error || ((uint) (end-from) != len && table->in_use->count_cuted_fields)) + if (error || (!len || (uint) (end-from) != len && + table->in_use->count_cuted_fields)) { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + (error ? 
ER_WARN_DATA_OUT_OF_RANGE : ER_WARN_DATA_TRUNCATED), + 1); error= 1; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } Field_float::store(nr); return error; @@ -2621,10 +2679,13 @@ int Field_double::store(const char *from,uint len,CHARSET_INFO *cs) int error; char *end; double nr= my_strntod(cs,(char*) from, len, &end, &error); - if (error || ((uint) (end-from) != len && table->in_use->count_cuted_fields)) + if (error || (!len || (uint) (end-from) != len && + table->in_use->count_cuted_fields)) { + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + (error ? ER_WARN_DATA_OUT_OF_RANGE : ER_WARN_DATA_TRUNCATED), + 1); error= 1; - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } Field_double::store(nr); return error; @@ -2996,14 +3057,22 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) bool in_dst_time_gap; THD *thd= table->in_use; - have_smth_to_conv= (str_to_datetime(from, len, &l_time, 0, &error) > + have_smth_to_conv= (str_to_datetime(from, len, &l_time, + ((table->in_use->variables.sql_mode & + MODE_NO_ZERO_DATE) | + MODE_NO_ZERO_IN_DATE), + &error) > MYSQL_TIMESTAMP_ERROR); - if (error) + if (error || !have_smth_to_conv) + { + error= 1; set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, from, len, MYSQL_TIMESTAMP_DATETIME, 1); + } - if (have_smth_to_conv) + /* Only convert a correct date (not a zero date) */ + if (have_smth_to_conv && l_time.month) { if (!(tmp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap))) { @@ -3033,6 +3102,7 @@ int Field_timestamp::store(const char *from,uint len,CHARSET_INFO *cs) return error; } + int Field_timestamp::store(double nr) { int error= 0; @@ -3057,7 +3127,7 @@ int Field_timestamp::store(longlong nr) bool in_dst_time_gap; THD *thd= table->in_use; - if (number_to_TIME(nr, &l_time, 0, &error)) + if (number_to_datetime(nr, &l_time, 0, &error)) { if (!(timestamp= TIME_to_timestamp(thd, &l_time, &in_dst_time_gap))) { @@ -3207,7 +3277,7 @@ bool Field_timestamp::get_date(TIME *ltime, uint fuzzydate) longget(temp,ptr); if (temp == 0L) { /* Zero time is "000000" */ - if (!fuzzydate) + if (fuzzydate & TIME_NO_ZERO_DATE) return 1; bzero((char*) ltime,sizeof(*ltime)); } @@ -3228,7 +3298,7 @@ bool Field_timestamp::get_time(TIME *ltime) bool Field_timestamp::send_binary(Protocol *protocol) { TIME tm; - Field_timestamp::get_date(&tm, TIME_FUZZY_DATE); + Field_timestamp::get_date(&tm, 0); return protocol->store(&tm); } @@ -3340,6 +3410,16 @@ int Field_time::store(const char *from,uint len,CHARSET_INFO *cs) } +int Field_time::store_time(TIME *ltime, timestamp_type type) +{ + long tmp= ((ltime->month ? 
0 : ltime->day * 24L) + ltime->hour) * 10000L + + (ltime->minute * 100 + ltime->second); + if (ltime->neg) + tmp= -tmp; + return Field_time::store((longlong) tmp); +} + + int Field_time::store(double nr) { long tmp; @@ -3461,7 +3541,7 @@ String *Field_time::val_str(String *val_buffer, bool Field_time::get_date(TIME *ltime, uint fuzzydate) { long tmp; - if (!fuzzydate) + if (!(fuzzydate & TIME_FUZZY_DATE)) { push_warning_printf(table->in_use, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, @@ -3543,18 +3623,19 @@ void Field_time::sql_type(String &res) const int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) { - int not_used; // We can ignore result from str2int char *end; - long nr= my_strntol(cs, from, len, 10, &end, ¬_used); + int error; + long nr= my_strntol(cs, from, len, 10, &end, &error); - if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155) + if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155 || error) { *ptr=0; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); return 1; } - if (table->in_use->count_cuted_fields && !test_if_int(from,len,end,cs)) - set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + if (table->in_use->count_cuted_fields && check_int(from,len,end,cs)) + error= 1; + if (nr != 0 || len != 4) { if (nr < YY_PART_YEAR) @@ -3563,7 +3644,7 @@ int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) nr-= 1900; } *ptr= (char) (unsigned char) nr; - return 0; + return error; } @@ -3649,7 +3730,11 @@ int Field_date::store(const char *from, uint len,CHARSET_INFO *cs) uint32 tmp; int error; - if (str_to_datetime(from, len, &l_time, 1, &error) <= MYSQL_TIMESTAMP_ERROR) + if (str_to_datetime(from, len, &l_time, TIME_FUZZY_DATE | + (table->in_use->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES)), + &error) <= MYSQL_TIMESTAMP_ERROR) { tmp=0; error= 1; @@ -3840,7 +3925,12 @@ int Field_newdate::store(const char *from,uint len,CHARSET_INFO *cs) TIME l_time; long tmp; int error; - if (str_to_datetime(from, len, &l_time, 1, &error) <= MYSQL_TIMESTAMP_ERROR) + if (str_to_datetime(from, len, &l_time, + (TIME_FUZZY_DATE | + (table->in_use->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), + &error) <= MYSQL_TIMESTAMP_ERROR) { tmp=0L; error= 1; @@ -3911,17 +4001,20 @@ int Field_newdate::store(longlong nr) return error; } -void Field_newdate::store_time(TIME *ltime,timestamp_type type) +int Field_newdate::store_time(TIME *ltime,timestamp_type type) { long tmp; + int error= 0; if (type == MYSQL_TIMESTAMP_DATE || type == MYSQL_TIMESTAMP_DATETIME) tmp=ltime->year*16*32+ltime->month*32+ltime->day; else { tmp=0; + error= 1; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } int3store(ptr,tmp); + return error; } bool Field_newdate::send_binary(Protocol *protocol) @@ -3980,7 +4073,8 @@ bool Field_newdate::get_date(TIME *ltime,uint fuzzydate) ltime->year= (tmp >> 9); ltime->time_type= MYSQL_TIMESTAMP_DATE; ltime->hour= ltime->minute= ltime->second= ltime->second_part= ltime->neg= 0; - return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0; + return ((!(fuzzydate & TIME_FUZZY_DATE) && (!ltime->month || !ltime->day)) ? 
+ 1 : 0); } bool Field_newdate::get_time(TIME *ltime) @@ -4022,7 +4116,12 @@ int Field_datetime::store(const char *from,uint len,CHARSET_INFO *cs) int error; ulonglong tmp= 0; - if (str_to_datetime(from, len, &time_tmp, 1, &error) > MYSQL_TIMESTAMP_ERROR) + if (str_to_datetime(from, len, &time_tmp, + (TIME_FUZZY_DATE | + (table->in_use->variables.sql_mode & + (MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_INVALID_DATES))), + &error) > MYSQL_TIMESTAMP_ERROR) tmp= TIME_to_ulonglong_datetime(&time_tmp); if (error) @@ -4064,7 +4163,7 @@ int Field_datetime::store(longlong nr) int error; longlong initial_nr= nr; - nr= number_to_TIME(nr, ¬_used, 1, &error); + nr= number_to_datetime(nr, ¬_used, 1, &error); if (error) set_datetime_warning(MYSQL_ERROR::WARN_LEVEL_WARN, @@ -4083,9 +4182,10 @@ int Field_datetime::store(longlong nr) } -void Field_datetime::store_time(TIME *ltime,timestamp_type type) +int Field_datetime::store_time(TIME *ltime,timestamp_type type) { longlong tmp; + int error= 0; /* We don't perform range checking here since values stored in TIME structure always fit into DATETIME range. @@ -4096,6 +4196,7 @@ void Field_datetime::store_time(TIME *ltime,timestamp_type type) else { tmp=0; + error= 1; set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } #ifdef WORDS_BIGENDIAN @@ -4106,6 +4207,7 @@ void Field_datetime::store_time(TIME *ltime,timestamp_type type) else #endif longlongstore(ptr,tmp); + return error; } bool Field_datetime::send_binary(Protocol *protocol) @@ -4198,7 +4300,7 @@ bool Field_datetime::get_date(TIME *ltime, uint fuzzydate) ltime->day= (int) (part1%100); ltime->month= (int) (part1/100%100); ltime->year= (int) (part1/10000); - return (!fuzzydate && (!ltime->month || !ltime->day)) ? 1 : 0; + return (!(fuzzydate & TIME_FUZZY_DATE) && (!ltime->month || !ltime->day)) ? 1 : 0; } bool Field_datetime::get_time(TIME *ltime) @@ -4405,7 +4507,7 @@ int Field_string::cmp(const char *a_ptr, const char *b_ptr) return field_charset->coll->strnncollsp(field_charset, (const uchar*) a_ptr, field_length, (const uchar*) b_ptr, - field_length); + field_length, 0); } if (field_charset->mbmaxlen != 1) { @@ -4434,20 +4536,22 @@ void Field_string::sql_type(String &res) const { THD *thd= table->in_use; CHARSET_INFO *cs=res.charset(); - ulong length= cs->cset->snprintf(cs,(char*) res.ptr(), - res.alloced_length(), "%s(%d)", - (field_length > 3 && - (table->db_options_in_use & - HA_OPTION_PACK_RECORD) ? - (has_charset() ? "varchar" : "varbinary") : + ulong length; + + length= cs->cset->snprintf(cs,(char*) res.ptr(), + res.alloced_length(), "%s(%d)", + ((type() == MYSQL_TYPE_VAR_STRING && + !thd->variables.new_mode) ? + (has_charset() ? "varchar" : "varbinary") : (has_charset() ? 
"char" : "binary")), - (int) field_length / charset()->mbmaxlen); + (int) field_length / charset()->mbmaxlen); res.length(length); if ((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) && has_charset() && (charset()->state & MY_CS_BINSORT)) res.append(" binary"); } + char *Field_string::pack(char *to, const char *from, uint max_length) { uint length= min(field_length,max_length); @@ -4481,10 +4585,27 @@ const char *Field_string::unpack(char *to, const char *from) } -int Field_string::pack_cmp(const char *a, const char *b, uint length) +/* + Compare two packed keys + + SYNOPSIS + pack_cmp() + a New key + b Original key + length Key length + insert_or_update 1 if this is an insert or update + + RETURN + < 0 a < b + 0 a = b + > 0 a > b +*/ + +int Field_string::pack_cmp(const char *a, const char *b, uint length, + my_bool insert_or_update) { uint a_length, b_length; - if (field_length > 255) + if (length > 255) { a_length= uint2korr(a); b_length= uint2korr(b); @@ -4496,29 +4617,51 @@ int Field_string::pack_cmp(const char *a, const char *b, uint length) a_length= (uint) (uchar) *a++; b_length= (uint) (uchar) *b++; } - return my_strnncoll(field_charset, - (const uchar*)a,a_length, - (const uchar*)b,b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length, + insert_or_update); } -int Field_string::pack_cmp(const char *b, uint length) +/* + Compare a packed key against row + + SYNOPSIS + pack_cmp() + key Original key + length Key length. (May be less than field length) + insert_or_update 1 if this is an insert or update + + RETURN + < 0 row < key + 0 row = key + > 0 row > key +*/ + +int Field_string::pack_cmp(const char *key, uint length, + my_bool insert_or_update) { - uint b_length; - if (field_length > 255) + uint row_length, key_length; + char *end; + if (length > 255) { - b_length= uint2korr(b); - b+= 2; + key_length= uint2korr(key); + key+= 2; } else - b_length= (uint) (uchar) *b++; - char *end= ptr + field_length; + key_length= (uint) (uchar) *key++; + + /* Only use 'length' of key, not field_length */ + end= ptr + length; while (end > ptr && end[-1] == ' ') end--; - uint a_length = (uint) (end - ptr); - return my_strnncoll(field_charset, - (const uchar*)ptr,a_length, - (const uchar*)b, b_length); + row_length= (uint) (end - ptr); + + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) ptr, row_length, + (const uchar*) key, key_length, + insert_or_update); } @@ -4526,25 +4669,52 @@ uint Field_string::packed_col_length(const char *data_ptr, uint length) { if (length > 255) return uint2korr(data_ptr)+2; - else - return (uint) ((uchar) *data_ptr)+1; + return (uint) ((uchar) *data_ptr)+1; } + uint Field_string::max_packed_col_length(uint max_length) { return (max_length > 255 ? 2 : 1)+max_length; } +Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table) +{ + if (type() != MYSQL_TYPE_VAR_STRING || table == new_table) + return Field::new_field(root, new_table); + + /* + Old VARCHAR field which should be modified to a VARCHAR on copy + This is done to ensure that ALTER TABLE will convert old VARCHAR fields + to now VARCHAR fields. 
+ */ + return new Field_varstring(field_length, maybe_null(), + field_name, new_table, + charset()); +} + /**************************************************************************** -** VARCHAR type (Not available for the end user yet) + VARCHAR type + Data in field->ptr is stored as: + 1 or 2 bytes length-prefix-header (from Field_varstring::length_bytes) + data + + NOTE: + When VARCHAR is stored in a key (for handler::index_read() etc) it's always + stored with a 2 byte prefix. (Just like blob keys). + + Normally length_bytes is calculated as (field_length < 256 : 1 ? 2) + The exception is if there is a prefix key field that is part of a long + VARCHAR, in which case field_length for this may be 1 but the length_bytes + is 2. ****************************************************************************/ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) { int error= 0; - uint32 not_used; + uint32 not_used, copy_length; char buff[80]; String tmpstr(buff,sizeof(buff), &my_charset_bin); @@ -4558,15 +4728,24 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) if (conv_errors) error= 1; } - if (length > field_length) - { - length=field_length; + /* + Make sure we don't break a multibyte sequence + as well as don't copy a malformed data. + */ + copy_length= field_charset->cset->well_formed_len(field_charset, + from,from+length, + field_length/ + field_charset->mbmaxlen); + memcpy(ptr + length_bytes, from, copy_length); + if (length_bytes == 1) + *ptr= (uchar) copy_length; + else + int2store(ptr, copy_length); + + if (copy_length < length) error= 1; - } if (error) set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); - memcpy(ptr+HA_KEY_BLOB_LENGTH,from,length); - int2store(ptr, length); return error; } @@ -4574,59 +4753,117 @@ int Field_varstring::store(const char *from,uint length,CHARSET_INFO *cs) int Field_varstring::store(longlong nr) { char buff[64]; - int l; - CHARSET_INFO *cs=charset(); - l= (cs->cset->longlong10_to_str)(cs,buff,sizeof(buff),-10,nr); - return Field_varstring::store(buff,(uint)l,cs); + uint length; + length= (uint) (field_charset->cset->longlong10_to_str)(field_charset, + buff, + sizeof(buff), + -10,nr); + return Field_varstring::store(buff, length, field_charset); } double Field_varstring::val_real(void) { int not_used; - uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; - CHARSET_INFO *cs=charset(); - return my_strntod(cs, ptr+HA_KEY_BLOB_LENGTH, length, (char**)0, ¬_used); + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + return my_strntod(field_charset, ptr+length_bytes, length, (char**) 0, + ¬_used); } longlong Field_varstring::val_int(void) { int not_used; - uint length=uint2korr(ptr)+HA_KEY_BLOB_LENGTH; - CHARSET_INFO *cs=charset(); - return my_strntoll(cs,ptr+HA_KEY_BLOB_LENGTH,length,10,NULL, ¬_used); + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + return my_strntoll(field_charset, ptr+length_bytes, length, 10, NULL, + ¬_used); } String *Field_varstring::val_str(String *val_buffer __attribute__((unused)), String *val_ptr) { - uint length=uint2korr(ptr); - val_ptr->set((const char*) ptr+HA_KEY_BLOB_LENGTH,length,field_charset); + uint length= length_bytes == 1 ? 
(uint) (uchar) *ptr : uint2korr(ptr); + val_ptr->set((const char*) ptr+length_bytes, length, field_charset); return val_ptr; } int Field_varstring::cmp(const char *a_ptr, const char *b_ptr) { - uint a_length=uint2korr(a_ptr); - uint b_length=uint2korr(b_ptr); + uint a_length, b_length; int diff; - diff= my_strnncoll(field_charset, - (const uchar*) a_ptr+HA_KEY_BLOB_LENGTH, - min(a_length,b_length), - (const uchar*) b_ptr+HA_KEY_BLOB_LENGTH, - min(a_length,b_length)); - return diff ? diff : (int) (a_length - b_length); + + if (length_bytes == 1) + { + a_length= (uint) (uchar) *a_ptr; + b_length= (uint) (uchar) *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } + diff= field_charset->coll->strnncollsp(field_charset, + (const uchar*) a_ptr+ + length_bytes, + a_length, + (const uchar*) b_ptr+ + length_bytes, + b_length,0); + return diff; } + +/* + NOTE: varstring and blob keys are ALWAYS stored with a 2 byte length prefix +*/ + +int Field_varstring::key_cmp(const byte *key_ptr, uint max_key_length) +{ + char *blob1; + uint length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + uint char_length= max_key_length / field_charset->mbmaxlen; + + char_length= my_charpos(field_charset, ptr + length_bytes, + ptr + length_bytes + length, char_length); + set_if_smaller(length, char_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) ptr + length_bytes, + length, + (const uchar*) key_ptr+ + HA_KEY_BLOB_LENGTH, + uint2korr(key_ptr), 0); +} + + +/* + Compare to key segments (always 2 byte length prefix) + + NOTE + This is used only to compare key segments created for index_read(). + (keys are created and compared in key.cc) +*/ + +int Field_varstring::key_cmp(const byte *a,const byte *b) +{ + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a + + HA_KEY_BLOB_LENGTH, + uint2korr(a), + (const uchar*) b + + HA_KEY_BLOB_LENGTH, + uint2korr(b), + 0); +} + + void Field_varstring::sort_string(char *to,uint length) { - uint tot_length=uint2korr(ptr); + uint tot_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); tot_length= my_strnxfrm(field_charset, (uchar*) to, length, - (uchar*) ptr+HA_KEY_BLOB_LENGTH, + (uchar*) ptr + length_bytes, tot_length); if (tot_length < length) field_charset->cset->fill(field_charset, to+tot_length,length-tot_length, @@ -4634,71 +4871,164 @@ void Field_varstring::sort_string(char *to,uint length) } +enum ha_base_keytype Field_varstring::key_type() const +{ + enum ha_base_keytype res; + + if (binary()) + res= length_bytes == 1 ? HA_KEYTYPE_VARBINARY1 : HA_KEYTYPE_VARBINARY2; + else + res= length_bytes == 1 ? HA_KEYTYPE_VARTEXT1 : HA_KEYTYPE_VARTEXT2; + return res; +} + + void Field_varstring::sql_type(String &res) const { + THD *thd= table->in_use; CHARSET_INFO *cs=res.charset(); - ulong length= cs->cset->snprintf(cs,(char*) res.ptr(), - res.alloced_length(),"varchar(%u)", - field_length / charset()->mbmaxlen); + ulong length; + + length= cs->cset->snprintf(cs,(char*) res.ptr(), + res.alloced_length(), "%s(%d)", + (has_charset() ? "varchar" : "varbinary"), + (int) field_length / charset()->mbmaxlen); res.length(length); + if ((thd->variables.sql_mode & (MODE_MYSQL323 | MODE_MYSQL40)) && + has_charset() && (charset()->state & MY_CS_BINSORT)) + res.append(" binary"); } + +/* + Functions to create a packed row. 
+ Here the number of length bytes are depending on the given max_length +*/ + char *Field_varstring::pack(char *to, const char *from, uint max_length) { - uint length=uint2korr(from); + uint length= length_bytes == 1 ? (uint) (uchar) *from : uint2korr(from); + set_if_smaller(max_length, field_length); if (length > max_length) length=max_length; *to++= (char) (length & 255); if (max_length > 255) *to++= (char) (length >> 8); if (length) - memcpy(to, from+HA_KEY_BLOB_LENGTH, length); + memcpy(to, from+length_bytes, length); return to+length; } -char *Field_varstring::pack_key(char *to, const char *from, uint max_length) +char *Field_varstring::pack_key(char *to, const char *key, uint max_length) { - uint length=uint2korr(from); - uint char_length= (field_charset->mbmaxlen > 1) ? - max_length/field_charset->mbmaxlen : max_length; - from+=HA_KEY_BLOB_LENGTH; + uint length= length_bytes == 1 ? (uint) (uchar) *key : uint2korr(key); + uint char_length= ((field_charset->mbmaxlen > 1) ? + max_length/field_charset->mbmaxlen : max_length); + key+= length_bytes; if (length > char_length) - char_length= my_charpos(field_charset, from, from+length, char_length); - set_if_smaller(length, char_length); + { + char_length= my_charpos(field_charset, key, key+length, char_length); + set_if_smaller(length, char_length); + } *to++= (char) (length & 255); if (max_length > 255) *to++= (char) (length >> 8); if (length) - memcpy(to, from, length); + memcpy(to, key, length); return to+length; } +/* + Unpack a key into a record buffer. + + SYNOPSIS + unpack_key() + to Pointer into the record buffer. + key Pointer to the packed key. + max_length Key length limit from key description. + + DESCRIPTION + A VARCHAR key has a maximum size of 64K-1. + In its packed form, the length field is one or two bytes long, + depending on 'max_length'. 
+ + RETURN + Pointer to end of 'key' (To the next key part if multi-segment key) +*/ + +const char *Field_varstring::unpack_key(char *to, const char *key, + uint max_length) +{ + /* get length of the blob key */ + uint32 length= *((uchar*) key++); + if (max_length > 255) + length+= (*((uchar*) key++)) << 8; + + /* put the length into the record buffer */ + if (length_bytes == 1) + *ptr= (uchar) length; + else + int2store(ptr, length); + memcpy(ptr + length_bytes, key, length); + return key + length; +} + +/* + Create a packed key that will be used for storage in the index tree + + SYNOPSIS + pack_key_from_key_image() + to Store packed key segment here + from Key segment (as given to index_read()) + max_length Max length of key + + RETURN + end of key storage +*/ + +char *Field_varstring::pack_key_from_key_image(char *to, const char *from, + uint max_length) +{ + /* Key length is always stored as 2 bytes */ + uint length= uint2korr(from); + if (length > max_length) + length= max_length; + *to++= (char) (length & 255); + if (max_length > 255) + *to++= (char) (length >> 8); + if (length) + memcpy(to, from+HA_KEY_BLOB_LENGTH, length); + return to+length; +} + + +/* + unpack field packed with Field_varstring::pack() +*/ + const char *Field_varstring::unpack(char *to, const char *from) { uint length; - if (field_length > 255) - { + if (length_bytes == 1) length= (uint) (uchar) (*to= *from++); - to[1]=0; - } else { - length=uint2korr(from); - to[0] = *from++; - to[1] = *from++; + length= uint2korr(from); + to[0]= *from++; + to[1]= *from++; } if (length) - memcpy(to+HA_KEY_BLOB_LENGTH, from, length); + memcpy(to+ length_bytes, from, length); return from+length; } -int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length) +int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length, + my_bool insert_or_update) { - uint a_length; - uint b_length; + uint a_length, b_length; if (key_length > 255) { a_length=uint2korr(a); a+= 2; @@ -4709,63 +5039,138 @@ int Field_varstring::pack_cmp(const char *a, const char *b, uint key_length) a_length= (uint) (uchar) *a++; b_length= (uint) (uchar) *b++; } - return my_strnncoll(field_charset, - (const uchar*) a, a_length, - (const uchar*) b, b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length, + insert_or_update); } -int Field_varstring::pack_cmp(const char *b, uint key_length) + +int Field_varstring::pack_cmp(const char *b, uint key_length, + my_bool insert_or_update) { - char *a= ptr+HA_KEY_BLOB_LENGTH; - uint a_length= uint2korr(ptr); + char *a= ptr+ length_bytes; + uint a_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); uint b_length; + uint char_length= ((field_charset->mbmaxlen > 1) ? 
+ key_length / field_charset->mbmaxlen : key_length); + if (key_length > 255) { - b_length=uint2korr(b); b+= 2; + b_length=uint2korr(b); b+= HA_KEY_BLOB_LENGTH; } else - { b_length= (uint) (uchar) *b++; + + if (a_length > char_length) + { + char_length= my_charpos(field_charset, a, a+a_length, char_length); + set_if_smaller(a_length, char_length); } - return my_strnncoll(field_charset, - (const uchar*) a, a_length, - (const uchar*) b, b_length); + + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, + a_length, + (const uchar*) b, b_length, + insert_or_update); } + uint Field_varstring::packed_col_length(const char *data_ptr, uint length) { if (length > 255) - return uint2korr(data_ptr)+HA_KEY_BLOB_LENGTH; - else - return (uint) ((uchar) *data_ptr)+1; + return uint2korr(data_ptr)+2; + return (uint) ((uchar) *data_ptr)+1; } + uint Field_varstring::max_packed_col_length(uint max_length) { return (max_length > 255 ? 2 : 1)+max_length; } -void Field_varstring::get_key_image(char *buff, uint length, CHARSET_INFO *cs, - imagetype type) + +void Field_varstring::get_key_image(char *buff, uint length, imagetype type) { - uint f_length=uint2korr(ptr); - if (f_length > length) - f_length= length; - int2store(buff,length); - memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+HA_KEY_BLOB_LENGTH, length); -#ifdef HAVE_purify + uint f_length= length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr); + uint char_length= length / field_charset->mbmaxlen; + char_length= my_charpos(field_charset, ptr, ptr + length_bytes, + char_length); + set_if_smaller(f_length, char_length); + /* Key is always stored with 2 bytes */ + int2store(buff,f_length); + memcpy(buff+HA_KEY_BLOB_LENGTH, ptr+length_bytes, f_length); if (f_length < length) + { + /* + Must clear this as we do a memcmp in opt_range.cc to detect + identical keys + */ bzero(buff+HA_KEY_BLOB_LENGTH+f_length, (length-f_length)); -#endif + } } -void Field_varstring::set_key_image(char *buff,uint length, CHARSET_INFO *cs) + +void Field_varstring::set_key_image(char *buff,uint length) +{ + length= uint2korr(buff); // Real length is here + (void) Field_varstring::store(buff+HA_KEY_BLOB_LENGTH, length, + field_charset); +} + + +int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr, + uint32 max_length) +{ + char *a,*b; + uint diff; + uint32 a_length,b_length; + + if (length_bytes == 1) + { + a_length= (uint) (uchar) *a_ptr; + b_length= (uint) (uchar) *b_ptr; + } + else + { + a_length= uint2korr(a_ptr); + b_length= uint2korr(b_ptr); + } + set_if_smaller(a_length, max_length); + set_if_smaller(b_length, max_length); + if (a_length != b_length) + return 1; + return memcmp(a_ptr+length_bytes, b_ptr+length_bytes, a_length); +} + + +Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table) { - length=uint2korr(buff); // Real length is here - (void) Field_varstring::store(buff+HA_KEY_BLOB_LENGTH, length, cs); + Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table); + if (res) + res->length_bytes= length_bytes; + return res; } +Field *Field_varstring::new_key_field(MEM_ROOT *root, + struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field_varstring *res; + if ((res= (Field_varstring*) Field::new_key_field(root, + new_table, + new_ptr, + new_null_ptr, + new_null_bit))) + { + /* Keys length prefixes are always packed with 2 bytes */ + res->length_bytes= 2; + } + return res; +} + /**************************************************************************** ** blob type 
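The Field_varstring hunks above all follow one pattern: the old hard-coded 2-byte header is replaced by the new length_bytes member, so every in-record read now branches between a 1-byte and a 2-byte length prefix, while key buffers keep the fixed HA_KEY_BLOB_LENGTH (2 byte) prefix. The stand-alone sketch below is not part of the patch; it only illustrates that record layout. prefix_store() and prefix_read() are invented names for this illustration, and the byte order simply mirrors what int2store()/uint2korr() do in the code above.

#include <string.h>

/*
  Illustration of the post-patch VARCHAR record layout:
  [1 or 2 byte length prefix, low byte first][data bytes]
  The helpers are hypothetical; the real code uses int2store()/uint2korr().
*/
static void prefix_store(unsigned char *rec, unsigned length_bytes,
                         const char *data, unsigned len)
{
  if (length_bytes == 1)
    rec[0]= (unsigned char) len;              /* like "*ptr= (uchar) copy_length"   */
  else
  {
    rec[0]= (unsigned char) (len & 0xff);     /* like "int2store(ptr, copy_length)" */
    rec[1]= (unsigned char) (len >> 8);
  }
  memcpy(rec + length_bytes, data, len);      /* like "memcpy(ptr + length_bytes, ...)" */
}

static unsigned prefix_read(const unsigned char *rec, unsigned length_bytes)
{
  /* same branch as "length_bytes == 1 ? (uint) (uchar) *ptr : uint2korr(ptr)" */
  return length_bytes == 1 ? (unsigned) rec[0]
                           : (unsigned) (rec[0] | (rec[1] << 8));
}

int main()
{
  unsigned char rec[2 + 300];
  prefix_store(rec, 2, "abc", 3);             /* a long VARCHAR uses a 2-byte prefix */
  return prefix_read(rec, 2) == 3 ? 0 : 1;
}

Key images are the exception: as the comments in the patch note, a VARCHAR key segment is always packed with a 2-byte prefix regardless of length_bytes, which is why key_cmp() and pack_key_from_key_image() above unconditionally use uint2korr() on the key side.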
@@ -5003,10 +5408,10 @@ String *Field_blob::val_str(String *val_buffer __attribute__((unused)), int Field_blob::cmp(const char *a,uint32 a_length, const char *b, uint32 b_length) { - return field_charset->coll->strnncoll(field_charset, - (const uchar*)a, a_length, - (const uchar*)b, b_length, - 0); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*)a, a_length, + (const uchar*)b, b_length, + 0); } @@ -5020,18 +5425,6 @@ int Field_blob::cmp(const char *a_ptr, const char *b_ptr) } -int Field_blob::cmp_offset(uint row_offset) -{ - return Field_blob::cmp(ptr,ptr+row_offset); -} - - -int Field_blob::cmp_binary_offset(uint row_offset) -{ - return cmp_binary(ptr, ptr+row_offset); -} - - int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr, uint32 max_length) { @@ -5053,8 +5446,7 @@ int Field_blob::cmp_binary(const char *a_ptr, const char *b_ptr, /* The following is used only when comparing a key */ -void Field_blob::get_key_image(char *buff,uint length, - CHARSET_INFO *cs, imagetype type) +void Field_blob::get_key_image(char *buff, uint length, imagetype type) { uint32 blob_length= get_length(ptr); char *blob; @@ -5089,8 +5481,9 @@ void Field_blob::get_key_image(char *buff,uint length, #endif /*HAVE_SPATIAL*/ get_ptr(&blob); - uint char_length= length / cs->mbmaxlen; - char_length= my_charpos(cs, blob, blob + blob_length, char_length); + uint char_length= length / field_charset->mbmaxlen; + char_length= my_charpos(field_charset, blob, blob + blob_length, + char_length); set_if_smaller(blob_length, char_length); if ((uint32) length > blob_length) @@ -5106,10 +5499,11 @@ void Field_blob::get_key_image(char *buff,uint length, memcpy(buff+HA_KEY_BLOB_LENGTH, blob, length); } -void Field_blob::set_key_image(char *buff,uint length, CHARSET_INFO *cs) + +void Field_blob::set_key_image(char *buff,uint length) { length= uint2korr(buff); - (void) Field_blob::store(buff+HA_KEY_BLOB_LENGTH, length, cs); + (void) Field_blob::store(buff+HA_KEY_BLOB_LENGTH, length, field_charset); } @@ -5122,7 +5516,7 @@ int Field_blob::key_cmp(const byte *key_ptr, uint max_key_length) uint char_length= max_key_length / cs->mbmaxlen; char_length= my_charpos(cs, blob1, blob1+blob_length, char_length); set_if_smaller(blob_length, char_length); - return Field_blob::cmp(blob1,min(blob_length, max_key_length), + return Field_blob::cmp(blob1, blob_length, (char*) key_ptr+HA_KEY_BLOB_LENGTH, uint2korr(key_ptr)); } @@ -5214,10 +5608,10 @@ const char *Field_blob::unpack(char *to, const char *from) /* Keys for blobs are like keys on varchars */ -int Field_blob::pack_cmp(const char *a, const char *b, uint key_length) +int Field_blob::pack_cmp(const char *a, const char *b, uint key_length, + my_bool insert_or_update) { - uint a_length; - uint b_length; + uint a_length, b_length; if (key_length > 255) { a_length=uint2korr(a); a+=2; @@ -5228,13 +5622,15 @@ int Field_blob::pack_cmp(const char *a, const char *b, uint key_length) a_length= (uint) (uchar) *a++; b_length= (uint) (uchar) *b++; } - return my_strnncoll(field_charset, - (const uchar*) a, a_length, - (const uchar*) b, b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length, + insert_or_update); } -int Field_blob::pack_cmp(const char *b, uint key_length) +int Field_blob::pack_cmp(const char *b, uint key_length, + my_bool insert_or_update) { char *a; memcpy_fixed(&a,ptr+packlength,sizeof(char*)); @@ -5248,12 +5644,11 @@ int Field_blob::pack_cmp(const char *b, uint key_length) 
b_length=uint2korr(b); b+=2; } else - { b_length= (uint) (uchar) *b++; - } - return my_strnncoll(field_charset, - (const uchar*) a, a_length, - (const uchar*) b, b_length); + return field_charset->coll->strnncollsp(field_charset, + (const uchar*) a, a_length, + (const uchar*) b, b_length, + insert_or_update); } /* Create a packed key that will be used for storage from a MySQL row */ @@ -5263,8 +5658,8 @@ char *Field_blob::pack_key(char *to, const char *from, uint max_length) char *save=ptr; ptr=(char*) from; uint32 length=get_length(); // Length of from string - uint char_length= (field_charset->mbmaxlen > 1) ? - max_length/field_charset->mbmaxlen : max_length; + uint char_length= ((field_charset->mbmaxlen > 1) ? + max_length/field_charset->mbmaxlen : max_length); if (length) get_ptr((char**) &from); if (length > char_length) @@ -5322,6 +5717,7 @@ const char *Field_blob::unpack_key(char *to, const char *from, uint max_length) return from + length; } + /* Create a packed key that will be used for storage from a MySQL key */ char *Field_blob::pack_key_from_key_image(char *to, const char *from, @@ -5338,14 +5734,15 @@ char *Field_blob::pack_key_from_key_image(char *to, const char *from, return to+length; } + uint Field_blob::packed_col_length(const char *data_ptr, uint length) { if (length > 255) return uint2korr(data_ptr)+2; - else - return (uint) ((uchar) *data_ptr)+1; + return (uint) ((uchar) *data_ptr)+1; } + uint Field_blob::max_packed_col_length(uint max_length) { return (max_length > 255 ? 2 : 1)+max_length; @@ -5354,8 +5751,7 @@ uint Field_blob::max_packed_col_length(uint max_length) #ifdef HAVE_SPATIAL -void Field_geom::get_key_image(char *buff, uint length, CHARSET_INFO *cs, - imagetype type) +void Field_geom::get_key_image(char *buff, uint length, imagetype type) { char *blob; const char *dummy; @@ -5384,11 +5780,6 @@ void Field_geom::get_key_image(char *buff, uint length, CHARSET_INFO *cs, } -void Field_geom::set_key_image(char *buff, uint length, CHARSET_INFO *cs) -{ - Field_blob::set_key_image(buff, length, cs); -} - void Field_geom::sql_type(String &res) const { CHARSET_INFO *cs= &my_charset_latin1; @@ -5838,8 +6229,266 @@ bool Field_num::eq_def(Field *field) } +/* + Bit field. + + We store the first 0 - 6 uneven bits among the null bits + at the start of the record. The rest bytes are stored in + the record itself. + + For example: + + CREATE TABLE t1 (a int, b bit(17), c bit(21) not null, d bit(8)); + We would store data as follows in the record: + + Byte Bit + 1 7 - reserve for delete + 6 - null bit for 'a' + 5 - null bit for 'b' + 4 - first (high) bit of 'b' + 3 - first (high) bit of 'c' + 2 - second bit of 'c' + 1 - third bit of 'c' + 0 - forth bit of 'c' + 2 7 - firth bit of 'c' + 6 - null bit for 'd' + 3 - 6 four bytes for 'a' + 7 - 8 two bytes for 'b' + 9 - 10 two bytes for 'c' + 11 one byte for 'd' +*/ + +Field_bit::Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg) + : Field(ptr_arg, len_arg >> 3, null_ptr_arg, null_bit_arg, + unireg_check_arg, field_name_arg, table_arg), + bit_ptr(bit_ptr_arg), bit_ofs(bit_ofs_arg), bit_len(len_arg & 7) +{ + /* + Ensure that Field::eq() can distinguish between two different bit fields. 
+ (two bit fields that are not null, may have same ptr and null_ptr) + */ + if (!null_ptr_arg) + null_bit= bit_ofs_arg; +} + + +Field *Field_bit::new_key_field(MEM_ROOT *root, + struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit) +{ + Field_bit *res; + if ((res= (Field_bit*) Field::new_key_field(root, new_table, + new_ptr, new_null_ptr, + new_null_bit))) + { + /* Move bits normally stored in null_pointer to new_ptr */ + res->bit_ptr= (uchar*) new_ptr; + res->bit_ofs= 0; + if (bit_len) + res->ptr++; // Store rest of data here + } + return res; +} + + +void Field_bit::make_field(Send_field *field) +{ + /* table_cache_key is not set for temp tables */ + field->db_name= (orig_table->table_cache_key ? orig_table->table_cache_key : + ""); + field->org_table_name= orig_table->real_name; + field->table_name= orig_table->table_name; + field->col_name= field->org_col_name= field_name; + field->charsetnr= charset()->number; + field->length= field_length; + field->type= type(); + field->flags= table->maybe_null ? (flags & ~NOT_NULL_FLAG) : flags; + field->decimals= 0; +} + + +int Field_bit::store(const char *from, uint length, CHARSET_INFO *cs) +{ + int delta; + + for (; !*from && length; from++, length--); // skip left 0's + delta= field_length - length; + + if (delta < -1 || + (delta == -1 && (uchar) *from > ((1 << bit_len) - 1)) || + (!bit_len && delta < 0)) + { + set_rec_bits(0xff, bit_ptr, bit_ofs, bit_len); + memset(ptr, 0xff, field_length); + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_OUT_OF_RANGE, 1); + return 1; + } + /* delta is >= -1 here */ + if (delta > 0) + { + if (bit_len) + clr_rec_bits(bit_ptr, bit_ofs, bit_len); + bzero(ptr, delta); + memcpy(ptr + delta, from, length); + } + else if (delta == 0) + { + if (bit_len) + clr_rec_bits(bit_ptr, bit_ofs, bit_len); + memcpy(ptr, from, length); + } + else + { + if (bit_len) + { + set_rec_bits((uchar) *from, bit_ptr, bit_ofs, bit_len); + from++; + } + memcpy(ptr, from, field_length); + } + return 0; +} + + +int Field_bit::store(double nr) +{ + return (Field_bit::store((longlong) nr)); +} + + +int Field_bit::store(longlong nr) +{ + char buf[8]; + + mi_int8store(buf, nr); + return store(buf, 8, NULL); +} + + +double Field_bit::val_real(void) +{ + return (double) Field_bit::val_int(); +} + + +longlong Field_bit::val_int(void) +{ + ulonglong bits= 0; + if (bit_len) + bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + bits<<= (field_length * 8); + + switch (field_length) { + case 0: return bits; + case 1: return bits | (ulonglong) (uchar) ptr[0]; + case 2: return bits | mi_uint2korr(ptr); + case 3: return bits | mi_uint3korr(ptr); + case 4: return bits | mi_uint4korr(ptr); + case 5: return bits | mi_uint5korr(ptr); + case 6: return bits | mi_uint6korr(ptr); + case 7: return bits | mi_uint7korr(ptr); + default: return mi_uint8korr(ptr + field_length - sizeof(longlong)); + } +} + + +String *Field_bit::val_str(String *val_buffer, + String *val_ptr __attribute__((unused))) +{ + uint length= min(pack_length(), sizeof(longlong)); + ulonglong bits= val_int(); + + val_buffer->alloc(length); + memcpy_fixed((char*) val_buffer->ptr(), (char*) &bits, length); + val_buffer->length(length); + val_buffer->set_charset(&my_charset_bin); + return val_buffer; +} + + +int Field_bit::key_cmp(const byte *str, uint length) +{ + if (bit_len) + { + int flag; + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + if ((flag= (int) (bits - *(uchar*) str))) + return flag; + str++; + length--; + } + return bcmp(ptr, str, length); +} 
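The Field_bit constructor above splits the declared width into whole bytes kept in the record (len_arg >> 3, passed to Field as the field length) and the 'uneven' high bits kept next to the null bits (len_arg & 7, kept as bit_len). The following stand-alone sketch is illustrative only and uses a made-up helper name; the real code manipulates the high bits with get_rec_bits()/set_rec_bits().

#include <stdio.h>

/*
  Illustration only: how the patch splits a BIT(n) column.
  bit_layout() is a hypothetical helper, not code from the tree.
*/
struct bit_layout_t
{
  unsigned byte_length;  /* whole bytes stored in the record (field_length) */
  unsigned bit_len;      /* 'uneven' high bits stored among the null bits   */
};

static bit_layout_t bit_layout(unsigned n)
{
  bit_layout_t layout;
  layout.byte_length= n >> 3;   /* constructor passes len_arg >> 3 to Field */
  layout.bit_len=     n & 7;    /* and keeps len_arg & 7 as bit_len         */
  return layout;
}

int main()
{
  /* BIT(17) from the layout comment above: two record bytes plus one
     high bit, matching key_length() == field_length + (bit_len > 0). */
  bit_layout_t b= bit_layout(17);
  printf("BIT(17): %u record byte(s) + %u high bit(s)\n",
         b.byte_length, b.bit_len);
  return 0;
}

The same split shows up in pack_length(), key_length() and pack() in the hunks that follow, which all add exactly one extra byte whenever bit_len is non-zero.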
+ + +int Field_bit::cmp_offset(uint row_offset) +{ + if (bit_len) + { + int flag; + uchar bits_a= get_rec_bits(bit_ptr, bit_ofs, bit_len); + uchar bits_b= get_rec_bits(bit_ptr + row_offset, bit_ofs, bit_len); + if ((flag= (int) (bits_a - bits_b))) + return flag; + } + return bcmp(ptr, ptr + row_offset, field_length); +} + + +void Field_bit::get_key_image(char *buff, uint length, imagetype type) +{ + if (bit_len) + { + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + *buff++= bits; + length--; + } + memcpy(buff, ptr, min(length, field_length)); +} + + +void Field_bit::sql_type(String &res) const +{ + CHARSET_INFO *cs= res.charset(); + ulong length= cs->cset->snprintf(cs, (char*) res.ptr(), res.alloced_length(), + "bit(%d)", + (int) field_length * 8 + bit_len); + res.length((uint) length); +} + + +char *Field_bit::pack(char *to, const char *from, uint max_length) +{ + uint length= min(field_length + (bit_len > 0), max_length); + if (bit_len) + { + uchar bits= get_rec_bits(bit_ptr, bit_ofs, bit_len); + *to++= bits; + length--; + } + memcpy(to, from, length); + return to + length; +} + + +const char *Field_bit::unpack(char *to, const char *from) +{ + if (bit_len) + { + set_rec_bits(*from, bit_ptr, bit_ofs, bit_len); + from++; + } + memcpy(to, from, field_length); + return from + field_length; +} + + /***************************************************************************** -** Handling of field and create_field + Handling of field and create_field *****************************************************************************/ void create_field::create_length_to_internal_length(void) @@ -5851,20 +6500,44 @@ void create_field::create_length_to_internal_length(void) case MYSQL_TYPE_BLOB: case MYSQL_TYPE_VAR_STRING: case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: length*= charset->mbmaxlen; - pack_length= calc_pack_length(sql_type == FIELD_TYPE_VAR_STRING ? - FIELD_TYPE_STRING : sql_type, length); + key_length= length; + pack_length= calc_pack_length(sql_type, length); break; case MYSQL_TYPE_ENUM: case MYSQL_TYPE_SET: + /* Pack_length already calculated in sql_parse.cc */ length*= charset->mbmaxlen; + key_length= pack_length; + break; + case MYSQL_TYPE_BIT: + pack_length= calc_pack_length(sql_type, length); + /* We need one extra byte to store the bits we save among the null bits */ + key_length= pack_length+ test(length & 7); break; default: - /* do nothing */ + key_length= pack_length= calc_pack_length(sql_type, length); break; } } + +enum_field_types get_blob_type_from_length(ulong length) +{ + enum_field_types type; + if (length < 256) + type= FIELD_TYPE_TINY_BLOB; + else if (length < 65536) + type= FIELD_TYPE_BLOB; + else if (length < 256L*256L*256L) + type= FIELD_TYPE_MEDIUM_BLOB; + else + type= FIELD_TYPE_LONG_BLOB; + return type; +} + + /* Make a field from the .frm file info */ @@ -5872,9 +6545,10 @@ void create_field::create_length_to_internal_length(void) uint32 calc_pack_length(enum_field_types type,uint32 length) { switch (type) { - case FIELD_TYPE_STRING: - case FIELD_TYPE_DECIMAL: return (length); - case FIELD_TYPE_VAR_STRING: return (length+HA_KEY_BLOB_LENGTH); + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case FIELD_TYPE_DECIMAL: return (length); + case MYSQL_TYPE_VARCHAR: return (length + (length < 256 ? 
1: 2)); case FIELD_TYPE_YEAR: case FIELD_TYPE_TINY : return 1; case FIELD_TYPE_SHORT : return 2; @@ -5896,6 +6570,7 @@ uint32 calc_pack_length(enum_field_types type,uint32 length) case FIELD_TYPE_GEOMETRY: return 4+portable_sizeof_char_ptr; case FIELD_TYPE_SET: case FIELD_TYPE_ENUM: abort(); return 0; // This shouldn't happen + case FIELD_TYPE_BIT: return length / 8; default: return 0; } return 0; // Keep compiler happy @@ -5926,11 +6601,30 @@ Field *make_field(char *ptr, uint32 field_length, const char *field_name, struct st_table *table) { + uchar *bit_ptr; + uchar bit_offset; + LINT_INIT(bit_ptr); + LINT_INIT(bit_offset); + if (field_type == FIELD_TYPE_BIT) + { + bit_ptr= null_pos; + bit_offset= null_bit; + if (f_maybe_null(pack_flag)) // if null field + { + bit_ptr+= (null_bit == 7); // shift bit_ptr and bit_offset + bit_offset= (bit_offset + 1) & 7; + } + } + if (!f_maybe_null(pack_flag)) { null_pos=0; null_bit=0; } + else + { + null_bit= ((uchar) 1) << null_bit; + } switch (field_type) { @@ -5947,12 +6641,18 @@ Field *make_field(char *ptr, uint32 field_length, { if (!f_is_packed(pack_flag)) { - if (field_type == FIELD_TYPE_STRING || + if (field_type == MYSQL_TYPE_STRING || field_type == FIELD_TYPE_DECIMAL || // 3.23 or 4.0 string - field_type == FIELD_TYPE_VAR_STRING) + field_type == MYSQL_TYPE_VAR_STRING) return new Field_string(ptr,field_length,null_pos,null_bit, unireg_check, field_name, table, field_charset); + if (field_type == MYSQL_TYPE_VARCHAR) + return new Field_varstring(ptr,field_length, + HA_VARCHAR_PACKLENGTH(field_length), + null_pos,null_bit, + unireg_check, field_name, table, + field_charset); return 0; // Error } @@ -6048,6 +6748,9 @@ Field *make_field(char *ptr, uint32 field_length, unireg_check, field_name, table, field_charset); case FIELD_TYPE_NULL: return new Field_null(ptr,field_length,unireg_check,field_name,table, field_charset); + case FIELD_TYPE_BIT: + return new Field_bit(ptr, field_length, null_pos, null_bit, bit_ptr, + bit_offset, unireg_check, field_name, table); default: // Impossible (Wrong version) break; } @@ -6065,44 +6768,53 @@ create_field::create_field(Field *old_field,Field *orig_field) flags= old_field->flags; unireg_check=old_field->unireg_check; pack_length=old_field->pack_length(); + key_length= old_field->key_length(); sql_type= old_field->real_type(); charset= old_field->charset(); // May be NULL ptr comment= old_field->comment; + decimals= old_field->decimals(); /* Fix if the original table had 4 byte pointer blobs */ if (flags & BLOB_FLAG) pack_length= (pack_length- old_field->table->blob_ptr_size + portable_sizeof_char_ptr); - switch (sql_type) - { - case FIELD_TYPE_BLOB: - switch (pack_length - portable_sizeof_char_ptr) - { - case 1: sql_type= FIELD_TYPE_TINY_BLOB; break; - case 2: sql_type= FIELD_TYPE_BLOB; break; - case 3: sql_type= FIELD_TYPE_MEDIUM_BLOB; break; - default: sql_type= FIELD_TYPE_LONG_BLOB; break; - } - length=(length+charset->mbmaxlen-1)/charset->mbmaxlen; // QQ: Probably not needed - break; - case MYSQL_TYPE_ENUM: - case MYSQL_TYPE_SET: - case FIELD_TYPE_STRING: - case FIELD_TYPE_VAR_STRING: - length=(length+charset->mbmaxlen-1)/charset->mbmaxlen; - break; - default: - break; - } - - decimals= old_field->decimals(); - if (sql_type == FIELD_TYPE_STRING) - { + switch (sql_type) { + case FIELD_TYPE_BLOB: + switch (pack_length - portable_sizeof_char_ptr) { + case 1: sql_type= FIELD_TYPE_TINY_BLOB; break; + case 2: sql_type= FIELD_TYPE_BLOB; break; + case 3: sql_type= FIELD_TYPE_MEDIUM_BLOB; break; + default: sql_type= 
FIELD_TYPE_LONG_BLOB; break; + } + length=(length+charset->mbmaxlen-1) / charset->mbmaxlen; + key_length/= charset->mbmaxlen; + break; + case MYSQL_TYPE_STRING: /* Change CHAR -> VARCHAR if dynamic record length */ - sql_type=old_field->type(); - decimals=0; + if (old_field->type() == MYSQL_TYPE_VAR_STRING) + sql_type= MYSQL_TYPE_VARCHAR; + /* fall through */ + + case MYSQL_TYPE_ENUM: + case MYSQL_TYPE_SET: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_VAR_STRING: + /* This is corrected in create_length_to_internal_length */ + length= (length+charset->mbmaxlen-1) / charset->mbmaxlen; + break; +#ifdef HAVE_SPATIAL + case FIELD_TYPE_GEOMETRY: + geom_type= ((Field_geom*)old_field)->geom_type; + break; +#endif + case FIELD_TYPE_BIT: + length= ((Field_bit *) old_field)->bit_len + length * 8; + break; + default: + break; } + if (flags & (ENUM_FLAG | SET_FLAG)) interval= ((Field_enum*) old_field)->typelib; else @@ -6127,12 +6839,6 @@ create_field::create_field(Field *old_field,Field *orig_field) def= new Item_string(pos, tmp.length(), charset); } } -#ifdef HAVE_SPATIAL - if (sql_type == FIELD_TYPE_GEOMETRY) - { - geom_type= ((Field_geom*)old_field)->geom_type; - } -#endif } @@ -6174,7 +6880,7 @@ Field::set_warning(const uint level, const uint code, int cuted_increment) Produce warning or note about datetime string data saved into field SYNOPSYS - set_warning() + set_datime_warning() level - level of message (Note/Warning/Error) code - error code of message to be produced str - string value which we tried to save @@ -6192,8 +6898,10 @@ Field::set_datetime_warning(const uint level, const uint code, const char *str, uint str_length, timestamp_type ts_type, int cuted_increment) { - if (set_warning(level, code, cuted_increment)) - make_truncated_value_warning(table->in_use, str, str_length, ts_type); + if (table->in_use->really_abort_on_warning() || + set_warning(level, code, cuted_increment)) + make_truncated_value_warning(table->in_use, str, str_length, ts_type, + field_name); } @@ -6218,12 +6926,13 @@ Field::set_datetime_warning(const uint level, const uint code, longlong nr, timestamp_type ts_type, int cuted_increment) { - if (set_warning(level, code, cuted_increment)) + if (table->in_use->really_abort_on_warning() || + set_warning(level, code, cuted_increment)) { char str_nr[22]; char *str_end= longlong10_to_str(nr, str_nr, -10); make_truncated_value_warning(table->in_use, str_nr, str_end - str_nr, - ts_type); + ts_type, field_name); } } @@ -6247,12 +6956,14 @@ void Field::set_datetime_warning(const uint level, const uint code, double nr, timestamp_type ts_type) { - if (set_warning(level, code, 1)) + if (table->in_use->really_abort_on_warning() || + set_warning(level, code, 1)) { /* DBL_DIG is enough to print '-[digits].E+###' */ char str_nr[DBL_DIG + 8]; uint str_len= my_sprintf(str_nr, (str_nr, "%g", nr)); - make_truncated_value_warning(table->in_use, str_nr, str_len, ts_type); + make_truncated_value_warning(table->in_use, str_nr, str_len, ts_type, + field_name); } } diff --git a/sql/field.h b/sql/field.h index bb999222381..9375fbc8d5a 100644 --- a/sql/field.h +++ b/sql/field.h @@ -37,11 +37,7 @@ class Field void operator=(Field &); public: static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } - static void operator delete(void *ptr_arg, size_t size) { -#ifdef SAFEMALLOC - bfill(ptr_arg, size, 0x8F); -#endif - } + static void operator delete(void *ptr_arg, size_t size) { TRASH(ptr_arg, size); } char *ptr; // Position to field in record uchar *null_ptr; // Byte where 
null_bit is @@ -84,7 +80,7 @@ public: FIELD_CAST_TIMESTAMP, FIELD_CAST_YEAR, FIELD_CAST_DATE, FIELD_CAST_NEWDATE, FIELD_CAST_TIME, FIELD_CAST_DATETIME, FIELD_CAST_STRING, FIELD_CAST_VARSTRING, FIELD_CAST_BLOB, - FIELD_CAST_GEOM, FIELD_CAST_ENUM, FIELD_CAST_SET + FIELD_CAST_GEOM, FIELD_CAST_ENUM, FIELD_CAST_SET, FIELD_CAST_BIT }; utype unireg_check; @@ -100,7 +96,7 @@ public: virtual int store(const char *to,uint length,CHARSET_INFO *cs)=0; virtual int store(double nr)=0; virtual int store(longlong nr)=0; - virtual void store_time(TIME *ltime,timestamp_type t_type); + virtual int store_time(TIME *ltime, timestamp_type t_type); virtual double val_real(void)=0; virtual longlong val_int(void)=0; inline String *val_str(String *str) { return val_str(str, str); } @@ -117,9 +113,14 @@ public: This trickery is used to decrease a number of malloc calls. */ virtual String *val_str(String*,String *)=0; + String *val_int_as_str(String *val_buffer, my_bool unsigned_flag); virtual Item_result result_type () const=0; virtual Item_result cmp_type () const { return result_type(); } - bool eq(Field *field) { return ptr == field->ptr && null_ptr == field->null_ptr; } + bool eq(Field *field) + { + return (ptr == field->ptr && null_ptr == field->null_ptr && + null_bit == field->null_bit); + } virtual bool eq_def(Field *field); virtual uint32 pack_length() const { return (uint32) field_length; } virtual void reset(void) { bzero(ptr,pack_length()); } @@ -143,10 +144,9 @@ public: virtual int cmp(const char *,const char *)=0; virtual int cmp_binary(const char *a,const char *b, uint32 max_length=~0L) { return memcmp(a,b,pack_length()); } - virtual int cmp_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } - virtual int cmp_binary_offset(uint row_offset) - { return memcmp(ptr,ptr+row_offset,pack_length()); } + int cmp_offset(uint row_offset) { return cmp(ptr,ptr+row_offset); } + int cmp_binary_offset(uint row_offset) + { return cmp_binary(ptr, ptr+row_offset); }; virtual int key_cmp(const byte *a,const byte *b) { return cmp((char*) a,(char*) b); } virtual int key_cmp(const byte *str, uint length) @@ -188,28 +188,11 @@ public: */ virtual bool can_be_compared_as_longlong() const { return FALSE; } virtual void free() {} - Field *new_field(MEM_ROOT *root, struct st_table *new_table) - { - Field *tmp= (Field*) memdup_root(root,(char*) this,size_of()); - if (tmp) - { - if (tmp->table->maybe_null) - tmp->flags&= ~NOT_NULL_FLAG; - tmp->table= new_table; - tmp->key_start.init(0); - tmp->part_of_key.init(0); - tmp->part_of_sortkey.init(0); - tmp->unireg_check=Field::NONE; - tmp->flags&= (NOT_NULL_FLAG | BLOB_FLAG | UNSIGNED_FLAG | - ZEROFILL_FLAG | BINARY_FLAG | ENUM_FLAG | SET_FLAG); -#ifdef PROBABLY_WRONG - tmp->table_name= new_table->table_name; -#endif - tmp->reset_fields(); - } - return tmp; - } - inline void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) + virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table); + virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); + virtual void move_field(char *ptr_arg,uchar *null_ptr_arg,uchar null_bit_arg) { ptr=ptr_arg; null_ptr=null_ptr_arg; null_bit=null_bit_arg; } @@ -224,11 +207,10 @@ public: { memcpy(buff,ptr,length); } inline void set_image(char *buff,uint length, CHARSET_INFO *cs) { memcpy(ptr,buff,length); } - virtual void get_key_image(char *buff,uint length, CHARSET_INFO *cs, - imagetype type) - { get_image(buff,length,cs); } - virtual 
void set_key_image(char *buff,uint length, CHARSET_INFO *cs) - { set_image(buff,length,cs); } + virtual void get_key_image(char *buff, uint length, imagetype type) + { get_image(buff,length, &my_charset_bin); } + virtual void set_key_image(char *buff,uint length) + { set_image(buff,length, &my_charset_bin); } inline longlong val_int_offset(uint row_offset) { ptr+=row_offset; @@ -236,6 +218,15 @@ public: ptr-=row_offset; return tmp; } + + inline String *val_str(String *str, char *new_ptr) + { + char *old_ptr= ptr; + ptr= new_ptr; + val_str(str); + ptr= old_ptr; + return str; + } virtual bool send_binary(Protocol *protocol); virtual char *pack(char* to, const char *from, uint max_length=~(uint) 0) { @@ -267,9 +258,11 @@ public: virtual uint max_packed_col_length(uint max_length) { return max_length;} - virtual int pack_cmp(const char *a,const char *b, uint key_length_arg) + virtual int pack_cmp(const char *a,const char *b, uint key_length_arg, + my_bool insert_or_update) { return cmp(a,b); } - virtual int pack_cmp(const char *b, uint key_length_arg) + virtual int pack_cmp(const char *b, uint key_length_arg, + my_bool insert_or_update) { return cmp(ptr,b); } uint offset(); // Should be inline ... void copy_from_tmp(int offset); @@ -281,6 +274,8 @@ public: virtual void set_charset(CHARSET_INFO *charset) { } bool set_warning(const unsigned int level, const unsigned int code, int cuted_increment); + bool check_int(const char *str, int length, const char *int_end, + CHARSET_INFO *cs); void set_datetime_warning(const uint level, const uint code, const char *str, uint str_len, timestamp_type ts_type, int cuted_increment); @@ -803,7 +798,7 @@ public: int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); - void store_time(TIME *ltime,timestamp_type type); + int store_time(TIME *ltime, timestamp_type type); void reset(void) { ptr[0]=ptr[1]=ptr[2]=0; } double val_real(void); longlong val_int(void); @@ -836,6 +831,7 @@ public: enum_field_types type() const { return FIELD_TYPE_TIME;} enum ha_base_keytype key_type() const { return HA_KEYTYPE_INT24; } enum Item_result cmp_type () const { return INT_RESULT; } + int store_time(TIME *ltime, timestamp_type type); int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); @@ -876,7 +872,7 @@ public: int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); - void store_time(TIME *ltime,timestamp_type type); + int store_time(TIME *ltime, timestamp_type type); void reset(void) { ptr[0]=ptr[1]=ptr[2]=ptr[3]=ptr[4]=ptr[5]=ptr[6]=ptr[7]=0; } double val_real(void); longlong val_int(void); @@ -909,9 +905,11 @@ public: enum_field_types type() const { - return ((table && table->db_create_options & HA_OPTION_PACK_RECORD && - field_length >= 4) ? - FIELD_TYPE_VAR_STRING : FIELD_TYPE_STRING); + return ((orig_table && + orig_table->db_create_options & HA_OPTION_PACK_RECORD && + field_length >= 4) && + orig_table->frm_version < FRM_VER_TRUE_VARCHAR ? + MYSQL_TYPE_VAR_STRING : MYSQL_TYPE_STRING); } enum ha_base_keytype key_type() const { return binary() ? 
HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT; } @@ -928,8 +926,9 @@ public: void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); const char *unpack(char* to, const char *from); - int pack_cmp(const char *a,const char *b,uint key_length); - int pack_cmp(const char *b,uint key_length); + int pack_cmp(const char *a,const char *b,uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b,uint key_length,my_bool insert_or_update); uint packed_col_length(const char *to, uint length); uint max_packed_col_length(uint max_length); uint size_of() const { return sizeof(*this); } @@ -937,31 +936,37 @@ public: bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } field_cast_enum field_cast_type() { return FIELD_CAST_STRING; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table); }; class Field_varstring :public Field_str { public: - Field_varstring(char *ptr_arg, uint32 len_arg,uchar *null_ptr_arg, + /* Store number of bytes used to store length (1 or 2) */ + uint32 length_bytes; + Field_varstring(char *ptr_arg, + uint32 len_arg, uint length_bytes_arg, + uchar *null_ptr_arg, uchar null_bit_arg, enum utype unireg_check_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) :Field_str(ptr_arg, len_arg, null_ptr_arg, null_bit_arg, - unireg_check_arg, field_name_arg, table_arg, cs) + unireg_check_arg, field_name_arg, table_arg, cs), + length_bytes(length_bytes_arg) {} Field_varstring(uint32 len_arg,bool maybe_null_arg, const char *field_name_arg, struct st_table *table_arg, CHARSET_INFO *cs) :Field_str((char*) 0,len_arg, maybe_null_arg ? (uchar*) "": 0,0, - NONE, field_name_arg, table_arg, cs) + NONE, field_name_arg, table_arg, cs), + length_bytes(len_arg < 256 ? 1 :2) {} - enum_field_types type() const { return FIELD_TYPE_VAR_STRING; } - enum ha_base_keytype key_type() const - { return binary() ? 
HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + enum_field_types type() const { return MYSQL_TYPE_VARCHAR; } + enum ha_base_keytype key_type() const; bool zero_pack() const { return 0; } - void reset(void) { bzero(ptr,field_length+2); } - uint32 pack_length() const { return (uint32) field_length+2; } + void reset(void) { bzero(ptr,field_length+length_bytes); } + uint32 pack_length() const { return (uint32) field_length+length_bytes; } uint32 key_length() const { return (uint32) field_length; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(longlong nr); @@ -971,21 +976,31 @@ public: String *val_str(String*,String *); int cmp(const char *,const char*); void sort_string(char *buff,uint length); - void get_key_image(char *buff,uint length, CHARSET_INFO *cs, imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); + void get_key_image(char *buff,uint length, imagetype type); + void set_key_image(char *buff,uint length); void sql_type(String &str) const; char *pack(char *to, const char *from, uint max_length=~(uint) 0); char *pack_key(char *to, const char *from, uint max_length); + char *pack_key_from_key_image(char* to, const char *from, uint max_length); const char *unpack(char* to, const char *from); - int pack_cmp(const char *a, const char *b, uint key_length); - int pack_cmp(const char *b, uint key_length); + const char *unpack_key(char* to, const char *from, uint max_length); + int pack_cmp(const char *a, const char *b, uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b, uint key_length,my_bool insert_or_update); + int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); + int key_cmp(const byte *,const byte*); + int key_cmp(const byte *str, uint length); uint packed_col_length(const char *to, uint length); uint max_packed_col_length(uint max_length); uint size_of() const { return sizeof(*this); } - enum_field_types real_type() const { return FIELD_TYPE_VAR_STRING; } + enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; } bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } field_cast_enum field_cast_type() { return FIELD_CAST_VARSTRING; } + Field *new_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); }; @@ -1008,7 +1023,7 @@ public: } enum_field_types type() const { return FIELD_TYPE_BLOB;} enum ha_base_keytype key_type() const - { return binary() ? HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT; } + { return binary() ? 
HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2; } int store(const char *to,uint length,CHARSET_INFO *charset); int store(double nr); int store(longlong nr); @@ -1017,9 +1032,7 @@ public: String *val_str(String*,String *); int cmp(const char *,const char*); int cmp(const char *a, uint32 a_length, const char *b, uint32 b_length); - int cmp_offset(uint offset); int cmp_binary(const char *a,const char *b, uint32 max_length=~0L); - int cmp_binary_offset(uint row_offset); int key_cmp(const byte *,const byte*); int key_cmp(const byte *str, uint length); uint32 key_length() const { return 0; } @@ -1051,8 +1064,8 @@ public: store_length(length); memcpy_fixed(ptr+packlength,&data,sizeof(char*)); } - void get_key_image(char *buff,uint length, CHARSET_INFO *cs, imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); + void get_key_image(char *buff,uint length, imagetype type); + void set_key_image(char *buff,uint length); void sql_type(String &str) const; inline bool copy() { char *tmp; @@ -1066,12 +1079,13 @@ public: return 0; } char *pack(char *to, const char *from, uint max_length= ~(uint) 0); - const char *unpack(char *to, const char *from); char *pack_key(char *to, const char *from, uint max_length); char *pack_key_from_key_image(char* to, const char *from, uint max_length); + const char *unpack(char *to, const char *from); const char *unpack_key(char* to, const char *from, uint max_length); - int pack_cmp(const char *a, const char *b, uint key_length); - int pack_cmp(const char *b, uint key_length); + int pack_cmp(const char *a, const char *b, uint key_length, + my_bool insert_or_update); + int pack_cmp(const char *b, uint key_length,my_bool insert_or_update); uint packed_col_length(const char *col_ptr, uint length); uint max_packed_col_length(uint max_length); void free() { value.free(); } @@ -1084,6 +1098,7 @@ public: uint32 max_length(); }; + #ifdef HAVE_SPATIAL class Field_geom :public Field_blob { public: @@ -1101,19 +1116,19 @@ public: :Field_blob(len_arg, maybe_null_arg, field_name_arg, table_arg, &my_charset_bin) { geom_type= geom_type_arg; } - enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_VARBINARY2; } enum_field_types type() const { return FIELD_TYPE_GEOMETRY; } void sql_type(String &str) const; int store(const char *to, uint length, CHARSET_INFO *charset); int store(double nr) { return 1; } int store(longlong nr) { return 1; } - void get_key_image(char *buff,uint length, CHARSET_INFO *cs,imagetype type); - void set_key_image(char *buff,uint length, CHARSET_INFO *cs); + void get_key_image(char *buff,uint length,imagetype type); field_cast_enum field_cast_type() { return FIELD_CAST_GEOM; } }; #endif /*HAVE_SPATIAL*/ + class Field_enum :public Field_str { protected: uint packlength; @@ -1182,6 +1197,52 @@ public: }; +class Field_bit :public Field { +public: + uchar *bit_ptr; // position in record where 'uneven' bits store + uchar bit_ofs; // offset to 'uneven' high bits + uint bit_len; // number of 'uneven' high bits + Field_bit(char *ptr_arg, uint32 len_arg, uchar *null_ptr_arg, + uchar null_bit_arg, uchar *bit_ptr_arg, uchar bit_ofs_arg, + enum utype unireg_check_arg, const char *field_name_arg, + struct st_table *table_arg); + enum_field_types type() const { return FIELD_TYPE_BIT; } + enum ha_base_keytype key_type() const { return HA_KEYTYPE_BIT; } + uint32 key_length() const { return (uint32) field_length + (bit_len > 0); } + uint32 max_length() { return (uint32) field_length + 
(bit_len > 0); } + uint size_of() const { return sizeof(*this); } + Item_result result_type () const { return INT_RESULT; } + void make_field(Send_field *); + void reset(void) { bzero(ptr, field_length); } + int store(const char *to, uint length, CHARSET_INFO *charset); + int store(double nr); + int store(longlong nr); + double val_real(void); + longlong val_int(void); + String *val_str(String*, String *); + int cmp(const char *a, const char *b) + { return cmp_binary(a, b); } + int key_cmp(const byte *a, const byte *b) + { return cmp_binary(a, b); } + int key_cmp(const byte *str, uint length); + int cmp_offset(uint row_offset); + void get_key_image(char *buff, uint length, imagetype type); + void set_key_image(char *buff, uint length) + { Field_bit::store(buff, length, &my_charset_bin); } + void sort_string(char *buff, uint length) + { get_key_image(buff, length, itRAW); } + uint32 pack_length() const + { return (uint32) field_length + (bit_len > 0); } + void sql_type(String &str) const; + field_cast_enum field_cast_type() { return FIELD_CAST_BIT; } + char *pack(char *to, const char *from, uint max_length=~(uint) 0); + const char *unpack(char* to, const char *from); + Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, + char *new_ptr, uchar *new_null_ptr, + uint new_null_bit); +}; + + /* Create field class for CREATE TABLE */ @@ -1194,8 +1255,8 @@ public: LEX_STRING comment; // Comment for field Item *def; // Default value enum enum_field_types sql_type; - uint32 length; - uint decimals,flags,pack_length; + ulong length; + uint decimals, flags, pack_length, key_length; Field::utype unireg_check; TYPELIB *interval; // Which interval to use List<String> interval_list; @@ -1260,11 +1321,10 @@ Field *make_field(char *ptr, uint32 field_length, TYPELIB *interval, const char *field_name, struct st_table *table); uint pack_length_to_packflag(uint type); +enum_field_types get_blob_type_from_length(ulong length); uint32 calc_pack_length(enum_field_types type,uint32 length); int set_field_to_null(Field *field); int set_field_to_null_with_conversions(Field *field, bool no_conversions); -bool test_if_int(const char *str, int length, const char *int_end, - CHARSET_INFO *cs); /* The following are for the interface with the .frm file @@ -1283,6 +1343,7 @@ bool test_if_int(const char *str, int length, const char *int_end, #define FIELDFLAG_LEFT_FULLSCREEN 8192 #define FIELDFLAG_RIGHT_FULLSCREEN 16384 #define FIELDFLAG_FORMAT_NUMBER 16384 // predit: ###,,## in output +#define FIELDFLAG_NO_DEFAULT 16384 /* sql */ #define FIELDFLAG_SUM ((uint) 32768)// predit: +#fieldflag #define FIELDFLAG_MAYBE_NULL ((uint) 32768)// sql #define FIELDFLAG_PACK_SHIFT 3 @@ -1291,8 +1352,6 @@ bool test_if_int(const char *str, int length, const char *int_end, #define FIELDFLAG_NUM_SCREEN_TYPE 0x7F01 #define FIELDFLAG_ALFA_SCREEN_TYPE 0x7800 -#define FIELD_SORT_REVERSE 16384 - #define MTYP_TYPENR(type) (type & 127) /* Remove bits from type */ #define f_is_dec(x) ((x) & FIELDFLAG_DECIMAL) @@ -1310,3 +1369,4 @@ bool test_if_int(const char *str, int length, const char *int_end, #define f_is_equ(x) ((x) & (1+2+FIELDFLAG_PACK+31*256)) #define f_settype(x) (((int) x) << FIELDFLAG_PACK_SHIFT) #define f_maybe_null(x) (x & FIELDFLAG_MAYBE_NULL) +#define f_no_default(x) (x & FIELDFLAG_NO_DEFAULT) diff --git a/sql/field_conv.cc b/sql/field_conv.cc index 61a5a28f47b..9fd4f0228b3 100644 --- a/sql/field_conv.cc +++ b/sql/field_conv.cc @@ -126,8 +126,7 @@ set_field_to_null(Field *field) return 0; } if (!current_thd->no_errors) - 
my_printf_error(ER_BAD_NULL_ERROR,ER(ER_BAD_NULL_ERROR),MYF(0), - field->field_name); + my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name); return -1; } @@ -185,8 +184,7 @@ set_field_to_null_with_conversions(Field *field, bool no_conversions) return 0; } if (!current_thd->no_errors) - my_printf_error(ER_BAD_NULL_ERROR,ER(ER_BAD_NULL_ERROR),MYF(0), - field->field_name); + my_error(ER_BAD_NULL_ERROR, MYF(0), field->field_name); return -1; } @@ -307,7 +305,8 @@ static void do_field_string(Copy_field *copy) char buff[MAX_FIELD_WIDTH]; copy->tmp.set_quick(buff,sizeof(buff),copy->tmp.charset()); copy->from_field->val_str(©->tmp); - copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(),copy->tmp.charset()); + copy->to_field->store(copy->tmp.c_ptr_quick(),copy->tmp.length(), + copy->tmp.charset()); } @@ -352,18 +351,35 @@ static void do_expand_string(Copy_field *copy) copy->to_length-copy->from_length, ' '); } -static void do_varstring(Copy_field *copy) + +static void do_varstring1(Copy_field *copy) +{ + uint length= (uint) *(uchar*) copy->from_ptr; + if (length > copy->to_length- 1) + { + length=copy->to_length - 1; + if (current_thd->count_cuted_fields) + copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARN_DATA_TRUNCATED, 1); + } + *(uchar*) copy->to_ptr= (uchar) length; + memcpy(copy->to_ptr+1, copy->from_ptr + 1, length); +} + + +static void do_varstring2(Copy_field *copy) { uint length=uint2korr(copy->from_ptr); - if (length > copy->to_length-2) + if (length > copy->to_length- HA_KEY_BLOB_LENGTH) { - length=copy->to_length-2; + length=copy->to_length-HA_KEY_BLOB_LENGTH; if (current_thd->count_cuted_fields) copy->to_field->set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); } int2store(copy->to_ptr,length); - memcpy(copy->to_ptr+2, copy->from_ptr,length); + memcpy(copy->to_ptr+HA_KEY_BLOB_LENGTH, copy->from_ptr + HA_KEY_BLOB_LENGTH, + length); } /*************************************************************************** @@ -486,6 +502,9 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else { + if (to->real_type() == FIELD_TYPE_BIT || + from->real_type() == FIELD_TYPE_BIT) + return do_field_int; // Check if identical fields if (from->result_type() == STRING_RESULT) { @@ -506,9 +525,15 @@ void (*Copy_field::get_copy_func(Field *to,Field *from))(Copy_field*) } else if (to->charset() != from->charset()) return do_field_string; - else if (to->real_type() == FIELD_TYPE_VAR_STRING && to_length != - from_length) - return do_varstring; + else if (to->real_type() == MYSQL_TYPE_VARCHAR) + { + if (((Field_varstring*) to)->length_bytes != + ((Field_varstring*) from)->length_bytes) + return do_field_string; + if (to_length != from_length) + return (((Field_varstring*) to)->length_bytes == 1 ? 
+ do_varstring1 : do_varstring2); + } else if (to_length < from_length) return do_cut_string; else if (to_length > from_length) @@ -573,7 +598,8 @@ void field_conv(Field *to,Field *from) Field_blob *blob=(Field_blob*) to; from->val_str(&blob->value); if (!blob->value.is_alloced() && - from->real_type() != FIELD_TYPE_STRING) + from->real_type() != MYSQL_TYPE_STRING && + from->real_type() != MYSQL_TYPE_VARCHAR) blob->value.copy(); blob->store(blob->value.ptr(),blob->value.length(),from->charset()); return; @@ -587,6 +613,12 @@ void field_conv(Field *to,Field *from) char buff[MAX_FIELD_WIDTH]; String result(buff,sizeof(buff),from->charset()); from->val_str(&result); + /* + We use c_ptr_quick() here to make it easier if to is a float/double + as the conversion routines will do a copy of the result doesn't + end with \0. Can be replaced with .ptr() when we have our own + string->double conversion. + */ to->store(result.c_ptr_quick(),result.length(),from->charset()); } else if (from->result_type() == REAL_RESULT) diff --git a/sql/filesort.cc b/sql/filesort.cc index bd0de022fd4..569ae3da357 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -48,7 +48,8 @@ static int merge_index(SORTPARAM *param,uchar *sort_buffer, BUFFPEK *buffpek, uint maxbuffer,IO_CACHE *tempfile, IO_CACHE *outfile); -static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count); +static bool save_index(SORTPARAM *param,uchar **sort_keys, uint count, + FILESORT_INFO *table_sort); static uint sortlength(SORT_FIELD *sortorder, uint s_length, bool *multi_byte_charset); static SORT_ADDON_FIELD *get_addon_fields(THD *thd, Field **ptabfield, @@ -106,8 +107,16 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, #ifdef SKIP_DBUG_IN_FILESORT DBUG_PUSH(""); /* No DBUG here */ #endif - - outfile= table->sort.io_cache; + FILESORT_INFO table_sort; + /* + Don't use table->sort in filesort as it is also used by + QUICK_INDEX_MERGE_SELECT. Work with a copy and put it back at the end + when index_merge select has finished with it. 
+ */ + memcpy(&table_sort, &table->sort, sizeof(FILESORT_INFO)); + table->sort.io_cache= NULL; + + outfile= table_sort.io_cache; my_b_clear(&tempfile); my_b_clear(&buffpek_pointers); buffpek=0; @@ -128,14 +137,15 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, param.sort_length, ¶m.addon_length); } - table->sort.addon_buf= 0; - table->sort.addon_length= param.addon_length; - table->sort.addon_field= param.addon_field; - table->sort.unpack= unpack_addon_fields; + + table_sort.addon_buf= 0; + table_sort.addon_length= param.addon_length; + table_sort.addon_field= param.addon_field; + table_sort.unpack= unpack_addon_fields; if (param.addon_field) { param.res_length= param.addon_length; - if (!(table->sort.addon_buf= (byte *) my_malloc(param.addon_length, + if (!(table_sort.addon_buf= (byte *) my_malloc(param.addon_length, MYF(MY_WME)))) goto err; } @@ -153,11 +163,11 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, if (select && select->quick) { - statistic_increment(filesort_range_count, &LOCK_status); + statistic_increment(thd->status_var.filesort_range_count, &LOCK_status); } else { - statistic_increment(filesort_scan_count, &LOCK_status); + statistic_increment(thd->status_var.filesort_scan_count, &LOCK_status); } #ifdef CAN_TRUST_RANGE if (select && select->quick && select->quick->records > 0L) @@ -218,7 +228,7 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, if (maxbuffer == 0) // The whole set is in memory { - if (save_index(¶m,sort_keys,(uint) records)) + if (save_index(¶m,sort_keys,(uint) records, &table_sort)) goto err; } else @@ -274,13 +284,16 @@ ha_rows filesort(THD *thd, TABLE *table, SORT_FIELD *sortorder, uint s_length, } } if (error) - my_error(ER_FILSORT_ABORT,MYF(ME_ERROR+ME_WAITTANG)); + my_message(ER_FILSORT_ABORT, ER(ER_FILSORT_ABORT), + MYF(ME_ERROR+ME_WAITTANG)); else - statistic_add(filesort_rows, (ulong) records, &LOCK_status); + statistic_add(thd->status_var.filesort_rows, + (ulong) records, &LOCK_status); *examined_rows= param.examined_rows; #ifdef SKIP_DBUG_IN_FILESORT DBUG_POP(); /* Ok to DBUG */ #endif + memcpy(&table->sort, &table_sort, sizeof(FILESORT_INFO)); DBUG_PRINT("exit",("records: %ld",records)); DBUG_RETURN(error ? 
HA_POS_ERROR : records); } /* filesort */ @@ -387,7 +400,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, byte *ref_pos,*next_pos,ref_buff[MAX_REFLENGTH]; my_off_t record; TABLE *sort_form; - volatile my_bool *killed= ¤t_thd->killed; + volatile THD::killed_state *killed= ¤t_thd->killed; handler *file; DBUG_ENTER("find_all_keys"); DBUG_PRINT("info",("using: %s",(select?select->quick?"ranges":"where":"every row"))); @@ -416,12 +429,24 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, current_thd->variables.read_buff_size); } + READ_RECORD read_record_info; + if (quick_select) + { + if (select->quick->reset()) + DBUG_RETURN(HA_POS_ERROR); + init_read_record(&read_record_info, current_thd, select->quick->head, + select, 1, 1); + } + for (;;) { if (quick_select) { - if ((error=select->quick->get_next())) - break; + if ((error= read_record_info.read_record(&read_record_info))) + { + error= HA_ERR_END_OF_FILE; + break; + } file->position(sort_form->record[0]); } else /* Not quick-select */ @@ -440,7 +465,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, error=file->rnd_next(sort_form->record[0]); if (!flag) { - ha_store_ptr(ref_pos,ref_length,record); // Position to row + my_store_ptr(ref_pos,ref_length,record); // Position to row record+=sort_form->db_record_offset; } else @@ -449,6 +474,7 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, if (error && error != HA_ERR_RECORD_DELETED) break; } + if (*killed) { DBUG_PRINT("info",("Sort killed by user")); @@ -472,9 +498,21 @@ static ha_rows find_all_keys(SORTPARAM *param, SQL_SELECT *select, else file->unlock_row(); } - (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */ - if (!next_pos) - file->ha_rnd_end(); + if (quick_select) + { + /* + index_merge quick select uses table->sort when retrieving rows, so free + resoures it has allocated. + */ + end_read_record(&read_record_info); + } + else + { + (void) file->extra(HA_EXTRA_NO_CACHE); /* End cacheing of records */ + if (!next_pos) + file->ha_rnd_end(); + } + DBUG_PRINT("test",("error: %d indexpos: %d",error,indexpos)); if (error != HA_ERR_END_OF_FILE) { @@ -670,7 +708,7 @@ static void make_sortkey(register SORTPARAM *param, } case REAL_RESULT: { - double value=item->val(); + double value= item->val_real(); if ((maybe_null=item->null_value)) { bzero((char*) to,sort_field->length+1); @@ -747,8 +785,8 @@ static void make_sortkey(register SORTPARAM *param, return; } - -static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count) +static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count, + FILESORT_INFO *table_sort) { uint offset,res_length; byte *to; @@ -759,7 +797,7 @@ static bool save_index(SORTPARAM *param, uchar **sort_keys, uint count) offset= param->rec_length-res_length; if ((ha_rows) count > param->max_rows) count=(uint) param->max_rows; - if (!(to= param->sort_form->sort.record_pointers= + if (!(to= table_sort->record_pointers= (byte*) my_malloc(res_length*count, MYF(MY_WME)))) DBUG_RETURN(1); /* purecov: inspected */ for (uchar **end= sort_keys+count ; sort_keys != end ; sort_keys++) @@ -839,6 +877,39 @@ uint read_to_buffer(IO_CACHE *fromfile, BUFFPEK *buffpek, } /* read_to_buffer */ +/* + Put all room used by freed buffer to use in adjacent buffer. Note, that + we can't simply distribute memory evenly between all buffers, because + new areas must not overlap with old ones. 
+ SYNOPSYS + reuse_freed_buff() + queue IN list of non-empty buffers, without freed buffer + reuse IN empty buffer + key_length IN key length +*/ + +void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length) +{ + uchar *reuse_end= reuse->base + reuse->max_keys * key_length; + for (uint i= 0; i < queue->elements; ++i) + { + BUFFPEK *bp= (BUFFPEK *) queue_element(queue, i); + if (bp->base + bp->max_keys * key_length == reuse->base) + { + bp->max_keys+= reuse->max_keys; + return; + } + else if (bp->base == reuse_end) + { + bp->base= reuse->base; + bp->max_keys+= reuse->max_keys; + return; + } + } + DBUG_ASSERT(0); +} + + /* Merge buffers to one buffer SYNOPSIS @@ -868,18 +939,19 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, ha_rows max_rows,org_max_rows; my_off_t to_start_filepos; uchar *strpos; - BUFFPEK *buffpek,**refpek; + BUFFPEK *buffpek; QUEUE queue; qsort2_cmp cmp; - volatile my_bool *killed= ¤t_thd->killed; - my_bool not_killable; + volatile THD::killed_state *killed= ¤t_thd->killed; + THD::killed_state not_killable; DBUG_ENTER("merge_buffers"); - statistic_increment(filesort_merge_passes, &LOCK_status); + statistic_increment(current_thd->status_var.filesort_merge_passes, + &LOCK_status); if (param->not_killable) { killed= ¬_killable; - not_killable= 0; + not_killable= THD::NOT_KILLED; } error=0; @@ -982,29 +1054,8 @@ int merge_buffers(SORTPARAM *param, IO_CACHE *from_file, if (!(error= (int) read_to_buffer(from_file,buffpek, rec_length))) { - uchar *base= buffpek->base; - ulong max_keys= buffpek->max_keys; - VOID(queue_remove(&queue,0)); - - /* Put room used by buffer to use in other buffer */ - for (refpek= (BUFFPEK**) &queue_top(&queue); - refpek <= (BUFFPEK**) &queue_end(&queue); - refpek++) - { - buffpek= *refpek; - if (buffpek->base+buffpek->max_keys*rec_length == base) - { - buffpek->max_keys+= max_keys; - break; - } - else if (base+max_keys*rec_length == buffpek->base) - { - buffpek->base= base; - buffpek->max_keys+= max_keys; - break; - } - } + reuse_freed_buff(&queue, buffpek, rec_length); break; /* One buffer have been removed */ } else if (error == -1) diff --git a/sql/gen_lex_hash.cc b/sql/gen_lex_hash.cc index 0bbdf84c8d6..57b5e006489 100644 --- a/sql/gen_lex_hash.cc +++ b/sql/gen_lex_hash.cc @@ -83,8 +83,17 @@ TODO: #include "mysql_version.h" #include "lex.h" +const char *default_dbug_option="d:t:o,/tmp/gen_lex_hash.trace"; + struct my_option my_long_options[] = { +#ifdef DBUG_OFF + {"debug", '#', "This is a non-debug version. Catch this and exit", + 0,0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0}, +#else + {"debug", '#', "Output debug log", (gptr*) &default_dbug_option, + (gptr*) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, +#endif {"help", '?', "Display help and exit", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit", @@ -108,7 +117,7 @@ hash_lex_struct *get_hash_struct_by_len(hash_lex_struct **root_by_len, { if (*max_len<len){ *root_by_len= (hash_lex_struct *)realloc((char*)*root_by_len, - sizeof(hash_lex_struct)*len); + sizeof(hash_lex_struct)*len); hash_lex_struct *cur, *end= *root_by_len + len; for (cur= *root_by_len + *max_len; cur<end; cur++) cur->first_char= 0; @@ -353,6 +362,9 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), case '?': usage(0); exit(0); + case '#': + DBUG_PUSH(argument ? 
argument : default_dbug_option); + break; } return 0; } @@ -425,11 +437,12 @@ int check_duplicates() int main(int argc,char **argv) { MY_INIT(argv[0]); + DBUG_PROCESS(argv[0]); if (get_options(argc,(char **) argv)) exit(1); - printf("/* Copyright (C) 2001 MySQL AB\n\ + printf("/* Copyright (C) 2001-2004 MySQL AB\n\ This software comes with ABSOLUTELY NO WARRANTY. This is free software,\n\ and you are welcome to modify and redistribute it under the GPL license\n\ \n*/\n\n"); @@ -449,9 +462,8 @@ int main(int argc,char **argv) printf("\nunsigned int sql_functions_max_len=%d;\n",max_len); printf("\nunsigned int symbols_max_len=%d;\n\n",max_len2); - printf -( -"inline SYMBOL *get_hash_symbol(const char *s,\n\ + printf("\ +inline SYMBOL *get_hash_symbol(const char *s,\n \ unsigned int len,bool function)\n\ {\n\ register uchar *hash_map;\n\ @@ -516,5 +528,7 @@ int main(int argc,char **argv) }\n\ }\n" ); + my_end(0); + exit(0); } diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index 2c7cce4bcd0..322126ff47b 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -356,7 +356,8 @@ ulong ha_berkeley::index_flags(uint idx, uint part, bool all_parts) const } switch (table->key_info[idx].key_part[i].field->key_type()) { case HA_KEYTYPE_TEXT: - case HA_KEYTYPE_VARTEXT: + case HA_KEYTYPE_VARTEXT1: + case HA_KEYTYPE_VARTEXT2: /* As BDB stores only one copy of equal strings, we can't use key read on these. Binary collations do support key read though. @@ -391,9 +392,11 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) KEY_PART_INFO *key_part= key->key_part, *end=key_part+key->key_parts; uint key_length=new_key->size; + DBUG_DUMP("key_in_index", saved_key_ptr, saved_key->size); for (; key_part != end && (int) key_length > 0; key_part++) { int cmp; + uint length; if (key_part->null_bit) { if (*new_key_ptr != *saved_key_ptr++) @@ -402,11 +405,12 @@ berkeley_cmp_packed_key(DB *file, const DBT *new_key, const DBT *saved_key) if (!*new_key_ptr++) continue; } - if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr, - key_part->length))) + if ((cmp= key_part->field->pack_cmp(new_key_ptr,saved_key_ptr, + key_part->length, + key->table->insert_or_update))) return cmp; - uint length=key_part->field->packed_col_length(new_key_ptr, - key_part->length); + length= key_part->field->packed_col_length(new_key_ptr, + key_part->length); new_key_ptr+=length; key_length-=length; saved_key_ptr+=key_part->field->packed_col_length(saved_key_ptr, @@ -432,7 +436,7 @@ berkeley_cmp_fix_length_key(DB *file, const DBT *new_key, const DBT *saved_key) for (; key_part != end && (int) key_length > 0 ; key_part++) { int cmp; - if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr,0))) + if ((cmp=key_part->field->pack_cmp(new_key_ptr,saved_key_ptr,0,0))) return cmp; new_key_ptr+=key_part->length; key_length-= key_part->length; @@ -442,6 +446,7 @@ berkeley_cmp_fix_length_key(DB *file, const DBT *new_key, const DBT *saved_key) } #endif + /* Compare key against row */ static bool @@ -453,6 +458,7 @@ berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length) for (; key_part != end && (int) key_length > 0; key_part++) { int cmp; + uint length; if (key_part->null_bit) { key_length--; @@ -466,15 +472,20 @@ berkeley_key_cmp(TABLE *table, KEY *key_info, const char *key, uint key_length) if (!*key++) // Null value continue; } - if ((cmp=key_part->field->pack_cmp(key,key_part->length))) + /* + Last argument has to be 0 as we are also using this to function to see + if a key like 'a 
' matched a row with 'a' + */ + if ((cmp= key_part->field->pack_cmp(key, key_part->length, 0))) return cmp; - uint length=key_part->field->packed_col_length(key,key_part->length); - key+=length; - key_length-=length; + length= key_part->field->packed_col_length(key,key_part->length); + key+= length; + key_length-= length; } return 0; // Identical keys } + int ha_berkeley::open(const char *name, int mode, uint test_if_locked) { char name_buff[FN_REFLEN]; @@ -736,11 +747,11 @@ void ha_berkeley::unpack_row(char *record, DBT *row) void ha_berkeley::unpack_key(char *record, DBT *key, uint index) { - KEY *key_info=table->key_info+index; + KEY *key_info= table->key_info+index; KEY_PART_INFO *key_part= key_info->key_part, - *end=key_part+key_info->key_parts; + *end= key_part+key_info->key_parts; + char *pos= (char*) key->data; - char *pos=(char*) key->data; for (; key_part != end; key_part++) { if (key_part->null_bit) @@ -764,8 +775,10 @@ void ha_berkeley::unpack_key(char *record, DBT *key, uint index) /* - Create a packed key from from a row - This will never fail as the key buffer is pre allocated. + Create a packed key from a row. This key will be written as such + to the index tree. + + This will never fail as the key buffer is pre-allocated. */ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, @@ -811,7 +824,10 @@ DBT *ha_berkeley::create_key(DBT *key, uint keynr, char *buff, /* - Create a packed key from from a MySQL unpacked key + Create a packed key from from a MySQL unpacked key (like the one that is + sent from the index_read() + + This key is to be used to read a row */ DBT *ha_berkeley::pack_key(DBT *key, uint keynr, char *buff, @@ -857,7 +873,7 @@ int ha_berkeley::write_row(byte * record) int error; DBUG_ENTER("write_row"); - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && record == table->record[0]) @@ -865,6 +881,7 @@ int ha_berkeley::write_row(byte * record) if ((error=pack_row(&row, record,1))) DBUG_RETURN(error); /* purecov: inspected */ + table->insert_or_update= 1; // For handling of VARCHAR if (table->keys + test(hidden_primary_key) == 1) { error=file->put(file, transaction, create_key(&prim_key, primary_key, @@ -950,6 +967,7 @@ int ha_berkeley::write_row(byte * record) break; } } + table->insert_or_update= 0; if (error == DB_KEYEXIST) error=HA_ERR_FOUND_DUPP_KEY; else if (!error) @@ -974,7 +992,7 @@ int ha_berkeley::key_cmp(uint keynr, const byte * old_row, (new_row[key_part->null_offset] & key_part->null_bit)) return 1; } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH)) + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) { if (key_part->field->cmp_binary((char*) (old_row + key_part->offset), @@ -1101,13 +1119,15 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) DB_TXN *sub_trans; ulong thd_options = table->tmp_table == NO_TMP_TABLE ? 
table->in_use->options : 0; bool primary_key_changed; + DBUG_ENTER("update_row"); LINT_INIT(error); - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); + table->insert_or_update= 1; // For handling of VARCHAR if (hidden_primary_key) { primary_key_changed=0; @@ -1160,6 +1180,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) if (new_error) error = new_error; } + table->insert_or_update= 0; DBUG_RETURN(error); // Fatal error /* purecov: inspected */ } changed_keys.set_bit(keynr); @@ -1192,8 +1213,9 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) thd_options); if (new_error) { - error=new_error; // This shouldn't happen /* purecov: inspected */ - break; /* purecov: inspected */ + /* This shouldn't happen */ + error=new_error; /* purecov: inspected */ + break; /* purecov: inspected */ } } } @@ -1205,6 +1227,7 @@ int ha_berkeley::update_row(const byte * old_row, byte * new_row) if (error != DB_LOCK_DEADLOCK) break; } + table->insert_or_update= 0; if (error == DB_KEYEXIST) error=HA_ERR_FOUND_DUPP_KEY; DBUG_RETURN(error); @@ -1294,7 +1317,7 @@ int ha_berkeley::delete_row(const byte * record) key_map keys=table->keys_in_use; ulong thd_options = table->tmp_table == NO_TMP_TABLE ? table->in_use->options : 0; DBUG_ENTER("delete_row"); - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); if ((error=pack_row(&row, record, 0))) DBUG_RETURN((error)); /* purecov: inspected */ @@ -1441,7 +1464,7 @@ int ha_berkeley::read_row(int error, char *buf, uint keynr, DBT *row, int ha_berkeley::index_read_idx(byte * buf, uint keynr, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + table->in_use->status_var.ha_read_key_count++; DBUG_ENTER("index_read_idx"); current_row.flags=DB_DBT_REALLOC; active_index=MAX_KEY; @@ -1462,7 +1485,7 @@ int ha_berkeley::index_read(byte * buf, const byte * key, int do_prev= 0; DBUG_ENTER("ha_berkeley::index_read"); - statistic_increment(ha_read_key_count,&LOCK_status); + table->in_use->status_var.ha_read_key_count++; bzero((char*) &row,sizeof(row)); if (find_flag == HA_READ_BEFORE_KEY) { @@ -1474,7 +1497,8 @@ int ha_berkeley::index_read(byte * buf, const byte * key, find_flag= HA_READ_AFTER_KEY; do_prev= 1; } - if (key_len == key_info->key_length) + if (key_len == key_info->key_length && + !table->key_info[active_index].flags & HA_END_SPACE_KEY) { if (find_flag == HA_READ_AFTER_KEY) key_info->handler.bdb_return_if_eq= 1; @@ -1531,7 +1555,8 @@ int ha_berkeley::index_read_last(byte * buf, const byte * key, uint key_len) KEY *key_info= &table->key_info[active_index]; DBUG_ENTER("ha_berkeley::index_read"); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); /* read of partial key */ @@ -1555,7 +1580,8 @@ int ha_berkeley::index_next(byte * buf) { DBT row; DBUG_ENTER("index_next"); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), (char*) buf, active_index, &row, &last_key, 1)); @@ -1566,9 +1592,11 @@ int 
ha_berkeley::index_next_same(byte * buf, const byte *key, uint keylen) DBT row; int error; DBUG_ENTER("index_next_same"); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); - if (keylen == table->key_info[active_index].key_length) + if (keylen == table->key_info[active_index].key_length && + !table->key_info[active_index].flags & HA_END_SPACE_KEY) error=read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT_DUP), (char*) buf, active_index, &row, &last_key, 1); else @@ -1586,7 +1614,8 @@ int ha_berkeley::index_prev(byte * buf) { DBT row; DBUG_ENTER("index_prev"); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_PREV), (char*) buf, active_index, &row, &last_key, 1)); @@ -1597,7 +1626,8 @@ int ha_berkeley::index_first(byte * buf) { DBT row; DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_FIRST), (char*) buf, active_index, &row, &last_key, 1)); @@ -1607,7 +1637,8 @@ int ha_berkeley::index_last(byte * buf) { DBT row; DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_LAST), (char*) buf, active_index, &row, &last_key, 0)); @@ -1629,7 +1660,8 @@ int ha_berkeley::rnd_next(byte *buf) { DBT row; DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); bzero((char*) &row,sizeof(row)); DBUG_RETURN(read_row(cursor->c_get(cursor, &last_key, &row, DB_NEXT), (char*) buf, primary_key, &row, &last_key, 1)); @@ -1653,6 +1685,7 @@ DBT *ha_berkeley::get_pos(DBT *to, byte *pos) pos+=key_part->field->packed_col_length((char*) pos,key_part->length); to->size= (uint) (pos- (byte*) to->data); } + DBUG_DUMP("key", (char*) to->data, to->size); return to; } @@ -1660,9 +1693,10 @@ DBT *ha_berkeley::get_pos(DBT *to, byte *pos) int ha_berkeley::rnd_pos(byte * buf, byte *pos) { DBT db_pos; - statistic_increment(ha_read_rnd_count,&LOCK_status); + DBUG_ENTER("ha_berkeley::rnd_pos"); - + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); active_index= MAX_KEY; DBUG_RETURN(read_row(file->get(file, transaction, get_pos(&db_pos, pos), @@ -1804,7 +1838,8 @@ int ha_berkeley::external_lock(THD *thd, int lock_type) !thd->transaction.all.bdb_tid) { /* We have to start a master transaction */ - DBUG_PRINT("trans",("starting transaction all")); + DBUG_PRINT("trans",("starting transaction all: options: 0x%lx", + (ulong) thd->options)); if ((error=txn_begin(db_env, 0, (DB_TXN**) &thd->transaction.all.bdb_tid, 0))) @@ -2058,19 +2093,35 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key, DB_KEY_RANGE start_range, end_range; DB *kfile=key_file[keynr]; double start_pos,end_pos,rows; - DBUG_ENTER("records_in_range"); - - if ((start_key && kfile->key_range(kfile,transaction, - pack_key(&key, keynr, key_buff, - start_key->key, - start_key->length), 
- &start_range,0)) || - (end_key && kfile->key_range(kfile,transaction, - pack_key(&key, keynr, key_buff, - end_key->key, - end_key->length), - &end_range,0))) - DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); // Better than returning an error /* purecov: inspected */ + bool error; + KEY *key_info= &table->key_info[keynr]; + DBUG_ENTER("ha_berkeley::records_in_range"); + + /* Ensure we get maximum range, even for varchar keys with different space */ + key_info->handler.bdb_return_if_eq= -1; + error= ((start_key && kfile->key_range(kfile,transaction, + pack_key(&key, keynr, key_buff, + start_key->key, + start_key->length), + &start_range,0))); + if (error) + { + key_info->handler.bdb_return_if_eq= 0; + // Better than returning an error + DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); /* purecov: inspected */ + } + key_info->handler.bdb_return_if_eq= 1; + error= (end_key && kfile->key_range(kfile,transaction, + pack_key(&key, keynr, key_buff, + end_key->key, + end_key->length), + &end_range,0)); + key_info->handler.bdb_return_if_eq= 0; + if (error) + { + // Better than returning an error + DBUG_RETURN(HA_BERKELEY_RANGE_COUNT); /* purecov: inspected */ + } if (!start_key) start_pos= 0.0; @@ -2091,9 +2142,9 @@ ha_rows ha_berkeley::records_in_range(uint keynr, key_range *start_key, } -longlong ha_berkeley::get_auto_increment() +ulonglong ha_berkeley::get_auto_increment() { - longlong nr=1; // Default if error or new key + ulonglong nr=1; // Default if error or new key int error; (void) ha_berkeley::extra(HA_EXTRA_KEYREAD); @@ -2142,7 +2193,7 @@ longlong ha_berkeley::get_auto_increment() } } if (!error) - nr=(longlong) + nr=(ulonglong) table->next_number_field->val_int_offset(table->rec_buff_length)+1; ha_berkeley::index_end(); (void) ha_berkeley::extra(HA_EXTRA_NO_KEYREAD); @@ -2181,7 +2232,7 @@ static void print_msg(THD *thd, const char *table_name, const char *op_name, protocol->store(msg_type); protocol->store(msgbuf); if (protocol->write()) - thd->killed=1; + thd->killed=THD::KILL_CONNECTION; } #endif @@ -2553,6 +2604,7 @@ end: DBUG_VOID_RETURN; } + /* Return an estimated of the number of rows in the table. Used when sorting to allocate buffers and by the optimizer. 
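records_in_range() above asks BDB for the cumulative position of the start and end keys and turns the difference into a row estimate. The arithmetic itself is not part of the hunk shown here, so the standalone sketch below is only an illustration of that idea; the clamp to at least one row is an assumption, not copied from the handler:

// Minimal sketch (not the handler's exact code) of turning two cumulative key
// positions returned by the index into an estimated row count for the range.
#include <algorithm>
#include <cstdio>

double estimate_rows_in_range(double table_rows,
                              double start_pos,   // fraction of keys before the start key
                              double end_pos)     // fraction of keys up to the end key
{
  // An open-ended range falls back to 0.0 / 1.0 for the missing bound.
  double rows = (end_pos - start_pos) * table_rows;
  return std::max(rows, 1.0);       // never report an empty range to the optimizer
}

int main()
{
  // e.g. 100000 rows, start key at the 20% point, end key at the 35% point
  std::printf("%.0f\n", estimate_rows_in_range(100000, 0.20, 0.35)); // 15000
  return 0;
}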
@@ -2563,4 +2615,29 @@ ha_rows ha_berkeley::estimate_rows_upper_bound() return share->rows + HA_BERKELEY_EXTRA_ROWS; } +int ha_berkeley::cmp_ref(const byte *ref1, const byte *ref2) +{ + if (hidden_primary_key) + return memcmp(ref1, ref2, BDB_HIDDEN_PRIMARY_KEY_LENGTH); + + int result; + Field *field; + KEY *key_info=table->key_info+table->primary_key; + KEY_PART_INFO *key_part=key_info->key_part; + KEY_PART_INFO *end=key_part+key_info->key_parts; + + for (; key_part != end; key_part++) + { + field= key_part->field; + result= field->pack_cmp((const char*)ref1, (const char*)ref2, + key_part->length, 0); + if (result) + return result; + ref1+= field->packed_col_length((const char*)ref1, key_part->length); + ref2+= field->packed_col_length((const char*)ref2, key_part->length); + } + + return 0; +} + #endif /* HAVE_BERKELEY_DB */ diff --git a/sql/ha_berkeley.h b/sql/ha_berkeley.h index 25d3e128502..e485b12bdb4 100644 --- a/sql/ha_berkeley.h +++ b/sql/ha_berkeley.h @@ -153,9 +153,11 @@ class ha_berkeley: public handler int5store(to,share->auto_ident); pthread_mutex_unlock(&share->mutex); } - longlong get_auto_increment(); + ulonglong get_auto_increment(); void print_error(int error, myf errflag); uint8 table_cache_type() { return HA_CACHE_TBL_TRANSACT; } + bool primary_key_is_clustered() { return true; } + int cmp_ref(const byte *ref1, const byte *ref2); }; extern bool berkeley_shared_data; diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc new file mode 100755 index 00000000000..3118833a47e --- /dev/null +++ b/sql/ha_federated.cc @@ -0,0 +1,1722 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + + MySQL Federated Storage Engine + + ha_federated.cc - MySQL Federated Storage Engine + Patrick Galbraith and Brian Aker, 2004 + + This is a handler which uses a remote database as the data file, as + opposed to a handler like MyISAM, which uses .MYD files locally. + + How this handler works + ---------------------------------- + Normal database files are local and as such: You create a table called + 'users', a file such as 'users.MYD' is created. A handler reads, inserts, + deletes, updates data in this file. The data is stored in particular format, + so to read, that data has to be parsed into fields, to write, fields have to + be stored in this format to write to this data file. + + With MySQL Federated storage engine, there will be no local files for each + table's data (such as .MYD). A remote database will store the data that would + normally be in this file. This will necessitate the use of MySQL client API + to read, delete, update, insert this data. The data will have to be retrieve + via an SQL call "SELECT * FROM users". Then, to read this data, it will have + to be retrieved via mysql_fetch_row one row at a time, then converted from + the column in this select into the format that the handler expects. 
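The data flow described above (issue "SELECT * FROM users" remotely, then walk the result one row at a time with mysql_fetch_row) is the standard libmysql client round trip. A self-contained sketch of that round trip; host, port, credentials and the users table are placeholders, not values taken from the patch:

// Standalone sketch of the client-API round trip the comment describes.
#include <mysql.h>
#include <cstdio>

int main()
{
  MYSQL *conn = mysql_init(NULL);
  if (!mysql_real_connect(conn, "127.0.0.1", "user", "password",
                          "federated", 9306, NULL, 0))
  {
    std::fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
    return 1;
  }
  if (mysql_query(conn, "SELECT * FROM users"))
  {
    std::fprintf(stderr, "query failed: %s\n", mysql_error(conn));
    return 1;
  }
  MYSQL_RES *res = mysql_use_result(conn);      // stream rows one at a time
  unsigned int cols = mysql_num_fields(res);
  for (MYSQL_ROW row; (row = mysql_fetch_row(res)); )
  {
    // Here the handler would store each column value into table->field[i];
    // this sketch just prints the raw strings.
    for (unsigned int i = 0; i < cols; i++)
      std::printf("%s%s", row[i] ? row[i] : "NULL", i + 1 < cols ? "\t" : "\n");
  }
  mysql_free_result(res);
  mysql_close(conn);
  return 0;
}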
+ + The create table will simply create the .frm file, and within the + "CREATE TABLE" SQL, there SHALL be any of the following : + + comment=scheme://username:password@hostname:port/database/tablename + comment=scheme://username@hostname/database/tablename + comment=scheme://username:password@hostname/database/tablename + comment=scheme://username:password@hostname/database/tablename + + An example would be: + + comment=mysql://username:password@hostname:port/database/tablename + + ***IMPORTANT*** + + Only 'mysql://' is supported at this release. + + + This comment connection string is necessary for the handler to be + able to connect to the remote server. + + + The basic flow is this: + + SQL calls issues locally -> + mysql handler API (data in handler format) -> + mysql client API (data converted to SQL calls) -> + remote database -> mysql client API -> + convert result sets (if any) to handler format -> + handler API -> results or rows affected to local + + What this handler does and doesn't support + ------------------------------------------ + * Tables MUST be created on the remote server prior to any action on those + tables via the handler, first version. IMPORTANT: IF you MUST use the + federated storage engine type on the REMOTE end, MAKE SURE [ :) ] That + the table you connect to IS NOT a table pointing BACK to your ORIGNAL + table! You know and have heard the screaching of audio feedback? You + know putting two mirror in front of each other how the reflection + continues for eternity? Well, need I say more?! + * There will not be support for transactions. + * There is no way for the handler to know if the database on the remote end + has changed. The reason for this is that this database has to work like a + data file that would never be written to by anything other than the + database. The integrity of the data in the local table could be breached + if there was any change to the remote database. + * Support for SELECT, INSERT, UPDATE , DELETE, indexes. + * No ALTER TABLE, DROP TABLE or any other Data Definition Language calls. + * Prepared statements will not be used in the first implementation, it + remains to to be seen whether the limited subset of the client API for the + server supports this. + * This uses SELECT, INSERT, UPDATE, DELETE and not HANDLER for its + implementation. + * This will not work with the query cache. 
+ + Method calls + + A two column table, with one record: + + (SELECT) + + "SELECT * FROM foo" + ha_federated::info + ha_federated::scan_time: + ha_federated::rnd_init: share->select_query SELECT * FROM foo + ha_federated::extra + + <for every row of data retrieved> + ha_federated::rnd_next + ha_federated::convert_row_to_internal_format + ha_federated::rnd_next + </for every row of data retrieved> + + ha_federated::rnd_end + ha_federated::extra + ha_federated::reset + + (INSERT) + + "INSERT INTO foo (id, ts) VALUES (2, now());" + + ha_federated::write_row + + <for every field/column> + ha_federated::quote_data + ha_federated::quote_data + </for every field/column> + + ha_federated::reset + + (UPDATE) + + "UPDATE foo SET ts = now() WHERE id = 1;" + + ha_federated::index_init + ha_federated::index_read + ha_federated::index_read_idx + ha_federated::quote_data + ha_federated::rnd_next + ha_federated::convert_row_to_internal_format + ha_federated::update_row + + <quote 3 cols, new and old data> + <ha_federated::quote_data + <ha_federated::quote_data + <ha_federated::quote_data + <ha_federated::quote_data + <ha_federated::quote_data + <ha_federated::quote_data + </quote 3 cols, new and old data> + + ha_federated::extra + ha_federated::extra + ha_federated::extra + ha_federated::external_lock + ha_federated::reset + + + How do I use this handler? + -------------------------- + First of all, you need to build this storage engine: + + ./configure --with-federated-storage-engine + make + + Next, to use this handler, it's very simple. You must + have two databases running, either both on the same host, or + on different hosts. + + One the server that will be connecting to the remote + host (client), you create your table as such: + + CREATE TABLE test_table ( + id int(20) NOT NULL auto_increment, + name varchar(32) NOT NULL default '', + other int(20) NOT NULL default '0', + PRIMARY KEY (id), + KEY name (name), + KEY other_key (other)) + ENGINE="FEDERATED" + DEFAULT CHARSET=latin1 + COMMENT='root@127.0.0.1:9306/federated/test_federated'; + + Notice the "COMMENT" and "ENGINE" field? This is where you + respectively set the engine type, "FEDERATED" and remote + host information, this being the database your 'client' database + will connect to and use as the "data file". Obviously, the remote + database is running on port 9306, so you want to start up your other + database so that it is indeed on port 9306, and your federated + database on a port other than that. In my setup, I use port 5554 + for federated, and port 5555 for the remote. + + Then, on the remote database: + + CREATE TABLE test_table ( + id int(20) NOT NULL auto_increment, + name varchar(32) NOT NULL default '', + other int(20) NOT NULL default '0', + PRIMARY KEY (id), + KEY name (name), + KEY other_key (other)) + ENGINE="<NAME>" <-- whatever you want, or not specify + DEFAULT CHARSET=latin1 ; + + This table is exactly the same (and must be exactly the same), + except that it is not using the federated handler and does + not need the URL. 
+ + + How to see the handler in action + -------------------------------- + + When developing this handler, I compiled the federated database with + debugging: + + ./configure --with-federated-storage-engine + --prefix=/home/mysql/mysql-build/federated/ --with-debug + + Once compiled, I did a 'make install' (not for the purpose of installing + the binary, but to install all the files the binary expects to see in the + diretory I specified in the build with --prefix, + "/home/mysql/mysql-build/federated". + + Then, I started the remote server: + + /usr/local/mysql/bin/mysqld_safe + --user=mysql --log=/tmp/mysqld.5555.log -P 5555 + + Then, I went back to the directory containing the newly compiled mysqld, + <builddir>/sql/, started up gdb: + + gdb ./mysqld + + Then, withn the (gdb) prompt: + (gdb) run --gdb --port=5554 --socket=/tmp/mysqld.5554 --skip-innodb --debug + + Next, I open several windows for each: + + 1. Tail the debug trace: tail -f /tmp/mysqld.trace|grep ha_fed + 2. Tail the SQL calls to the remote database: tail -f /tmp/mysqld.5555.log + 3. A window with a client open to the federated server on port 5554 + 4. A window with a client open to the federated server on port 5555 + + I would create a table on the client to the remote server on port + 5555, and then to the federated server on port 5554. At this point, + I would run whatever queries I wanted to on the federated server, + just always remembering that whatever changes I wanted to make on + the table, or if I created new tables, that I would have to do that + on the remote server. + + Another thing to look for is 'show variables' to show you that you have + support for federated handler support: + + show variables like '%federat%' + + and: + + show storage engines; + + Both should display the federated storage handler. + + + Testing + ------- + + There is a test for MySQL Federated Storage Handler in ./mysql-test/t, + federatedd.test It starts both a slave and master database using + the same setup that the replication tests use, with the exception that + it turns off replication, and sets replication to ignore the test tables. + After ensuring that you actually do have support for the federated storage + handler, numerous queries/inserts/updates/deletes are run, many derived + from the MyISAM tests, plus som other tests which were meant to reveal + any issues that would be most likely to affect this handler. All tests + should work! 
;) + + To run these tests, go into ./mysql-test (based in the directory you + built the server in) + + ./mysql-test-run federatedd + + To run the test, or if you want to run the test and have debug info: + + ./mysql-test-run --debug federated + + This will run the test in debug mode, and you can view the trace and + log files in the ./mysql-test/var/log directory + + ls -l mysql-test/var/log/ + -rw-r--r-- 1 patg patg 17 4 Dec 12:27 current_test + -rw-r--r-- 1 patg patg 692 4 Dec 12:52 manager.log + -rw-rw---- 1 patg patg 21246 4 Dec 12:51 master-bin.000001 + -rw-rw---- 1 patg patg 68 4 Dec 12:28 master-bin.index + -rw-r--r-- 1 patg patg 1620 4 Dec 12:51 master.err + -rw-rw---- 1 patg patg 23179 4 Dec 12:51 master.log + -rw-rw---- 1 patg patg 16696550 4 Dec 12:51 master.trace + -rw-r--r-- 1 patg patg 0 4 Dec 12:28 mysqltest-time + -rw-r--r-- 1 patg patg 2024051 4 Dec 12:51 mysqltest.trace + -rw-rw---- 1 patg patg 94992 4 Dec 12:51 slave-bin.000001 + -rw-rw---- 1 patg patg 67 4 Dec 12:28 slave-bin.index + -rw-rw---- 1 patg patg 249 4 Dec 12:52 slave-relay-bin.000003 + -rw-rw---- 1 patg patg 73 4 Dec 12:28 slave-relay-bin.index + -rw-r--r-- 1 patg patg 1349 4 Dec 12:51 slave.err + -rw-rw---- 1 patg patg 96206 4 Dec 12:52 slave.log + -rw-rw---- 1 patg patg 15706355 4 Dec 12:51 slave.trace + -rw-r--r-- 1 patg patg 0 4 Dec 12:51 warnings + + Of course, again, you can tail the trace log: + + tail -f mysql-test/var/log/master.trace |grep ha_fed + + As well as the slave query log: + + tail -f mysql-test/var/log/slave.log + + Files that comprise the test suit + --------------------------------- + mysql-test/t/federated.test + mysql-test/r/federated.result + mysql-test/r/have_federated_db.require + mysql-test/include/have_federated_db.inc + + + Other tidbits + ------------- + + These were the files that were modified or created for this + Federated handler to work: + + ./configure.in + ./sql/Makefile.am + ./config/ac_macros/ha_federated.m4 + ./sql/handler.cc + ./sql/mysqld.cc + ./sql/set_var.cc + ./sql/field.h + ./sql/sql_string.h + ./mysql-test/mysql-test-run(.sh) + ./mysql-test/t/federated.test + ./mysql-test/r/federated.result + ./mysql-test/r/have_federated_db.require + ./mysql-test/include/have_federated_db.inc + ./sql/ha_federated.cc + ./sql/ha_federated.h + +*/ + +#ifdef __GNUC__ +#pragma implementation // gcc: Class implementation +#endif + +#include <mysql_priv.h> + +#ifdef HAVE_FEDERATED_DB +#include "ha_federated.h" +#define MAX_REMOTE_SIZE IO_SIZE +/* Variables for federated share methods */ +static HASH federated_open_tables; // Hash used to track open tables +pthread_mutex_t federated_mutex; // This is the mutex we use to init the hash +static int federated_init= 0; // Variable for checking the init state of hash + +/* + Function we use in the creation of our hash to get key. +*/ +static byte* federated_get_key(FEDERATED_SHARE *share,uint *length, + my_bool not_used __attribute__((unused))) +{ + *length= share->table_name_length; + return (byte*) share->table_name; +} + +/* + Parse connection info from table->comment + + SYNOPSIS + parse_url() + share pointer to FEDERATED share + table pointer to current TABLE class + + DESCRIPTION + populates the share with information about the connection + to the remote database that will serve as the data source. + This string must be specified (currently) in the "comment" field, + listed in the CREATE TABLE statement. 
+ + This string MUST be in the format of any of these: + +scheme://username:password@hostname:port/database/table +scheme://username@hostname/database/table +scheme://username@hostname:port/database/table +scheme://username:password@hostname/database/table + + An Example: + + mysql://joe:joespass@192.168.1.111:9308/federated/testtable + + ***IMPORTANT*** + Currently, only "mysql://" is supported. + + 'password' and 'port' are both optional. + + RETURN VALUE + 0 success + -1 failure, wrong string format + +*/ +int parse_url(FEDERATED_SHARE *share, TABLE *table, uint table_create_flag) +{ + DBUG_ENTER("ha_federated::parse_url"); + + // This either get set or will remain the same. + share->port= 0; + uint error_num= table_create_flag ? ER_CANT_CREATE_TABLE : ER_CONNECT_TO_MASTER ; + + share->scheme= my_strdup(table->comment, MYF(0)); + + + if (share->username= strstr(share->scheme, "://")) + { + share->scheme[share->username - share->scheme] = '\0'; + if (strcmp(share->scheme, "mysql") != 0) + { + DBUG_PRINT("ha_federated::parse_url", + ("The federated handler currently only supports connecting\ + to a MySQL database!!!\n")); + my_error(error_num, MYF(0), + "ERROR: federated handler only supports remote 'mysql://' database"); + DBUG_RETURN(-1); + } + share->username+= 3; + + if (share->hostname= strchr(share->username, '@')) + { + share->username[share->hostname - share->username]= '\0'; + share->hostname++; + + if (share->password= strchr(share->username, ':')) + { + share->username[share->password - share->username]= '\0'; + share->password++; + share->username= share->username; + // make sure there isn't an extra / or @ + if (strchr(share->password, '/') || strchr(share->hostname, '@')) + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + /* + Found that if the string is: +user:@hostname:port/database/table +Then password is a null string, so set to NULL + */ + if (share->password[0] == '\0') + share->password= NULL; + } + else + share->username= share->username; + + // make sure there isn't an extra / or @ + if (strchr(share->username, '/') || strchr(share->hostname, '@')) + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + + if (share->database= strchr(share->hostname, '/')) + { + share->hostname[share->database - share->hostname]= '\0'; + share->database++; + + if (share->sport= strchr(share->hostname, ':')) + { + share->hostname[share->sport - share->hostname]= '\0'; + share->sport++; + if (share->sport[0] == '\0') + share->sport= NULL; + else + share->port= atoi(share->sport); + } + + if (share->table_base_name= strchr(share->database, '/')) + { + share->database[share->table_base_name - share->database]= '\0'; + share->table_base_name++; + } + else + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + } + else + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + // make sure there's not an extra / + 
if (strchr(share->table_base_name, '/')) + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + if (share->hostname[0] == '\0') + share->hostname= NULL; + + DBUG_PRINT("ha_federated::parse_url", + ("scheme %s username %s password %s \ + hostname %s port %d database %s tablename %s\n", + share->scheme, share->username, share->password, share->hostname, + share->port, share->database, share->table_base_name)); + } + else + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + } + else + { + DBUG_PRINT("ha_federated::parse_url", + ("this connection string is not in the correct format!!!\n")); + my_error(error_num, MYF(0), + "this connection string is not in the correct format!!!\n"); + DBUG_RETURN(-1); + } + DBUG_RETURN(0); +} + +/* + Convert MySQL result set row to handler internal format + + SYNOPSIS + convert_row_to_internal_format() + record Byte pointer to record + row MySQL result set row from fetchrow() + + DESCRIPTION + This method simply iterates through a row returned via fetchrow with + values from a successful SELECT , and then stores each column's value + in the field object via the field object pointer (pointing to the table's + array of field object pointers). This is how the handler needs the data + to be stored to then return results back to the user + + RETURN VALUE + 0 After fields have had field values stored from record + */ +uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row) +{ + unsigned long len; + int x= 0; + DBUG_ENTER("ha_federated::convert_row_to_internal_format"); + + // Question this + memset(record, 0, table->null_bytes); + + for (Field **field=table->field; *field ; field++, x++) + { + if (!row[x]) + (*field)->set_null(); + else + /* + changed system_charset_info to default_charset_info because + testing revealed that german text was not being retrieved properly + */ + (*field)->store(row[x], strlen(row[x]), &my_charset_bin); + } + + DBUG_RETURN(0); +} + +/* + SYNOPSIS + quote_data() + unquoted_string Pointer pointing to the value of a field + field MySQL Field pointer to field being checked for type + + DESCRIPTION + Simple method that passes the field type to the method "type_quote" + To get a true/false value as to whether the value in string1 needs + to be enclosed with quotes. 
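parse_url() above picks the scheme, credentials, host, optional port, database and table out of the COMMENT string with a chain of strstr()/strchr() calls. The standalone sketch below performs the same splitting with std::string for readability; the ConnInfo struct and helper name are illustrative only, not the handler's types:

// Standalone sketch of splitting the COMMENT connection string into its parts.
#include <cstdio>
#include <cstdlib>
#include <string>

struct ConnInfo {
  std::string scheme, user, password, host, database, table;
  int port = 0;
};

bool split_conn_string(const std::string &s, ConnInfo *out)
{
  size_t scheme_end = s.find("://");
  size_t at = s.find('@', scheme_end + 3);
  if (scheme_end == std::string::npos || at == std::string::npos)
    return false;                                   // wrong format
  out->scheme = s.substr(0, scheme_end);

  std::string cred = s.substr(scheme_end + 3, at - scheme_end - 3);
  size_t colon = cred.find(':');
  out->user     = cred.substr(0, colon);
  out->password = colon == std::string::npos ? "" : cred.substr(colon + 1);

  std::string rest = s.substr(at + 1);              // host[:port]/db/table
  size_t slash1 = rest.find('/');
  size_t slash2 = rest.find('/', slash1 + 1);
  if (slash1 == std::string::npos || slash2 == std::string::npos)
    return false;
  std::string hostport = rest.substr(0, slash1);
  size_t pcolon = hostport.find(':');
  out->host = hostport.substr(0, pcolon);
  out->port = pcolon == std::string::npos
              ? 0 : std::atoi(hostport.c_str() + pcolon + 1);
  out->database = rest.substr(slash1 + 1, slash2 - slash1 - 1);
  out->table    = rest.substr(slash2 + 1);
  return out->table.find('/') == std::string::npos; // no extra path components
}

int main()
{
  ConnInfo c;
  if (split_conn_string("mysql://joe:joespass@192.168.1.111:9308/federated/testtable", &c))
    std::printf("%s %s %s %s %d %s %s\n", c.scheme.c_str(), c.user.c_str(),
                c.password.c_str(), c.host.c_str(), c.port,
                c.database.c_str(), c.table.c_str());
  return 0;
}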
This ensures that values in the final + sql statement to be passed to the remote server will be quoted properly + + RETURN_VALUE + void Immediately - if string doesn't need quote + void Upon prepending/appending quotes on each side of variable + +*/ +void ha_federated::quote_data(String *unquoted_string, Field *field ) +{ + char escaped_string[IO_SIZE]; + char *unquoted_string_buffer; + + unquoted_string_buffer= unquoted_string->c_ptr_quick(); + + int quote_flag; + DBUG_ENTER("ha_federated::quote_data"); + // this is the same call that mysql_real_escape_string() calls + escape_string_for_mysql(&my_charset_bin, (char *)escaped_string, + unquoted_string->c_ptr_quick(), unquoted_string->length()); + + DBUG_PRINT("ha_federated::quote_data", + ("escape_string_for_mysql unescaped %s escaped %s", + unquoted_string->c_ptr_quick(), escaped_string)); + + if (field->is_null()) + { + DBUG_PRINT("ha_federated::quote_data", + ("NULL, no quoted needed for unquoted_string %s, returning.", + unquoted_string->c_ptr_quick())); + DBUG_VOID_RETURN; + } + + quote_flag= type_quote(field->type()); + + if (quote_flag == 0) + { + DBUG_PRINT("ha_federated::quote_data", + ("quote flag 0 no quoted needed for unquoted_string %s, returning.", + unquoted_string->c_ptr_quick())); + DBUG_VOID_RETURN; + } + else + { + // reset string, then re-append with quotes and escaped values + unquoted_string->length(0); + unquoted_string->append("'"); + unquoted_string->append((char *)escaped_string); + unquoted_string->append("'"); + } + DBUG_PRINT("ha_federated::quote_data", + ("FINAL quote_flag %d unquoted_string %s escaped_string %s", + quote_flag, unquoted_string->c_ptr_quick(), escaped_string)); + DBUG_VOID_RETURN; +} + +/* + Quote a field type if needed + + SYNOPSIS + ha_federated::type_quote + int field Enumerated field type number + + DESCRIPTION + Simple method to give true/false whether a field should be quoted. 
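quote_data() and type_quote() together decide whether a value needs escaping and single quotes before it is spliced into the generated SQL. A toy version of that decision; the escaping here is deliberately minimal, whereas the handler itself calls escape_string_for_mysql():

// Toy illustration of the decision quote_data()/type_quote() make: string-like
// and temporal values get escaped and wrapped in single quotes, numeric values
// are passed through. The enum and escaping here are simplified placeholders.
#include <string>

enum ValueKind { NUMERIC, STRING_LIKE };

std::string sql_literal(const std::string &raw, ValueKind kind)
{
  if (kind == NUMERIC)
    return raw;                       // 42 -> 42
  std::string quoted = "'";
  for (char c : raw)                  // minimal escaping; the real code uses
  {                                   // escape_string_for_mysql()
    if (c == '\'' || c == '\\')
      quoted += '\\';
    quoted += c;
  }
  quoted += '\'';
  return quoted;                      // O'Brien -> 'O\'Brien'
}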
+ Used when constructing INSERT and UPDATE queries to the remote server + see write_row and update_row + + RETURN VALUE + 0 if value is of type NOT needing quotes + 1 if value is of type needing quotes +*/ +uint ha_federated::type_quote(int type) +{ + DBUG_ENTER("ha_federated::type_quote"); + DBUG_PRINT("ha_federated::type_quote", ("field type %d", type)); + + switch(type) { + //FIX this is a bug, fix when kernel is fixed + case MYSQL_TYPE_VARCHAR : + case FIELD_TYPE_STRING : + case FIELD_TYPE_VAR_STRING : + case FIELD_TYPE_YEAR : + case FIELD_TYPE_NEWDATE : + case FIELD_TYPE_TIME : + case FIELD_TYPE_TIMESTAMP : + case FIELD_TYPE_DATE : + case FIELD_TYPE_DATETIME : + case FIELD_TYPE_TINY_BLOB : + case FIELD_TYPE_BLOB : + case FIELD_TYPE_MEDIUM_BLOB : + case FIELD_TYPE_LONG_BLOB : + case FIELD_TYPE_GEOMETRY : + DBUG_RETURN(1); + + case FIELD_TYPE_DECIMAL : + case FIELD_TYPE_TINY : + case FIELD_TYPE_SHORT : + case FIELD_TYPE_INT24 : + case FIELD_TYPE_LONG : + case FIELD_TYPE_FLOAT : + case FIELD_TYPE_DOUBLE : + case FIELD_TYPE_LONGLONG : + case FIELD_TYPE_NULL : + case FIELD_TYPE_SET : + case FIELD_TYPE_ENUM : + DBUG_RETURN(0); + + default: DBUG_RETURN(0); + } + DBUG_RETURN(0); +} + +int load_conn_info(FEDERATED_SHARE *share, TABLE *table) +{ + DBUG_ENTER("ha_federated::load_conn_info"); + int retcode; + + retcode= parse_url(share, table, 0); + + if (retcode < 0) + { + DBUG_PRINT("ha_federated::load_conn_info", + ("retcode %d, setting defaults", retcode)); + /* sanity checks to make sure all needed pieces are present */ + if (!share->port) + { + if (strcmp(share->hostname, "localhost") == 0) + share->socket= my_strdup("/tmp/mysql.sock",MYF(0)); + else + share->port= 3306; + } + } + DBUG_PRINT("ha_federated::load_conn_info", + ("returned from retcode %d", retcode)); + + DBUG_RETURN(retcode); +} + +/* + Example of simple lock controls. The "share" it creates is structure we will + pass to each federated handler. Do you have to have one of these? Well, you + have pieces that are used for locking, and they are needed to function. +*/ +static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) +{ + FEDERATED_SHARE *share; + // FIX : need to redo + //String query; + char query_buffer[IO_SIZE]; + String query(query_buffer, sizeof(query_buffer), &my_charset_bin); + query.length(0); + + uint table_name_length, table_base_name_length; + char *tmp_table_name, *tmp_table_base_name, *table_base_name, *select_query; + + // share->table_name has the file location - we want the actual table's + // name! + table_base_name= table->table_name; + DBUG_PRINT("ha_federated::get_share",("table_name %s", table_base_name)); + /* + So why does this exist? There is no way currently to init a storage engine. + Innodb and BDB both have modifications to the server to allow them to + do this. Since you will not want to do this, this is probably the next + best method. 
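The one-time initialisation pattern get_share() uses just below (check a flag, take an already-existing mutex, re-check under the lock, then build the hash and the engine's own mutex) can be sketched on its own. Names below are placeholders, not the handler's globals:

// Standalone sketch of the "borrow an existing mutex for one-time init" pattern.
#include <pthread.h>

static pthread_mutex_t bootstrap_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t engine_mutex;      // the mutex the engine really wants
static int engine_inited = 0;

void ensure_engine_init()
{
  if (engine_inited)                      // cheap unlocked check, as in get_share()
    return;
  pthread_mutex_lock(&bootstrap_mutex);   // serialize the first callers
  if (!engine_inited)                     // re-check under the lock
  {
    pthread_mutex_init(&engine_mutex, NULL);
    // ... hash_init() and any other per-engine state would go here ...
    engine_inited = 1;
  }
  pthread_mutex_unlock(&bootstrap_mutex);
}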
+ */ + if (!federated_init) + { + /* Hijack a mutex for init'ing the storage engine */ + pthread_mutex_lock(&LOCK_mysql_create_db); + if (!federated_init) + { + federated_init++; + VOID(pthread_mutex_init(&federated_mutex,MY_MUTEX_INIT_FAST)); + (void) hash_init(&federated_open_tables,system_charset_info,32,0,0, + (hash_get_key) federated_get_key,0,0); + } + pthread_mutex_unlock(&LOCK_mysql_create_db); + } + pthread_mutex_lock(&federated_mutex); + table_name_length= (uint) strlen(table_name); + table_base_name_length= (uint) strlen(table_base_name); + + if (!(share= (FEDERATED_SHARE*) hash_search(&federated_open_tables, + (byte*) table_name, + table_name_length))) + { + query.set_charset(system_charset_info); + query.append("SELECT * FROM "); + query.append(table_base_name); + + if (!(share= (FEDERATED_SHARE *) + my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), + &share, sizeof(*share), + &tmp_table_name, table_name_length+1, + &tmp_table_base_name, table_base_name_length+1, + &select_query, query.length()+1, + NullS))) + { + pthread_mutex_unlock(&federated_mutex); + return NULL; + } + + load_conn_info(share, table); + share->use_count= 0; + share->table_name_length= table_name_length; + share->table_name= tmp_table_name; + share->table_base_name_length= table_base_name_length; + share->table_base_name= tmp_table_base_name; + share->select_query= select_query; + strmov(share->table_name,table_name); + strmov(share->table_base_name,table_base_name); + strmov(share->select_query,query.c_ptr_quick()); + DBUG_PRINT("ha_federated::get_share",("share->select_query %s", share->select_query)); + if (my_hash_insert(&federated_open_tables, (byte*) share)) + goto error; + thr_lock_init(&share->lock); + pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST); + } + share->use_count++; + pthread_mutex_unlock(&federated_mutex); + + return share; + +error2: + thr_lock_delete(&share->lock); + pthread_mutex_destroy(&share->mutex); +error: + pthread_mutex_unlock(&federated_mutex); + my_free((gptr) share, MYF(0)); + + return NULL; +} + + +/* + Free lock controls. We call this whenever we close a table. + If the table had the last reference to the share then we + free memory associated with it. +*/ +static int free_share(FEDERATED_SHARE *share) +{ + pthread_mutex_lock(&federated_mutex); + if (!--share->use_count) + { + hash_delete(&federated_open_tables, (byte*) share); + thr_lock_delete(&share->lock); + pthread_mutex_destroy(&share->mutex); + my_free((gptr) share, MYF(0)); + } + pthread_mutex_unlock(&federated_mutex); + + return 0; +} + + +/* + If frm_error() is called then we will use this to to find out + what file extentions exist for the storage engine. This is + also used by the default rename_table and delete_table method + in handler.cc. +*/ +const char **ha_federated::bas_ext() const +{ static const char *ext[]= { NullS }; return ext; } + + +/* + Used for opening tables. The name will be the name of the file. + A table is opened when it needs to be opened. For instance + when a request comes in for a select on the table (tables are not + open and closed for each request, they are cached). + + Called from handler.cc by handler::ha_open(). The server opens + all tables by calling ha_open() which then calls the handler + specific open(). 
+*/ +int ha_federated::open(const char *name, int mode, uint test_if_locked) +{ + DBUG_ENTER("ha_federated::open"); + int rc; + + if (!(share= get_share(name, table))) + DBUG_RETURN(1); + thr_lock_data_init(&share->lock,&lock,NULL); + + /* Connect to remote database mysql_real_connect() */ + mysql= mysql_init(0); + DBUG_PRINT("ha_federated::open",("hostname %s", share->hostname)); + DBUG_PRINT("ha_federated::open",("username %s", share->username)); + DBUG_PRINT("ha_federated::open",("password %s", share->password)); + DBUG_PRINT("ha_federated::open",("database %s", share->database)); + DBUG_PRINT("ha_federated::open",("port %d", share->port)); + if (!mysql_real_connect(mysql, + share->hostname, + share->username, + share->password, + share->database, + share->port, + NULL, + 0)) + { + my_error(ER_CONNECT_TO_MASTER, MYF(0), mysql_error(mysql)); + DBUG_RETURN(ER_CONNECT_TO_MASTER); + } + DBUG_RETURN(0); +} + + +/* + Closes a table. We call the free_share() function to free any resources + that we have allocated in the "shared" structure. + + Called from sql_base.cc, sql_select.cc, and table.cc. + In sql_select.cc it is only used to close up temporary tables or during + the process where a temporary table is converted over to being a + myisam table. + For sql_base.cc look at close_data_tables(). +*/ +int ha_federated::close(void) +{ + DBUG_ENTER("ha_federated::close"); + /* Disconnect from mysql */ + mysql_close(mysql); + DBUG_RETURN(free_share(share)); + +} + +/* + + Checks if a field in a record is SQL NULL. + + SYNOPSIS + field_in_record_is_null() + table TABLE pointer, MySQL table object + field Field pointer, MySQL field object + record char pointer, contains record + + DESCRIPTION + This method uses the record format information in table to track + the null bit in record. + + RETURN VALUE + 1 if NULL + 0 otherwise +*/ +inline uint field_in_record_is_null ( + TABLE* table, /* in: MySQL table object */ + Field* field, /* in: MySQL field object */ + char* record) /* in: a row in MySQL format */ +{ + int null_offset; + DBUG_ENTER("ha_federated::field_in_record_is_null"); + + if (!field->null_ptr) + DBUG_RETURN(0); + + null_offset= (uint) ((char*) field->null_ptr - (char*) table->record[0]); + + if (record[null_offset] & field->null_bit) + DBUG_RETURN(1); + + DBUG_RETURN(0); +} + +/* + write_row() inserts a row. No extra() hint is given currently if a bulk load + is happeneding. buf() is a byte array of data. You can use the field + information to extract the data from the native byte array type. + Example of this would be: + for (Field **field=table->field ; *field ; field++) + { + ... + } + + Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, + sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc. 
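write_row(), which follows, assembles an INSERT statement by appending field names and quoted values and then chopping the trailing separators. A standalone sketch of that assembly using plain std::string instead of MySQL's String class (the helper name is illustrative only):

// Standalone sketch of the string building write_row() performs below:
// collect column names and (already quoted) values, then assemble one
// INSERT statement.
#include <string>
#include <vector>
#include <cstdio>

std::string build_insert(const std::string &table,
                         const std::vector<std::string> &cols,
                         const std::vector<std::string> &vals)
{
  std::string fields, values;
  for (size_t i = 0; i < cols.size(); i++)
  {
    if (i) { fields += ", "; values += ", "; }   // no trailing comma to chop
    fields += cols[i];
    values += vals[i];
  }
  return "INSERT INTO " + table + " (" + fields + ") VALUES (" + values + ")";
}

int main()
{
  std::printf("%s\n", build_insert("foo", {"id", "ts"}, {"2", "now()"}).c_str());
  // INSERT INTO foo (id, ts) VALUES (2, now())
  return 0;
}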
+*/ +int ha_federated::write_row(byte * buf) +{ + int x= 0, num_fields= 0; + ulong current_query_id= 1; + ulong tmp_query_id; + int all_fields_have_same_query_id= 1; + + char insert_buffer[IO_SIZE]; + char values_buffer[IO_SIZE], insert_field_value_buffer[IO_SIZE]; + + // The main insert query string + String insert_string(insert_buffer, sizeof(insert_buffer), &my_charset_bin); + insert_string.length(0); + // The string containing the values to be added to the insert + String values_string(values_buffer, sizeof(values_buffer), &my_charset_bin); + values_string.length(0); + // The actual value of the field, to be added to the values_string + String insert_field_value_string(insert_field_value_buffer, + sizeof(insert_field_value_buffer), &my_charset_bin); + insert_field_value_string.length(0); + + DBUG_ENTER("ha_federated::write_row"); + /* + I want to use this and the next line, but the repository needs to be + updated to do so + */ + statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) + table->timestamp_field->set_time(); + + /* + get the current query id - the fields that we add to the insert + statement to send to the remote will not be appended unless they match + this query id + */ + current_query_id= table->in_use->query_id; + DBUG_PRINT("ha_federated::write_row", ("current query id %d", + current_query_id)); + + // start off our string + insert_string.append("INSERT INTO "); + insert_string.append(share->table_base_name); + // start both our field and field values strings + insert_string.append(" ("); + values_string.append(" VALUES ("); + + /* + Even if one field is different, all_fields_same_query_id can't remain + 0 if it remains 0, then that means no fields were specified in the query + such as in the case of INSERT INTO table VALUES (val1, val2, valN) + */ + for (Field **field= table->field; *field ; field++, x++) + { + if (x > 0 && tmp_query_id != (*field)->query_id) + all_fields_have_same_query_id= 0; + + tmp_query_id= (*field)->query_id; + } + /* + loop through the field pointer array, add any fields to both the values + list and the fields list that match the current query id + */ + for (Field **field= table->field; *field ; field++, x++) + { + DBUG_PRINT("ha_federated::write_row", ("field type %d", (*field)->type())); + // if there is a query id and if it's equal to the current query id + if ( ((*field)->query_id && (*field)->query_id == current_query_id ) + || all_fields_have_same_query_id) + { + num_fields++; + + if ((*field)->is_null()) + { + DBUG_PRINT("ha_federated::write_row", + ("current query id %d field is_null query id %d", + current_query_id, (*field)->query_id)); + insert_field_value_string.append("NULL"); + } + else + { + DBUG_PRINT("ha_federated::write_row", + ("current query id %d field is not null query ID %d", + current_query_id, (*field)->query_id)); + (*field)->val_str(&insert_field_value_string); + } + // append the field name + insert_string.append((*field)->field_name); + + // quote these fields if they require it + quote_data(&insert_field_value_string, *field); + // append the value + values_string.append(insert_field_value_string); + insert_field_value_string.length(0); + + // append commas between both fields and fieldnames + insert_string.append(','); + values_string.append(','); + DBUG_PRINT("ha_federated::write_row", + ("insert_string %s values_string %s insert_field_value_string %s", + insert_string.c_ptr_quick(), values_string.c_ptr_quick(), 
insert_field_value_string.c_ptr_quick())); + + } + } + + /* + chop of the trailing comma, or if there were no fields, a '(' + So, "INSERT INTO foo (" becomes "INSERT INTO foo " + or, with fields, "INSERT INTO foo (field1, field2," becomes + "INSERT INTO foo (field1, field2" + */ + insert_string.chop(); + + + /* + if there were no fields, we don't want to add a closing paren + AND, we don't want to chop off the last char '(' + insert will be "INSERT INTO t1 VALUES ();" + */ + DBUG_PRINT("ha_federated::write_row",("x %d num fields %d", + x, num_fields)); + if (num_fields > 0) + { + // chops off leading commas + values_string.chop(); + insert_string.append(')'); + } + // we always want to append this, even if there aren't any fields + values_string.append(')'); + + // add the values + insert_string.append(values_string); + + DBUG_PRINT("ha_federated::write_row",("insert query %s", + insert_string.c_ptr_quick())); + + if (mysql_real_query(mysql, insert_string.c_ptr_quick(), + insert_string.length())) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + + DBUG_RETURN(0); +} + +/* + Yes, update_row() does what you expect, it updates a row. old_data will have + the previous row record in it, while new_data will have the newest data in + it. + + Keep in mind that the server can do updates based on ordering if an ORDER BY + clause was used. Consecutive ordering is not guarenteed. + Currently new_data will not have an updated auto_increament record, or + and updated timestamp field. You can do these for federated by doing these: + if (table->timestamp_on_update_now) + update_timestamp(new_row+table->timestamp_on_update_now-1); + if (table->next_number_field && record == table->record[0]) + update_auto_increment(); + + Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. +*/ +int ha_federated::update_row( + const byte * old_data, + byte * new_data + ) +{ + uint x= 0; + uint has_a_primary_key; + int primary_key_field_num; + char old_field_value_buffer[IO_SIZE], new_field_value_buffer[IO_SIZE]; + char update_buffer[IO_SIZE], where_buffer[IO_SIZE]; + + // stores the value to be replaced of the field were are updating + String old_field_value(old_field_value_buffer, sizeof(old_field_value_buffer), &my_charset_bin); + old_field_value.length(0); + // stores the new value of the field + String new_field_value(new_field_value_buffer, sizeof(new_field_value_buffer), &my_charset_bin); + new_field_value.length(0); + // stores the update query + String update_string(update_buffer, sizeof(update_buffer), &my_charset_bin); + update_string.length(0); + // stores the WHERE clause + String where_string(where_buffer, sizeof(where_buffer), &my_charset_bin); + where_string.length(0); + + DBUG_ENTER("ha_federated::update_row"); + + + has_a_primary_key= table->primary_key == 0 ? 1 : 0; + primary_key_field_num= has_a_primary_key ? + table->key_info[table->primary_key].key_part->fieldnr -1 : -1; + if (has_a_primary_key) + DBUG_PRINT("ha_federated::update_row", ("has a primary key")); + + update_string.append("UPDATE "); + update_string.append(share->table_base_name); + update_string.append(" SET "); + +/* + In this loop, we want to match column names to values being inserted + (while building INSERT statement). 
+ + Iterate through table->field (new data) and share->old_filed (old_data) + using the same index to created an SQL UPDATE statement, new data is + used to create SET field=value and old data is used to create WHERE + field=oldvalue + */ + + for (Field **field= table->field ; *field ; field++, x++) + { + /* + In all of these tests for 'has_a_primary_key', what I'm trying to + accomplish is to only use the primary key in the WHERE clause if the + table has a primary key, as opposed to a table without a primary key + in which case we have to use all the fields to create a WHERE clause + using the old/current values, as well as adding a LIMIT statement + */ + if (has_a_primary_key) + { + if (x == primary_key_field_num) + where_string.append((*field)->field_name); + } + else + where_string.append((*field)->field_name); + + update_string.append((*field)->field_name); + update_string.append('='); + + if ((*field)->is_null()) + new_field_value.append("NULL"); + else + { + // otherwise = + (*field)->val_str(&new_field_value); + quote_data(&new_field_value, *field); + + if ( has_a_primary_key ) + { + if (x == primary_key_field_num) + where_string.append("="); + } + else + if (! field_in_record_is_null(table, *field, (char*) old_data)) + where_string.append("="); + } + + if ( has_a_primary_key) + { + if (x == primary_key_field_num) + { + (*field)->val_str(&old_field_value, + (char *)(old_data + (*field)->offset())); + quote_data(&old_field_value, *field); + where_string.append(old_field_value); + } + } + else + { + if (field_in_record_is_null(table, *field, (char*) old_data)) + where_string.append(" IS NULL "); + else + { + (*field)->val_str(&old_field_value, + (char *)(old_data + (*field)->offset())); + quote_data(&old_field_value, *field); + where_string.append(old_field_value); + } + } + update_string.append(new_field_value); + new_field_value.length(0); + + if (x+1 < table->fields) + { + update_string.append(", "); + if (! has_a_primary_key) + where_string.append(" AND "); + } + old_field_value.length(0); + } + update_string.append(" WHERE "); + update_string.append(where_string.c_ptr_quick()); + if (! has_a_primary_key) + update_string.append(" LIMIT 1"); + + DBUG_PRINT("ha_federated::update_row", ("Final update query: %s", + update_string.c_ptr_quick())); + if (mysql_real_query(mysql, update_string.c_ptr_quick(), + update_string.length())) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + + + DBUG_RETURN(0); +} + +/* + This will delete a row. 'buf' will contain a copy of the row to be deleted. + The server will call this right after the current row has been called (from + either a previous rnd_nexT() or index call). + If you keep a pointer to the last row or can access a primary key it will + make doing the deletion quite a bit easier. + Keep in mind that the server does no guarentee consecutive deletions. + ORDER BY clauses can be used. + + Called in sql_acl.cc and sql_udf.cc to manage internal table information. + Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select + it is used for removing duplicates while in insert it is used for REPLACE + calls. 
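+
+  For illustration, assume a table t1 (a INT, b VARCHAR(32)) and a row
+  (1,'foo') being deleted; the loop below then builds roughly:
+
+    DELETE FROM t1 WHERE a=1 AND b='foo' LIMIT 1
+
+  (NULL columns are matched with IS NULL rather than with '='.)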
+*/ +int ha_federated::delete_row(const byte * buf) +{ + int x= 0; + char delete_buffer[IO_SIZE]; + char data_buffer[IO_SIZE]; + + String delete_string(delete_buffer, sizeof(delete_buffer), &my_charset_bin); + delete_string.length(0); + String data_string(data_buffer, sizeof(data_buffer), &my_charset_bin); + data_string.length(0); + + DBUG_ENTER("ha_federated::delete_row"); + + delete_string.append("DELETE FROM "); + delete_string.append(share->table_base_name); + delete_string.append(" WHERE "); + + for (Field **field= table->field; *field; field++, x++) + { + delete_string.append((*field)->field_name); + + if ((*field)->is_null()) + { + delete_string.append(" IS "); + data_string.append("NULL"); + } + else + { + delete_string.append("="); + (*field)->val_str(&data_string); + quote_data(&data_string, *field); + } + + delete_string.append(data_string); + data_string.length(0); + + if (x+1 < table->fields) + delete_string.append(" AND "); + } + + delete_string.append(" LIMIT 1"); + DBUG_PRINT("ha_federated::delete_row", + ("Delete sql: %s", delete_string.c_ptr_quick())); + if ( mysql_real_query(mysql, delete_string.c_ptr_quick(), + delete_string.length())) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + + DBUG_RETURN(0); +} + + +/* + Positions an index cursor to the index specified in the handle. Fetches the + row if available. If the key value is null, begin at the first key of the + index. This method, which is called in the case of an SQL statement having + a WHERE clause on a non-primary key index, simply calls index_read_idx. +*/ +int ha_federated::index_read(byte * buf, const byte * key, + uint key_len __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) +{ + DBUG_ENTER("ha_federated::index_read"); + DBUG_RETURN(index_read_idx(buf, active_index, key, key_len, find_flag)); +} + + +/* + Positions an index cursor to the index specified in key. Fetches the + row if any. This is only used to read whole keys. + + This method is called via index_read in the case of a WHERE clause using + a regular non-primary key index, OR is called DIRECTLY when the WHERE clause + uses a PRIMARY KEY index. 
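+
+  For illustration, if the shared SELECT for table t1 were
+  "SELECT a, b FROM t1" (the exact form of share->select_query is built
+  elsewhere and is only assumed here) and the key value is 3, the query
+  sent to the remote server would be roughly:
+
+    SELECT a, b FROM t1 WHERE a = 3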
+*/ +int ha_federated::index_read_idx(byte * buf, uint index, const byte * key, + uint key_len __attribute__((unused)), + enum ha_rkey_function find_flag + __attribute__((unused))) +{ + char index_value[IO_SIZE]; + String index_string(index_value, sizeof(index_value), &my_charset_bin); + index_string.length(0); + + char sql_query_buffer[IO_SIZE]; + String sql_query(sql_query_buffer, sizeof(sql_query_buffer), &my_charset_bin); + sql_query.length(0); + + DBUG_ENTER("ha_federated::index_read_idx"); + statistic_increment(table->in_use->status_var.ha_read_key_count,&LOCK_status); + + index_string.length(0); + sql_query.length(0); + + sql_query.append(share->select_query); + sql_query.append(" WHERE "); + sql_query.append(table->key_info[index].key_part->field->field_name); + sql_query.append(" = "); + + table->key_info[index].key_part->field->val_str(&index_string, (char *)(key)); + quote_data(&index_string, table->key_info[index].key_part->field); + sql_query.append(index_string); + + DBUG_PRINT("ha_federated::index_read_idx", + ("sql_query %s", sql_query.c_ptr_quick())); + + if (mysql_real_query(mysql, sql_query.c_ptr_quick(), sql_query.length())) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + result= mysql_store_result(mysql); + + if (!result) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + + if (mysql_errno(mysql)) + { + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(mysql_errno(mysql)); + } + + DBUG_RETURN(rnd_next(buf)); +} + +/* + Initialized at each key walk (called multiple times unlike ::rnd_init()) +*/ +int ha_federated::index_init(uint keynr) +{ + int error; + DBUG_ENTER("ha_federated::index_init"); + DBUG_PRINT("ha_federated::index_init", + ("table: '%s' key: %d", table->real_name, keynr)); + active_index= keynr; + DBUG_RETURN(0); +} + +/* + Used to read forward through the index. +*/ +int ha_federated::index_next(byte * buf) +{ + DBUG_ENTER("ha_federated::index_next"); + DBUG_RETURN(rnd_next(buf)); +} + + +/* + rnd_init() is called when the system wants the storage engine to do a table + scan. + + This is the method that gets data for the SELECT calls. + + See the federated in the introduction at the top of this file to see when + rnd_init() is called. + + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, + sql_table.cc, and sql_update.cc. +*/ +int ha_federated::rnd_init(bool scan) +{ + DBUG_ENTER("ha_federated::rnd_init"); + int num_fields, rows; + + DBUG_PRINT("ha_federated::rnd_init", + ("share->select_query %s", share->select_query)); + if (mysql_real_query(mysql, share->select_query, strlen(share->select_query))) + { + my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql)); + DBUG_RETURN(ER_QUERY_ON_MASTER); + } + result= mysql_store_result(mysql); + + if (mysql_errno(mysql)) + DBUG_RETURN(mysql_errno(mysql)); + DBUG_RETURN(0); +} + +int ha_federated::rnd_end() +{ + DBUG_ENTER("ha_federated::rnd_end"); + mysql_free_result(result); + DBUG_RETURN(index_end()); +} + +int ha_federated::index_end(void) +{ + DBUG_ENTER("ha_federated::index_end"); + active_index= MAX_KEY; + DBUG_RETURN(0); +} + +/* + This is called for each row of the table scan. When you run out of records + you should return HA_ERR_END_OF_FILE. Fill buff up with the row information. + The Field structure for the table is the key to getting data into buf + in a manner that will allow the server to understand it. 
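+
+  Roughly, the server drives a full table scan with a calling pattern
+  like the following sketch (not code from this handler):
+
+    rnd_init(TRUE);
+    while (rnd_next(buf) != HA_ERR_END_OF_FILE)
+      ;                                 // process the row in buf
+    rnd_end();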
+ + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, + sql_table.cc, and sql_update.cc. +*/ +int ha_federated::rnd_next(byte *buf) +{ + MYSQL_ROW row; + DBUG_ENTER("ha_federated::rnd_next"); + + // Fetch a row, insert it back in a row format. + current_position= result->data_cursor; + if (! (row= mysql_fetch_row(result))) + DBUG_RETURN(HA_ERR_END_OF_FILE); + + DBUG_RETURN(convert_row_to_internal_format(buf,row)); +} + + +/* + 'position()' is called after each call to rnd_next() if the data needs to be + ordered. You can do something like the following to store the position: + ha_store_ptr(ref, ref_length, current_position); + + The server uses ref to store data. ref_length in the above case is the size + needed to store current_position. ref is just a byte array that the server + will maintain. If you are using offsets to mark rows, then current_position + should be the offset. If it is a primary key like in BDB, then it needs to + be a primary key. + + Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. +*/ +void ha_federated::position(const byte *record) +{ + DBUG_ENTER("ha_federated::position"); + //ha_store_ptr Add seek storage + ha_store_ptr(ref, ref_length, current_position); + DBUG_VOID_RETURN; +} + + +/* + This is like rnd_next, but you are given a position to use to determine the + row. The position will be of the type that you stored in ref. You can use + ha_get_ptr(pos,ref_length) to retrieve whatever key or position you saved + when position() was called. + + This method is required for an ORDER BY. + + Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. +*/ +int ha_federated::rnd_pos(byte * buf, byte *pos) +{ + DBUG_ENTER("ha_federated::rnd_pos"); + statistic_increment(table->in_use->status_var.ha_read_rnd_count,&LOCK_status); + current_position= ha_get_ptr(pos,ref_length); + result->current_row= 0; + result->data_cursor= current_position; + DBUG_RETURN(rnd_next(buf)); +} + + +/* + ::info() is used to return information to the optimizer. + Currently this table handler doesn't implement most of the fields + really needed. SHOW also makes use of this data + Another note, you will probably want to have the following in your + code: + if (records < 2) + records = 2; + The reason is that the server will optimize for cases of only a single + record. If in a table scan you don't know the number of records + it will probably be better to set records to two so you can return + as many records as you need. + Along with records a few more variables you may wish to set are: + records + deleted + data_file_length + index_file_length + delete_length + check_time + Take a look at the public variables in handler.h for more information. + + Called in: + filesort.cc + ha_heap.cc + item_sum.cc + opt_sum.cc + sql_delete.cc + sql_delete.cc + sql_derived.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_table.cc + sql_union.cc + sql_update.cc + +*/ +// FIX: later version provide better information to the optimizer +void ha_federated::info(uint flag) +{ + DBUG_ENTER("ha_federated::info"); + records= 10000; // Fake! + DBUG_VOID_RETURN; +} + + +/* + Used to delete all rows in a table. Both for cases of truncate and + for cases where the optimizer realizes that all rows will be + removed as a result of a SQL statement. 
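+
+  For example, both TRUNCATE TABLE t1 and an unqualified DELETE FROM t1
+  can end up here; the implementation below simply forwards a
+  "TRUNCATE t1" statement to the remote server.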
+
+  Called from item_sum.cc by Item_func_group_concat::clear() and
+  Item_sum_count_distinct::clear().
+  Called from sql_delete.cc by mysql_delete().
+  Called from sql_select.cc by JOIN::reinit().
+  Called from sql_union.cc by st_select_lex_unit::exec().
+*/
+int ha_federated::delete_all_rows()
+{
+  DBUG_ENTER("ha_federated::delete_all_rows");
+
+  char query_buffer[IO_SIZE];
+  String query(query_buffer, sizeof(query_buffer), &my_charset_bin);
+  query.length(0);
+
+  query.set_charset(system_charset_info);
+  query.append("TRUNCATE ");
+  query.append(share->table_base_name);
+
+  if (mysql_real_query(mysql, query.c_ptr_quick(), query.length())) {
+    my_error(ER_QUERY_ON_MASTER,MYF(0),mysql_error(mysql));
+    DBUG_RETURN(ER_QUERY_ON_MASTER);
+  }
+
+  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
+}
+
+
+/*
+  The idea with handler::store_lock() is the following:
+
+  The statement decides which locks we need for the table: for
+  updates/deletes/inserts we get WRITE locks, for SELECT... we get
+  read locks.
+
+  Before adding the lock into the table lock handler (see thr_lock.c)
+  mysqld calls store_lock() with the requested locks. store_lock() can
+  then modify a write lock to a read lock (or some other lock), ignore
+  the lock (if we don't want to use MySQL table locks at all) or add
+  locks for many tables (like we do when we are using a MERGE handler).
+
+  The federated handler changes all WRITE locks to TL_WRITE_ALLOW_WRITE
+  (which signals that we are doing WRITES, but that we still allow other
+  readers and writers).
+
+  When releasing locks, store_lock() is also called. In this case one
+  usually doesn't have to do anything.
+
+  In some exceptional cases MySQL may send a request for a TL_IGNORE;
+  this means that we are requesting the same lock as last time and this
+  should also be ignored. (This may happen when someone does a flush
+  table when we have opened a part of the tables, in which case mysqld
+  closes and reopens the tables and tries to get the same locks as last
+  time.)  In the future we will probably try to remove this.
+
+  Called from lock.cc by get_lock_data().
+*/
+THR_LOCK_DATA **ha_federated::store_lock(THD *thd,
+                                         THR_LOCK_DATA **to,
+                                         enum thr_lock_type lock_type)
+{
+  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
+  {
+    /*
+      Here is where we get into the guts of a row level lock.
+      If TL_UNLOCK is set and we are not doing a LOCK TABLE or
+      DISCARD/IMPORT TABLESPACE, then allow multiple writers.
+    */
+
+    if ((lock_type >= TL_WRITE_CONCURRENT_INSERT &&
+         lock_type <= TL_WRITE) && !thd->in_lock_tables
+        && !thd->tablespace_op)
+      lock_type= TL_WRITE_ALLOW_WRITE;
+
+    /*
+      In queries of type INSERT INTO t1 SELECT ... FROM t2 ...
+      MySQL would use the lock TL_READ_NO_INSERT on t2, and that
+      would conflict with TL_WRITE_ALLOW_WRITE, blocking all inserts
+      to t2. Convert the lock to a normal read lock to allow
+      concurrent inserts to t2.
+    */
+
+    if (lock_type == TL_READ_NO_INSERT && !thd->in_lock_tables)
+      lock_type= TL_READ;
+
+    lock.type= lock_type;
+  }
+
+  *to++= &lock;
+
+  return to;
+}
+
+/*
+  create() does nothing, since we have no local setup of our own.
+  FUTURE: We should potentially connect to the remote database and
+  create tables if they do not exist.
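+
+  For reference, the remote connection parameters (scheme, hostname,
+  username, password, database, table) are parsed out of the connection
+  string by parse_url(); a URL of the form
+  "scheme://username:password@hostname:port/database/tablename" is
+  assumed here, as suggested by the FEDERATED_SHARE fields declared in
+  ha_federated.h.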
+*/ +int ha_federated::create(const char *name, TABLE *table_arg, + HA_CREATE_INFO *create_info) +{ + DBUG_ENTER("ha_federated::create"); + int retcode; + FEDERATED_SHARE *tmp; + retcode= parse_url(tmp, table_arg, 1); + if (retcode < 0) + { + DBUG_PRINT("ha_federated::create", + ("ERROR: on table creation for %s called parse_url, retcode %d", + create_info->data_file_name, retcode)); + DBUG_RETURN(ER_CANT_CREATE_TABLE); + } + DBUG_RETURN(0); +} +#endif /* HAVE_FEDERATED_DB */ diff --git a/sql/ha_federated.h b/sql/ha_federated.h new file mode 100755 index 00000000000..c11960a836f --- /dev/null +++ b/sql/ha_federated.h @@ -0,0 +1,177 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/* + Please read ha_exmple.cc before reading this file. + Please keep in mind that the federated storage engine implements all methods + that are required to be implemented. handler.h has a full list of methods + that you can implement. +*/ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +#include <mysql.h> +//#include <client.h> + +/* + FEDERATED_SHARE is a structure that will be shared amoung all open handlers + The example implements the minimum of what you will probably need. +*/ +//FIX document +typedef struct st_federated_share { + char *table_name; + char *table_base_name; + // the primary select query to be used in rnd_init + char *select_query; + // remote host info, parse_url supplies + char *scheme; + char *hostname; + char *username; + char *password; + char *database; + char *table; + char *socket; + char *sport; + int port; + uint table_name_length,table_base_name_length,use_count; + pthread_mutex_t mutex; + THR_LOCK lock; +} FEDERATED_SHARE; + +/* + Class definition for the storage engine +*/ +class ha_federated: public handler +{ + THR_LOCK_DATA lock; /* MySQL lock */ + FEDERATED_SHARE *share; /* Shared lock info */ + MYSQL *mysql; + MYSQL_RES *result; + uint ref_length; + uint fetch_num; // stores the fetch num + MYSQL_ROW_OFFSET current_position; // Current position used by ::position() + +private: + /* + return 0 on success + return errorcode otherwise + */ + //FIX + uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row); + uint type_quote(int type); + void quote_data(String *string1, Field *field); + +public: + ha_federated(TABLE *table): handler(table), + mysql(0), + ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0) + { + } + ~ha_federated() + { + } + /* The name that will be used for display purposes */ + const char *table_type() const { return "FEDERATED"; } + /* + The name of the index type that will be used for display + don't implement this method unless you really have indexes + */ + const char *index_type(uint inx) { return "REMOTE"; } + const char **bas_ext() const; + /* + This is a list of flags that says what the storage engine + implements. 
The current table flags are documented in
+    handler.h
+    Serg: Double check these (Brian)
+    // FIX add blob support
+  */
+  ulong table_flags() const
+  {
+    return (HA_TABLE_SCAN_ON_INDEX | HA_NOT_EXACT_COUNT |
+            HA_PRIMARY_KEY_IN_READ_INDEX | HA_FILE_BASED |
+            HA_AUTO_PART_KEY);
+  }
+  /*
+    This is a bitmap of flags that says how the storage engine
+    implements indexes. The current index flags are documented in
+    handler.h. If you do not implement indexes, just return zero
+    here.
+
+    part is the key part to check. The first key part is 0.
+    If all_parts is set, MySQL wants to know the flags for the combined
+    index, up to and including 'part'.
+  */
+  ulong index_flags(uint inx, uint part, bool all_parts) const
+  {
+    return (HA_READ_NEXT);
+    // return (HA_READ_NEXT | HA_ONLY_WHOLE_INDEX);
+  }
+  uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }
+  uint max_supported_keys()          const { return MAX_KEY; }
+  uint max_supported_key_parts()     const { return 1024; }
+  uint max_supported_key_length()    const { return 1024; }
+  /*
+    Called in test_quick_select to determine if indexes should be used.
+  */
+  virtual double scan_time()
+  {
+    DBUG_PRINT("ha_federated::scan_time", ("rows %d", records));
+    return (double) (records*2);
+  }
+  /*
+    The next method will never be called if you do not implement indexes.
+  */
+  virtual double read_time(ha_rows rows) { return (double) rows / 20.0 + 1; }
+
+  /*
+    The methods below are implemented in ha_federated.cc.
+
+    Most of these methods are not obligatory; skip them and
+    MySQL will treat them as not implemented.
+  */
+  int open(const char *name, int mode, uint test_if_locked);    // required
+  int close(void);                                              // required
+
+  int write_row(byte * buf);
+  int update_row(const byte * old_data, byte * new_data);
+  int delete_row(const byte * buf);
+  int index_init(uint keynr);
+  int index_read(byte * buf, const byte * key,
+                 uint key_len, enum ha_rkey_function find_flag);
+  int index_read_idx(byte * buf, uint idx, const byte * key,
+                     uint key_len, enum ha_rkey_function find_flag);
+  int index_next(byte * buf);
+  int index_end();
+  /*
+    unlike index_init(), rnd_init() can be called two times
+    without rnd_end() in between (it only makes sense if scan=1).
+ then the second call should prepare for the new table scan + (e.g if rnd_init allocates the cursor, second call should + position it to the start of the table, no need to deallocate + and allocate it again + */ + int rnd_init(bool scan); //required + int rnd_end(); + int rnd_next(byte *buf); //required + int rnd_pos(byte * buf, byte *pos); //required + void position(const byte *record); //required + void info(uint); //required + + int delete_all_rows(void); + int create(const char *name, TABLE *form, + HA_CREATE_INFO *create_info); //required + + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, + enum thr_lock_type lock_type); //required +}; diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index dbcf7bc9197..1556a18bfca 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -98,6 +98,7 @@ void ha_heap::set_keys_for_scanning(void) } } + void ha_heap::update_key_stats() { for (uint i= 0; i < table->keys; i++) @@ -113,10 +114,11 @@ void ha_heap::update_key_stats() records_changed= 0; } + int ha_heap::write_row(byte * buf) { int res; - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) @@ -131,7 +133,7 @@ int ha_heap::write_row(byte * buf) int ha_heap::update_row(const byte * old_data, byte * new_data) { int res; - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); res= heap_update(file,old_data,new_data); @@ -144,7 +146,7 @@ int ha_heap::update_row(const byte * old_data, byte * new_data) int ha_heap::delete_row(const byte * buf) { int res; - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); res= heap_delete(file,buf); if (!res && table->tmp_table == NO_TMP_TABLE && ++records_changed*HEAP_STATS_UPDATE_THRESHOLD > file->s->records) @@ -156,7 +158,8 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error = heap_rkey(file,buf,active_index, key, key_len, find_flag); table->status = error ? STATUS_NOT_FOUND : 0; return error; @@ -165,7 +168,8 @@ int ha_heap::index_read(byte * buf, const byte * key, uint key_len, int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error= heap_rkey(file, buf, active_index, key, key_len, HA_READ_PREFIX_LAST); table->status= error ? STATUS_NOT_FOUND : 0; @@ -175,7 +179,8 @@ int ha_heap::index_read_last(byte *buf, const byte *key, uint key_len) int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error = heap_rkey(file, buf, index, key, key_len, find_flag); table->status = error ? 
STATUS_NOT_FOUND : 0; return error; @@ -184,7 +189,8 @@ int ha_heap::index_read_idx(byte * buf, uint index, const byte * key, int ha_heap::index_next(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=heap_rnext(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -193,7 +199,8 @@ int ha_heap::index_next(byte * buf) int ha_heap::index_prev(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); int error=heap_rprev(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -202,7 +209,8 @@ int ha_heap::index_prev(byte * buf) int ha_heap::index_first(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); int error=heap_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -211,7 +219,8 @@ int ha_heap::index_first(byte * buf) int ha_heap::index_last(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); int error=heap_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -224,7 +233,8 @@ int ha_heap::rnd_init(bool scan) int ha_heap::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=heap_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -234,7 +244,8 @@ int ha_heap::rnd_pos(byte * buf, byte *pos) { int error; HEAP_PTR position; - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); memcpy_fixed((char*) &position,pos,sizeof(HEAP_PTR)); error=heap_rrnd(file, buf, position); table->status=error ? 
STATUS_NOT_FOUND: 0; @@ -430,8 +441,7 @@ ha_rows ha_heap::records_in_range(uint inx, key_range *min_key, min_key->flag != HA_READ_KEY_EXACT || max_key->flag != HA_READ_AFTER_KEY) return HA_POS_ERROR; // Can only use exact keys - else - return key->rec_per_key[key->key_parts-1]; + return key->rec_per_key[key->key_parts-1]; } @@ -472,16 +482,22 @@ int ha_heap::create(const char *name, TABLE *table_arg, { uint flag= key_part->key_type; Field *field= key_part->field; + if (pos->algorithm == HA_KEY_ALG_BTREE) seg->type= field->key_type(); else { - if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT) + if ((seg->type = field->key_type()) != (int) HA_KEYTYPE_TEXT && + seg->type != HA_KEYTYPE_VARTEXT1 && + seg->type != HA_KEYTYPE_VARTEXT2 && + seg->type != HA_KEYTYPE_VARBINARY1 && + seg->type != HA_KEYTYPE_VARBINARY2) seg->type= HA_KEYTYPE_BINARY; } seg->start= (uint) key_part->offset; seg->length= (uint) key_part->length; - seg->flag = 0; + seg->flag= key_part->key_part_flag; + seg->charset= field->charset(); if (field->null_ptr) { @@ -501,7 +517,7 @@ int ha_heap::create(const char *name, TABLE *table_arg, } } mem_per_row+= MY_ALIGN(table_arg->reclength + 1, sizeof(char*)); - max_rows = (ha_rows) (current_thd->variables.max_heap_table_size / + max_rows = (ha_rows) (table->in_use->variables.max_heap_table_size / mem_per_row); HP_CREATE_INFO hp_create_info; hp_create_info.auto_key= auto_key; @@ -528,7 +544,7 @@ void ha_heap::update_create_info(HA_CREATE_INFO *create_info) create_info->auto_increment_value= auto_increment_value; } -longlong ha_heap::get_auto_increment() +ulonglong ha_heap::get_auto_increment() { ha_heap::info(HA_STATUS_AUTO); return auto_increment_value; diff --git a/sql/ha_heap.h b/sql/ha_heap.h index f36e9f31c55..8b44695df07 100644 --- a/sql/ha_heap.h +++ b/sql/ha_heap.h @@ -63,7 +63,7 @@ public: int write_row(byte * buf); int update_row(const byte * old_data, byte * new_data); int delete_row(const byte * buf); - longlong get_auto_increment(); + ulonglong get_auto_increment(); int index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag); int index_read_idx(byte * buf, uint idx, const byte * key, @@ -92,6 +92,12 @@ public: THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); + int cmp_ref(const byte *ref1, const byte *ref2) + { + HEAP_PTR ptr1=*(HEAP_PTR*)ref1; + HEAP_PTR ptr2=*(HEAP_PTR*)ref2; + return ptr1 < ptr2? -1 : (ptr1 > ptr2? 
1 : 0); + } private: void update_key_stats(); }; diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 14b143ca04b..91517770d04 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -80,6 +80,7 @@ extern "C" { #include "../innobase/include/fsp0fsp.h" #include "../innobase/include/sync0sync.h" #include "../innobase/include/fil0fil.h" +#include "../innobase/include/trx0xa.h" } #define HA_INNOBASE_ROWS_IN_TABLE 10000 /* to get optimization right */ @@ -87,6 +88,7 @@ extern "C" { uint innobase_init_flags = 0; ulong innobase_cache_size = 0; +ulong innobase_large_page_size = 0; /* The default values for the following, type long, start-up parameters are declared in mysqld.cc: */ @@ -115,6 +117,9 @@ values */ uint innobase_flush_log_at_trx_commit = 1; my_bool innobase_log_archive = FALSE;/* unused */ +my_bool innobase_use_doublewrite = TRUE; +my_bool innobase_use_checksums = TRUE; +my_bool innobase_use_large_pages = FALSE; my_bool innobase_use_native_aio = FALSE; my_bool innobase_fast_shutdown = TRUE; my_bool innobase_very_fast_shutdown = FALSE; /* this can be set to @@ -149,6 +154,103 @@ static mysql_byte* innobase_get_key(INNOBASE_SHARE *share,uint *length, static INNOBASE_SHARE *get_share(const char *table_name); static void free_share(INNOBASE_SHARE *share); +/********************************************************************* +Commits a transaction in an InnoDB database. */ + +void +innobase_commit_low( +/*================*/ + trx_t* trx); /* in: transaction handle */ + +struct show_var_st innodb_status_variables[]= { + {"buffer_pool_pages_data", + (char*) &export_vars.innodb_buffer_pool_pages_data, SHOW_LONG}, + {"buffer_pool_pages_dirty", + (char*) &export_vars.innodb_buffer_pool_pages_dirty, SHOW_LONG}, + {"buffer_pool_pages_flushed", + (char*) &export_vars.innodb_buffer_pool_pages_flushed, SHOW_LONG}, + {"buffer_pool_pages_free", + (char*) &export_vars.innodb_buffer_pool_pages_free, SHOW_LONG}, + {"buffer_pool_pages_latched", + (char*) &export_vars.innodb_buffer_pool_pages_latched, SHOW_LONG}, + {"buffer_pool_pages_misc", + (char*) &export_vars.innodb_buffer_pool_pages_misc, SHOW_LONG}, + {"buffer_pool_pages_total", + (char*) &export_vars.innodb_buffer_pool_pages_total, SHOW_LONG}, + {"buffer_pool_read_ahead_rnd", + (char*) &export_vars.innodb_buffer_pool_read_ahead_rnd, SHOW_LONG}, + {"buffer_pool_read_ahead_seq", + (char*) &export_vars.innodb_buffer_pool_read_ahead_seq, SHOW_LONG}, + {"buffer_pool_read_requests", + (char*) &export_vars.innodb_buffer_pool_read_requests, SHOW_LONG}, + {"buffer_pool_reads", + (char*) &export_vars.innodb_buffer_pool_reads, SHOW_LONG}, + {"buffer_pool_wait_free", + (char*) &export_vars.innodb_buffer_pool_wait_free, SHOW_LONG}, + {"buffer_pool_write_requests", + (char*) &export_vars.innodb_buffer_pool_write_requests, SHOW_LONG}, + {"data_fsyncs", + (char*) &export_vars.innodb_data_fsyncs, SHOW_LONG}, + {"data_pending_fsyncs", + (char*) &export_vars.innodb_data_pending_fsyncs, SHOW_LONG}, + {"data_pending_reads", + (char*) &export_vars.innodb_data_pending_reads, SHOW_LONG}, + {"data_pending_writes", + (char*) &export_vars.innodb_data_pending_writes, SHOW_LONG}, + {"data_read", + (char*) &export_vars.innodb_data_read, SHOW_LONG}, + {"data_reads", + (char*) &export_vars.innodb_data_reads, SHOW_LONG}, + {"data_writes", + (char*) &export_vars.innodb_data_writes, SHOW_LONG}, + {"data_written", + (char*) &export_vars.innodb_data_written, SHOW_LONG}, + {"dblwr_pages_written", + (char*) &export_vars.innodb_dblwr_pages_written, SHOW_LONG}, + {"dblwr_writes", + 
(char*) &export_vars.innodb_dblwr_writes, SHOW_LONG}, + {"log_waits", + (char*) &export_vars.innodb_log_waits, SHOW_LONG}, + {"log_write_requests", + (char*) &export_vars.innodb_log_write_requests, SHOW_LONG}, + {"log_writes", + (char*) &export_vars.innodb_log_writes, SHOW_LONG}, + {"os_log_fsyncs", + (char*) &export_vars.innodb_os_log_fsyncs, SHOW_LONG}, + {"os_log_pending_fsyncs", + (char*) &export_vars.innodb_os_log_pending_fsyncs, SHOW_LONG}, + {"os_log_pending_writes", + (char*) &export_vars.innodb_os_log_pending_writes, SHOW_LONG}, + {"os_log_written", + (char*) &export_vars.innodb_os_log_written, SHOW_LONG}, + {"page_size", + (char*) &export_vars.innodb_page_size, SHOW_LONG}, + {"pages_created", + (char*) &export_vars.innodb_pages_created, SHOW_LONG}, + {"pages_read", + (char*) &export_vars.innodb_pages_read, SHOW_LONG}, + {"pages_written", + (char*) &export_vars.innodb_pages_written, SHOW_LONG}, + {"row_lock_waits", + (char*) &export_vars.innodb_row_lock_waits, SHOW_LONG}, + {"row_lock_current_waits", + (char*) &export_vars.innodb_row_lock_current_waits, SHOW_LONG}, + {"row_lock_time", + (char*) &export_vars.innodb_row_lock_time, SHOW_LONGLONG}, + {"row_lock_time_max", + (char*) &export_vars.innodb_row_lock_time_max, SHOW_LONG}, + {"row_lock_time_avg", + (char*) &export_vars.innodb_row_lock_time_avg, SHOW_LONG}, + {"rows_deleted", + (char*) &export_vars.innodb_rows_deleted, SHOW_LONG}, + {"rows_inserted", + (char*) &export_vars.innodb_rows_inserted, SHOW_LONG}, + {"rows_read", + (char*) &export_vars.innodb_rows_read, SHOW_LONG}, + {"rows_updated", + (char*) &export_vars.innodb_rows_updated, SHOW_LONG}, + {NullS, NullS, SHOW_LONG}}; + /* General functions */ /********************************************************************** @@ -433,6 +535,25 @@ innobase_mysql_print_thd( } /********************************************************************** +Determines whether the given character set is of variable length. + +NOTE that the exact prototype of this function has to be in +/innobase/data/data0type.ic! */ +extern "C" +ibool +innobase_is_mb_cset( +/*================*/ + ulint cset) /* in: MySQL charset-collation code */ +{ + CHARSET_INFO* cs; + ut_ad(cset < 256); + + cs = all_charsets[cset]; + + return(cs && cs->mbminlen != cs->mbmaxlen); +} + +/********************************************************************** Compares NUL-terminated UTF-8 strings case insensitively. 
NOTE that the exact prototype of this function has to be in @@ -498,9 +619,10 @@ innobase_mysql_tmpfile(void) if (fd2 < 0) { DBUG_PRINT("error",("Got error %d on dup",fd2)); my_errno=errno; - my_error(EE_OUT_OF_FILERESOURCES, - MYF(ME_BELL+ME_WAITTANG), filename, my_errno); - } + my_error(EE_OUT_OF_FILERESOURCES, + MYF(ME_BELL+ME_WAITTANG), + filename, my_errno); + } my_close(fd, MYF(MY_WME)); } return(fd2); @@ -856,6 +978,8 @@ ha_innobase::init_table_handle_for_HANDLER(void) prebuilt->read_just_key = FALSE; prebuilt->used_in_HANDLER = TRUE; + + prebuilt->keep_other_fields_on_keyread = FALSE; } /************************************************************************* @@ -1013,6 +1137,12 @@ innobase_init(void) srv_fast_shutdown = (ibool) innobase_fast_shutdown; + srv_use_doublewrite_buf = (ibool) innobase_use_doublewrite; + srv_use_checksums = (ibool) innobase_use_checksums; + + os_use_large_pages = (ibool) innobase_use_large_pages; + os_large_page_size = (ulint) innobase_large_page_size; + srv_file_per_table = (ibool) innobase_file_per_table; srv_locks_unsafe_for_binlog = (ibool) innobase_locks_unsafe_for_binlog; @@ -1216,7 +1346,7 @@ innobase_commit( &innodb_dummy_stmt_trx_handle: the latter means that the current SQL statement ended */ { - trx_t* trx; + trx_t* trx; DBUG_ENTER("innobase_commit"); DBUG_PRINT("trans", ("ending transaction")); @@ -1236,7 +1366,8 @@ innobase_commit( 3. innobase_query_caching_of_table_permitted(), 4. innobase_savepoint(), 5. ::init_table_handle_for_HANDLER(), - 6. innobase_start_trx_and_assign_read_view() + 6. innobase_start_trx_and_assign_read_view(), + 7. ::transactional_table_lock() and it is only set to 0 in a commit or a rollback. If it is 0 we know there cannot be resources to be freed and we could return immediately. @@ -1254,7 +1385,7 @@ innobase_commit( if (trx_handle != (void*)&innodb_dummy_stmt_trx_handle || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { - /* We were instructed to commit the whole transaction, or + /* We were instructed to commit the whole transaction, or this is an SQL statement end and autocommit is on */ innobase_commit_low(trx); @@ -1396,6 +1527,39 @@ innobase_rollback( } /********************************************************************* +Rolls back a transaction */ + +int +innobase_rollback_trx( +/*==================*/ + /* out: 0 or error number */ + trx_t* trx) /* in: transaction */ +{ + int error = 0; + + DBUG_ENTER("innobase_rollback_trx"); + DBUG_PRINT("trans", ("aborting transaction")); + + /* Release a possible FIFO ticket and search latch. Since we will + reserve the kernel mutex, we have to release the search system latch + first to obey the latching order. */ + + innobase_release_stat_resources(trx); + + if (trx->auto_inc_lock) { + /* If we had reserved the auto-inc lock for some table (if + we come here to roll back the latest SQL statement) we + release it now before a possibly lengthy rollback */ + + row_unlock_table_autoinc_for_mysql(trx); + } + + error = trx_rollback_for_mysql(trx); + + DBUG_RETURN(convert_error_code_to_mysql(error, NULL)); +} + +/********************************************************************* Rolls back a transaction to a savepoint. 
*/ int @@ -1724,8 +1888,6 @@ ha_innobase::open( } } - auto_inc_counter_for_this_stat = 0; - block_size = 16 * 1024; /* Index block size in InnoDB: used by MySQL in query optimization */ @@ -1868,12 +2030,13 @@ innobase_mysql_cmp( switch (mysql_tp) { - case FIELD_TYPE_STRING: - case FIELD_TYPE_VAR_STRING: + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: case FIELD_TYPE_TINY_BLOB: case FIELD_TYPE_MEDIUM_BLOB: case FIELD_TYPE_BLOB: case FIELD_TYPE_LONG_BLOB: + case MYSQL_TYPE_VARCHAR: /* Use the charset number to pick the right charset struct for the comparison. Since the MySQL function get_charset may be slow before Bar removes the mutex operation there, we first @@ -1901,7 +2064,7 @@ innobase_mysql_cmp( ret = charset->coll->strnncollsp(charset, a, a_length, - b, b_length); + b, b_length, 0); if (ret < 0) { return(-1); } else if (ret > 0) { @@ -1930,17 +2093,11 @@ get_innobase_type_from_mysql_type( 8 bits: this is used in ibuf and also when DATA_NOT_NULL is ORed to the type */ - DBUG_ASSERT((ulint)FIELD_TYPE_STRING < 256); - DBUG_ASSERT((ulint)FIELD_TYPE_VAR_STRING < 256); - DBUG_ASSERT((ulint)FIELD_TYPE_DOUBLE < 256); - DBUG_ASSERT((ulint)FIELD_TYPE_FLOAT < 256); - DBUG_ASSERT((ulint)FIELD_TYPE_DECIMAL < 256); - switch (field->type()) { /* NOTE that we only allow string types in DATA_MYSQL and DATA_VARMYSQL */ - case FIELD_TYPE_VAR_STRING: if (field->binary()) { - + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: if (field->binary()) { return(DATA_BINARY); } else if (strcmp( field->charset()->name, @@ -1949,7 +2106,7 @@ get_innobase_type_from_mysql_type( } else { return(DATA_VARMYSQL); } - case FIELD_TYPE_STRING: if (field->binary()) { + case MYSQL_TYPE_STRING: if (field->binary()) { return(DATA_FIXBINARY); } else if (strcmp( @@ -2128,7 +2285,13 @@ build_template( ulint n_fields; ulint n_requested_fields = 0; ibool fetch_all_in_key = FALSE; - ibool fetch_primary_key_cols = FALSE; + ibool fetch_primary_key_cols = TRUE; /* The ROR code in + opt_range.cc assumes that the + primary key cols are always + retrieved. Starting from + MySQL-5.0.2, let us always + fetch them, even though it + wastes some CPU. 
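+					For example, in a query like
+					SELECT b FROM t WHERE c = 5 the
+					primary key column(s) of t are
+					still included in the fetch
+					template built below, even though
+					the statement never refers to
+					them.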
*/ ulint i; if (prebuilt->select_lock_type == LOCK_X) { @@ -2205,12 +2368,15 @@ build_template( templ = prebuilt->mysql_template + n_requested_fields; field = table->field[i]; - if (templ_type == ROW_MYSQL_REC_FIELDS - && !(fetch_all_in_key - && dict_index_contains_col_or_prefix(index, i)) - && !(fetch_primary_key_cols - && dict_table_col_in_clustered_key(index->table, i)) - && thd->query_id != field->query_id) { + ibool index_contains_field= + dict_index_contains_col_or_prefix(index, i); + + if (templ_type == ROW_MYSQL_REC_FIELDS && + ((prebuilt->read_just_key && !index_contains_field) || + (!(fetch_all_in_key && index_contains_field) && + !(fetch_primary_key_cols && + dict_table_col_in_clustered_key(index->table, i)) && + thd->query_id != field->query_id))) { /* This field is not needed in the query, skip it */ @@ -2287,7 +2453,7 @@ ha_innobase::write_row( longlong dummy; ibool incremented_auto_inc_for_stat = FALSE; ibool incremented_auto_inc_counter = FALSE; - ibool skip_auto_inc_decr; + ibool auto_inc_used= FALSE; DBUG_ENTER("ha_innobase::write_row"); @@ -2309,7 +2475,8 @@ ha_innobase::write_row( ut_error; } - statistic_increment(ha_write_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_write_count, + &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); @@ -2415,40 +2582,34 @@ ha_innobase::write_row( prebuilt->sql_stat_start = TRUE; } - /* Fetch the value the user possibly has set in the - autoincrement field */ + /* + We must use the handler code to update the auto-increment + value to be sure that increment it correctly. + */ + update_auto_increment(); + auto_inc_used= 1; - auto_inc = table->next_number_field->val_int(); + } - /* In replication and also otherwise the auto-inc column - can be set with SET INSERT_ID. Then we must look at - user_thd->next_insert_id. If it is nonzero and the user - has not supplied a value, we must use it, and use values - incremented by 1 in all subsequent inserts within the - same SQL statement! */ + if (prebuilt->mysql_template == NULL + || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { + /* Build the template used in converting quickly between + the two database formats */ - if (auto_inc == 0 && user_thd->next_insert_id != 0) { + build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW); + } - auto_inc_counter_for_this_stat - = user_thd->next_insert_id; - } + innodb_srv_conc_enter_innodb(prebuilt->trx); - if (auto_inc == 0 && auto_inc_counter_for_this_stat) { - /* The user set the auto-inc counter for - this SQL statement with SET INSERT_ID. We must - assign sequential values from the counter. */ + error = row_insert_for_mysql((byte*) record, prebuilt); - auto_inc = auto_inc_counter_for_this_stat; + if (error == DB_SUCCESS && auto_inc_used) { - /* We give MySQL a new value to place in the - auto-inc column */ - user_thd->next_insert_id = auto_inc; + /* Fetch the value that was set in the autoincrement field */ - auto_inc_counter_for_this_stat++; - incremented_auto_inc_for_stat = TRUE; - } + auto_inc = table->next_number_field->val_int(); - if (auto_inc != 0) { + if (auto_inc != 0) { /* This call will calculate the max of the current value and the value supplied by the user and update the counter accordingly */ @@ -2460,107 +2621,19 @@ ha_innobase::write_row( The lock is released at each SQL statement's end. 
*/ - innodb_srv_conc_enter_innodb(prebuilt->trx); - error = row_lock_table_autoinc_for_mysql(prebuilt); - innodb_srv_conc_exit_innodb(prebuilt->trx); - - if (error != DB_SUCCESS) { - - error = convert_error_code_to_mysql(error, - user_thd); - goto func_exit; - } - - dict_table_autoinc_update(prebuilt->table, auto_inc); - } else { - innodb_srv_conc_enter_innodb(prebuilt->trx); - - if (!prebuilt->trx->auto_inc_lock) { - - error = row_lock_table_autoinc_for_mysql( - prebuilt); - if (error != DB_SUCCESS) { - innodb_srv_conc_exit_innodb( - prebuilt->trx); - - error = convert_error_code_to_mysql( - error, user_thd); - goto func_exit; - } - } - - /* The following call gets the value of the auto-inc - counter of the table and increments it by 1 */ - - auto_inc = dict_table_autoinc_get(prebuilt->table); - incremented_auto_inc_counter = TRUE; - - innodb_srv_conc_exit_innodb(prebuilt->trx); - - /* We can give the new value for MySQL to place in - the field */ - - user_thd->next_insert_id = auto_inc; - } - - /* This call of a handler.cc function places - user_thd->next_insert_id to the column value, if the column - value was not set by the user */ - - update_auto_increment(); - } - - if (prebuilt->mysql_template == NULL - || prebuilt->template_type != ROW_MYSQL_WHOLE_ROW) { - /* Build the template used in converting quickly between - the two database formats */ - - build_template(prebuilt, NULL, table, ROW_MYSQL_WHOLE_ROW); - } + error = row_lock_table_autoinc_for_mysql(prebuilt); - innodb_srv_conc_enter_innodb(prebuilt->trx); + if (error != DB_SUCCESS) { - error = row_insert_for_mysql((byte*) record, prebuilt); + error = convert_error_code_to_mysql(error, user_thd); + goto func_exit; + } + dict_table_autoinc_update(prebuilt->table, auto_inc); + } + } innodb_srv_conc_exit_innodb(prebuilt->trx); - if (error != DB_SUCCESS) { - /* If the insert did not succeed we restore the value of - the auto-inc counter we used; note that this behavior was - introduced only in version 4.0.4. - NOTE that a REPLACE command and LOAD DATA INFILE REPLACE - handles a duplicate key error - itself, and we must not decrement the autoinc counter - if we are performing those statements. - NOTE 2: if there was an error, for example a deadlock, - which caused InnoDB to roll back the whole transaction - already in the call of row_insert_for_mysql(), we may no - longer have the AUTO-INC lock, and cannot decrement - the counter here. */ - - skip_auto_inc_decr = FALSE; - - if (error == DB_DUPLICATE_KEY - && (user_thd->lex->sql_command == SQLCOM_REPLACE - || user_thd->lex->sql_command - == SQLCOM_REPLACE_SELECT - || (user_thd->lex->sql_command == SQLCOM_LOAD - && user_thd->lex->duplicates == DUP_REPLACE))) { - - skip_auto_inc_decr= TRUE; - } - - if (!skip_auto_inc_decr && incremented_auto_inc_counter - && prebuilt->trx->auto_inc_lock) { - dict_table_autoinc_decrement(prebuilt->table); - } - - if (!skip_auto_inc_decr && incremented_auto_inc_for_stat - && prebuilt->trx->auto_inc_lock) { - auto_inc_counter_for_this_stat--; - } - } - error = convert_error_code_to_mysql(error, user_thd); /* Tell InnoDB server that there might be work for @@ -2571,6 +2644,7 @@ func_exit: DBUG_RETURN(error); } + /****************************************************************** Converts field data for storage in an InnoDB update vector. 
*/ inline @@ -2590,12 +2664,12 @@ innobase_convert_and_store_changed_col( if (len == UNIV_SQL_NULL) { data = NULL; - } else if (col_type == DATA_VARCHAR || col_type == DATA_BINARY - || col_type == DATA_VARMYSQL) { - /* Remove trailing spaces */ - while (len > 0 && data[len - 1] == ' ') { - len--; - } + } else if (col_type == DATA_VARCHAR || col_type == DATA_BINARY + || col_type == DATA_VARMYSQL) { + /* Remove trailing spaces */ + while (len > 0 && data[len - 1] == ' ') { + len--; + } } else if (col_type == DATA_INT) { /* Store integer data in InnoDB in a big-endian format, sign bit negated, if signed */ @@ -2845,6 +2919,32 @@ ha_innobase::delete_row( DBUG_RETURN(error); } +/************************************************************************** +Deletes a lock set to a row */ + +void +ha_innobase::unlock_row(void) +/*=========================*/ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + + DBUG_ENTER("ha_innobase::unlock_row"); + + ut_ad(prebuilt->trx == + (trx_t*) current_thd->transaction.all.innobase_tid); + + if (last_query_id != user_thd->query_id) { + ut_print_timestamp(stderr); + fprintf(stderr, +" InnoDB: Error: last_query_id is %lu != user_thd_query_id is %lu\n", + (ulong)last_query_id, (ulong)user_thd->query_id); + mem_analyze_corruption((byte *) prebuilt->trx); + ut_error; + } + + row_unlock_for_mysql(prebuilt); +} + /********************************************************************** Initializes a handle to use an index. */ @@ -2998,7 +3098,8 @@ ha_innobase::index_read( ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, + &LOCK_status); if (last_query_id != user_thd->query_id) { prebuilt->sql_stat_start = TRUE; @@ -3104,7 +3205,8 @@ ha_innobase::change_active_index( { row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; KEY* key=0; - statistic_increment(ha_read_key_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, + &LOCK_status); DBUG_ENTER("change_active_index"); ut_ad(user_thd == current_thd); @@ -3236,7 +3338,8 @@ ha_innobase::index_next( mysql_byte* buf) /* in/out: buffer for next row in MySQL format */ { - statistic_increment(ha_read_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); return(general_fetch(buf, ROW_SEL_NEXT, 0)); } @@ -3253,7 +3356,8 @@ ha_innobase::index_next_same( const mysql_byte* key, /* in: key value */ uint keylen) /* in: key value length */ { - statistic_increment(ha_read_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); return(general_fetch(buf, ROW_SEL_NEXT, last_match_mode)); } @@ -3287,7 +3391,8 @@ ha_innobase::index_first( int error; DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); error = index_read(buf, NULL, 0, HA_READ_AFTER_KEY); @@ -3313,7 +3418,8 @@ ha_innobase::index_last( int error; DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_last_count, + &LOCK_status); error = index_read(buf, NULL, 0, HA_READ_BEFORE_KEY); @@ -3378,7 +3484,8 @@ ha_innobase::rnd_next( int error; DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count, &LOCK_status); + 
statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); if (start_of_scan) { error = index_first(buf); @@ -3414,7 +3521,8 @@ ha_innobase::rnd_pos( DBUG_ENTER("rnd_pos"); DBUG_DUMP("key", (char*) pos, ref_length); - statistic_increment(ha_read_rnd_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count, + &LOCK_status); ut_ad(prebuilt->trx == (trx_t*) current_thd->transaction.all.innobase_tid); @@ -3503,7 +3611,7 @@ create_table_def( TABLE* form, /* in: information on table columns and indexes */ const char* table_name, /* in: table name */ - const char* path_of_temp_table)/* in: if this is a table explicitly + const char* path_of_temp_table,/* in: if this is a table explicitly created by the user with the TEMPORARY keyword, then this parameter is the dir path where the @@ -3511,6 +3619,7 @@ create_table_def( an .ibd file for it (no .ibd extension in the path, though); otherwise this is NULL */ + ibool comp) /* in: TRUE=compact record format */ { Field* field; dict_table_t* table; @@ -3531,7 +3640,7 @@ create_table_def( /* We pass 0 as the space id, and determine at a lower level the space id where to store the table */ - table = dict_mem_table_create((char*) table_name, 0, n_cols); + table = dict_mem_table_create(table_name, 0, n_cols, comp); if (path_of_temp_table) { table->dir_path_of_temp_table = @@ -3742,6 +3851,7 @@ ha_innobase::create( char name2[FN_REFLEN]; char norm_name[FN_REFLEN]; THD *thd= current_thd; + ib_longlong auto_inc_value; DBUG_ENTER("ha_innobase::create"); @@ -3795,12 +3905,9 @@ ha_innobase::create( /* Create the table definition in InnoDB */ - if (create_info->options & HA_LEX_CREATE_TMP_TABLE) { - - error = create_table_def(trx, form, norm_name, name2); - } else { - error = create_table_def(trx, form, norm_name, NULL); - } + error = create_table_def(trx, form, norm_name, + create_info->options & HA_LEX_CREATE_TMP_TABLE ? name2 : NULL, + !(form->db_options_in_use & HA_OPTION_PACK_RECORD)); if (error) { innobase_commit_low(trx); @@ -3915,6 +4022,20 @@ ha_innobase::create( DBUG_ASSERT(innobase_table != 0); + if ((thd->lex->create_info.used_fields & HA_CREATE_USED_AUTO) && + (thd->lex->create_info.auto_increment_value != 0)) { + + /* Query was ALTER TABLE...AUTO_INCREMENT = x; or + CREATE TABLE ...AUTO_INCREMENT = x; Find out a table + definition from the dictionary and get the current value + of the auto increment field. Set a new value to the + auto increment field if the value is greater than the + maximum value in the column. 
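+
+		For example, both
+		CREATE TABLE t (a INT AUTO_INCREMENT PRIMARY KEY)
+		ENGINE=InnoDB AUTO_INCREMENT = 100; and
+		ALTER TABLE t AUTO_INCREMENT = 100;
+		arrive here with auto_increment_value == 100, which is
+		passed to dict_table_autoinc_initialize() below.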
*/ + + auto_inc_value = thd->lex->create_info.auto_increment_value; + dict_table_autoinc_initialize(innobase_table, auto_inc_value); + } + /* Tell the InnoDB server that there might be work for utility threads: */ @@ -4765,6 +4886,104 @@ ha_innobase::get_foreign_key_create_info(void) return(str); } + +int +ha_innobase::get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) +{ + dict_foreign_t* foreign; + + DBUG_ENTER("get_foreign_key_list"); + row_prebuilt_t* prebuilt = (row_prebuilt_t*)innobase_prebuilt; + ut_a(prebuilt != NULL); + update_thd(current_thd); + prebuilt->trx->op_info = (char*)"getting list of foreign keys"; + trx_search_latch_release_if_reserved(prebuilt->trx); + mutex_enter_noninline(&(dict_sys->mutex)); + foreign = UT_LIST_GET_FIRST(prebuilt->table->foreign_list); + + while (foreign != NULL) + { + uint i; + FOREIGN_KEY_INFO f_key_info; + LEX_STRING *name= 0; + const char *tmp_buff; + + tmp_buff= foreign->id; + i= 0; + while (tmp_buff[i] != '/') + i++; + tmp_buff+= i + 1; + f_key_info.forein_id= make_lex_string(thd, f_key_info.forein_id, + tmp_buff, strlen(tmp_buff), 1); + tmp_buff= foreign->referenced_table_name; + i= 0; + while (tmp_buff[i] != '/') + i++; + f_key_info.referenced_db= make_lex_string(thd, f_key_info.referenced_db, + tmp_buff, i, 1); + tmp_buff+= i + 1; + f_key_info.referenced_table= make_lex_string(thd, + f_key_info.referenced_table, + tmp_buff, strlen(tmp_buff), 1); + + for (i= 0;;) + { + tmp_buff= foreign->foreign_col_names[i]; + name= make_lex_string(thd, name, tmp_buff, strlen(tmp_buff), 1); + f_key_info.foreign_fields.push_back(name); + tmp_buff= foreign->referenced_col_names[i]; + name= make_lex_string(thd, name, tmp_buff, strlen(tmp_buff), 1); + f_key_info.referenced_fields.push_back(name); + if (++i >= foreign->n_fields) + break; + } + + ulong length= 0; + if (foreign->type == DICT_FOREIGN_ON_DELETE_CASCADE) + { + length=17; + tmp_buff= "ON DELETE CASCADE"; + } + else if (foreign->type == DICT_FOREIGN_ON_DELETE_SET_NULL) + { + length=18; + tmp_buff= "ON DELETE SET NULL"; + } + else if (foreign->type == DICT_FOREIGN_ON_DELETE_NO_ACTION) + { + length=19; + tmp_buff= "ON DELETE NO ACTION"; + } + else if (foreign->type == DICT_FOREIGN_ON_UPDATE_CASCADE) + { + length=17; + tmp_buff= "ON UPDATE CASCADE"; + } + else if (foreign->type == DICT_FOREIGN_ON_UPDATE_SET_NULL) + { + length=18; + tmp_buff= "ON UPDATE SET NULL"; + } + else if (foreign->type == DICT_FOREIGN_ON_UPDATE_NO_ACTION) + { + length=19; + tmp_buff= "ON UPDATE NO ACTION"; + } + f_key_info.constraint_method= make_lex_string(thd, + f_key_info.constraint_method, + tmp_buff, length, 1); + + FOREIGN_KEY_INFO *pf_key_info= ((FOREIGN_KEY_INFO *) + thd->memdup((gptr) &f_key_info, + sizeof(FOREIGN_KEY_INFO))); + f_key_list->push_back(pf_key_info); + foreign = UT_LIST_GET_NEXT(foreign_list, foreign); + } + mutex_exit_noninline(&(dict_sys->mutex)); + prebuilt->trx->op_info = (char*)""; + DBUG_RETURN(0); +} + /*********************************************************************** Checks if a table is referenced by a foreign key. The MySQL manual states that a REPLACE is either equivalent to an INSERT, or DELETE(s) + INSERT. 
Only a @@ -4827,9 +5046,11 @@ ha_innobase::extra( if (prebuilt->blob_heap) { row_mysql_prebuilt_free_blob_heap(prebuilt); } + prebuilt->keep_other_fields_on_keyread = 0; prebuilt->read_just_key = 0; break; case HA_EXTRA_RESET_STATE: + prebuilt->keep_other_fields_on_keyread = 0; prebuilt->read_just_key = 0; break; case HA_EXTRA_NO_KEYREAD: @@ -4848,6 +5069,9 @@ ha_innobase::extra( case HA_EXTRA_KEYREAD: prebuilt->read_just_key = 1; break; + case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: + prebuilt->keep_other_fields_on_keyread = 1; + break; default:/* Do nothing */ ; } @@ -4896,6 +5120,7 @@ ha_innobase::start_stmt( prebuilt->sql_stat_start = TRUE; prebuilt->hint_need_to_fetch_extra_cols = 0; prebuilt->read_just_key = 0; + prebuilt->keep_other_fields_on_keyread = FALSE; if (!prebuilt->mysql_has_locked) { /* This handle is for a temporary table created inside @@ -4918,8 +5143,9 @@ ha_innobase::start_stmt( select_lock_type value. The value of stored_select_lock_type was decided in: 1) ::store_lock(), - 2) ::external_lock(), and - 3) ::init_table_handle_for_HANDLER(). */ + 2) ::external_lock(), + 3) ::init_table_handle_for_HANDLER(), and + 4) :.transactional_table_lock(). */ prebuilt->select_lock_type = prebuilt->stored_select_lock_type; @@ -4993,6 +5219,7 @@ ha_innobase::external_lock( prebuilt->hint_need_to_fetch_extra_cols = 0; prebuilt->read_just_key = 0; + prebuilt->keep_other_fields_on_keyread = FALSE; if (lock_type == F_WRLCK) { @@ -5105,11 +5332,110 @@ ha_innobase::external_lock( DBUG_RETURN(0); } +/********************************************************************** +With this function MySQL request a transactional lock to a table when +user issued query LOCK TABLES..WHERE ENGINE = InnoDB. */ + +int +ha_innobase::transactional_table_lock( +/*==================================*/ + /* out: 0 */ + THD* thd, /* in: handle to the user thread */ + int lock_type) /* in: lock type */ +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + trx_t* trx; + + DBUG_ENTER("ha_innobase::transactional_table_lock"); + DBUG_PRINT("enter",("lock_type: %d", lock_type)); + + /* We do not know if MySQL can call this function before calling + external_lock(). To be safe, update the thd of the current table + handle. */ + + update_thd(thd); + + if (prebuilt->table->ibd_file_missing && !current_thd->tablespace_op) { + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB error:\n" +"MySQL is trying to use a table handle but the .ibd file for\n" +"table %s does not exist.\n" +"Have you deleted the .ibd file from the database directory under\n" +"the MySQL datadir?" 
+"Look from section 15.1 of http://www.innodb.com/ibman.html\n" +"how you can resolve the problem.\n", + prebuilt->table->name); + DBUG_RETURN(HA_ERR_CRASHED); + } + + trx = prebuilt->trx; + + prebuilt->sql_stat_start = TRUE; + prebuilt->hint_need_to_fetch_extra_cols = 0; + + prebuilt->read_just_key = 0; + prebuilt->keep_other_fields_on_keyread = FALSE; + + if (lock_type == F_WRLCK) { + prebuilt->select_lock_type = LOCK_X; + prebuilt->stored_select_lock_type = LOCK_X; + } else if (lock_type == F_RDLCK) { + prebuilt->select_lock_type = LOCK_S; + prebuilt->stored_select_lock_type = LOCK_S; + } else { + ut_print_timestamp(stderr); + fprintf(stderr, " InnoDB error:\n" +"MySQL is trying to set transactional table lock with corrupted lock type\n" +"to table %s, lock type %d does not exist.\n", + prebuilt->table->name, lock_type); + DBUG_RETURN(HA_ERR_CRASHED); + } + + /* MySQL is setting a new transactional table lock */ + + /* Set the MySQL flag to mark that there is an active transaction */ + thd->transaction.all.innodb_active_trans = 1; + + if (thd->in_lock_tables && thd->variables.innodb_table_locks) { + ulint error = DB_SUCCESS; + + error = row_lock_table_for_mysql(prebuilt,NULL, + LOCK_TABLE_TRANSACTIONAL); + + if (error != DB_SUCCESS) { + error = convert_error_code_to_mysql(error, user_thd); + DBUG_RETURN(error); + } + + if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) { + + /* Store the current undo_no of the transaction + so that we know where to roll back if we have + to roll back the next SQL statement */ + + trx_mark_sql_stat_end(trx); + } + } + + DBUG_RETURN(0); +} + +/**************************************************************************** +Here we export InnoDB status variables to MySQL. */ + +void +innodb_export_status(void) +/*======================*/ +{ + srv_export_innodb_status(); +} + + /**************************************************************************** Implements the SHOW INNODB STATUS command. Sends the output of the InnoDB Monitor to the client. */ -int +bool innodb_show_status( /*===============*/ THD* thd) /* in: the MySQL query thread of the caller */ @@ -5123,7 +5449,7 @@ innodb_show_status( my_message(ER_NOT_SUPPORTED_YET, "Cannot call SHOW INNODB STATUS because skip-innodb is defined", MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } trx = check_trx_exists(thd); @@ -5152,7 +5478,7 @@ innodb_show_status( if (!(str = my_malloc(flen + 1, MYF(0)))) { mutex_exit_noninline(&srv_monitor_file_mutex); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } rewind(srv_monitor_file); @@ -5164,11 +5490,12 @@ innodb_show_status( field_list.push_back(new Item_empty_string("Status", flen)); - if (protocol->send_fields(&field_list, 1)) { + if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) { my_free(str, MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } protocol->prepare_for_resend(); @@ -5176,10 +5503,111 @@ innodb_show_status( my_free(str, MYF(0)); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); +} + +/**************************************************************************** +Implements the SHOW MUTEX STATUS command. . 
*/ + +bool +innodb_mutex_show_status( +/*===============*/ + THD* thd) /* in: the MySQL query thread of the caller */ +{ + Protocol *protocol= thd->protocol; + List<Item> field_list; + mutex_t* mutex; + const char* file_name; + ulint line; + ulint rw_lock_count= 0; + ulint rw_lock_count_spin_loop= 0; + ulint rw_lock_count_spin_rounds= 0; + ulint rw_lock_count_os_wait= 0; + ulint rw_lock_count_os_yield= 0; + ulonglong rw_lock_wait_time= 0; + + DBUG_ENTER("innodb_mutex_show_status"); + + field_list.push_back(new Item_empty_string("Mutex", FN_REFLEN)); + field_list.push_back(new Item_empty_string("Module", FN_REFLEN)); + field_list.push_back(new Item_uint("Count", 21)); + field_list.push_back(new Item_uint("Spin_waits", 21)); + field_list.push_back(new Item_uint("Spin_rounds", 21)); + field_list.push_back(new Item_uint("OS_waits", 21)); + field_list.push_back(new Item_uint("OS_yields", 21)); + field_list.push_back(new Item_uint("OS_waits_time", 21)); + + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); + +#ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER + mutex_enter(&mutex_list_mutex); +#endif + + mutex = UT_LIST_GET_FIRST(mutex_list); + + while ( mutex != NULL ) + { + if (mutex->mutex_type != 1) + { + if (mutex->count_using > 0) + { + protocol->prepare_for_resend(); + protocol->store(mutex->cmutex_name, system_charset_info); + protocol->store(mutex->cfile_name, system_charset_info); + protocol->store((ulonglong)mutex->count_using); + protocol->store((ulonglong)mutex->count_spin_loop); + protocol->store((ulonglong)mutex->count_spin_rounds); + protocol->store((ulonglong)mutex->count_os_wait); + protocol->store((ulonglong)mutex->count_os_yield); + protocol->store((ulonglong)mutex->lspent_time/1000); + + if (protocol->write()) + { +#ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER + mutex_exit(&mutex_list_mutex); +#endif + DBUG_RETURN(1); + } + } + } + else + { + rw_lock_count += mutex->count_using; + rw_lock_count_spin_loop += mutex->count_spin_loop; + rw_lock_count_spin_rounds += mutex->count_spin_rounds; + rw_lock_count_os_wait += mutex->count_os_wait; + rw_lock_count_os_yield += mutex->count_os_yield; + rw_lock_wait_time += mutex->lspent_time; + } + + mutex = UT_LIST_GET_NEXT(list, mutex); + } + + protocol->prepare_for_resend(); + protocol->store("rw_lock_mutexes", system_charset_info); + protocol->store("", system_charset_info); + protocol->store((ulonglong)rw_lock_count); + protocol->store((ulonglong)rw_lock_count_spin_loop); + protocol->store((ulonglong)rw_lock_count_spin_rounds); + protocol->store((ulonglong)rw_lock_count_os_wait); + protocol->store((ulonglong)rw_lock_count_os_yield); + protocol->store((ulonglong)rw_lock_wait_time/1000); + + if (protocol->write()) + { + DBUG_RETURN(1); + } + +#ifdef MUTEX_PROTECT_TO_BE_ADDED_LATER + mutex_exit(&mutex_list_mutex); +#endif + send_eof(thd); + DBUG_RETURN(FALSE); } /**************************************************************************** @@ -5431,7 +5859,7 @@ initialized yet. This function does not change the value of the auto-inc counter if it already has been initialized. Returns the value of the auto-inc counter. 
*/ -longlong +ulonglong ha_innobase::get_auto_increment() /*=============================*/ /* out: auto-increment column value, -1 if error @@ -5444,10 +5872,10 @@ ha_innobase::get_auto_increment() if (error) { - return(-1); + return(~(ulonglong) 0); } - return(nr); + return((ulonglong) nr); } /*********************************************************************** @@ -5456,7 +5884,7 @@ This function stores the binlog offset and flushes logs. */ void innobase_store_binlog_offset_and_flush_log( /*=======================================*/ - char *binlog_name, /* in: binlog name */ + char *binlog_name, /* in: binlog name */ longlong offset) /* in: binlog offset */ { mtr_t mtr; @@ -5481,6 +5909,55 @@ innobase_store_binlog_offset_and_flush_log( log_buffer_flush_to_disk(); } + +int +ha_innobase::cmp_ref( + const mysql_byte *ref1, + const mysql_byte *ref2) +{ + row_prebuilt_t* prebuilt = (row_prebuilt_t*) innobase_prebuilt; + enum_field_types mysql_type; + Field* field; + int result; + + if (prebuilt->clust_index_was_generated) + return memcmp(ref1, ref2, DATA_ROW_ID_LEN); + + /* Do type-aware comparison of Primary Key members. PK members + are always NOT NULL, so no checks for NULL are performed */ + KEY_PART_INFO *key_part= table->key_info[table->primary_key].key_part; + KEY_PART_INFO *key_part_end= + key_part + table->key_info[table->primary_key].key_parts; + for (; key_part != key_part_end; ++key_part) { + field = key_part->field; + mysql_type = field->type(); + if (mysql_type == FIELD_TYPE_TINY_BLOB + || mysql_type == FIELD_TYPE_MEDIUM_BLOB + || mysql_type == FIELD_TYPE_BLOB + || mysql_type == FIELD_TYPE_LONG_BLOB) { + + ut_a(!ref1[1]); + ut_a(!ref2[1]); + byte len1= *ref1; + byte len2= *ref2; + ref1 += 2; + ref2 += 2; + result = + ((Field_blob*)field)->cmp((const char*)ref1, len1, + (const char*)ref2, len2); + } else { + result = + field->cmp((const char*)ref1, (const char*)ref2); + } + + if (result) + return result; + ref1 += key_part->length; + ref2 += key_part->length; + } + return 0; +} + char* ha_innobase::get_mysql_bin_log_name() { @@ -5598,4 +6075,158 @@ innobase_query_is_replace(void) } } +/*********************************************************************** +This function is used to prepare X/Open XA distributed transaction */ + +int innobase_xa_prepare( +/*====================*/ + /* out: 0 or error number */ + THD* thd, /* in: handle to the MySQL thread of the user + whose XA transaction should be prepared */ + bool all) /* in: TRUE - commit transaction + FALSE - the current SQL statement ended */ +{ + int error = 0; + trx_t* trx; + + trx = check_trx_exists(thd); + + /* TODO: Get X/Open XA Transaction Identification from MySQL*/ + memset(&trx->xid, 0, sizeof(trx->xid)); + trx->xid.formatID = -1; + + /* Release a possible FIFO ticket and search latch. Since we will + reserve the kernel mutex, we have to release the search system latch + first to obey the latching order. 
*/ + + innobase_release_stat_resources(trx); + + if (trx->active_trans == 0 && trx->conc_state != TRX_NOT_STARTED) { + + fprintf(stderr, +"InnoDB: Error: thd->transaction.all.innodb_active_trans == 0\n" +"InnoDB: but trx->conc_state != TRX_NOT_STARTED\n"); + } + + if (all || (!(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))) { + + /* We were instructed to prepare the whole transaction, or + this is an SQL statement end and autocommit is on */ + + error = trx_prepare_for_mysql(trx); + } else { + /* We just mark the SQL statement ended and do not do a + transaction prepare */ + + if (trx->auto_inc_lock) { + /* If we had reserved the auto-inc lock for some + table in this SQL statement we release it now */ + + row_unlock_table_autoinc_for_mysql(trx); + } + /* Store the current undo_no of the transaction so that we + know where to roll back if we have to roll back the next + SQL statement */ + + trx_mark_sql_stat_end(trx); + } + + /* Tell the InnoDB server that there might be work for utility + threads: */ + + srv_active_wake_master_thread(); + + return error; +} + +/*********************************************************************** +This function is used to recover X/Open XA distributed transactions */ + +int innobase_xa_recover( + /* out: number of prepared transactions + stored in xid_list */ + XID* xid_list, /* in/out: prepared transactions */ + uint len) /* in: number of slots in xid_list */ +/*====================*/ +{ + if (len == 0 || xid_list == NULL) { + return 0; + } + + return (trx_recover_for_mysql(xid_list, len)); +} + +/*********************************************************************** +This function is used to commit one X/Open XA distributed transaction +which is in the prepared state */ + +int innobase_commit_by_xid( +/*=======================*/ + /* out: 0 or error number */ + XID* xid) /* in: X/Open XA Transaction Identification */ +{ + trx_t* trx; + + trx = trx_get_trx_by_xid(xid); + + if (trx) { + innobase_commit_low(trx); + + return(XA_OK); + } else { + return(XAER_NOTA); + } +} + +/*********************************************************************** +This function is used to rollback one X/Open XA distributed transaction +which is in the prepared state */ + +int innobase_rollback_by_xid( + /* out: 0 or error number */ + XID *xid) /* in : X/Open XA Transaction Idenfification */ +{ + trx_t* trx; + + trx = trx_get_trx_by_xid(xid); + + if (trx) { + return(innobase_rollback_trx(trx)); + } else { + return(XAER_NOTA); + } +} + +/*********************************************************************** +This function is used to test commit/rollback of XA transactions */ + +int innobase_xa_end( +/*================*/ + THD* thd) /* in: MySQL thread handle of the user for whom + transactions should be recovered */ +{ + DBUG_ENTER("innobase_xa_end"); + + XID trx_list[100]; + int trx_num, trx_num_max = 100; + int i; + XID xid; + + while((trx_num = innobase_xa_recover(trx_list, trx_num_max))) { + + for(i=0;i < trx_num; i++) { + xid = trx_list[i]; + + if ( i % 2) { + innobase_commit_by_xid(&xid); + } else { + innobase_rollback_by_xid(&xid); + } + } + } + + free(trx_list); + + DBUG_RETURN(0); +} #endif /* HAVE_INNOBASE_DB */ diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index e76a966c6b9..fcb9165de64 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -84,6 +84,7 @@ class ha_innobase: public handler HA_CAN_SQL_HANDLER | HA_NOT_EXACT_COUNT | HA_PRIMARY_KEY_IN_READ_INDEX | + HA_NO_VARCHAR | HA_TABLE_SCAN_ON_INDEX), last_dup_key((uint) -1), start_of_scan(0), @@ 
-122,6 +123,7 @@ class ha_innobase: public handler int write_row(byte * buf); int update_row(const byte * old_data, byte * new_data); int delete_row(const byte * buf); + void unlock_row(); int index_init(uint index); int index_end(); @@ -148,6 +150,7 @@ class ha_innobase: public handler int discard_or_import_tablespace(my_bool discard); int extra(enum ha_extra_function operation); int external_lock(THD *thd, int lock_type); + int transactional_table_lock(THD *thd, int lock_type); int start_stmt(THD *thd); void position(byte *record); @@ -161,21 +164,25 @@ class ha_innobase: public handler int check(THD* thd, HA_CHECK_OPT* check_opt); char* update_table_comment(const char* comment); char* get_foreign_key_create_info(); + int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list); uint referenced_by_foreign_key(); void free_foreign_key_create_info(char* str); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); void init_table_handle_for_HANDLER(); - longlong get_auto_increment(); + ulonglong get_auto_increment(); uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; } - - static char *get_mysql_bin_log_name(); + static char *get_mysql_bin_log_name(); static ulonglong get_mysql_bin_log_pos(); + bool primary_key_is_clustered() { return true; } + int cmp_ref(const byte *ref1, const byte *ref2); }; +extern struct show_var_st innodb_status_variables[]; extern uint innobase_init_flags, innobase_lock_type; extern uint innobase_flush_log_at_trx_commit; extern ulong innobase_cache_size; +extern ulong innobase_large_page_size; extern char *innobase_home, *innobase_tmpdir, *innobase_logdir; extern long innobase_lock_scan_time; extern long innobase_mirrored_log_groups, innobase_log_files_in_group; @@ -190,6 +197,9 @@ extern char *innobase_log_group_home_dir, *innobase_log_arch_dir; extern char *innobase_unix_file_flush_method; /* The following variables have to be my_bool for SHOW VARIABLES to work */ extern my_bool innobase_log_archive, + innobase_use_doublewrite, + innobase_use_checksums, + innobase_use_large_pages, innobase_use_native_aio, innobase_fast_shutdown, innobase_file_per_table, innobase_locks_unsafe_for_binlog, innobase_create_status_file; @@ -231,7 +241,9 @@ int innobase_savepoint( my_off_t binlog_cache_pos); int innobase_close_connection(THD *thd); int innobase_drop_database(char *path); -int innodb_show_status(THD* thd); +bool innodb_show_status(THD* thd); +bool innodb_mutex_show_status(THD* thd); +void innodb_export_status(void); my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name, uint full_name_len); @@ -240,3 +252,46 @@ void innobase_release_temporary_latches(void* innobase_tid); void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset); int innobase_start_trx_and_assign_read_view(THD* thd); + +/*********************************************************************** +This function is used to prepare X/Open XA distributed transaction */ + +int innobase_xa_prepare( +/*====================*/ + /* out: 0 or error number */ + THD* thd, /* in: handle to the MySQL thread of the user + whose XA transaction should be prepared */ + bool all); /* in: TRUE - commit transaction + FALSE - the current SQL statement ended */ + +/*********************************************************************** +This function is used to recover X/Open XA distributed transactions */ + +int innobase_xa_recover( +/*====================*/ + /* out: number of prepared transactions + stored in xid_list */ + XID* 
xid_list, /* in/out: prepared transactions */ + uint len); /* in: number of slots in xid_list */ + +/*********************************************************************** +This function is used to commit one X/Open XA distributed transaction +which is in the prepared state */ + +int innobase_commit_by_xid( +/*=======================*/ + /* out: 0 or error number */ + XID* xid); /* in : X/Open XA Transaction Identification */ + +/*********************************************************************** +This function is used to rollback one X/Open XA distributed transaction +which is in the prepared state */ + +int innobase_rollback_by_xid( + /* out: 0 or error number */ + XID *xid); /* in : X/Open XA Transaction Idenfification */ + + +int innobase_xa_end(THD *thd); + + diff --git a/sql/ha_isam.cc b/sql/ha_isam.cc deleted file mode 100644 index 9de532fa7b0..00000000000 --- a/sql/ha_isam.cc +++ /dev/null @@ -1,402 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#ifdef __GNUC__ -#pragma implementation // gcc: Class implementation -#endif - -#include "mysql_priv.h" -#ifdef HAVE_ISAM -#include <m_ctype.h> -#include <myisampack.h> -#include "ha_isam.h" -#ifndef MASTER -#include "../srclib/isam/isamdef.h" -#else -#include "../isam/isamdef.h" -#endif - -/***************************************************************************** -** isam tables -*****************************************************************************/ - - -const char **ha_isam::bas_ext() const -{ static const char *ext[]= { ".ISM",".ISD", NullS }; return ext; } - -int ha_isam::open(const char *name, int mode, uint test_if_locked) -{ - char name_buff[FN_REFLEN]; - if (!(file=nisam_open(fn_format(name_buff,name,"","",2 | 4), mode, - test_if_locked))) - return (my_errno ? my_errno : -1); - - if (!(test_if_locked == HA_OPEN_WAIT_IF_LOCKED || - test_if_locked == HA_OPEN_ABORT_IF_LOCKED)) - (void) nisam_extra(file,HA_EXTRA_NO_WAIT_LOCK); - info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); - if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) - (void) nisam_extra(file,HA_EXTRA_WAIT_LOCK); - if (!table->db_record_offset) - int_table_flags|=HA_REC_NOT_IN_SEQ; - return (0); -} - -int ha_isam::close(void) -{ - return !nisam_close(file) ? 0 : my_errno ? my_errno : -1; -} - -uint ha_isam::min_record_length(uint options) const -{ - return (options & HA_OPTION_PACK_RECORD) ? 1 : 5; -} - - -int ha_isam::write_row(byte * buf) -{ - statistic_increment(ha_write_count,&LOCK_status); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) - table->timestamp_field->set_time(); - if (table->next_number_field && buf == table->record[0]) - update_auto_increment(); - return !nisam_write(file,buf) ? 0 : my_errno ? 
my_errno : -1; -} - -int ha_isam::update_row(const byte * old_data, byte * new_data) -{ - statistic_increment(ha_update_count,&LOCK_status); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); - return !nisam_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::delete_row(const byte * buf) -{ - statistic_increment(ha_delete_count,&LOCK_status); - return !nisam_delete(file,buf) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - statistic_increment(ha_read_key_count,&LOCK_status); - int error=nisam_rkey(file, buf, active_index, key, key_len, find_flag); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - statistic_increment(ha_read_key_count,&LOCK_status); - int error=nisam_rkey(file, buf, index, key, key_len, find_flag); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::index_read_last(byte * buf, const byte * key, uint key_len) -{ - statistic_increment(ha_read_key_count,&LOCK_status); - int error=nisam_rkey(file, buf, active_index, key, key_len, - HA_READ_PREFIX_LAST); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::index_next(byte * buf) -{ - statistic_increment(ha_read_next_count,&LOCK_status); - int error=nisam_rnext(file,buf,active_index); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; -} - -int ha_isam::index_prev(byte * buf) -{ - statistic_increment(ha_read_prev_count,&LOCK_status); - int error=nisam_rprev(file,buf, active_index); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; -} - -int ha_isam::index_first(byte * buf) -{ - statistic_increment(ha_read_first_count,&LOCK_status); - int error=nisam_rfirst(file, buf, active_index); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; -} - -int ha_isam::index_last(byte * buf) -{ - statistic_increment(ha_read_last_count,&LOCK_status); - int error=nisam_rlast(file, buf, active_index); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : HA_ERR_END_OF_FILE; -} - -int ha_isam::rnd_init(bool scan) -{ - return nisam_extra(file,HA_EXTRA_RESET) ? 0 : my_errno ? my_errno : -1;; -} - -int ha_isam::rnd_next(byte *buf) -{ - statistic_increment(ha_read_rnd_next_count,&LOCK_status); - int error=nisam_rrnd(file, buf, NI_POS_ERROR); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -int ha_isam::rnd_pos(byte * buf, byte *pos) -{ - statistic_increment(ha_read_rnd_count,&LOCK_status); - int error=nisam_rrnd(file, buf, (ulong) ha_get_ptr(pos,ref_length)); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? 
my_errno : -1; -} - -void ha_isam::position(const byte *record) -{ - my_off_t position=nisam_position(file); - if (position == (my_off_t) ~ (ulong) 0) - position=HA_OFFSET_ERROR; - ha_store_ptr(ref, ref_length, position); -} - -void ha_isam::info(uint flag) -{ - N_ISAMINFO info; - (void) nisam_info(file,&info,flag); - if (flag & HA_STATUS_VARIABLE) - { - records = info.records; - deleted = info.deleted; - data_file_length=info.data_file_length; - index_file_length=info.index_file_length; - delete_length = info.delete_length; - check_time = info.isamchk_time; - mean_rec_length=info.mean_reclength; - } - if (flag & HA_STATUS_CONST) - { - max_data_file_length=info.max_data_file_length; - max_index_file_length=info.max_index_file_length; - create_time = info.create_time; - sortkey = info.sortkey; - block_size=nisam_block_size; - table->keys = min(table->keys,info.keys); - table->keys_in_use.set_prefix(table->keys); - table->db_options_in_use= info.options; - table->db_record_offset= - (table->db_options_in_use & - (HA_OPTION_PACK_RECORD | HA_OPTION_COMPRESS_RECORD)) ? 0 : - table->reclength; - if (!table->tmp_table) - { - ulong *rec_per_key=info.rec_per_key; - for (uint i=0 ; i < table->keys ; i++) - { - table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= - *(rec_per_key++); - } - } - ref_length=4; - } - if (flag & HA_STATUS_ERRKEY) - { - errkey = info.errkey; - ha_store_ptr(dupp_ref, ref_length, info.dupp_key_pos); - } - if (flag & HA_STATUS_TIME) - update_time = info.update_time; -} - - -int ha_isam::extra(enum ha_extra_function operation) -{ - if ((specialflag & SPECIAL_SAFE_MODE || test_flags & TEST_NO_EXTRA) && - (operation == HA_EXTRA_WRITE_CACHE || - operation == HA_EXTRA_KEYREAD)) - return 0; - return nisam_extra(file,operation); -} - -int ha_isam::external_lock(THD *thd, int lock_type) -{ - if (!table->tmp_table) - return nisam_lock_database(file,lock_type); - return 0; -} - - -THR_LOCK_DATA **ha_isam::store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ - if (lock_type != TL_IGNORE && file->lock.type == TL_UNLOCK) - file->lock.type=lock_type; - *to++= &file->lock; - return to; -} - - -int ha_isam::create(const char *name, register TABLE *form, - HA_CREATE_INFO *create_info) - -{ - uint options=form->db_options_in_use; - int error; - uint i,j,recpos,minpos,fieldpos,temp_length,length; - enum ha_base_keytype type; - char buff[FN_REFLEN]; - KEY *pos; - N_KEYDEF keydef[MAX_KEY]; - N_RECINFO *recinfo,*recinfo_pos; - DBUG_ENTER("ha_isam::create"); - - type=HA_KEYTYPE_BINARY; // Keep compiler happy - if (!(recinfo= (N_RECINFO*) my_malloc((form->fields*2+2)*sizeof(N_RECINFO), - MYF(MY_WME)))) - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - - pos=form->key_info; - for (i=0; i < form->keys ; i++, pos++) - { - keydef[i].base.flag= (pos->flags & HA_NOSAME); - for (j=0 ; (int7) j < pos->key_parts ; j++) - { - keydef[i].seg[j].base.flag=pos->key_part[j].key_part_flag; - Field *field=pos->key_part[j].field; - type=field->key_type(); - - if ((options & HA_OPTION_PACK_KEYS || - (pos->flags & (HA_PACK_KEY | HA_BINARY_PACK_KEY | - HA_SPACE_PACK_USED))) && - pos->key_part[j].length > 8 && - (type == HA_KEYTYPE_TEXT || - type == HA_KEYTYPE_NUM || - (type == HA_KEYTYPE_BINARY && !field->zero_pack()))) - { - if (j == 0) - keydef[i].base.flag|=HA_PACK_KEY; - if (!(field->flags & ZEROFILL_FLAG) && - (field->type() == FIELD_TYPE_STRING || - field->type() == FIELD_TYPE_VAR_STRING || - ((int) (pos->key_part[j].length - field->decimals())) - >= 4)) - 
keydef[i].seg[j].base.flag|=HA_SPACE_PACK; - } - keydef[i].seg[j].base.type=(int) type; - keydef[i].seg[j].base.start= pos->key_part[j].offset; - keydef[i].seg[j].base.length= pos->key_part[j].length; - } - keydef[i].seg[j].base.type=(int) HA_KEYTYPE_END; /* End of key-parts */ - } - - recpos=0; recinfo_pos=recinfo; - while (recpos < (uint) form->reclength) - { - Field **field,*found=0; - minpos=form->reclength; length=0; - - for (field=form->field ; *field ; field++) - { - if ((fieldpos=(*field)->offset()) >= recpos && - fieldpos <= minpos) - { - /* skip null fields */ - if (!(temp_length= (*field)->pack_length())) - continue; /* Skip null-fields */ - if (! found || fieldpos < minpos || - (fieldpos == minpos && temp_length < length)) - { - minpos=fieldpos; found= *field; length=temp_length; - } - } - } - DBUG_PRINT("loop",("found: %lx recpos: %d minpos: %d length: %d", - found,recpos,minpos,length)); - if (recpos != minpos) - { // Reserved space (Null bits?) - recinfo_pos->base.type=(int) FIELD_NORMAL; - recinfo_pos++->base.length= (uint16) (minpos-recpos); - } - if (! found) - break; - - if (found->flags & BLOB_FLAG) - { - /* ISAM can only handle blob pointers of sizeof(char(*)) */ - recinfo_pos->base.type= (int) FIELD_BLOB; - if (options & HA_OPTION_LONG_BLOB_PTR) - length= length-portable_sizeof_char_ptr+sizeof(char*); - } - else if (!(options & HA_OPTION_PACK_RECORD)) - recinfo_pos->base.type= (int) FIELD_NORMAL; - else if (found->zero_pack()) - recinfo_pos->base.type= (int) FIELD_SKIP_ZERO; - else - recinfo_pos->base.type= (int) ((length <= 3 || - (found->flags & ZEROFILL_FLAG)) ? - FIELD_NORMAL : - found->type() == FIELD_TYPE_STRING || - found->type() == FIELD_TYPE_VAR_STRING ? - FIELD_SKIP_ENDSPACE : - FIELD_SKIP_PRESPACE); - recinfo_pos++ ->base.length=(uint16) length; - recpos=minpos+length; - DBUG_PRINT("loop",("length: %d type: %d", - recinfo_pos[-1].base.length,recinfo_pos[-1].base.type)); - - if ((found->flags & BLOB_FLAG) && (options & HA_OPTION_LONG_BLOB_PTR) && - sizeof(char*) != portable_sizeof_char_ptr) - { // Not used space - recinfo_pos->base.type=(int) FIELD_ZERO; - recinfo_pos++->base.length= - (uint16) (portable_sizeof_char_ptr-sizeof(char*)); - recpos+= (portable_sizeof_char_ptr-sizeof(char*)); - } - } - recinfo_pos->base.type= (int) FIELD_LAST; /* End of fieldinfo */ - error=nisam_create(fn_format(buff,name,"","",2+4+16),form->keys,keydef, - recinfo,(ulong) form->max_rows, (ulong) form->min_rows, - 0, 0, 0L); - my_free((gptr) recinfo,MYF(0)); - DBUG_RETURN(error); - -} - -static key_range no_range= { (byte*) 0, 0, HA_READ_KEY_EXACT }; - -ha_rows ha_isam::records_in_range(uint inx, key_range *min_key, - key_range *max_key) -{ - /* ISAM checks if 'key' pointer <> 0 to know if there is no range */ - if (!min_key) - min_key= &no_range; - if (!max_key) - max_key= &no_range; - return (ha_rows) nisam_records_in_range(file, - (int) inx, - min_key->key, min_key->length, - min_key->flag, - max_key->key, max_key->length, - max_key->flag); -} -#endif /* HAVE_ISAM */ diff --git a/sql/ha_isam.h b/sql/ha_isam.h deleted file mode 100644 index b3e932696cb..00000000000 --- a/sql/ha_isam.h +++ /dev/null @@ -1,79 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#ifdef __GNUC__ -#pragma interface /* gcc class implementation */ -#endif - -/* class for the the myisam handler */ - -#include <nisam.h> - -class ha_isam: public handler -{ - N_INFO *file; - /* We need this as table_flags() may change after open() */ - ulong int_table_flags; - - public: - ha_isam(TABLE *table) - :handler(table), file(0), - int_table_flags(HA_READ_RND_SAME | - HA_DUPP_POS | HA_NOT_DELETE_WITH_CACHE | HA_FILE_BASED) - {} - ~ha_isam() {} - ulong index_flags(uint idx, uint part, bool all_parts) const - { return HA_READ_NEXT; } // but no HA_READ_PREV here!!! - const char *table_type() const { return "ISAM"; } - const char *index_type(uint key_number) { return "BTREE"; } - const char **bas_ext() const; - ulong table_flags() const { return int_table_flags; } - uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; } - uint max_supported_keys() const { return N_MAXKEY; } - uint max_supported_key_parts() const { return N_MAXKEY_SEG; } - uint max_supported_key_length() const { return N_MAX_KEY_LENGTH; } - uint min_record_length(uint options) const; - bool low_byte_first() const { return 0; } - - int open(const char *name, int mode, uint test_if_locked); - int close(void); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_last(byte * buf, const byte * key, uint key_len); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); - int rnd_init(bool scan); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - void position(const byte *record); - void info(uint); - int extra(enum ha_extra_function operation); - int external_lock(THD *thd, int lock_type); - ha_rows records_in_range(uint inx, key_range *min_key, key_range *max_key); - - int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); - THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, - enum thr_lock_type lock_type); -}; - diff --git a/sql/ha_isammrg.cc b/sql/ha_isammrg.cc deleted file mode 100644 index 367607eef19..00000000000 --- a/sql/ha_isammrg.cc +++ /dev/null @@ -1,209 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#ifdef __GNUC__ -#pragma implementation // gcc: Class implementation -#endif - -#include "mysql_priv.h" -#ifdef HAVE_ISAM -#include <m_ctype.h> -#ifndef MASTER -#include "../srclib/merge/mrg_def.h" -#else -#include "../merge/mrg_def.h" -#endif -#include "ha_isammrg.h" - -/***************************************************************************** -** ISAM MERGE tables -*****************************************************************************/ - -const char **ha_isammrg::bas_ext() const -{ static const char *ext[]= { ".MRG", NullS }; return ext; } - -int ha_isammrg::open(const char *name, int mode, uint test_if_locked) -{ - char name_buff[FN_REFLEN]; - if (!(file=mrg_open(fn_format(name_buff,name,"","",2 | 4), mode, - test_if_locked))) - return (my_errno ? my_errno : -1); - - if (!(test_if_locked == HA_OPEN_WAIT_IF_LOCKED || - test_if_locked == HA_OPEN_ABORT_IF_LOCKED)) - mrg_extra(file,HA_EXTRA_NO_WAIT_LOCK); - info(HA_STATUS_NO_LOCK | HA_STATUS_VARIABLE | HA_STATUS_CONST); - if (!(test_if_locked & HA_OPEN_WAIT_IF_LOCKED)) - mrg_extra(file,HA_EXTRA_WAIT_LOCK); - if (table->reclength != mean_rec_length) - { - DBUG_PRINT("error",("reclength: %d mean_rec_length: %d", - table->reclength, mean_rec_length)); - mrg_close(file); - file=0; - return ER_WRONG_MRG_TABLE; - } - return (0); -} - -int ha_isammrg::close(void) -{ - return !mrg_close(file) ? 0 : my_errno ? my_errno : -1; -} - -uint ha_isammrg::min_record_length(uint options) const -{ - return (options & HA_OPTION_PACK_RECORD) ? 1 : 5; -} - -int ha_isammrg::write_row(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::update_row(const byte * old_data, byte * new_data) -{ - statistic_increment(ha_update_count,&LOCK_status); - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); - return !mrg_update(file,old_data,new_data) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::delete_row(const byte * buf) -{ - statistic_increment(ha_delete_count,&LOCK_status); - return !mrg_delete(file,buf) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len, enum ha_rkey_function find_flag) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_next(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_prev(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_first(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::index_last(byte * buf) -{ - return (my_errno=HA_ERR_WRONG_COMMAND); -} - -int ha_isammrg::rnd_init(bool scan) -{ - return !mrg_extra(file,HA_EXTRA_RESET) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::rnd_next(byte *buf) -{ - statistic_increment(ha_read_rnd_next_count,&LOCK_status); - int error=mrg_rrnd(file, buf, ~(mrg_off_t) 0); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? 
my_errno : -1; -} - -int ha_isammrg::rnd_pos(byte * buf, byte *pos) -{ - statistic_increment(ha_read_rnd_count,&LOCK_status); - int error=mrg_rrnd(file, buf, (ulong) ha_get_ptr(pos,ref_length)); - table->status=error ? STATUS_NOT_FOUND: 0; - return !error ? 0 : my_errno ? my_errno : -1; -} - -void ha_isammrg::position(const byte *record) -{ - ulong position= mrg_position(file); - ha_store_ptr(ref, ref_length, (my_off_t) position); -} - - -void ha_isammrg::info(uint flag) -{ - MERGE_INFO info; - (void) mrg_info(file,&info,flag); - records = (ha_rows) info.records; - deleted = (ha_rows) info.deleted; - data_file_length=info.data_file_length; - errkey = info.errkey; - table->keys_in_use.clear_all(); // No keys yet - table->db_options_in_use = info.options; - mean_rec_length=info.reclength; - block_size=0; - update_time=0; - ref_length=4; // Should be big enough -} - - -int ha_isammrg::extra(enum ha_extra_function operation) -{ - return !mrg_extra(file,operation) ? 0 : my_errno ? my_errno : -1; -} - -int ha_isammrg::external_lock(THD *thd, int lock_type) -{ - return !mrg_lock_database(file,lock_type) ? 0 : my_errno ? my_errno : -1; -} - -uint ha_isammrg::lock_count(void) const -{ - return file->tables; -} - -THR_LOCK_DATA **ha_isammrg::store_lock(THD *thd, - THR_LOCK_DATA **to, - enum thr_lock_type lock_type) -{ - MRG_TABLE *open_table; - - for (open_table=file->open_tables ; - open_table != file->end_table ; - open_table++) - { - *(to++)= &open_table->table->lock; - if (lock_type != TL_IGNORE && open_table->table->lock.type == TL_UNLOCK) - open_table->table->lock.type=lock_type; - } - return to; -} - - -int ha_isammrg::create(const char *name, register TABLE *form, - HA_CREATE_INFO *create_info) - -{ - char buff[FN_REFLEN]; - return mrg_create(fn_format(buff,name,"","",2+4+16),0); -} -#endif /* HAVE_ISAM */ diff --git a/sql/ha_isammrg.h b/sql/ha_isammrg.h deleted file mode 100644 index 657e5060272..00000000000 --- a/sql/ha_isammrg.h +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - - -#ifdef __GNUC__ -#pragma interface /* gcc class implementation */ -#endif - -/* class for the the myisam merge handler */ - -#include <merge.h> - -class ha_isammrg: public handler -{ - MRG_INFO *file; - - public: - ha_isammrg(TABLE *table): handler(table), file(0) {} - ~ha_isammrg() {} - const char *table_type() const { return "MRG_ISAM"; } - const char **bas_ext() const; - ulong table_flags() const { return (HA_READ_RND_SAME | - HA_REC_NOT_IN_SEQ | HA_FILE_BASED); } - ulong index_flags(uint idx, uint part, bool all_parts) const - { DBUG_ASSERT(0); return 0; } - - uint max_supported_keys() const { return 0; } - bool low_byte_first() const { return 0; } - uint min_record_length(uint options) const; - - int open(const char *name, int mode, uint test_if_locked); - int close(void); - int write_row(byte * buf); - int update_row(const byte * old_data, byte * new_data); - int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint indx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); - int rnd_init(bool scan); - int rnd_next(byte *buf); - int rnd_pos(byte * buf, byte *pos); - void position(const byte *record); - void info(uint); - int extra(enum ha_extra_function operation); - int external_lock(THD *thd, int lock_type); - uint lock_count(void) const; - int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); - THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, - enum thr_lock_type lock_type); - uint8 table_cache_type() { return HA_CACHE_TBL_NOCACHE; } -}; diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index a7beae664b9..c89eb4426ff 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -87,9 +87,10 @@ static void mi_check_print_msg(MI_CHECK *param, const char* msg_type, extern "C" { -volatile my_bool *killed_ptr(MI_CHECK *param) +volatile int *killed_ptr(MI_CHECK *param) { - return &(((THD *)(param->thd))->killed); + /* In theory Unsafe conversion, but should be ok for now */ + return (int*) &(((THD *)(param->thd))->killed); } void mi_check_print_error(MI_CHECK *param, const char *fmt,...) 
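The ha_myisam.cc hunks below (and the matching ha_myisammrg.cc and ha_ndbcluster.cc hunks further on) move the handler statistics counters such as ha_write_count and ha_read_key_count from process-wide globals into the per-connection status_var block of the owning THD. A minimal standalone C++ sketch of that pattern follows; the Session/SessionStats types are hypothetical stand-ins for THD and its status_var member, and the statistic_increment()/LOCK_status call form used by the patch is simplified away.

#include <cstdint>
#include <mutex>

struct SessionStats {                     // stand-in for the per-THD status_var block
  std::uint64_t ha_write_count = 0;
};

struct Session {                          // stand-in for THD
  SessionStats status_var;
};

static std::uint64_t ha_write_count = 0;  // old style: one counter shared by every session
static std::mutex LOCK_status;            // stand-in for the server-wide LOCK_status mutex

// Old pattern: every handler call from every session bumps the same global counter.
void count_write_global() {
  std::lock_guard<std::mutex> guard(LOCK_status);
  ++ha_write_count;
}

// New pattern: each session bumps its own counter; an aggregate view can be built
// by summing the per-session blocks when it is actually needed.
void count_write_per_session(Session *session) {
  ++session->status_var.ha_write_count;
}

int main() {
  Session s;
  count_write_global();
  count_write_per_session(&s);
  return (ha_write_count == 1 && s.status_var.ha_write_count == 1) ? 0 : 1;
}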
@@ -248,7 +249,7 @@ int ha_myisam::close(void) int ha_myisam::write_row(byte * buf) { - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); /* If we have a timestamp column, update it to the current time */ if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) @@ -601,7 +602,7 @@ int ha_myisam::repair(THD *thd, MI_CHECK ¶m, bool optimize) local_testflag|= T_STATISTICS; param.testflag|= T_STATISTICS; // We get this for free statistics_done=1; - if (current_thd->variables.myisam_repair_threads>1) + if (thd->variables.myisam_repair_threads>1) { char buf[40]; /* TODO: respect myisam_repair_threads variable */ @@ -925,8 +926,11 @@ int ha_myisam::enable_indexes(uint mode) { sql_print_warning("Warning: Enabling keys got errno %d, retrying", my_errno); + thd->clear_error(); param.testflag&= ~(T_REP_BY_SORT | T_QUICK); error= (repair(thd,param,0) != HA_ADMIN_OK); + if (!error && thd->net.report_error) + error= HA_ERR_CRASHED; } info(HA_STATUS_CONST); thd->proc_info=save_proc_info; @@ -1083,7 +1087,7 @@ bool ha_myisam::is_crashed() const int ha_myisam::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return mi_update(file,old_data,new_data); @@ -1091,7 +1095,7 @@ int ha_myisam::update_row(const byte * old_data, byte * new_data) int ha_myisam::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); return mi_delete(file,buf); } @@ -1099,7 +1103,8 @@ int ha_myisam::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=mi_rkey(file,buf,active_index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1108,7 +1113,8 @@ int ha_myisam::index_read(byte * buf, const byte * key, int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=mi_rkey(file,buf,index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1117,7 +1123,8 @@ int ha_myisam::index_read_idx(byte * buf, uint index, const byte * key, int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=mi_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1126,7 +1133,8 @@ int ha_myisam::index_read_last(byte * buf, const byte * key, uint key_len) int ha_myisam::index_next(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=mi_rnext(file,buf,active_index); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; @@ -1135,7 +1143,8 @@ int ha_myisam::index_next(byte * buf) int ha_myisam::index_prev(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); int error=mi_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1144,7 +1153,8 @@ int ha_myisam::index_prev(byte * buf) int ha_myisam::index_first(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); int error=mi_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1153,7 +1163,8 @@ int ha_myisam::index_first(byte * buf) int ha_myisam::index_last(byte * buf) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); int error=mi_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1164,7 +1175,8 @@ int ha_myisam::index_next_same(byte * buf, uint length __attribute__((unused))) { DBUG_ASSERT(inited==INDEX); - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=mi_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1180,7 +1192,8 @@ int ha_myisam::rnd_init(bool scan) int ha_myisam::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=mi_scan(file, buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -1193,8 +1206,9 @@ int ha_myisam::restart_rnd_next(byte *buf, byte *pos) int ha_myisam::rnd_pos(byte * buf, byte *pos) { - statistic_increment(ha_read_rnd_count,&LOCK_status); - int error=mi_rrnd(file, buf, ha_get_ptr(pos,ref_length)); + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); + int error=mi_rrnd(file, buf, my_get_ptr(pos,ref_length)); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } @@ -1202,7 +1216,7 @@ int ha_myisam::rnd_pos(byte * buf, byte *pos) void ha_myisam::position(const byte* record) { my_off_t position=mi_position(file); - ha_store_ptr(ref, ref_length, position); + my_store_ptr(ref, ref_length, position); } void ha_myisam::info(uint flag) @@ -1258,7 +1272,7 @@ void ha_myisam::info(uint flag) if (flag & HA_STATUS_ERRKEY) { errkey = info.errkey; - ha_store_ptr(dupp_ref, ref_length, info.dupp_key_pos); + my_store_ptr(dupp_ref, ref_length, info.dupp_key_pos); } if (flag & HA_STATUS_TIME) update_time = info.update_time; @@ -1366,9 +1380,9 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].keysegs=pos->key_parts; for (j=0 ; j < pos->key_parts ; j++) { - keydef[i].seg[j].flag=pos->key_part[j].key_part_flag; Field *field=pos->key_part[j].field; type=field->key_type(); + keydef[i].seg[j].flag=pos->key_part[j].key_part_flag; if (options & HA_OPTION_PACK_KEYS || (pos->flags & (HA_PACK_KEY | HA_BINARY_PACK_KEY | @@ -1383,8 +1397,8 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, if (j == 0) keydef[i].flag|=HA_PACK_KEY; if (!(field->flags & ZEROFILL_FLAG) && - (field->type() == FIELD_TYPE_STRING || - field->type() == FIELD_TYPE_VAR_STRING || + (field->type() == MYSQL_TYPE_STRING || + field->type() == MYSQL_TYPE_VAR_STRING || ((int) (pos->key_part[j].length - field->decimals())) >= 4)) keydef[i].seg[j].flag|=HA_SPACE_PACK; @@ -1395,8 +1409,10 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].seg[j].type= (int) type; keydef[i].seg[j].start= pos->key_part[j].offset; keydef[i].seg[j].length= pos->key_part[j].length; - keydef[i].seg[j].bit_start=keydef[i].seg[j].bit_end=0; - keydef[i].seg[j].language = field->charset()->number; + keydef[i].seg[j].bit_start= keydef[i].seg[j].bit_end= + keydef[i].seg[j].bit_length= 0; + keydef[i].seg[j].bit_pos= 0; + keydef[i].seg[j].language= field->charset()->number; if (field->null_ptr) { @@ -1417,6 +1433,13 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, keydef[i].seg[j].bit_start= (uint) (field->pack_length() - table_arg->blob_ptr_size); } + else if (field->type() == FIELD_TYPE_BIT) + { + keydef[i].seg[j].bit_length= ((Field_bit *) field)->bit_len; + keydef[i].seg[j].bit_start= ((Field_bit *) field)->bit_ofs; + keydef[i].seg[j].bit_pos= (uint) (((Field_bit *) field)->bit_ptr - + (uchar*) table_arg->record[0]); + } } keyseg+=pos->key_parts; } @@ -1448,7 +1471,7 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, } } } - DBUG_PRINT("loop",("found: %lx recpos: %d minpos: %d length: %d", + DBUG_PRINT("loop",("found: 0x%lx recpos: %d minpos: %d length: %d", found,recpos,minpos,length)); if (recpos != minpos) { // Reserved space (Null bits?) @@ -1460,33 +1483,33 @@ int ha_myisam::create(const char *name, register TABLE *table_arg, break; if (found->flags & BLOB_FLAG) - { recinfo_pos->type= (int) FIELD_BLOB; - } + else if (found->type() == MYSQL_TYPE_VARCHAR) + recinfo_pos->type= FIELD_VARCHAR; else if (!(options & HA_OPTION_PACK_RECORD)) recinfo_pos->type= (int) FIELD_NORMAL; else if (found->zero_pack()) recinfo_pos->type= (int) FIELD_SKIP_ZERO; else recinfo_pos->type= (int) ((length <= 3 || - (found->flags & ZEROFILL_FLAG)) ? - FIELD_NORMAL : - found->type() == FIELD_TYPE_STRING || - found->type() == FIELD_TYPE_VAR_STRING ? - FIELD_SKIP_ENDSPACE : - FIELD_SKIP_PRESPACE); + (found->flags & ZEROFILL_FLAG)) ? 
+ FIELD_NORMAL : + found->type() == MYSQL_TYPE_STRING || + found->type() == MYSQL_TYPE_VAR_STRING ? + FIELD_SKIP_ENDSPACE : + FIELD_SKIP_PRESPACE); if (found->null_ptr) { recinfo_pos->null_bit=found->null_bit; recinfo_pos->null_pos= (uint) (found->null_ptr- - (uchar*) table_arg->record[0]); + (uchar*) table_arg->record[0]); } else { recinfo_pos->null_bit=0; recinfo_pos->null_pos=0; } - (recinfo_pos++) ->length=(uint16) length; + (recinfo_pos++)->length= (uint16) length; recpos=minpos+length; DBUG_PRINT("loop",("length: %d type: %d", recinfo_pos[-1].length,recinfo_pos[-1].type)); @@ -1533,8 +1556,12 @@ int ha_myisam::rename_table(const char * from, const char * to) } -longlong ha_myisam::get_auto_increment() +ulonglong ha_myisam::get_auto_increment() { + ulonglong nr; + int error; + byte key[MI_MAX_KEY_LENGTH]; + if (!table->next_number_key_offset) { // Autoincrement at key-start ha_myisam::info(HA_STATUS_AUTO); @@ -1544,19 +1571,17 @@ longlong ha_myisam::get_auto_increment() /* it's safe to call the following if bulk_insert isn't on */ mi_flush_bulk_insert(file, table->next_number_index); - longlong nr; - int error; - byte key[MI_MAX_KEY_LENGTH]; (void) extra(HA_EXTRA_KEYREAD); - key_copy(key,table,table->next_number_index, - table->next_number_key_offset); + key_copy(key, table->record[0], + table->key_info + table->next_number_index, + table->next_number_key_offset); error=mi_rkey(file,table->record[1],(int) table->next_number_index, key,table->next_number_key_offset,HA_READ_PREFIX_LAST); if (error) - nr=1; + nr= 1; else - nr=(longlong) - table->next_number_field->val_int_offset(table->rec_buff_length)+1; + nr= ((ulonglong) table->next_number_field-> + val_int_offset(table->rec_buff_length)+1); extra(HA_EXTRA_NO_KEYREAD); return nr; } @@ -1601,7 +1626,8 @@ int ha_myisam::ft_read(byte * buf) if (!ft_handler) return -1; - thread_safe_increment(ha_read_next_count,&LOCK_status); // why ? + thread_safe_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); // why ? 
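// These hunks (and the earlier ha_innodb.cc one) change get_auto_increment() from
// longlong to ulonglong, and the error path now returns ~(ulonglong) 0 instead of -1.
// A small standalone C++ sketch of that convention, using a hypothetical
// next_auto_increment() helper that is not part of the patch:
#include <cstdio>

typedef unsigned long long ulonglong;

// All-ones is reserved as the "could not fetch a value" marker; every other
// value is a usable auto-increment counter value.
static const ulonglong AUTO_INC_ERROR = ~(ulonglong) 0;

ulonglong next_auto_increment(bool lookup_failed, ulonglong last_value) {
  if (lookup_failed)
    return AUTO_INC_ERROR;   // was return(-1) while the return type was signed
  return last_value + 1;
}

int main() {
  ulonglong nr = next_auto_increment(false, 41);
  if (nr == AUTO_INC_ERROR)
    std::puts("auto-increment lookup failed");
  else
    std::printf("next auto-increment value: %llu\n", nr);
  return 0;
}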
error=ft_handler->please->read_next(ft_handler,(char*) buf); diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index 972d6b18e19..d2fe36c8357 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -47,7 +47,7 @@ class ha_myisam: public handler int_table_flags(HA_NULL_IN_KEY | HA_CAN_FULLTEXT | HA_CAN_SQL_HANDLER | HA_DUPP_POS | HA_CAN_INDEX_BLOBS | HA_AUTO_PART_KEY | HA_FILE_BASED | HA_CAN_GEOMETRY | HA_READ_RND_SAME | - HA_CAN_INSERT_DELAYED), + HA_CAN_INSERT_DELAYED | HA_CAN_BIT_FIELD), can_enable_indexes(1) {} ~ha_myisam() {} @@ -111,7 +111,7 @@ class ha_myisam: public handler int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); - longlong get_auto_increment(); + ulonglong get_auto_increment(); int rename_table(const char * from, const char * to); int delete_table(const char *name); int check(THD* thd, HA_CHECK_OPT* check_opt); diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index bf47b4625e0..744128faf69 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -92,7 +92,7 @@ int ha_myisammrg::close(void) int ha_myisammrg::write_row(byte * buf) { - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_write_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); if (table->next_number_field && buf == table->record[0]) @@ -102,7 +102,7 @@ int ha_myisammrg::write_row(byte * buf) int ha_myisammrg::update_row(const byte * old_data, byte * new_data) { - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_update_count,&LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); return myrg_update(file,old_data,new_data); @@ -110,14 +110,15 @@ int ha_myisammrg::update_row(const byte * old_data, byte * new_data) int ha_myisammrg::delete_row(const byte * buf) { - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_delete_count,&LOCK_status); return myrg_delete(file,buf); } int ha_myisammrg::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=myrg_rkey(file,buf,active_index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -126,7 +127,8 @@ int ha_myisammrg::index_read(byte * buf, const byte * key, int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=myrg_rkey(file,buf,index, key, key_len, find_flag); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -134,7 +136,8 @@ int ha_myisammrg::index_read_idx(byte * buf, uint index, const byte * key, int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_key_count, + &LOCK_status); int error=myrg_rkey(file,buf,active_index, key, key_len, HA_READ_PREFIX_LAST); table->status=error ? 
STATUS_NOT_FOUND: 0; @@ -143,7 +146,8 @@ int ha_myisammrg::index_read_last(byte * buf, const byte * key, uint key_len) int ha_myisammrg::index_next(byte * buf) { - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=myrg_rnext(file,buf,active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -151,7 +155,8 @@ int ha_myisammrg::index_next(byte * buf) int ha_myisammrg::index_prev(byte * buf) { - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_prev_count, + &LOCK_status); int error=myrg_rprev(file,buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -159,7 +164,8 @@ int ha_myisammrg::index_prev(byte * buf) int ha_myisammrg::index_first(byte * buf) { - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_first_count, + &LOCK_status); int error=myrg_rfirst(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -167,7 +173,8 @@ int ha_myisammrg::index_first(byte * buf) int ha_myisammrg::index_last(byte * buf) { - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_last_count, + &LOCK_status); int error=myrg_rlast(file, buf, active_index); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -177,7 +184,8 @@ int ha_myisammrg::index_next_same(byte * buf, const byte *key __attribute__((unused)), uint length __attribute__((unused))) { - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_next_count, + &LOCK_status); int error=myrg_rnext_same(file,buf); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -190,7 +198,8 @@ int ha_myisammrg::rnd_init(bool scan) int ha_myisammrg::rnd_next(byte *buf) { - statistic_increment(ha_read_rnd_next_count,&LOCK_status); + statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, + &LOCK_status); int error=myrg_rrnd(file, buf, HA_OFFSET_ERROR); table->status=error ? STATUS_NOT_FOUND: 0; return error; @@ -198,8 +207,9 @@ int ha_myisammrg::rnd_next(byte *buf) int ha_myisammrg::rnd_pos(byte * buf, byte *pos) { - statistic_increment(ha_read_rnd_count,&LOCK_status); - int error=myrg_rrnd(file, buf, ha_get_ptr(pos,ref_length)); + statistic_increment(table->in_use->status_var.ha_read_rnd_count, + &LOCK_status); + int error=myrg_rrnd(file, buf, my_get_ptr(pos,ref_length)); table->status=error ? 
STATUS_NOT_FOUND: 0; return error; } @@ -207,7 +217,7 @@ int ha_myisammrg::rnd_pos(byte * buf, byte *pos) void ha_myisammrg::position(const byte *record) { ulonglong position= myrg_position(file); - ha_store_ptr(ref, ref_length, (my_off_t) position); + my_store_ptr(ref, ref_length, (my_off_t) position); } @@ -358,7 +368,7 @@ void ha_myisammrg::update_create_info(HA_CREATE_INFO *create_info) create_info->merge_list.elements++; (*create_info->merge_list.next) = (byte*) ptr; - create_info->merge_list.next= (byte**) &ptr->next; + create_info->merge_list.next= (byte**) &ptr->next_local; } *create_info->merge_list.next=0; } @@ -386,7 +396,7 @@ int ha_myisammrg::create(const char *name, register TABLE *form, if (!(table_names= (char**) thd->alloc((create_info->merge_list.elements+1)* sizeof(char*)))) DBUG_RETURN(HA_ERR_OUT_OF_MEM); - for (pos=table_names ; tables ; tables=tables->next) + for (pos= table_names; tables; tables= tables->next_local) { char *table_name; TABLE **tbl= 0; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 3c6cd83d5dc..84e285fa360 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -416,6 +416,8 @@ static inline bool ndb_supported_type(enum_field_types type) return TRUE; case MYSQL_TYPE_NULL: case MYSQL_TYPE_GEOMETRY: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_BIT: break; } return FALSE; @@ -1716,6 +1718,8 @@ int ha_ndbcluster::write_row(byte *record) NdbConnection *trans= m_active_trans; NdbOperation *op; int res; + THD *thd= current_thd; + DBUG_ENTER("write_row"); if(m_ignore_dup_key && table->primary_key != MAX_KEY) @@ -1731,7 +1735,7 @@ int ha_ndbcluster::write_row(byte *record) DBUG_RETURN(peek_res); } - statistic_increment(ha_write_count,&LOCK_status); + statistic_increment(thd->status_var.ha_write_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); has_auto_increment= (table->next_number_field && record == table->record[0]); @@ -1855,7 +1859,7 @@ int ha_ndbcluster::key_cmp(uint keynr, const byte * old_row, (new_row[key_part->null_offset] & key_part->null_bit)) return 1; } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH)) + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART)) { if (key_part->field->cmp_binary((char*) (old_row + key_part->offset), @@ -1886,7 +1890,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) uint i; DBUG_ENTER("update_row"); - statistic_increment(ha_update_count,&LOCK_status); + statistic_increment(thd->status_var.ha_update_count, &LOCK_status); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -1997,12 +2001,13 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) int ha_ndbcluster::delete_row(const byte *record) { + THD *thd= current_thd; NdbConnection *trans= m_active_trans; NdbResultSet* cursor= m_active_cursor; NdbOperation *op; DBUG_ENTER("delete_row"); - statistic_increment(ha_delete_count,&LOCK_status); + statistic_increment(thd->status_var.ha_delete_count,&LOCK_status); if (cursor) { @@ -2386,7 +2391,7 @@ int ha_ndbcluster::index_read_idx(byte *buf, uint index_no, const byte *key, uint key_len, enum ha_rkey_function find_flag) { - statistic_increment(ha_read_key_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_key_count, &LOCK_status); DBUG_ENTER("index_read_idx"); DBUG_PRINT("enter", ("index_no: %u, key_len: %u", index_no, key_len)); index_init(index_no); @@ -2399,7 +2404,8 @@ int 
ha_ndbcluster::index_next(byte *buf) DBUG_ENTER("index_next"); int error= 1; - statistic_increment(ha_read_next_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_next_count, + &LOCK_status); DBUG_RETURN(next_result(buf)); } @@ -2407,7 +2413,8 @@ int ha_ndbcluster::index_next(byte *buf) int ha_ndbcluster::index_prev(byte *buf) { DBUG_ENTER("index_prev"); - statistic_increment(ha_read_prev_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_prev_count, + &LOCK_status); DBUG_RETURN(1); } @@ -2415,7 +2422,8 @@ int ha_ndbcluster::index_prev(byte *buf) int ha_ndbcluster::index_first(byte *buf) { DBUG_ENTER("index_first"); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count, + &LOCK_status); // Start the ordered index scan and fetch the first row // Only HA_READ_ORDER indexes get called by index_first @@ -2426,7 +2434,7 @@ int ha_ndbcluster::index_first(byte *buf) int ha_ndbcluster::index_last(byte *buf) { DBUG_ENTER("index_last"); - statistic_increment(ha_read_last_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_last_count,&LOCK_status); int res; if((res= ordered_index_scan(0, 0, TRUE, buf)) == 0){ NdbResultSet *cursor= m_active_cursor; @@ -2566,7 +2574,8 @@ int ha_ndbcluster::rnd_end() int ha_ndbcluster::rnd_next(byte *buf) { DBUG_ENTER("rnd_next"); - statistic_increment(ha_read_rnd_next_count, &LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_next_count, + &LOCK_status); if (!m_active_cursor) DBUG_RETURN(full_table_scan(buf)); @@ -2584,7 +2593,8 @@ int ha_ndbcluster::rnd_next(byte *buf) int ha_ndbcluster::rnd_pos(byte *buf, byte *pos) { DBUG_ENTER("rnd_pos"); - statistic_increment(ha_read_rnd_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_rnd_count, + &LOCK_status); // The primary key for the record is stored in pos // Perform a pk_read using primary key "index" DBUG_RETURN(pk_read(pos, ref_length, buf)); @@ -2816,6 +2826,8 @@ int ha_ndbcluster::extra(enum ha_extra_function operation) break; case HA_EXTRA_CHANGE_KEY_TO_DUP: DBUG_PRINT("info", ("HA_EXTRA_CHANGE_KEY_TO_DUP")); + case HA_EXTRA_KEYREAD_PRESERVE_FIELDS: + DBUG_PRINT("info", ("HA_EXTRA_KEYREAD_PRESERVE_FIELDS")); break; } @@ -3661,7 +3673,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) if (check_ndb_connection()) DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION); - + Ndb *ndb= get_ndb(); dict= ndb->getDictionary(); if (!(orig_tab= dict->getTable(m_tabname))) @@ -3761,18 +3773,20 @@ int ndbcluster_drop_database(const char *path) } -longlong ha_ndbcluster::get_auto_increment() +ulonglong ha_ndbcluster::get_auto_increment() { + int cache_size; + Uint64 auto_value; DBUG_ENTER("get_auto_increment"); DBUG_PRINT("enter", ("m_tabname: %s", m_tabname)); Ndb *ndb= get_ndb(); - int cache_size= + cache_size= (m_rows_to_insert - m_rows_inserted < m_autoincrement_prefetch) ? m_rows_to_insert - m_rows_inserted : (m_rows_to_insert > m_autoincrement_prefetch) ? m_rows_to_insert : m_autoincrement_prefetch; - Uint64 auto_value= + auto_value= (m_skip_auto_increment) ? 
ndb->readAutoIncrementValue((const NDBTAB *) m_table) : ndb->getAutoIncrementValue((const NDBTAB *) m_table, cache_size); @@ -3793,6 +3807,7 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): m_table_flags(HA_REC_NOT_IN_SEQ | HA_NULL_IN_KEY | HA_AUTO_PART_KEY | + HA_NO_VARCHAR | HA_NO_PREFIX_CHAR_KEYS), m_share(0), m_use_write(FALSE), @@ -4225,6 +4240,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, (void)mysql_rm_table_part2(thd, &table_list, /* if_exists */ TRUE, /* drop_temporary */ FALSE, + /* drop_view */ FALSE, /* dont_log_query*/ TRUE); } } diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index 15e61a93574..bfe414162b1 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -199,7 +199,7 @@ class ha_ndbcluster: public handler int key_cmp(uint keynr, const byte * old_row, const byte * new_row); void print_results(); - longlong get_auto_increment(); + ulonglong get_auto_increment(); int ndb_err(NdbConnection*); bool uses_blob_value(bool all_fields); diff --git a/sql/handler.cc b/sql/handler.cc index 3200c6932e9..cde2c7b02a8 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -47,6 +47,9 @@ #ifdef HAVE_NDBCLUSTER_DB #include "ha_ndbcluster.h" #endif +#ifdef HAVE_FEDERATED_DB +#include "ha_federated.h" +#endif #include <myisampack.h> #include <errno.h> @@ -54,11 +57,7 @@ static int NEAR_F delete_file(const char *name,const char *ext,int extflag); -ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count, - ha_read_key_count, ha_read_next_count, ha_read_prev_count, - ha_read_first_count, ha_read_last_count, - ha_commit_count, ha_rollback_count, - ha_read_rnd_count, ha_read_rnd_next_count, ha_discover_count; +ulong ha_read_count, ha_discover_count; static SHOW_COMP_OPTION have_yes= SHOW_OPTION_YES; @@ -96,6 +95,8 @@ struct show_table_type_st sys_table_types[]= "Archive storage engine", DB_TYPE_ARCHIVE_DB}, {"CSV",&have_csv_db, "CSV storage engine", DB_TYPE_CSV_DB}, + {"FEDERATED",&have_federated_db, + "Federated MySQL storage engine", DB_TYPE_FEDERATED_DB}, {NullS, NULL, NullS, DB_TYPE_UNKNOWN} }; @@ -114,7 +115,7 @@ uint known_extensions_id= 0; enum db_type ha_resolve_by_name(const char *name, uint namelen) { - THD *thd=current_thd; + THD *thd= current_thd; if (thd && !my_strcasecmp(&my_charset_latin1, name, "DEFAULT")) { return (enum db_type) thd->variables.table_type; } @@ -145,6 +146,7 @@ const char *ha_get_storage_engine(enum db_type db_type) enum db_type ha_checktype(enum db_type database_type) { show_table_type_st *types; + THD *thd= current_thd; for (types= sys_table_types; types->type; types++) { if ((database_type == types->db_type) && @@ -163,12 +165,11 @@ enum db_type ha_checktype(enum db_type database_type) break; } - return - DB_TYPE_UNKNOWN != (enum db_type) current_thd->variables.table_type ? - (enum db_type) current_thd->variables.table_type : - DB_TYPE_UNKNOWN != (enum db_type) global_system_variables.table_type ? - (enum db_type) global_system_variables.table_type : - DB_TYPE_MYISAM; + return ((enum db_type) thd->variables.table_type != DB_TYPE_UNKNOWN ? + (enum db_type) thd->variables.table_type : + (enum db_type) global_system_variables.table_type != + DB_TYPE_UNKNOWN ? 
+ (enum db_type) global_system_variables.table_type : DB_TYPE_MYISAM); } /* ha_checktype */ @@ -204,6 +205,10 @@ handler *get_new_handler(TABLE *table, enum db_type db_type) case DB_TYPE_ARCHIVE_DB: return new ha_archive(table); #endif +#ifdef HAVE_FEDERATED_DB + case DB_TYPE_FEDERATED_DB: + return new ha_federated(table); +#endif #ifdef HAVE_CSV_DB case DB_TYPE_CSV_DB: return new ha_tina(table); @@ -239,9 +244,99 @@ bool ha_caching_allowed(THD* thd, char* table_key, return 1; } + +/* + Register handler error messages for use with my_error(). + + SYNOPSIS + ha_init_errors() + + RETURN + 0 OK + != 0 Error +*/ + +static int ha_init_errors(void) +{ +#define SETMSG(nr, msg) errmsgs[(nr) - HA_ERR_FIRST]= (msg) + const char **errmsgs; + + /* Allocate a pointer array for the error message strings. */ + /* Zerofill it to avoid uninitialized gaps. */ + if (! (errmsgs= (const char**) my_malloc(HA_ERR_ERRORS * sizeof(char*), + MYF(MY_WME | MY_ZEROFILL)))) + return 1; + + /* Set the dedicated error messages. */ + SETMSG(HA_ERR_KEY_NOT_FOUND, ER(ER_KEY_NOT_FOUND)); + SETMSG(HA_ERR_FOUND_DUPP_KEY, ER(ER_DUP_KEY)); + SETMSG(HA_ERR_RECORD_CHANGED, "Update wich is recoverable"); + SETMSG(HA_ERR_WRONG_INDEX, "Wrong index given to function"); + SETMSG(HA_ERR_CRASHED, ER(ER_NOT_KEYFILE)); + SETMSG(HA_ERR_WRONG_IN_RECORD, ER(ER_CRASHED_ON_USAGE)); + SETMSG(HA_ERR_OUT_OF_MEM, "Table handler out of memory"); + SETMSG(HA_ERR_NOT_A_TABLE, "Incorrect file format '%.64s'"); + SETMSG(HA_ERR_WRONG_COMMAND, "Command not supported"); + SETMSG(HA_ERR_OLD_FILE, ER(ER_OLD_KEYFILE)); + SETMSG(HA_ERR_NO_ACTIVE_RECORD, "No record read in update"); + SETMSG(HA_ERR_RECORD_DELETED, "Intern record deleted"); + SETMSG(HA_ERR_RECORD_FILE_FULL, ER(ER_RECORD_FILE_FULL)); + SETMSG(HA_ERR_INDEX_FILE_FULL, "No more room in index file '%.64s'"); + SETMSG(HA_ERR_END_OF_FILE, "End in next/prev/first/last"); + SETMSG(HA_ERR_UNSUPPORTED, ER(ER_ILLEGAL_HA)); + SETMSG(HA_ERR_TO_BIG_ROW, "Too big row"); + SETMSG(HA_WRONG_CREATE_OPTION, "Wrong create option"); + SETMSG(HA_ERR_FOUND_DUPP_UNIQUE, ER(ER_DUP_UNIQUE)); + SETMSG(HA_ERR_UNKNOWN_CHARSET, "Can't open charset"); + SETMSG(HA_ERR_WRONG_MRG_TABLE_DEF, ER(ER_WRONG_MRG_TABLE)); + SETMSG(HA_ERR_CRASHED_ON_REPAIR, ER(ER_CRASHED_ON_REPAIR)); + SETMSG(HA_ERR_CRASHED_ON_USAGE, ER(ER_CRASHED_ON_USAGE)); + SETMSG(HA_ERR_LOCK_WAIT_TIMEOUT, ER(ER_LOCK_WAIT_TIMEOUT)); + SETMSG(HA_ERR_LOCK_TABLE_FULL, ER(ER_LOCK_TABLE_FULL)); + SETMSG(HA_ERR_READ_ONLY_TRANSACTION, ER(ER_READ_ONLY_TRANSACTION)); + SETMSG(HA_ERR_LOCK_DEADLOCK, ER(ER_LOCK_DEADLOCK)); + SETMSG(HA_ERR_CANNOT_ADD_FOREIGN, ER(ER_CANNOT_ADD_FOREIGN)); + SETMSG(HA_ERR_NO_REFERENCED_ROW, ER(ER_NO_REFERENCED_ROW)); + SETMSG(HA_ERR_ROW_IS_REFERENCED, ER(ER_ROW_IS_REFERENCED)); + SETMSG(HA_ERR_NO_SAVEPOINT, "No savepoint with that name"); + SETMSG(HA_ERR_NON_UNIQUE_BLOCK_SIZE, "Non unique key block size"); + SETMSG(HA_ERR_NO_SUCH_TABLE, "No such table: '%.64s'"); + SETMSG(HA_ERR_TABLE_EXIST, ER(ER_TABLE_EXISTS_ERROR)); + SETMSG(HA_ERR_NO_CONNECTION, "Could not connect to storage engine"); + + /* Register the error messages for use with my_error(). */ + return my_error_register(errmsgs, HA_ERR_FIRST, HA_ERR_LAST); +} + + +/* + Unregister handler error messages. + + SYNOPSIS + ha_finish_errors() + + RETURN + 0 OK + != 0 Error +*/ + +static int ha_finish_errors(void) +{ + const char **errmsgs; + + /* Allocate a pointer array for the error message strings. */ + if (! 
(errmsgs= my_error_unregister(HA_ERR_FIRST, HA_ERR_LAST))) + return 1; + my_free((gptr) errmsgs, MYF(0)); + return 0; +} + + int ha_init() { int error= 0; + if (ha_init_errors()) + return 1; #ifdef HAVE_BERKELEY_DB if (have_berkeley_db == SHOW_OPTION_YES) { @@ -309,6 +404,8 @@ int ha_panic(enum ha_panic_function flag) if (have_ndbcluster == SHOW_OPTION_YES) error|=ndbcluster_end(); #endif + if (ha_finish_errors()) + error= 1; return error; } /* ha_panic */ @@ -472,6 +569,21 @@ int ha_release_temporary_latches(THD *thd) return 0; } + +/* + Export statistics for different engines. Currently we use it only for + InnoDB. +*/ + +int ha_update_statistics() +{ +#ifdef HAVE_INNOBASE_DB + if (opt_innodb) + innodb_export_status(); +#endif + return 0; +} + int ha_commit_trans(THD *thd, THD_TRANS* trans) { int error=0; @@ -521,7 +633,8 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans) if ((error=ndbcluster_commit(thd,trans->ndb_tid))) { if (error == -1) - my_error(ER_ERROR_DURING_COMMIT, MYF(0)); + my_message(ER_ERROR_DURING_COMMIT, ER(ER_ERROR_DURING_COMMIT), + MYF(0)); error=1; } if (trans == &thd->transaction.all) @@ -565,7 +678,7 @@ int ha_commit_trans(THD *thd, THD_TRANS* trans) thd->variables.tx_isolation=thd->session_tx_isolation; if (operation_done) { - statistic_increment(ha_commit_count,&LOCK_status); + statistic_increment(thd->status_var.ha_commit_count,&LOCK_status); thd->transaction.cleanup(); } if (need_start_waiters) @@ -596,7 +709,8 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans) if ((error=ndbcluster_rollback(thd, trans->ndb_tid))) { if (error == -1) - my_error(ER_ERROR_DURING_ROLLBACK, MYF(0)); + my_message(ER_ERROR_DURING_ROLLBACK, ER(ER_ERROR_DURING_ROLLBACK), + MYF(0)); error=1; } trans->ndb_tid = 0; @@ -660,7 +774,7 @@ int ha_rollback_trans(THD *thd, THD_TRANS *trans) } thd->variables.tx_isolation=thd->session_tx_isolation; if (operation_done) - statistic_increment(ha_rollback_count,&LOCK_status); + statistic_increment(thd->status_var.ha_rollback_count,&LOCK_status); thd->proc_info= save_proc_info; } #endif /* USING_TRANSACTIONS */ @@ -734,7 +848,7 @@ int ha_rollback_to_savepoint(THD *thd, char *savepoint_name) operation_done=1; #endif if (operation_done) - statistic_increment(ha_rollback_count,&LOCK_status); + statistic_increment(thd->status_var.ha_rollback_count,&LOCK_status); } #endif /* USING_TRANSACTIONS */ @@ -816,10 +930,14 @@ bool ha_flush_logs() int ha_delete_table(enum db_type table_type, const char *path) { + handler *file; char tmp_path[FN_REFLEN]; - handler *file=get_new_handler((TABLE*) 0, table_type); - if (!file) + + /* DB_TYPE_UNKNOWN is used in ALTER TABLE when renaming only .frm files */ + if (table_type == DB_TYPE_UNKNOWN || + ! (file=get_new_handler((TABLE*) 0, table_type))) return ENOENT; + if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED)) { /* Ensure that table handler get path in lower case */ @@ -948,7 +1066,7 @@ int handler::read_first_row(byte * buf, uint primary_key) register int error; DBUG_ENTER("handler::read_first_row"); - statistic_increment(ha_read_first_count,&LOCK_status); + statistic_increment(current_thd->status_var.ha_read_first_count,&LOCK_status); /* If there is very few deleted rows in the table, find the first row by @@ -974,41 +1092,163 @@ int handler::read_first_row(byte * buf, uint primary_key) /* - Updates field with field_type NEXT_NUMBER according to following: - if field = 0 change field to the next free key in database. 
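Across the hunks above, the global ha_write_count / ha_read_*_count statistics give way to per-connection counters kept in thd->status_var, still bumped through statistic_increment() with LOCK_status. A minimal standalone sketch of that arrangement, with illustrative names only (SessionStatus, register_session, on_write_row are not server types or functions): each session owns its own counter block, and a global figure is obtained by summing the blocks (the server's actual aggregation differs in detail).

#include <cstdio>
#include <mutex>
#include <vector>

// Stand-in for the per-connection status block (thd->status_var in the diff).
struct SessionStatus {
  unsigned long ha_write_count= 0;
  unsigned long ha_read_key_count= 0;
};

static std::mutex registry_lock;              // plays the role of LOCK_status
static std::vector<SessionStatus*> sessions;  // all live sessions

void register_session(SessionStatus *s)
{
  std::lock_guard<std::mutex> guard(registry_lock);
  sessions.push_back(s);
}

// A handler call bumps only its own session's counter; in the server this
// sits behind the statistic_increment() macro.
void on_write_row(SessionStatus *thd_status)
{
  thd_status->ha_write_count++;
}

// Global view: sum the per-session blocks under the lock.
unsigned long global_write_count()
{
  std::lock_guard<std::mutex> guard(registry_lock);
  unsigned long total= 0;
  for (SessionStatus *s : sessions)
    total+= s->ha_write_count;
  return total;
}

int main()
{
  SessionStatus a, b;
  register_session(&a);
  register_session(&b);
  on_write_row(&a);
  on_write_row(&a);
  on_write_row(&b);
  std::printf("global ha_write_count: %lu\n", global_write_count());
  return 0;
}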
+ Generate the next auto-increment number based on increment and offset + + In most cases increment= offset= 1, in which case we get: + 1,2,3,4,5,... + If increment=10 and offset=5 and previous number is 1, we get: + 1,5,15,25,35,... +*/ + +inline ulonglong +next_insert_id(ulonglong nr,struct system_variables *variables) +{ + nr= (((nr+ variables->auto_increment_increment - + variables->auto_increment_offset)) / + (ulonglong) variables->auto_increment_increment); + return (nr* (ulonglong) variables->auto_increment_increment + + variables->auto_increment_offset); +} + + +/* + Updates columns with type NEXT_NUMBER if: + + - If column value is set to NULL (in which case + auto_increment_field_not_null is 0) + - If column is set to 0 and (sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) is not + set. In the future we will only set NEXT_NUMBER fields if one sets them + to NULL (or they are not included in the insert list). + + + There are two different cases when the above is true: + + - thd->next_insert_id == 0 (This is the normal case) + In this case we set the set the column for the first row to the value + next_insert_id(get_auto_increment(column))) which is normally + max-used-column-value +1. + + We call get_auto_increment() only for the first row in a multi-row + statement. For the following rows we generate new numbers based on the + last used number. + + - thd->next_insert_id != 0. This happens when we have read a statement + from the binary log or when one has used SET LAST_INSERT_ID=#. + + In this case we will set the column to the value of next_insert_id. + The next row will be given the id + next_insert_id(next_insert_id) + + The idea is that generated auto_increment values are predictable and + independent of the column values in the table. This is needed to be + able to replicate into a table that already has rows with a higher + auto-increment value than the one that is inserted. + + After we have already generated an auto-increment number and the user + inserts a column with a higher value than the last used one, we will + start counting from the inserted value. + + thd->next_insert_id is cleared after it's been used for a statement. 
*/ void handler::update_auto_increment() { - longlong nr; - THD *thd; + ulonglong nr; + THD *thd= table->in_use; + struct system_variables *variables= &thd->variables; DBUG_ENTER("handler::update_auto_increment"); - if (table->next_number_field->val_int() != 0 || + + /* + We must save the previous value to be able to restore it if the + row was not inserted + */ + thd->prev_insert_id= thd->next_insert_id; + + if ((nr= table->next_number_field->val_int()) != 0 || table->auto_increment_field_not_null && - current_thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) + thd->variables.sql_mode & MODE_NO_AUTO_VALUE_ON_ZERO) { + /* Clear flag for next row */ table->auto_increment_field_not_null= FALSE; + /* Mark that we didn't generate a new value **/ auto_increment_column_changed=0; + + /* Update next_insert_id if we have already generated a value */ + if (thd->clear_next_insert_id && nr >= thd->next_insert_id) + { + if (variables->auto_increment_increment != 1) + nr= next_insert_id(nr, variables); + else + nr++; + thd->next_insert_id= nr; + DBUG_PRINT("info",("next_insert_id: %lu", (ulong) nr)); + } DBUG_VOID_RETURN; } table->auto_increment_field_not_null= FALSE; - thd=current_thd; - if ((nr=thd->next_insert_id)) - thd->next_insert_id=0; // Clear after use - else - nr=get_auto_increment(); - if (!table->next_number_field->store(nr)) + if (!(nr= thd->next_insert_id)) + { + nr= get_auto_increment(); + if (variables->auto_increment_increment != 1) + nr= next_insert_id(nr-1, variables); + /* + Update next row based on the found value. This way we don't have to + call the handler for every generated auto-increment value on a + multi-row statement + */ + thd->next_insert_id= nr; + } + + DBUG_PRINT("info",("auto_increment: %lu", (ulong) nr)); + + /* Mark that we should clear next_insert_id before next stmt */ + thd->clear_next_insert_id= 1; + + if (!table->next_number_field->store((longlong) nr)) thd->insert_id((ulonglong) nr); else thd->insert_id(table->next_number_field->val_int()); + + /* + We can't set next_insert_id if the auto-increment key is not the + first key part, as there is no guarantee that the first parts will be in + sequence + */ + if (!table->next_number_key_offset) + { + /* + Set next insert id to point to next auto-increment value to be able to + handle multi-row statements + This works even if auto_increment_increment > 1 + */ + thd->next_insert_id= next_insert_id(nr, variables); + } + else + thd->next_insert_id= 0; + + /* Mark that we generated a new value */ auto_increment_column_changed=1; DBUG_VOID_RETURN; } +/* + restore_auto_increment -longlong handler::get_auto_increment() + In case of error on write, we restore the last used next_insert_id value + because the previous value was not used. 
+*/ + +void handler::restore_auto_increment() { - longlong nr; + THD *thd= table->in_use; + if (thd->next_insert_id) + thd->next_insert_id= thd->prev_insert_id; +} + + +ulonglong handler::get_auto_increment() +{ + ulonglong nr; int error; (void) extra(HA_EXTRA_KEYREAD); @@ -1020,7 +1260,8 @@ longlong handler::get_auto_increment() else { byte key[MAX_KEY_LENGTH]; - key_copy(key,table,table->next_number_index, + key_copy(key, table->record[0], + table->key_info + table->next_number_index, table->next_number_key_offset); error=index_read(table->record[1], key, table->next_number_key_offset, HA_READ_PREFIX_LAST); @@ -1029,8 +1270,8 @@ longlong handler::get_auto_increment() if (error) nr=1; else - nr=(longlong) table->next_number_field-> - val_int_offset(table->rec_buff_length)+1; + nr=((ulonglong) table->next_number_field-> + val_int_offset(table->rec_buff_length)+1); index_end(); (void) extra(HA_EXTRA_NO_KEYREAD); return nr; @@ -1077,7 +1318,7 @@ void handler::print_error(int error, myf errflag) str.length(max_length-4); str.append("..."); } - my_error(ER_DUP_ENTRY,MYF(0),str.c_ptr(),key_nr+1); + my_error(ER_DUP_ENTRY, MYF(0), str.c_ptr(), key_nr+1); DBUG_VOID_RETURN; } textno=ER_DUP_KEY; @@ -1095,14 +1336,20 @@ void handler::print_error(int error, myf errflag) case HA_ERR_CRASHED: textno=ER_NOT_KEYFILE; break; + case HA_ERR_WRONG_IN_RECORD: + textno= ER_CRASHED_ON_USAGE; + break; case HA_ERR_CRASHED_ON_USAGE: textno=ER_CRASHED_ON_USAGE; break; + case HA_ERR_NOT_A_TABLE: + textno= error; + break; case HA_ERR_CRASHED_ON_REPAIR: textno=ER_CRASHED_ON_REPAIR; break; case HA_ERR_OUT_OF_MEM: - my_error(ER_OUT_OF_RESOURCES,errflag); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), errflag); DBUG_VOID_RETURN; case HA_ERR_WRONG_COMMAND: textno=ER_ILLEGAL_HA; @@ -1116,6 +1363,9 @@ void handler::print_error(int error, myf errflag) case HA_ERR_RECORD_FILE_FULL: textno=ER_RECORD_FILE_FULL; break; + case HA_ERR_INDEX_FILE_FULL: + textno= errno; + break; case HA_ERR_LOCK_WAIT_TIMEOUT: textno=ER_LOCK_WAIT_TIMEOUT; break; @@ -1149,7 +1399,7 @@ void handler::print_error(int error, myf errflag) uint length=dirname_part(buff,table->path); buff[length-1]=0; db=buff+dirname_length(buff); - my_error(ER_NO_SUCH_TABLE,MYF(0),db,table->table_name); + my_error(ER_NO_SUCH_TABLE, MYF(0), db, table->table_name); break; } default: @@ -1163,16 +1413,16 @@ void handler::print_error(int error, myf errflag) { const char* engine= table_type(); if (temporary) - my_error(ER_GET_TEMPORARY_ERRMSG,MYF(0),error,str.ptr(),engine); + my_error(ER_GET_TEMPORARY_ERRMSG, MYF(0), error, str.ptr(), engine); else - my_error(ER_GET_ERRMSG,MYF(0),error,str.ptr(),engine); + my_error(ER_GET_ERRMSG, MYF(0), error, str.ptr(), engine); } else my_error(ER_GET_ERRNO,errflag,error); DBUG_VOID_RETURN; } } - my_error(textno,errflag,table->table_name,error); + my_error(textno, errflag, table->table_name, error); DBUG_VOID_RETURN; } @@ -1277,7 +1527,7 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info, char name_buff[FN_REFLEN]; DBUG_ENTER("ha_create_table"); - if (openfrm(name,"",0,(uint) READ_ALL, 0, &table)) + if (openfrm(current_thd, name,"",0,(uint) READ_ALL, 0, &table)) DBUG_RETURN(1); if (update_create_info) { @@ -1295,7 +1545,7 @@ int ha_create_table(const char *name, HA_CREATE_INFO *create_info, error=table.file->create(name,&table,create_info); VOID(closefrm(&table)); if (error) - my_error(ER_CANT_CREATE_TABLE,MYF(ME_BELL+ME_WAITTANG),name,error); + my_error(ER_CANT_CREATE_TABLE, MYF(ME_BELL+ME_WAITTANG), name,error); 
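The next_insert_id() helper added above rounds a candidate value up to the next point of the form offset + k*increment. A self-contained rendering of the same arithmetic, with an illustrative name (next_auto_value is not a server function) and the increment/offset passed in directly instead of read from thd->variables:

#include <cstdio>

typedef unsigned long long ulonglong;

// Same rounding rule as next_insert_id() in the diff: push 'nr' up to the
// next value of the form offset + k * increment (integer division).
static ulonglong next_auto_value(ulonglong nr, ulonglong increment,
                                 ulonglong offset)
{
  nr= (nr + increment - offset) / increment;
  return nr * increment + offset;
}

int main()
{
  // With increment=10 and offset=5, starting after a previous value of 1,
  // the generated values are 5, 15, 25, 35, ...
  ulonglong prev= 1;
  for (int i= 0; i < 4; i++)
  {
    prev= next_auto_value(prev, 10, 5);
    std::printf("%llu\n", prev);
  }
  return 0;
}

Generating values on this fixed lattice is what keeps the numbers predictable and independent of the rows already in the table, which the comment block above calls out as the requirement for replication.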
DBUG_RETURN(error != 0); } @@ -1342,7 +1592,7 @@ int ha_create_table_from_engine(THD* thd, if ((error = writefrm(path, frmblob, frmlen))) goto err_end; - if (openfrm(path,"",0,(uint) READ_ALL, 0, &table)) + if (openfrm(thd, path,"",0,(uint) READ_ALL, 0, &table)) DBUG_RETURN(1); update_create_info_from_table(&create_info, &table); @@ -1541,6 +1791,131 @@ int ha_table_exists(THD* thd, const char* db, const char* name) /* + Read the first row of a multi-range set. + + SYNOPSIS + read_multi_range_first() + found_range_p Returns a pointer to the element in 'ranges' that + corresponds to the returned row. + ranges An array of KEY_MULTI_RANGE range descriptions. + range_count Number of ranges in 'ranges'. + sorted If result should be sorted per key. + buffer A HANDLER_BUFFER for internal handler usage. + + NOTES + Record is read into table->record[0]. + *found_range_p returns a valid value only if read_multi_range_first() + returns 0. + Sorting is done within each range. If you want an overall sort, enter + 'ranges' with sorted ranges. + + RETURN + 0 OK, found a row + HA_ERR_END_OF_FILE No rows in range + # Error code +*/ + +int handler::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, + KEY_MULTI_RANGE *ranges, uint range_count, + bool sorted, HANDLER_BUFFER *buffer) +{ + int result= HA_ERR_END_OF_FILE; + DBUG_ENTER("handler::read_multi_range_first"); + multi_range_sorted= sorted; + multi_range_buffer= buffer; + + for (multi_range_curr= ranges, multi_range_end= ranges + range_count; + multi_range_curr < multi_range_end; + multi_range_curr++) + { + result= read_range_first(multi_range_curr->start_key.length ? + &multi_range_curr->start_key : 0, + multi_range_curr->end_key.length ? + &multi_range_curr->end_key : 0, + test(multi_range_curr->range_flag & EQ_RANGE), + multi_range_sorted); + if (result != HA_ERR_END_OF_FILE) + break; + } + + *found_range_p= multi_range_curr; + DBUG_PRINT("exit",("result %d", result)); + DBUG_RETURN(result); +} + + +/* + Read the next row of a multi-range set. + + SYNOPSIS + read_multi_range_next() + found_range_p Returns a pointer to the element in 'ranges' that + corresponds to the returned row. + + NOTES + Record is read into table->record[0]. + *found_range_p returns a valid value only if read_multi_range_next() + returns 0. + + RETURN + 0 OK, found a row + HA_ERR_END_OF_FILE No (more) rows in range + # Error code +*/ + +int handler::read_multi_range_next(KEY_MULTI_RANGE **found_range_p) +{ + int result; + DBUG_ENTER("handler::read_multi_range_next"); + + /* We should not be called after the last call returned EOF. */ + DBUG_ASSERT(multi_range_curr < multi_range_end); + + do + { + /* Save a call if there can be only one row in range. */ + if (multi_range_curr->range_flag != (UNIQUE_RANGE | EQ_RANGE)) + { + result= read_range_next(); + + /* On success or non-EOF errors jump to the end. */ + if (result != HA_ERR_END_OF_FILE) + break; + } + else + { + /* + We need to set this for the last range only, but checking this + condition is more expensive than just setting the result code. + */ + result= HA_ERR_END_OF_FILE; + } + + /* Try the next range(s) until one matches a record. */ + for (multi_range_curr++; + multi_range_curr < multi_range_end; + multi_range_curr++) + { + result= read_range_first(multi_range_curr->start_key.length ? + &multi_range_curr->start_key : 0, + multi_range_curr->end_key.length ? 
+ &multi_range_curr->end_key : 0, + test(multi_range_curr->range_flag & EQ_RANGE), + multi_range_sorted); + if (result != HA_ERR_END_OF_FILE) + break; + } + } + while ((result == HA_ERR_END_OF_FILE) && + (multi_range_curr < multi_range_end)); + + *found_range_p= multi_range_curr; + DBUG_PRINT("exit",("handler::read_multi_range_next: result %d", result)); + DBUG_RETURN(result); +} + + +/* Read first row between two ranges. Store ranges for future calls to read_range_next diff --git a/sql/handler.h b/sql/handler.h index 245defe61e0..b10e6bfe88c 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -44,6 +44,7 @@ #define HA_ADMIN_INVALID -5 #define HA_ADMIN_REJECT -6 #define HA_ADMIN_TRY_ALTER -7 +#define HA_ADMIN_WRONG_CHECKSUM -8 /* Bits in table_flags() to show what database can do */ #define HA_READ_RND_SAME (1 << 0) /* can switch index during the scan @@ -73,6 +74,9 @@ #define HA_HAS_CHECKSUM (1 << 24) /* Table data are stored in separate files (for lower_case_table_names) */ #define HA_FILE_BASED (1 << 26) +#define HA_NO_VARCHAR (1 << 27) +#define HA_CAN_BIT_FIELD (1 << 28) /* supports bit fields */ +#define HA_NEED_READ_RANGE_BUFFER (1 << 29) /* for read_multi_range */ /* bits in index_flags(index_number) for what you can do with index */ @@ -141,16 +145,17 @@ /* Options of START TRANSACTION statement (and later of SET TRANSACTION stmt) */ #define MYSQL_START_TRANS_OPT_WITH_CONS_SNAPSHOT 1 -enum db_type -{ +enum db_type +{ DB_TYPE_UNKNOWN=0,DB_TYPE_DIAB_ISAM=1, DB_TYPE_HASH,DB_TYPE_MISAM,DB_TYPE_PISAM, DB_TYPE_RMS_ISAM, DB_TYPE_HEAP, DB_TYPE_ISAM, DB_TYPE_MRG_ISAM, DB_TYPE_MYISAM, DB_TYPE_MRG_MYISAM, - DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, + DB_TYPE_BERKELEY_DB, DB_TYPE_INNODB, DB_TYPE_GEMINI, DB_TYPE_NDBCLUSTER, DB_TYPE_EXAMPLE_DB, DB_TYPE_ARCHIVE_DB, DB_TYPE_CSV_DB, - + DB_TYPE_FEDERATED_DB, + DB_TYPE_DEFAULT // Must be last }; @@ -167,16 +172,24 @@ enum row_type { ROW_TYPE_NOT_USED=-1, ROW_TYPE_DEFAULT, ROW_TYPE_FIXED, /* struct to hold information about the table that should be created */ /* Bits in used_fields */ -#define HA_CREATE_USED_AUTO 1 -#define HA_CREATE_USED_RAID 2 -#define HA_CREATE_USED_UNION 4 -#define HA_CREATE_USED_INSERT_METHOD 8 -#define HA_CREATE_USED_MIN_ROWS 16 -#define HA_CREATE_USED_MAX_ROWS 32 -#define HA_CREATE_USED_AVG_ROW_LENGTH 64 -#define HA_CREATE_USED_PACK_KEYS 128 -#define HA_CREATE_USED_CHARSET 256 -#define HA_CREATE_USED_DEFAULT_CHARSET 512 +#define HA_CREATE_USED_AUTO (1L << 0) +#define HA_CREATE_USED_RAID (1L << 1) +#define HA_CREATE_USED_UNION (1L << 2) +#define HA_CREATE_USED_INSERT_METHOD (1L << 3) +#define HA_CREATE_USED_MIN_ROWS (1L << 4) +#define HA_CREATE_USED_MAX_ROWS (1L << 5) +#define HA_CREATE_USED_AVG_ROW_LENGTH (1L << 6) +#define HA_CREATE_USED_PACK_KEYS (1L << 7) +#define HA_CREATE_USED_CHARSET (1L << 8) +#define HA_CREATE_USED_DEFAULT_CHARSET (1L << 9) +#define HA_CREATE_USED_DATADIR (1L << 10) +#define HA_CREATE_USED_INDEXDIR (1L << 11) +#define HA_CREATE_USED_ENGINE (1L << 12) +#define HA_CREATE_USED_CHECKSUM (1L << 13) +#define HA_CREATE_USED_DELAY_KEY_WRITE (1L << 14) +#define HA_CREATE_USED_ROW_FORMAT (1L << 15) +#define HA_CREATE_USED_COMMENT (1L << 16) +#define HA_CREATE_USED_PASSWORD (1L << 17) typedef struct st_thd_trans { void *bdb_tid; @@ -185,6 +198,41 @@ typedef struct st_thd_trans { void *ndb_tid; } THD_TRANS; +#ifndef XIDDATASIZE /* no xa.h included */ + +/* XXX - may be we should disable xa completely in this case ? 
*/ +#define XIDDATASIZE 128 +#define MAXGTRIDSIZE 64 +#define MAXBQUALSIZE 64 + +struct xid_t { + long formatID; + long gtrid_length; + long bqual_length; + char data[XIDDATASIZE]; +}; + +typedef struct xid_t XID; + + +#endif + +typedef struct +{ + byte slot; + uint savepoint_offset; + int (*close_connection)(THD *thd); + int (*savepoint_set)(THD *thd, void *sv); + int (*savepoint_rollback)(THD *thd, void *sv); + int (*savepoint_release)(THD *thd, void *sv); + int (*commit)(THD *thd, bool all); + int (*rollback)(THD *thd, bool all); + int (*prepare)(THD *thd, bool all); + int (*recover)(XID *xid_list, uint len); + int (*commit_by_xid)(XID *xid); + int (*rollback_by_xid)(XID *xid); +} handlerton; + enum enum_tx_isolation { ISO_READ_UNCOMMITTED, ISO_READ_COMMITTED, ISO_REPEATABLE_READ, ISO_SERIALIZABLE}; @@ -207,6 +255,8 @@ typedef struct st_ha_create_information uint raid_type,raid_chunks; uint merge_insert_method; bool table_existed; /* 1 in create if table existed */ + bool frm_only; /* 1 if no ha_create_table() */ + bool varchar; /* 1 if table has a VARCHAR */ } HA_CREATE_INFO; @@ -214,6 +264,8 @@ typedef struct st_ha_create_information struct st_table; typedef struct st_table TABLE; +struct st_foreign_key_info; +typedef struct st_foreign_key_info FOREIGN_KEY_INFO; typedef struct st_ha_check_opt { @@ -225,6 +277,21 @@ typedef struct st_ha_check_opt } HA_CHECK_OPT; +/* + This is a buffer area that the handler can use to store rows. + 'end_of_used_area' should be kept updated after calls to + read-functions so that other parts of the code can use the + remaining area (until next read calls is issued). +*/ + +typedef struct st_handler_buffer +{ + const byte *buffer; /* Buffer one can start using */ + const byte *buffer_end; /* End of buffer */ + byte *end_of_used_area; /* End of area that was used by handler */ +} HANDLER_BUFFER; + + class handler :public Sql_alloc { protected: @@ -259,6 +326,12 @@ public: time_t check_time; time_t update_time; + /* The following are for read_multi_range */ + bool multi_range_sorted; + KEY_MULTI_RANGE *multi_range_curr; + KEY_MULTI_RANGE *multi_range_end; + HANDLER_BUFFER *multi_range_buffer; + /* The following are for read_range() */ key_range save_end_range, *end_range; KEY_PART_INFO *range_key_part; @@ -315,29 +388,33 @@ public: int ha_index_init(uint idx) { + DBUG_ENTER("ha_index_init"); DBUG_ASSERT(inited==NONE); inited=INDEX; - return index_init(idx); + DBUG_RETURN(index_init(idx)); } int ha_index_end() { + DBUG_ENTER("ha_index_end"); DBUG_ASSERT(inited==INDEX); inited=NONE; - return index_end(); + DBUG_RETURN(index_end()); } int ha_rnd_init(bool scan) { + DBUG_ENTER("ha_rnd_init"); DBUG_ASSERT(inited==NONE || (inited==RND && scan)); inited=RND; - return rnd_init(scan); + DBUG_RETURN(rnd_init(scan)); } int ha_rnd_end() { + DBUG_ENTER("ha_rnd_end"); DBUG_ASSERT(inited==RND); inited=NONE; - return rnd_end(); + DBUG_RETURN(rnd_end()); } - /* this is neseccary in many places, e.g. in HANDLER command */ + /* this is necessary in many places, e.g. in HANDLER command */ int ha_index_or_rnd_end() { return inited == INDEX ? ha_index_end() : inited == RND ? 
ha_rnd_end() : 0; @@ -366,6 +443,10 @@ public: virtual int index_next_same(byte *buf, const byte *key, uint keylen); virtual int index_read_last(byte * buf, const byte * key, uint key_len) { return (my_errno=HA_ERR_WRONG_COMMAND); } + virtual int read_multi_range_first(KEY_MULTI_RANGE **found_range_p, + KEY_MULTI_RANGE *ranges, uint range_count, + bool sorted, HANDLER_BUFFER *buffer); + virtual int read_multi_range_next(KEY_MULTI_RANGE **found_range_p); virtual int read_range_first(const key_range *start_key, const key_range *end_key, bool eq_range, bool sorted); @@ -398,7 +479,7 @@ public: virtual int extra_opt(enum ha_extra_function operation, ulong cache_size) { return extra(operation); } virtual int reset() { return extra(HA_EXTRA_RESET); } - virtual int external_lock(THD *thd, int lock_type)=0; + virtual int external_lock(THD *thd, int lock_type) { return 0; } virtual void unlock_row() {} virtual int start_stmt(THD *thd) {return 0;} /* @@ -409,7 +490,8 @@ public: */ virtual int delete_all_rows() { return (my_errno=HA_ERR_WRONG_COMMAND); } - virtual longlong get_auto_increment(); + virtual ulonglong get_auto_increment(); + virtual void restore_auto_increment(); virtual void update_create_info(HA_CREATE_INFO *create_info) {} /* admin commands - called from mysql_admin_table */ @@ -451,6 +533,8 @@ public: virtual char* get_foreign_key_create_info() { return(NULL);} /* gets foreign key create string from InnoDB */ /* used in REPLACE; is > 0 if table is referred by a FOREIGN KEY */ + virtual int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list) + { return 0; } virtual uint referenced_by_foreign_key() { return 0;} virtual void init_table_handle_for_HANDLER() { return; } /* prepare InnoDB for HANDLER */ @@ -507,10 +591,18 @@ public: /* Type of table for caching query */ virtual uint8 table_cache_type() { return HA_CACHE_TBL_NONTRANSACT; } - /* - Is query with this table cachable (have sense only for ASKTRANSACT - tables) - */ + + /* + RETURN + true Primary key (if there is one) is clustered key covering all fields + false otherwise + */ + virtual bool primary_key_is_clustered() { return FALSE; } + + virtual int cmp_ref(const byte *ref1, const byte *ref2) + { + return memcmp(ref1, ref2, ref_length); + } }; /* Some extern variables used with handlers */ @@ -555,6 +647,7 @@ int ha_report_binlog_offset_and_commit(THD *thd, char *log_file_name, my_off_t end_offset); int ha_commit_complete(THD *thd); int ha_release_temporary_latches(THD *thd); +int ha_update_statistics(); int ha_commit_trans(THD *thd, THD_TRANS *trans); int ha_rollback_trans(THD *thd, THD_TRANS *trans); int ha_rollback_to_savepoint(THD *thd, char *savepoint_name); diff --git a/sql/item.cc b/sql/item.cc index 92a15694e89..7dba1f3a66a 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -22,6 +22,10 @@ #include "mysql_priv.h" #include <m_ctype.h> #include "my_dir.h" +#include "sp_rcontext.h" +#include "sp_head.h" +#include "sql_trigger.h" +#include "sql_select.h" static void mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, @@ -41,12 +45,11 @@ void item_init(void) } Item::Item(): - fixed(0) + name(0), orig_name(0), name_length(0), fixed(0), + collation(default_charset(), DERIVATION_COERCIBLE) { marker= 0; maybe_null=null_value=with_sum_func=unsigned_flag=0; - collation.set(default_charset(), DERIVATION_COERCIBLE); - name= 0; decimals= 0; max_length= 0; /* Put item in free list so that we can free all items at end */ @@ -76,6 +79,7 @@ Item::Item(): Item::Item(THD *thd, Item *item): 
str_value(item->str_value), name(item->name), + orig_name(item->orig_name), max_length(item->max_length), marker(item->marker), decimals(item->decimals), @@ -96,25 +100,78 @@ void Item::print_item_w_name(String *str) print(str); if (name) { - str->append(" AS `", 5); - str->append(name); - str->append('`'); + THD *thd= current_thd; + str->append(" AS ", 4); + append_identifier(thd, str, name, strlen(name)); } } +void Item::cleanup() +{ + DBUG_ENTER("Item::cleanup"); + DBUG_PRINT("info", ("Item: 0x%lx, Type: %d, name %s, original name %s", + this, (int)type(), name ? name : "(null)", + orig_name ? orig_name : "null")); + fixed=0; + marker= 0; + if (orig_name) + name= orig_name; + DBUG_VOID_RETURN; +} + + +/* + cleanup() item if it is 'fixed' + + SYNOPSIS + cleanup_processor() + arg - a dummy parameter, is not used here +*/ + +bool Item::cleanup_processor(byte *arg) +{ + if (fixed) + cleanup(); + return FALSE; +} + + +/* + rename item (used for views, cleanup() return original name) + + SYNOPSIS + Item::rename() + new_name new name of item; +*/ + +void Item::rename(char *new_name) +{ + /* + we can compare pointers to names here, bacause if name was not changed, + pointer will be same + */ + if (!orig_name && new_name != name) + orig_name= name; + name= new_name; +} + + Item_ident::Item_ident(const char *db_name_par,const char *table_name_par, const char *field_name_par) :orig_db_name(db_name_par), orig_table_name(table_name_par), orig_field_name(field_name_par), - db_name(db_name_par), table_name(table_name_par), - field_name(field_name_par), cached_field_index(NO_CACHED_FIELD_INDEX), + db_name(db_name_par), table_name(table_name_par), + field_name(field_name_par), + alias_name_used(FALSE), cached_field_index(NO_CACHED_FIELD_INDEX), cached_table(0), depended_from(0) { name = (char*) field_name_par; } -// Constructor used by Item_field & Item_*_ref (see Item comment) + +/* Constructor used by Item_field & Item_*_ref (see Item comment) */ + Item_ident::Item_ident(THD *thd, Item_ident *item) :Item(thd, item), orig_db_name(item->orig_db_name), @@ -123,6 +180,7 @@ Item_ident::Item_ident(THD *thd, Item_ident *item) db_name(item->db_name), table_name(item->table_name), field_name(item->field_name), + alias_name_used(item->alias_name_used), cached_field_index(item->cached_field_index), cached_table(item->cached_table), depended_from(item->depended_from) @@ -131,10 +189,14 @@ Item_ident::Item_ident(THD *thd, Item_ident *item) void Item_ident::cleanup() { DBUG_ENTER("Item_ident::cleanup"); - DBUG_PRINT("enter", ("b:%s(%s), t:%s(%s), f:%s(%s)", - db_name, orig_db_name, - table_name, orig_table_name, - field_name, orig_field_name)); +#ifdef CANT_BE_USED_AS_MEMORY_IS_FREED + db_name ? db_name : "(null)", + orig_db_name ? orig_db_name : "(null)", + table_name ? table_name : "(null)", + orig_table_name ? orig_table_name : "(null)", + field_name ? field_name : "(null)", + orig_field_name ? orig_field_name : "(null)")); +#endif Item::cleanup(); db_name= orig_db_name; table_name= orig_table_name; @@ -151,6 +213,45 @@ bool Item_ident::remove_dependence_processor(byte * arg) } +/* + Store the pointer to this item field into a list if not already there. + + SYNOPSIS + Item_field::collect_item_field_processor() + arg pointer to a List<Item_field> + + DESCRIPTION + The method is used by Item::walk to collect all unique Item_field objects + from a tree of Items into a set of items represented as a list. 
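collect_item_field_processor() above follows the walk() convention: a processor that returns true stops the traversal, so this one always returns false and appends a field only if no equal one is already in the list. A compact standalone model of that convention, using illustrative types (Node, collect_fields) rather than the Item classes:

#include <cstdio>
#include <string>
#include <vector>

struct Node {
  std::string name;                 // stands in for a column reference
  std::vector<Node*> children;      // stands in for function/condition arguments

  // walk(): stop as soon as a processor returns true, as Item::walk() does.
  bool walk(bool (*processor)(Node*, void*), void *arg)
  {
    if (processor(this, arg))
      return true;
    for (Node *child : children)
      if (child->walk(processor, arg))
        return true;
    return false;
  }
};

// Collector: always returns false so every node is visited; only leaf nodes
// are gathered, and duplicates are skipped, mirroring the eq() check above.
static bool collect_fields(Node *node, void *arg)
{
  if (!node->children.empty())
    return false;                   // not a "field", keep walking
  std::vector<Node*> *out= static_cast<std::vector<Node*>*>(arg);
  for (Node *seen : *out)
    if (seen->name == node->name)
      return false;                 // already in the set
  out->push_back(node);
  return false;                     // keep walking
}

int main()
{
  Node a{"t1.a", {}}, b{"t1.b", {}}, a2{"t1.a", {}};
  Node plus{"+", {&a, &b}};
  Node cond{"=", {&plus, &a2}};

  std::vector<Node*> fields;
  cond.walk(collect_fields, &fields);
  for (Node *f : fields)
    std::printf("%s\n", f->name.c_str());   // each distinct column once
  return 0;
}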
+ + IMPLEMENTATION + Item_cond::walk() and Item_func::walk() stop the evaluation of the + processor function for its arguments once the processor returns + true.Therefore in order to force this method being called for all item + arguments in a condition the method must return false. + + RETURN + false to force the evaluation of collect_item_field_processor + for the subsequent items. +*/ + +bool Item_field::collect_item_field_processor(byte *arg) +{ + DBUG_ENTER("Item_field::collect_item_field_processor"); + DBUG_PRINT("info", ("%s", field->field_name ? field->field_name : "noname")); + List<Item_field> *item_list= (List<Item_field>*) arg; + List_iterator<Item_field> item_list_it(*item_list); + Item_field *curr_item; + while ((curr_item= item_list_it++)) + { + if (curr_item->eq(this, 1)) + DBUG_RETURN(false); /* Already in the set. */ + } + item_list->push_back(this); + DBUG_RETURN(false); +} + + bool Item::check_cols(uint c) { if (c != 1) @@ -168,12 +269,15 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) { /* Empty string, used by AS or internal function like last_insert_id() */ name= (char*) str; + name_length= 0; return; } if (cs->ctype) { - // This will probably need a better implementation in the future: - // a function in CHARSET_INFO structure. + /* + This will probably need a better implementation in the future: + a function in CHARSET_INFO structure. + */ while (length && !my_isgraph(cs,*str)) { // Fix problem with yacc length--; @@ -183,12 +287,12 @@ void Item::set_name(const char *str, uint length, CHARSET_INFO *cs) if (!my_charset_same(cs, system_charset_info)) { uint32 res_length; - name= sql_strmake_with_convert(str, length, cs, + name= sql_strmake_with_convert(str, name_length= length, cs, MAX_ALIAS_NAME, system_charset_info, &res_length); } else - name=sql_strmake(str, min(length,MAX_ALIAS_NAME)); + name= sql_strmake(str, (name_length= min(length,MAX_ALIAS_NAME))); } @@ -295,6 +399,46 @@ CHARSET_INFO *Item::default_charset() } +int Item::save_in_field_no_warnings(Field *field, bool no_conversions) +{ + int res; + THD *thd= field->table->in_use; + enum_check_fields tmp= thd->count_cuted_fields; + thd->count_cuted_fields= CHECK_FIELD_IGNORE; + res= save_in_field(field, no_conversions); + thd->count_cuted_fields= tmp; + return res; +} + + +Item * +Item_splocal::this_item() +{ + THD *thd= current_thd; + + return thd->spcont->get_item(m_offset); +} + +Item * +Item_splocal::this_const_item() const +{ + THD *thd= current_thd; + + return thd->spcont->get_item(m_offset); +} + +Item::Type +Item_splocal::type() const +{ + THD *thd= current_thd; + + if (thd->spcont) + return thd->spcont->get_item(m_offset)->type(); + return NULL_ITEM; // Anything but SUBSELECT_ITEM +} + + + /* Aggregate two collations together taking into account their coercibility (aka derivation): @@ -425,7 +569,9 @@ bool DTCollation::aggregate(DTCollation &dt, uint flags) } Item_field::Item_field(Field *f) - :Item_ident(NullS, f->table_name, f->field_name) + :Item_ident(NullS, f->table_name, f->field_name), + item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0) { set_field(f); /* @@ -436,7 +582,9 @@ Item_field::Item_field(Field *f) } Item_field::Item_field(THD *thd, Field *f) - :Item_ident(f->table->table_cache_key, f->table_name, f->field_name) + :Item_ident(f->table->table_cache_key, f->table_name, f->field_name), + item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0) { /* We always need to provide Item_field with a fully qualified field @@ -473,7 
+621,11 @@ Item_field::Item_field(THD *thd, Field *f) Item_field::Item_field(THD *thd, Item_field *item) :Item_ident(thd, item), field(item->field), - result_field(item->result_field) + result_field(item->result_field), + item_equal(item->item_equal), + no_const_subst(item->no_const_subst), + have_privileges(item->have_privileges), + any_privileges(item->any_privileges) { collation.set(DERIVATION_IMPLICIT); } @@ -487,6 +639,7 @@ void Item_field::set_field(Field *field_par) table_name=field_par->table_name; field_name=field_par->field_name; db_name=field_par->table->table_cache_key; + alias_name_used= field_par->table->alias_name_used; unsigned_flag=test(field_par->flags & UNSIGNED_FLAG); collation.set(field_par->charset(), DERIVATION_IMPLICIT); fixed= 1; @@ -531,6 +684,55 @@ const char *Item_ident::full_name() const return tmp; } +void Item_ident::print(String *str) +{ + THD *thd= current_thd; + char d_name_buff[MAX_ALIAS_NAME], t_name_buff[MAX_ALIAS_NAME]; + const char *d_name= db_name, *t_name= table_name; + if (lower_case_table_names== 1 || + (lower_case_table_names == 2 && !alias_name_used)) + { + if (table_name && table_name[0]) + { + strmov(t_name_buff, table_name); + my_casedn_str(files_charset_info, t_name_buff); + t_name= t_name_buff; + } + if (db_name && db_name[0]) + { + strmov(d_name_buff, db_name); + my_casedn_str(files_charset_info, d_name_buff); + d_name= d_name_buff; + } + } + + if (!table_name || !field_name) + { + const char *nm= field_name ? field_name : name ? name : "tmp_field"; + append_identifier(thd, str, nm, strlen(nm)); + return; + } + if (db_name && db_name[0] && !alias_name_used) + { + append_identifier(thd, str, d_name, strlen(d_name)); + str->append('.'); + append_identifier(thd, str, t_name, strlen(t_name)); + str->append('.'); + append_identifier(thd, str, field_name, strlen(field_name)); + } + else + { + if (table_name[0]) + { + append_identifier(thd, str, t_name, strlen(t_name)); + str->append('.'); + append_identifier(thd, str, field_name, strlen(field_name)); + } + else + append_identifier(thd, str, field_name, strlen(field_name)); + } +} + /* ARGSUSED */ String *Item_field::val_str(String *str) { @@ -541,7 +743,7 @@ String *Item_field::val_str(String *str) return field->val_str(str,&str_value); } -double Item_field::val() +double Item_field::val_real() { DBUG_ASSERT(fixed == 1); if ((null_value=field->is_null())) @@ -734,7 +936,7 @@ void Item_string::print(String *str) bool Item_null::eq(const Item *item, bool binary_cmp) const { return item->type() == type(); } -double Item_null::val() +double Item_null::val_real() { // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); @@ -779,12 +981,13 @@ default_set_param_func(Item_param *param, param->set_null(); } + Item_param::Item_param(unsigned pos_in_query_arg) : state(NO_VALUE), item_result_type(STRING_RESULT), /* Don't pretend to be a literal unless value for this item is set. 
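Item_ident::print() above lower-cases the database and table parts when lower_case_table_names is 1 (or 2 without an alias) and pushes every part through append_identifier(). A rough standalone sketch of that output step, limited to backtick quoting and ASCII folding; quote_ident() and print_ident() are made-up helpers, not the server's append_identifier():

#include <cctype>
#include <cstdio>
#include <string>

// Quote one identifier with backticks, doubling any embedded backtick.
static std::string quote_ident(const std::string &name)
{
  std::string out= "`";
  for (char c : name)
  {
    if (c == '`')
      out+= "``";
    else
      out+= c;
  }
  out+= '`';
  return out;
}

static std::string to_lower(std::string s)
{
  for (char &c : s)
    c= (char) std::tolower((unsigned char) c);
  return s;
}

// Assemble db.table.column the way Item_ident::print() does, folding the
// db and table names when the (illustrative) case-folding mode is on.
static std::string print_ident(std::string db, std::string table,
                               const std::string &column, bool fold_names)
{
  if (fold_names)
  {
    db= to_lower(db);
    table= to_lower(table);
  }
  std::string out;
  if (!db.empty())
    out+= quote_ident(db) + ".";
  if (!table.empty())
    out+= quote_ident(table) + ".";
  out+= quote_ident(column);
  return out;
}

int main()
{
  std::printf("%s\n", print_ident("Test", "T1", "a", true).c_str());
  // -> `test`.`t1`.`a`
  return 0;
}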
*/ item_type(PARAM_ITEM), - param_type(MYSQL_TYPE_STRING), + param_type(MYSQL_TYPE_VARCHAR), pos_in_query(pos_in_query_arg), set_param_func(default_set_param_func) { @@ -797,6 +1000,7 @@ Item_param::Item_param(unsigned pos_in_query_arg) : maybe_null= 1; } + void Item_param::set_null() { DBUG_ENTER("Item_param::set_null"); @@ -866,7 +1070,7 @@ void Item_param::set_time(TIME *tm, timestamp_type type, uint32 max_length_arg) { char buff[MAX_DATE_STRING_REP_LENGTH]; uint length= my_TIME_to_str(&value.time, buff); - make_truncated_value_warning(current_thd, buff, length, type); + make_truncated_value_warning(current_thd, buff, length, type, 0); set_zero_time(&value.time, MYSQL_TIMESTAMP_ERROR); } @@ -1077,7 +1281,7 @@ bool Item_param::get_date(TIME *res, uint fuzzydate) } -double Item_param::val() +double Item_param::val_real() { switch (state) { case REAL_VALUE: @@ -1260,8 +1464,27 @@ bool Item_param::convert_str_value(THD *thd) return rc; } -/* End of Item_param related */ +void Item_param::print(String *str) +{ + if (state == NO_VALUE) + { + str->append('?'); + } + else + { + char buffer[80]; + String tmp(buffer, sizeof(buffer), &my_charset_bin); + const String *res; + res= query_val_str(&tmp); + str->append(*res); + } +} + + +/**************************************************************************** + Item_copy_string +****************************************************************************/ void Item_copy_string::copy() { @@ -1303,10 +1526,10 @@ bool Item::fix_fields(THD *thd, // We do not check fields which are fixed during construction DBUG_ASSERT(fixed == 0 || basic_const_item()); fixed= 1; - return 0; + return FALSE; } -double Item_ref_null_helper::val() +double Item_ref_null_helper::val_real() { DBUG_ASSERT(fixed == 1); double tmp= (*ref)->val_result(); @@ -1370,176 +1593,445 @@ static void mark_as_dependent(THD *thd, SELECT_LEX *last, SELECT_LEX *current, } -bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) + + +/* + Search a GROUP BY clause for a field with a certain name. + + SYNOPSIS + find_field_in_group_list() + find_item the item being searched for + group_list GROUP BY clause + + DESCRIPTION + Search the GROUP BY list for a column named as find_item. When searching + preference is given to columns that are qualified with the same table (and + database) name as the one being searched for. + + RETURN + - the found item on success + - NULL if find_item is not in group_list +*/ + +static Item** find_field_in_group_list(Item *find_item, ORDER *group_list) +{ + const char *db_name; + const char *table_name; + const char *field_name; + ORDER *found_group= NULL; + int found_match_degree= 0; + Item_field *cur_field; + int cur_match_degree= 0; + + if (find_item->type() == Item::FIELD_ITEM || + find_item->type() == Item::REF_ITEM) + { + db_name= ((Item_ident*) find_item)->db_name; + table_name= ((Item_ident*) find_item)->table_name; + field_name= ((Item_ident*) find_item)->field_name; + } + else + return NULL; + + DBUG_ASSERT(field_name); + + for (ORDER *cur_group= group_list ; cur_group ; cur_group= cur_group->next) + { + if ((*(cur_group->item))->type() == Item::FIELD_ITEM) + { + cur_field= (Item_field*) *cur_group->item; + cur_match_degree= 0; + + DBUG_ASSERT(cur_field->field_name); + + if (!my_strcasecmp(system_charset_info, + cur_field->field_name, field_name)) + ++cur_match_degree; + else + continue; + + if (cur_field->table_name && table_name) + { + /* If field_name is qualified by a table name. 
*/ + if (strcmp(cur_field->table_name, table_name)) + /* Same field names, different tables. */ + return NULL; + + ++cur_match_degree; + if (cur_field->db_name && db_name) + { + /* If field_name is also qualified by a database name. */ + if (strcmp(cur_field->db_name, db_name)) + /* Same field names, different databases. */ + return NULL; + ++cur_match_degree; + } + } + + if (cur_match_degree > found_match_degree) + { + found_match_degree= cur_match_degree; + found_group= cur_group; + } + else if (found_group && (cur_match_degree == found_match_degree) && + ! (*(found_group->item))->eq(cur_field, 0)) + { + /* + If the current resolve candidate matches equally well as the current + best match, they must reference the same column, otherwise the field + is ambiguous. + */ + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find_item->full_name(), current_thd->where); + return NULL; + } + } + } + + if (found_group) + return found_group->item; + else + return NULL; +} + + +/* + Resolve a column reference in a sub-select. + + SYNOPSIS + resolve_ref_in_select_and_group() + thd current thread + ref column reference being resolved + select the sub-select that ref is resolved against + + DESCRIPTION + Resolve a column reference (usually inside a HAVING clause) against the + SELECT and GROUP BY clauses of the query described by 'select'. The name + resolution algorithm searches both the SELECT and GROUP BY clauses, and in + case of a name conflict prefers GROUP BY column names over SELECT names. If + both clauses contain different fields with the same names, a warning is + issued that name of 'ref' is ambiguous. We extend ANSI SQL in that when no + GROUP BY column is found, then a HAVING name is resolved as a possibly + derived SELECT column. + + NOTES + The resolution procedure is: + - Search for a column or derived column named col_ref_i [in table T_j] + in the SELECT clause of Q. + - Search for a column named col_ref_i [in table T_j] + in the GROUP BY clause of Q. + - If found different columns with the same name in GROUP BY and SELECT + - issue a warning and return the GROUP BY column, + - otherwise return the found SELECT column. + + + RETURN + NULL - there was an error, and the error was already reported + not_found_item - the item was not resolved, no error was reported + resolved item - if the item was resolved +*/ + +static Item** +resolve_ref_in_select_and_group(THD *thd, Item_ident *ref, SELECT_LEX *select) +{ + Item **group_by_ref= NULL; + Item **select_ref= NULL; + ORDER *group_list= (ORDER*) select->group_list.first; + bool ambiguous_fields= FALSE; + uint counter; + bool not_used; + + /* + Search for a column or derived column named as 'ref' in the SELECT + clause of the current select. + */ + if (!(select_ref= find_item_in_list(ref, *(select->get_item_list()), &counter, + REPORT_EXCEPT_NOT_FOUND, ¬_used))) + return NULL; /* Some error occurred. */ + + /* If this is a non-aggregated field inside HAVING, search in GROUP BY. */ + if (select->having_fix_field && !ref->with_sum_func && group_list) + { + group_by_ref= find_field_in_group_list(ref, group_list); + + /* Check if the fields found in SELECT and GROUP BY are the same field. 
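find_field_in_group_list() above scores each GROUP BY column by how many qualifiers of the reference it matches (field name, then table, then database) and flags an ambiguity when two different columns tie at the best score. A condensed standalone version of that scoring loop, with an illustrative type (ColumnRef) and integer return codes in place of the Item pointers:

#include <cstdio>
#include <cstring>
#include <vector>

struct ColumnRef {
  const char *db;      // null when the reference is unqualified
  const char *table;
  const char *field;
};

// Return the index of the best-matching GROUP BY column, -1 if none,
// -2 if the reference is ambiguous (two different columns tie).
static int find_in_group_list(const ColumnRef &find,
                              const std::vector<ColumnRef> &group_by)
{
  int best= -1, best_degree= 0;
  for (size_t i= 0; i < group_by.size(); i++)
  {
    const ColumnRef &cur= group_by[i];
    int degree= 0;
    if (strcmp(cur.field, find.field) != 0)
      continue;                    // field names must match at least
    degree++;
    if (find.table && cur.table)
    {
      if (strcmp(cur.table, find.table) != 0)
        return -1;                 // same name, different table: give up,
                                   // as the original returns NULL here
      degree++;
      if (find.db && cur.db)
      {
        if (strcmp(cur.db, find.db) != 0)
          return -1;               // same name, different database
        degree++;
      }
    }
    if (degree > best_degree)
    {
      best_degree= degree;
      best= (int) i;
    }
    else if (best >= 0 && degree == best_degree)
      return -2;                   // tie: the original also verifies the two
                                   // candidates are distinct columns (eq())
  }
  return best;
}

int main()
{
  std::vector<ColumnRef> group_by= { { nullptr, "t1", "a" },
                                     { nullptr, "t2", "a" } };
  ColumnRef ref_a= { nullptr, nullptr, "a" };
  std::printf("%d\n", find_in_group_list(ref_a, group_by));   // -2 (ambiguous)

  std::vector<ColumnRef> group_by2= { { nullptr, "t1", "a" },
                                      { nullptr, "t1", "b" } };
  ColumnRef ref_b= { nullptr, nullptr, "b" };
  std::printf("%d\n", find_in_group_list(ref_b, group_by2));  // 1
  return 0;
}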
*/ + if (group_by_ref && (select_ref != not_found_item) && + !((*group_by_ref)->eq(*select_ref, 0))) + { + ambiguous_fields= TRUE; + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_NON_UNIQ_ERROR, + ER(ER_NON_UNIQ_ERROR), ref->full_name(), + current_thd->where); + + } + } + + if (select_ref != not_found_item || group_by_ref) + { + if (select_ref != not_found_item && !ambiguous_fields) + { + DBUG_ASSERT(*select_ref); + if (! (*select_ref)->fixed) + { + my_error(ER_ILLEGAL_REFERENCE, MYF(0), + ref->name, "forward reference in item list"); + return NULL; + } + return (select->ref_pointer_array + counter); + } + if (group_by_ref) + return group_by_ref; + DBUG_ASSERT(FALSE); + return NULL; /* So there is no compiler warning. */ + } + + return (Item**) not_found_item; +} + + +/* + Resolve the name of a column reference. + + SYNOPSIS + Item_field::fix_fields() + thd [in] current thread + tables [in] the tables in a FROM clause + reference [in/out] view column if this item was resolved to a view column + + DESCRIPTION + The method resolves the column reference represented by 'this' as a column + present in one of: FROM clause, SELECT clause, GROUP BY clause of a query + Q, or in outer queries that contain Q. + + NOTES + The name resolution algorithm used is (where [T_j] is an optional table + name that qualifies the column name): + + resolve_column_reference([T_j].col_ref_i) + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q; + + if such a column is NOT found AND // Lookup in outer queries. + there are outer queries + { + for each outer query Q_k beginning from the inner-most one + { + if - Q_k is not a group query AND + - Q_k is not inside an aggregate function + OR + - Q_(k-1) is not in a HAVING or SELECT clause of Q_k + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q_k; + } + + if such a column is not found + Search for a column or derived column named col_ref_i + [in table T_j] in the SELECT and GROUP clauses of Q_k. + } + } + } + + Notice that compared to Item_ref::fix_fields, here we first search the FROM + clause, and then we search the SELECT and GROUP BY clauses. + + RETURN + TRUE if error + FALSE on success +*/ + +bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference) { enum_parsing_place place= NO_MATTER; DBUG_ASSERT(fixed == 0); if (!field) // If field is not checked { - TABLE_LIST *where= 0; - bool upward_lookup= 0; - Field *tmp= (Field *)not_found_field; - if ((tmp= find_field_in_tables(thd, this, tables, &where, 0)) == + bool upward_lookup= FALSE; + Field *from_field= (Field *)not_found_field; + if ((from_field= find_field_in_tables(thd, this, tables, reference, + IGNORE_EXCEPT_NON_UNIQUE, + !any_privileges)) == not_found_field) { - /* - We can't find table field in table list of current select, - consequently we have to find it in outer subselect(s). - We can't join lists of outer & current select, because of scope - of view rules. For example if both tables (outer & current) have - field 'field' it is not mistake to refer to this field without - mention of table name, but if we join tables in one list it will - cause error ER_NON_UNIQ_ERROR in find_field_in_tables. 
- */ SELECT_LEX *last= 0; -#ifdef EMBEDDED_LIBRARY - thd->net.last_errno= 0; -#endif TABLE_LIST *table_list; - Item **refer= (Item **)not_found_item; - uint counter; - bool not_used; - // Prevent using outer fields in subselects, that is not supported now - SELECT_LEX *cursel= (SELECT_LEX *) thd->lex->current_select; - if (cursel->master_unit()->first_select()->linkage != DERIVED_TABLE_TYPE) + Item **ref= (Item **) not_found_item; + SELECT_LEX *current_sel= (SELECT_LEX *) thd->lex->current_select; + /* + If there is an outer select, and it is not a derived table (which do + not support the use of outer fields for now), try to resolve this + reference in the outer select(s). + + We treat each subselect as a separate namespace, so that different + subselects may contain columns with the same names. The subselects are + searched starting from the innermost. + */ + if (current_sel->master_unit()->first_select()->linkage != + DERIVED_TABLE_TYPE) { - SELECT_LEX_UNIT *prev_unit= cursel->master_unit(); - for (SELECT_LEX *sl= prev_unit->outer_select(); - sl; - sl= (prev_unit= sl->master_unit())->outer_select()) + SELECT_LEX_UNIT *prev_unit= current_sel->master_unit(); + SELECT_LEX *outer_sel= prev_unit->outer_select(); + for ( ; outer_sel ; + outer_sel= (prev_unit= outer_sel->master_unit())->outer_select()) { - upward_lookup= 1; - table_list= (last= sl)->get_table_list(); - if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) + last= outer_sel; + Item_subselect *prev_subselect_item= prev_unit->item; + upward_lookup= TRUE; + + /* Search in the tables of the FROM clause of the outer select. */ + table_list= outer_sel->get_table_list(); + if (outer_sel->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) { /* - it is primary INSERT st_select_lex => skip first table - resolving + It is a primary INSERT st_select_lex => do not resolve against the + first table. */ - table_list= table_list->next; - } - - Item_subselect *prev_subselect_item= prev_unit->item; + table_list= table_list->next_local; + } place= prev_subselect_item->parsing_place; /* - check table fields only if subquery used somewhere out of HAVING - or outer SELECT do not use groupping (i.e. tables are - accessable) + Check table fields only if the subquery is used somewhere out of + HAVING, or the outer SELECT does not use grouping (i.e. tables are + accessible). 
*/
        if ((place != IN_HAVING ||
-            (sl->with_sum_func == 0 && sl->group_list.elements == 0)) &&
-            (tmp= find_field_in_tables(thd, this,
-                                       table_list, &where,
-                                       0)) != not_found_field)
-        {
-          if (!tmp)
-            return -1;
-          prev_subselect_item->used_tables_cache|= tmp->table->map;
-          prev_subselect_item->const_item_cache= 0;
-          break;
-        }
-        if (sl->resolve_mode == SELECT_LEX::SELECT_MODE &&
-            (refer= find_item_in_list(this, sl->item_list, &counter,
-                                      REPORT_EXCEPT_NOT_FOUND,
-                                      &not_used)) !=
-            (Item **) not_found_item)
+             (outer_sel->with_sum_func == 0 &&
+              outer_sel->group_list.elements == 0)) &&
+            (from_field= find_field_in_tables(thd, this, table_list,
+                                              reference,
+                                              IGNORE_EXCEPT_NON_UNIQUE,
+                                              TRUE)) !=
+            not_found_field)
        {
-          if (*refer && (*refer)->fixed) // Avoid crash in case of error
-          {
-            prev_subselect_item->used_tables_cache|= (*refer)->used_tables();
-            prev_subselect_item->const_item_cache&= (*refer)->const_item();
-          }
+          if (from_field)
+          {
+            if (from_field != view_ref_found)
+            {
+              prev_subselect_item->used_tables_cache|= from_field->table->map;
+              prev_subselect_item->const_item_cache= 0;
+            }
+            else
+            {
+              prev_subselect_item->used_tables_cache|=
+                (*reference)->used_tables();
+              prev_subselect_item->const_item_cache&=
+                (*reference)->const_item();
+            }
+          }
          break;
        }
+        /* Search in the SELECT and GROUP lists of the outer select. */
+        if (outer_sel->resolve_mode == SELECT_LEX::SELECT_MODE)
+        {
+          if (!(ref= resolve_ref_in_select_and_group(thd, this, outer_sel)))
+            return TRUE; /* Some error occurred (e.g. ambiguous names). */
+          if (ref != not_found_item)
+          {
+            DBUG_ASSERT(*ref && (*ref)->fixed);
+            prev_subselect_item->used_tables_cache|= (*ref)->used_tables();
+            prev_subselect_item->const_item_cache&= (*ref)->const_item();
+            break;
+          }
+        }
+
        // Reference is not found => depend from outer (or just error)
        prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT;
        prev_subselect_item->const_item_cache= 0;
-        if (sl->master_unit()->first_select()->linkage ==
+        if (outer_sel->master_unit()->first_select()->linkage ==
            DERIVED_TABLE_TYPE)
          break; // do not look over derived table
      }
    }
-    if (!tmp)
-      return -1;
-    else if (!refer)
-      return 1;
-    else if (tmp == not_found_field && refer == (Item **)not_found_item)
+
+    DBUG_ASSERT(ref);
+    if (!from_field)
+      return TRUE;
+    if (ref == not_found_item && from_field == not_found_field)
    {
      if (upward_lookup)
      {
-        // We can't say exactly what absend table or field
-        my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0),
-                        full_name(), thd->where);
+        // We can't say exactly which table or field is absent
+        my_error(ER_BAD_FIELD_ERROR, MYF(0), full_name(), thd->where);
      }
      else
      {
        // Call to report error
-        find_field_in_tables(thd, this, tables, &where, 1);
+        find_field_in_tables(thd, this, tables, reference, REPORT_ALL_ERRORS,
+                             TRUE);
      }
-      return -1;
+      return TRUE;
    }
-    else if (refer != (Item **)not_found_item)
+    else if (ref != not_found_item)
    {
-      if (!(*refer)->fixed)
-      {
-        my_error(ER_ILLEGAL_REFERENCE, MYF(0), name,
-                 "forward reference in item list");
-        return -1;
-      }
+      Item *save;
+      Item_ref *rf;
+
+      /* Should have been checked in resolve_ref_in_select_and_group(). */
+      DBUG_ASSERT(*ref && (*ref)->fixed);
      /*
        Here, a subset of actions performed by Item_ref::set_properties
        is not enough. So we pass ptr to NULL into Item_[direct]_ref
        constructor, so no initialization is performed, and call
        fix_fields() below.
      */
-      Item *save= last->ref_pointer_array[counter];
-      last->ref_pointer_array[counter]= NULL;
-      Item_ref *rf= (place == IN_HAVING ?
- new Item_ref(last->ref_pointer_array + counter, - (char *)table_name, - (char *)field_name) : - new Item_direct_ref(last->ref_pointer_array + counter, - (char *)table_name, - (char *)field_name)); + save= *ref; + *ref= NULL; // Don't call set_properties() + rf= (place == IN_HAVING ? + new Item_ref(ref, (char*) table_name, (char*) field_name) : + new Item_direct_ref(ref, (char*) table_name, (char*) field_name)); + *ref= save; if (!rf) - return 1; - thd->change_item_tree(ref, rf); - last->ref_pointer_array[counter]= save; + return TRUE; + thd->change_item_tree(reference, rf); /* rf is Item_ref => never substitute other items (in this case) during fix_fields() => we can use rf after fix_fields() */ - if (rf->fix_fields(thd, tables, ref) || rf->check_cols(1)) - return 1; + if (rf->fix_fields(thd, tables, reference) || rf->check_cols(1)) + return TRUE; - mark_as_dependent(thd, last, cursel, rf); - return 0; + mark_as_dependent(thd, last, current_sel, rf); + return FALSE; } else { - mark_as_dependent(thd, last, cursel, this); + mark_as_dependent(thd, last, current_sel, this); if (last->having_fix_field) { Item_ref *rf; - rf= new Item_ref((where->db[0] ? where->db : 0), - (char*) where->alias, (char*) field_name); + rf= new Item_ref((cached_table->db[0] ? cached_table->db : 0), + (char*) cached_table->alias, (char*) field_name); if (!rf) - return 1; - thd->change_item_tree(ref, rf); + return TRUE; + thd->change_item_tree(reference, rf); /* rf is Item_ref => never substitute other items (in this case) during fix_fields() => we can use rf after fix_fields() */ - return rf->fix_fields(thd, tables, ref) || rf->check_cols(1); + return rf->fix_fields(thd, tables, reference) || rf->check_cols(1); } } } - else if (!tmp) - return -1; + else if (!from_field) + return TRUE; + + /* + if it is not expression from merged VIEW we will set this field. + + We can leave expression substituted from view for next PS/SP rexecution + (i.e. do not register this substitution for reverting on cleupup() + (register_item_tree_changing())), because this subtree will be + fix_field'ed during setup_tables()->setup_ancestor() (i.e. before + all other expressions of query, and references on tables which do + not present in query will not make problems. - set_field(tmp); + Also we suppose that view can't be changed during PS/SP life. 
+ */ + if (from_field != view_ref_found) + set_field(from_field); } else if (thd->set_query_id && field->query_id != thd->query_id) { @@ -1548,11 +2040,44 @@ bool Item_field::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) field->query_id=thd->query_id; table->used_fields++; table->used_keys.intersect(field->part_of_key); - fixed= 1; } - return 0; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (any_privileges) + { + char *db, *tab; + if (cached_table->view) + { + db= cached_table->view_db.str; + tab= cached_table->view_name.str; + } + else + { + db= cached_table->db; + tab= cached_table->real_name; + } + if (!(have_privileges= (get_column_grant(thd, &field->table->grant, + db, tab, field_name) & + VIEW_ANY_ACL))) + { + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + "ANY", thd->priv_user, thd->host_or_ip, + field_name, tab); + return TRUE; + } + } +#endif + fixed= 1; + return FALSE; } + +Item *Item_field::safe_charset_converter(CHARSET_INFO *tocs) +{ + no_const_subst= 1; + return Item::safe_charset_converter(tocs); +} + + void Item_field::cleanup() { DBUG_ENTER("Item_field::cleanup"); @@ -1566,6 +2091,139 @@ void Item_field::cleanup() DBUG_VOID_RETURN; } +/* + Find a field among specified multiple equalities + + SYNOPSIS + find_item_equal() + cond_equal reference to list of multiple equalities where + the field (this object) is to be looked for + + DESCRIPTION + The function first searches the field among multiple equalities + of the current level (in the cond_equal->current_level list). + If it fails, it continues searching in upper levels accessed + through a pointer cond_equal->upper_levels. + The search terminates as soon as a multiple equality containing + the field is found. + + RETURN VALUES + First Item_equal containing the field, if success + 0, otherwise +*/ +Item_equal *Item_field::find_item_equal(COND_EQUAL *cond_equal) +{ + Item_equal *item= 0; + while (cond_equal) + { + List_iterator_fast<Item_equal> li(cond_equal->current_level); + while ((item= li++)) + { + if (item->contains(field)) + return item; + } + /* + The field is not found in any of the multiple equalities + of the current level. Look for it in upper levels + */ + cond_equal= cond_equal->upper_levels; + } + return 0; +} + + +/* + Set a pointer to the multiple equality the field reference belongs to + (if any) + + SYNOPSIS + equal_fields_propagator() + arg - reference to list of multiple equalities where + the field (this object) is to be looked for + + DESCRIPTION + The function looks for a multiple equality containing the field item + among those referenced by arg. + In the case such equality exists the function does the following. + If the found multiple equality contains a constant, then the field + reference is substituted for this constant, otherwise it sets a pointer + to the multiple equality in the field item. + + NOTES + This function is supposed to be called as a callback parameter in calls + of the transform method. + + RETURN VALUES + pointer to the replacing constant item, if the field item was substituted + pointer to the field item, otherwise. 
+*/ + +Item *Item_field::equal_fields_propagator(byte *arg) +{ + if (no_const_subst) + return this; + item_equal= find_item_equal((COND_EQUAL *) arg); + Item *item= 0; + if (item_equal) + item= item_equal->get_const(); + if (!item) + item= this; + return item; +} + + +/* + Mark the item to not be part of substitution if it's not a binary item + See comments in Arg_comparator::set_compare_func() for details +*/ + +Item *Item_field::set_no_const_sub(byte *arg) +{ + if (field->charset() != &my_charset_bin) + no_const_subst=1; + return this; +} + + +/* + Set a pointer to the multiple equality the field reference belongs to + (if any) + + SYNOPSIS + replace_equal_field_processor() + arg - a dummy parameter, is not used here + + DESCRIPTION + The function replaces a pointer to a field in the Item_field object + by a pointer to another field. + The replacement field is taken from the very beginning of + the item_equal list which the Item_field object refers to (belongs to) + If the Item_field object does not refer any Item_equal object, + nothing is done. + + NOTES + This function is supposed to be called as a callback parameter in calls + of the walk method. + + RETURN VALUES + 0 +*/ + +bool Item_field::replace_equal_field_processor(byte *arg) +{ + if (item_equal) + { + Item_field *subst= item_equal->get_first(); + if (!field->eq(subst->field)) + { + field= subst->field; + return 0; + } + } + return 0; +} + + void Item::init_make_field(Send_field *tmp_field, enum enum_field_types field_type) { @@ -1594,18 +2252,56 @@ void Item::make_field(Send_field *tmp_field) void Item_empty_string::make_field(Send_field *tmp_field) { - init_make_field(tmp_field,FIELD_TYPE_VAR_STRING); + init_make_field(tmp_field, MYSQL_TYPE_VARCHAR); } enum_field_types Item::field_type() const { - return ((result_type() == STRING_RESULT) ? FIELD_TYPE_VAR_STRING : + return ((result_type() == STRING_RESULT) ? MYSQL_TYPE_VARCHAR : (result_type() == INT_RESULT) ? 
FIELD_TYPE_LONGLONG : FIELD_TYPE_DOUBLE); } +/* + Create a field to hold a string value from an item + + SYNOPSIS + make_string_field() + table Table for which the field is created + + IMPLEMENTATION + If max_length > CONVERT_IF_BIGGER_TO_BLOB create a blob + If max_length > 0 create a varchar + If max_length == 0 create a CHAR(0) +*/ + + +Field *Item::make_string_field(TABLE *table) +{ + if (max_length > CONVERT_IF_BIGGER_TO_BLOB) + return new Field_blob(max_length, maybe_null, name, table, + collation.collation); + if (max_length > 0) + return new Field_varstring(max_length, maybe_null, name, table, + collation.collation); + return new Field_string(max_length, maybe_null, name, table, + collation.collation); +} + + +/* + Create a field based on field_type of argument + + For now, this is only used to create a field for + IFNULL(x,something) + + RETURN + 0 error + # Created field +*/ + Field *Item::tmp_table_field_from_field_type(TABLE *table) { /* @@ -1655,32 +2351,24 @@ Field *Item::tmp_table_field_from_field_type(TABLE *table) return new Field_year((char*) 0, max_length, null_ptr, 0, Field::NONE, name, table); default: - /* This case should never be choosen */ + /* This case should never be chosen */ DBUG_ASSERT(0); /* If something goes awfully wrong, it's better to get a string than die */ case MYSQL_TYPE_ENUM: case MYSQL_TYPE_SET: - case MYSQL_TYPE_VAR_STRING: - if (max_length > 255) - break; // If blob - return new Field_varstring(max_length, maybe_null, name, table, - collation.collation); case MYSQL_TYPE_STRING: - if (max_length > 255) // If blob - break; - return new Field_string(max_length, maybe_null, name, table, - collation.collation); + case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: + return make_string_field(table); case MYSQL_TYPE_TINY_BLOB: case MYSQL_TYPE_MEDIUM_BLOB: case MYSQL_TYPE_LONG_BLOB: case MYSQL_TYPE_BLOB: case MYSQL_TYPE_GEOMETRY: + return new Field_blob(max_length, maybe_null, name, table, + collation.collation); break; // Blob handled outside of case } - - /* blob is special as it's generated for both blobs and long strings */ - return new Field_blob(max_length, maybe_null, name, table, - collation.collation); } @@ -1793,7 +2481,7 @@ int Item::save_in_field(Field *field, bool no_conversions) } else if (result_type() == REAL_RESULT) { - double nr=val(); + double nr= val_real(); if (null_value) return set_field_to_null(field); field->set_notnull(); @@ -1845,20 +2533,62 @@ Item_num *Item_uint::neg() return new Item_real(name, - ((double) value), 0, max_length); } + +/* + This function is only called during parsing. 
We will signal an error if + value is not a true double value (overflow) +*/ + +Item_real::Item_real(const char *str_arg, uint length) +{ + int error; + char *end; + value= my_strntod(&my_charset_bin, (char*) str_arg, length, &end, &error); + if (error) + { + /* + Note that we depend on that str_arg is null terminated, which is true + when we are in the parser + */ + DBUG_ASSERT(str_arg[length] == 0); + my_error(ER_ILLEGAL_VALUE_FOR_TYPE, MYF(0), "double", (char*) str_arg); + } + presentation= name=(char*) str_arg; + decimals=(uint8) nr_of_decimals(str_arg); + max_length=length; + fixed= 1; +} + + int Item_real::save_in_field(Field *field, bool no_conversions) { - double nr=val(); + double nr= val_real(); if (null_value) return set_field_to_null(field); field->set_notnull(); return field->store(nr); } -/**************************************************************************** -** varbinary item -** In string context this is a binary string -** In number context this is a longlong value. -****************************************************************************/ + +void Item_real::print(String *str) +{ + if (presentation) + { + str->append(presentation); + return; + } + char buffer[20]; + String num(buffer, sizeof(buffer), &my_charset_bin); + num.set(value, decimals, &my_charset_bin); + str->append(num); +} + + +/* + hex item + In string context this is a binary string. + In number context this is a longlong value. +*/ inline uint char_val(char X) { @@ -1868,7 +2598,7 @@ inline uint char_val(char X) } -Item_varbinary::Item_varbinary(const char *str, uint str_length) +Item_hex_string::Item_hex_string(const char *str, uint str_length) { name=(char*) str-2; // Lex makes this start with 0x max_length=(str_length+1)/2; @@ -1889,7 +2619,7 @@ Item_varbinary::Item_varbinary(const char *str, uint str_length) fixed= 1; } -longlong Item_varbinary::val_int() +longlong Item_hex_string::val_int() { // following assert is redundant, because fixed=1 assigned in constructor DBUG_ASSERT(fixed == 1); @@ -1903,7 +2633,7 @@ longlong Item_varbinary::val_int() } -int Item_varbinary::save_in_field(Field *field, bool no_conversions) +int Item_hex_string::save_in_field(Field *field, bool no_conversions) { int error; field->set_notnull(); @@ -1921,6 +2651,44 @@ int Item_varbinary::save_in_field(Field *field, bool no_conversions) /* + bin item. + In string context this is a binary string. + In number context this is a longlong value. 
+*/ + +Item_bin_string::Item_bin_string(const char *str, uint str_length) +{ + const char *end= str + str_length - 1; + uchar bits= 0; + uint power= 1; + + name= (char*) str - 2; + max_length= (str_length + 7) >> 3; + char *ptr= (char*) sql_alloc(max_length + 1); + if (!ptr) + return; + str_value.set(ptr, max_length, &my_charset_bin); + ptr+= max_length - 1; + ptr[1]= 0; // Set end null for string + for (; end >= str; end--) + { + if (power == 256) + { + power= 1; + *ptr--= bits; + bits= 0; + } + if (*end == '1') + bits|= power; + power<<= 1; + } + *ptr= (char) bits; + collation.set(&my_charset_bin, DERIVATION_COERCIBLE); + fixed= 1; +} + + +/* Pack data in buffer for sending */ @@ -1937,7 +2705,7 @@ bool Item::send(Protocol *protocol, String *buffer) { bool result; enum_field_types type; - LINT_INIT(result); + LINT_INIT(result); // Will be set if null_value == 0 switch ((type=field_type())) { default: @@ -1952,6 +2720,8 @@ bool Item::send(Protocol *protocol, String *buffer) case MYSQL_TYPE_GEOMETRY: case MYSQL_TYPE_STRING: case MYSQL_TYPE_VAR_STRING: + case MYSQL_TYPE_VARCHAR: + case MYSQL_TYPE_BIT: { String *res; if ((res=val_str(buffer))) @@ -1994,15 +2764,14 @@ bool Item::send(Protocol *protocol, String *buffer) case MYSQL_TYPE_FLOAT: { float nr; - nr= (float) val(); + nr= (float) val_real(); if (!null_value) result= protocol->store(nr, decimals, buffer); break; } case MYSQL_TYPE_DOUBLE: { - double nr; - nr= val(); + double nr= val_real(); if (!null_value) result= protocol->store(nr, decimals, buffer); break; @@ -2044,190 +2813,241 @@ bool Item_field::send(Protocol *protocol, String *buffer) /* - This is used for HAVING clause - Find field in select list having the same name + Resolve the name of a reference to a column reference. + + SYNOPSIS + Item_ref::fix_fields() + thd [in] current thread + tables [in] the tables in a FROM clause + reference [in/out] view column if this item was resolved to a view column + + DESCRIPTION + The method resolves the column reference represented by 'this' as a column + present in one of: GROUP BY clause, SELECT clause, outer queries. It is + used typically for columns in the HAVING clause which are not under + aggregate functions. + + NOTES + The name resolution algorithm used is (where [T_j] is an optional table + name that qualifies the column name): + + resolve_extended([T_j].col_ref_i) + { + Search for a column or derived column named col_ref_i [in table T_j] + in the SELECT and GROUP clauses of Q. + + if such a column is NOT found AND // Lookup in outer queries. + there are outer queries + { + for each outer query Q_k beginning from the inner-most one + { + Search for a column or derived column named col_ref_i + [in table T_j] in the SELECT and GROUP clauses of Q_k. + + if such a column is not found AND + - Q_k is not a group query AND + - Q_k is not inside an aggregate function + OR + - Q_(k-1) is not in a HAVING or SELECT clause of Q_k + { + search for a column or derived column named col_ref_i + [in table T_j] in the FROM clause of Q_k; + } + } + } + } + + This procedure treats GROUP BY and SELECT clauses as one namespace for + column references in HAVING. Notice that compared to + Item_field::fix_fields, here we first search the SELECT and GROUP BY + clauses, and then we search the FROM clause. 
+ + RETURN + TRUE if error + FALSE on success */ -bool Item_ref::fix_fields(THD *thd,TABLE_LIST *tables, Item **reference) +bool Item_ref::fix_fields(THD *thd, TABLE_LIST *tables, Item **reference) { DBUG_ASSERT(fixed == 0); - uint counter; enum_parsing_place place= NO_MATTER; - bool not_used; + SELECT_LEX *current_sel= thd->lex->current_select; + if (!ref) { - TABLE_LIST *where= 0, *table_list; - bool upward_lookup= 0; - SELECT_LEX_UNIT *prev_unit= thd->lex->current_select->master_unit(); - SELECT_LEX *sl= prev_unit->outer_select(); - /* - Finding only in current select will be performed for selects that have - not outer one and for derived tables (which not support using outer - fields for now) - */ - if ((ref= find_item_in_list(this, - *(thd->lex->current_select->get_item_list()), - &counter, - ((sl && - thd->lex->current_select->master_unit()-> - first_select()->linkage != - DERIVED_TABLE_TYPE) ? - REPORT_EXCEPT_NOT_FOUND : - REPORT_ALL_ERRORS ), ¬_used)) == - (Item **)not_found_item) + SELECT_LEX_UNIT *prev_unit= current_sel->master_unit(); + SELECT_LEX *outer_sel= prev_unit->outer_select(); + ORDER *group_list= (ORDER*) current_sel->group_list.first; + bool ambiguous_fields= FALSE; + Item **group_by_ref= NULL; + + if (!(ref= resolve_ref_in_select_and_group(thd, this, current_sel))) + return TRUE; /* Some error occured (e.g. ambigous names). */ + + if (ref == not_found_item) /* This reference was not resolved. */ { - Field *tmp= (Field*) not_found_field; - SELECT_LEX *last= 0; - upward_lookup= 1; /* - We can't find table field in select list of current select, - consequently we have to find it in outer subselect(s). - We can't join lists of outer & current select, because of scope - of view rules. For example if both tables (outer & current) have - field 'field' it is not mistake to refer to this field without - mention of table name, but if we join tables in one list it will - cause error ER_NON_UNIQ_ERROR in find_item_in_list. + If there is an outer select, and it is not a derived table (which do + not support the use of outer fields for now), try to resolve this + reference in the outer select(s). + + We treat each subselect as a separate namespace, so that different + subselects may contain columns with the same names. The subselects are + searched starting from the innermost. */ - for ( ; sl ; sl= (prev_unit= sl->master_unit())->outer_select()) + if (outer_sel && (current_sel->master_unit()->first_select()->linkage != + DERIVED_TABLE_TYPE)) { - last= sl; - Item_subselect *prev_subselect_item= prev_unit->item; - if (sl->resolve_mode == SELECT_LEX::SELECT_MODE && - (ref= find_item_in_list(this, sl->item_list, - &counter, REPORT_EXCEPT_NOT_FOUND, - ¬_used)) != - (Item **)not_found_item) - { - if (*ref && (*ref)->fixed) // Avoid crash in case of error - { - prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); - prev_subselect_item->const_item_cache&= (*ref)->const_item(); - } - break; - } - table_list= sl->get_table_list(); - if (sl->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) - { - // it is primary INSERT st_select_lex => skip first table resolving - table_list= table_list->next; - } - place= prev_subselect_item->parsing_place; - /* - check table fields only if subquery used somewhere out of HAVING - or SELECT list or outer SELECT do not use groupping (i.e. 
tables - are accessable) - */ - if ((place != IN_HAVING || - (sl->with_sum_func == 0 && sl->group_list.elements == 0)) && - (tmp= find_field_in_tables(thd, this, - table_list, &where, - 0)) != not_found_field) + TABLE_LIST *table_list; + Field *from_field= (Field*) not_found_field; + SELECT_LEX *last= 0; + + for ( ; outer_sel ; + outer_sel= (prev_unit= outer_sel->master_unit())->outer_select()) { - prev_subselect_item->used_tables_cache|= tmp->table->map; + last= outer_sel; + Item_subselect *prev_subselect_item= prev_unit->item; + + /* Search in the SELECT and GROUP lists of the outer select. */ + if (outer_sel->resolve_mode == SELECT_LEX::SELECT_MODE) + { + if (!(ref= resolve_ref_in_select_and_group(thd, this, outer_sel))) + return TRUE; /* Some error occured (e.g. ambigous names). */ + if (ref != not_found_item) + { + DBUG_ASSERT(*ref && (*ref)->fixed); + prev_subselect_item->used_tables_cache|= (*ref)->used_tables(); + prev_subselect_item->const_item_cache&= (*ref)->const_item(); + break; + } + } + + /* Search in the tables of the FROM clause of the outer select. */ + table_list= outer_sel->get_table_list(); + if (outer_sel->resolve_mode == SELECT_LEX::INSERT_MODE && table_list) + /* + It is a primary INSERT st_select_lex => do not resolve against the + first table. + */ + table_list= table_list->next_local; + + place= prev_subselect_item->parsing_place; + /* + Check table fields only if the subquery is used somewhere out of + HAVING or the outer SELECT does not use grouping (i.e. tables are + accessible). + TODO: + Here we could first find the field anyway, and then test this + condition, so that we can give a better error message - + ER_WRONG_FIELD_WITH_GROUP, instead of the less informative + ER_BAD_FIELD_ERROR which we produce now. + */ + if ((place != IN_HAVING || + (!outer_sel->with_sum_func && + outer_sel->group_list.elements == 0))) + { + if ((from_field= find_field_in_tables(thd, this, table_list, + reference, + IGNORE_EXCEPT_NON_UNIQUE, + TRUE)) != + not_found_field) + { + if (from_field != view_ref_found) + { + prev_subselect_item->used_tables_cache|= from_field->table->map; + prev_subselect_item->const_item_cache= 0; + } + else + { + prev_subselect_item->used_tables_cache|= + (*reference)->used_tables(); + prev_subselect_item->const_item_cache&= + (*reference)->const_item(); + } + break; + } + } + + /* Reference is not found => depend on outer (or just error). */ + prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; prev_subselect_item->const_item_cache= 0; - break; - } - // Reference is not found => depend from outer (or just error) - prev_subselect_item->used_tables_cache|= OUTER_REF_TABLE_BIT; - prev_subselect_item->const_item_cache= 0; - if (sl->master_unit()->first_select()->linkage == - DERIVED_TABLE_TYPE) - break; // do not look over derived table - } + if (outer_sel->master_unit()->first_select()->linkage == + DERIVED_TABLE_TYPE) + break; /* Do not consider derived tables. 
*/ + } - if (!ref) - return 1; - if (!tmp) - return -1; - if (ref == (Item **)not_found_item && tmp == not_found_field) - { - if (upward_lookup) - { - // We can't say exactly what absend (table or field) - my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), - full_name(), thd->where); - } - else - { - // Call to report error - find_item_in_list(this, - *(thd->lex->current_select->get_item_list()), - &counter, REPORT_ALL_ERRORS, ¬_used); - } - ref= 0; // Safety - return 1; - } - if (tmp != not_found_field) - { - Item_field* fld; - /* - Set ref to 0 as we are replacing this item with the found item - and this will ensure we get an error if this item would be - used elsewhere - */ - ref= 0; // Safety - if (!(fld= new Item_field(tmp))) - return 1; - thd->change_item_tree(reference, fld); - mark_as_dependent(thd, last, thd->lex->current_select, fld); - return 0; - } - if (!(*ref)->fixed) - { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - "forward reference in item list"); - return -1; - } - mark_as_dependent(thd, last, thd->lex->current_select, - this); - if (place == IN_HAVING) - { - Item_ref *rf; - if (!(rf= new Item_direct_ref(last->ref_pointer_array + counter, - (char *)table_name, - (char *)field_name))) - return 1; - ref= 0; // Safety - if (rf->fix_fields(thd, tables, ref) || rf->check_cols(1)) - return 1; - thd->change_item_tree(reference, rf); - return 0; + DBUG_ASSERT(ref); + if (!from_field) + return TRUE; + if (ref == not_found_item && from_field == not_found_field) + { + my_error(ER_BAD_FIELD_ERROR, MYF(0), + this->full_name(), current_thd->where); + ref= 0; // Safety + return TRUE; + } + if (from_field != not_found_field) + { + /* + Set ref to 0 as we are replacing this item with the found item and + this will ensure we get an error if this item would be used + elsewhere + */ + ref= 0; // Safety + if (from_field != view_ref_found) + { + Item_field* fld; + if (!(fld= new Item_field(from_field))) + return TRUE; + thd->change_item_tree(reference, fld); + mark_as_dependent(thd, last, thd->lex->current_select, fld); + return FALSE; + } + /* + We can leave expression substituted from view for next PS/SP + re-execution (i.e. do not register this substitution for reverting + on cleanup() (register_item_tree_changing())), because this subtree + will be fix_field'ed during setup_tables()->setup_ancestor() + (i.e. before all other expressions of query, and references on + tables which do not present in query will not make problems. + + Also we suppose that view can't be changed during PS/SP life. + */ + } + else + { + /* Should be checked in resolve_ref_in_select_and_group(). */ + DBUG_ASSERT(*ref && (*ref)->fixed); + mark_as_dependent(thd, last, current_sel, this); + } } - ref= last->ref_pointer_array + counter; - } - else if (!ref) - return 1; - else - { - if (!(*ref)->fixed) + else { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - "forward reference in item list"); - return -1; + /* The current reference cannot be resolved in this query. */ + my_error(ER_BAD_FIELD_ERROR,MYF(0), + this->full_name(), current_thd->where); + return TRUE; } - ref= thd->lex->current_select->ref_pointer_array + counter; } } /* - The following conditional is changed as to correctly identify - incorrect references in group functions or forward references - with sub-select's / derived tables, while it prevents this - check when Item_ref is created in an expression involving - summing function, which is to be placed in the user variable. 
+ Check if this is an incorrect reference in a group function or forward + reference. Do not issue an error if this is an unnamed reference inside an + aggregate function. */ if (((*ref)->with_sum_func && name && (depended_from || - !(thd->lex->current_select->linkage != GLOBAL_OPTIONS_TYPE && - thd->lex->current_select->having_fix_field))) || + !(current_sel->linkage != GLOBAL_OPTIONS_TYPE && + current_sel->having_fix_field))) || !(*ref)->fixed) { - my_error(ER_ILLEGAL_REFERENCE, MYF(0), name, - ((*ref)->with_sum_func? - "reference on group function": - "forward reference in item list")); - return 1; + my_error(ER_ILLEGAL_REFERENCE, MYF(0), + name, ((*ref)->with_sum_func? + "reference to group function": + "forward reference in item list")); + return TRUE; } set_properties(); @@ -2244,9 +3064,23 @@ void Item_ref::set_properties() decimals= (*ref)->decimals; collation.set((*ref)->collation); with_sum_func= (*ref)->with_sum_func; + if ((*ref)->type() == FIELD_ITEM) + alias_name_used= ((Item_ident *) (*ref))->alias_name_used; + else + alias_name_used= TRUE; // it is not field, so it is was resolved by alias fixed= 1; } + +void Item_ref::cleanup() +{ + DBUG_ENTER("Item_ref::cleanup"); + Item_ident::cleanup(); + result_field= 0; + DBUG_VOID_RETURN; +} + + void Item_ref::print(String *str) { if (ref && *ref) @@ -2256,6 +3090,51 @@ void Item_ref::print(String *str) } +bool Item_ref::send(Protocol *prot, String *tmp) +{ + if (result_field) + return prot->store(result_field); + return (*ref)->send(prot, tmp); +} + + +double Item_ref::val_result() +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0.0; + return result_field->val_real(); + } + return val_real(); +} + + +longlong Item_ref::val_int_result() +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0; + return result_field->val_int(); + } + return val_int(); +} + + +String *Item_ref::str_result(String* str) +{ + if (result_field) + { + if ((null_value= result_field->is_null())) + return 0; + str->set_charset(str_value.charset()); + return result_field->val_str(str, &str_value); + } + return val_str(str); +} + + void Item_ref_null_helper::print(String *str) { str->append("<ref_null_helper>(", 18); @@ -2286,33 +3165,40 @@ bool Item_default_value::fix_fields(THD *thd, struct st_table_list *table_list, Item **items) { + Item_field *field_arg; + Field *def_field; DBUG_ASSERT(fixed == 0); + if (!arg) { fixed= 1; - return 0; + return FALSE; } if (!arg->fixed && arg->fix_fields(thd, table_list, &arg)) - return 1; + return TRUE; if (arg->type() == REF_ITEM) { Item_ref *ref= (Item_ref *)arg; if (ref->ref[0]->type() != FIELD_ITEM) { - return 1; + return TRUE; } arg= ref->ref[0]; } - Item_field *field_arg= (Item_field *)arg; - Field *def_field= (Field*) sql_alloc(field_arg->field->size_of()); - if (!def_field) - return 1; + field_arg= (Item_field *)arg; + if (field_arg->field->flags & NO_DEFAULT_VALUE_FLAG) + { + my_error(ER_NO_DEFAULT_FOR_FIELD, MYF(0), field_arg->field->field_name); + return TRUE; + } + if (!(def_field= (Field*) sql_alloc(field_arg->field->size_of()))) + return TRUE; memcpy(def_field, field_arg->field, field_arg->field->size_of()); def_field->move_field(def_field->table->default_values - def_field->table->record[0]); set_field(def_field); - return 0; + return FALSE; } void Item_default_value::print(String *str) @@ -2340,14 +3226,14 @@ bool Item_insert_value::fix_fields(THD *thd, { DBUG_ASSERT(fixed == 0); if (!arg->fixed && arg->fix_fields(thd, table_list, &arg)) - return 1; + 
return TRUE; if (arg->type() == REF_ITEM) { Item_ref *ref= (Item_ref *)arg; if (ref->ref[0]->type() != FIELD_ITEM) { - return 1; + return TRUE; } arg= ref->ref[0]; } @@ -2356,7 +3242,7 @@ bool Item_insert_value::fix_fields(THD *thd, { Field *def_field= (Field*) sql_alloc(field_arg->field->size_of()); if (!def_field) - return 1; + return TRUE; memcpy(def_field, field_arg->field, field_arg->field->size_of()); def_field->move_field(def_field->table->insert_values - def_field->table->record[0]); @@ -2369,7 +3255,7 @@ bool Item_insert_value::fix_fields(THD *thd, set_field(new Field_null(0, 0, Field::NONE, tmp_field->field_name, tmp_field->table, &my_charset_bin)); } - return 0; + return FALSE; } void Item_insert_value::print(String *str) @@ -2379,6 +3265,98 @@ void Item_insert_value::print(String *str) str->append(')'); } + +/* + Bind item representing field of row being changed in trigger + to appropriate Field object. + + SYNOPSIS + setup_field() + thd - current thread context + table - table of trigger (and where we looking for fields) + event - type of trigger event + + NOTE + This function does almost the same as fix_fields() for Item_field + but is invoked during trigger definition parsing and takes TABLE + object as its argument. If proper field was not found in table + error will be reported at fix_fields() time. +*/ +void Item_trigger_field::setup_field(THD *thd, TABLE *table, + enum trg_event_type event) +{ + uint field_idx= (uint)-1; + bool save_set_query_id= thd->set_query_id; + + /* TODO: Think more about consequences of this step. */ + thd->set_query_id= 0; + + if (find_field_in_real_table(thd, table, field_name, + strlen(field_name), 0, 0, + &field_idx)) + { + field= (row_version == OLD_ROW && event == TRG_EVENT_UPDATE) ? + table->triggers->old_field[field_idx] : + table->field[field_idx]; + } + + thd->set_query_id= save_set_query_id; +} + + +bool Item_trigger_field::eq(const Item *item, bool binary_cmp) const +{ + return item->type() == TRIGGER_FIELD_ITEM && + row_version == ((Item_trigger_field *)item)->row_version && + !my_strcasecmp(system_charset_info, field_name, + ((Item_trigger_field *)item)->field_name); +} + + +bool Item_trigger_field::fix_fields(THD *thd, + TABLE_LIST *table_list, + Item **items) +{ + /* + Since trigger is object tightly associated with TABLE object most + of its set up can be performed during trigger loading i.e. trigger + parsing! So we have little to do in fix_fields. :) + FIXME may be we still should bother about permissions here. + */ + DBUG_ASSERT(fixed == 0); + + if (field) + { + // QQ: May be this should be moved to setup_field? + set_field(field); + fixed= 1; + return 0; + } + + my_error(ER_BAD_FIELD_ERROR, MYF(0), field_name, + (row_version == NEW_ROW) ? "NEW" : "OLD"); + return 1; +} + + +void Item_trigger_field::print(String *str) +{ + str->append((row_version == NEW_ROW) ? "NEW" : "OLD", 3); + str->append('.'); + str->append(field_name); +} + + +void Item_trigger_field::cleanup() +{ + /* + Since special nature of Item_trigger_field we should not do most of + things from Item_field::cleanup() or Item_ident::cleanup() here. 
+ */ + Item::cleanup(); +} + + /* If item is a const function, calculate it and return a const item The original item is freed if not returned @@ -2430,7 +3408,7 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item) } else { // It must REAL_RESULT - double result=item->val(); + double result= item->val_real(); uint length=item->max_length,decimals=item->decimals; bool null_value=item->null_value; new_item= (null_value ? (Item*) new Item_null(name) : (Item*) @@ -2465,7 +3443,7 @@ bool field_is_equal_to_item(Field *field,Item *item) } if (res_type == INT_RESULT) return 1; // Both where of type int - double result=item->val(); + double result= item->val_real(); if (item->null_value) return 1; return result == field->val_real(); @@ -2538,7 +3516,7 @@ void Item_cache_str::store(Item *item) } -double Item_cache_str::val() +double Item_cache_str::val_real() { DBUG_ASSERT(fixed == 1); int err; @@ -2769,10 +3747,10 @@ bool Item_type_holder::join_types(THD *thd, Item *item) if (item_type == STRING_RESULT && collation.aggregate(item->collation)) { my_error(ER_CANT_AGGREGATE_2COLLATIONS, MYF(0), - old_cs, old_derivation, - item->collation.collation->name, - item->collation.derivation_name(), - "UNION"); + old_cs, old_derivation, + item->collation.collation->name, + item->collation.derivation_name(), + "UNION"); return 1; } @@ -2806,7 +3784,7 @@ uint32 Item_type_holder::real_length(Item *item) } } -double Item_type_holder::val() +double Item_type_holder::val_real() { DBUG_ASSERT(0); // should never be called return 0.0; @@ -2842,5 +3820,6 @@ void Item_result_field::cleanup() template class List<Item>; template class List_iterator<Item>; template class List_iterator_fast<Item>; +template class List_iterator_fast<Item_field>; template class List<List_item>; #endif diff --git a/sql/item.h b/sql/item.h index 237a8f7efac..2503f137355 100644 --- a/sql/item.h +++ b/sql/item.h @@ -22,6 +22,7 @@ class Protocol; struct st_table_list; void item_init(void); /* Init item functions */ +class Item_field; /* @@ -110,6 +111,7 @@ public: }; typedef bool (Item::*Item_processor)(byte *arg); +typedef Item* (Item::*Item_transformer) (byte *arg); class Item { Item(const Item &); /* Prevent use of these */ @@ -118,8 +120,9 @@ public: static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } static void *operator new(size_t size, MEM_ROOT *mem_root) { return (void*) alloc_root(mem_root, (uint) size); } - static void operator delete(void *ptr,size_t size) {} - static void operator delete(void *ptr,size_t size, MEM_ROOT *mem_root) {} + static void operator delete(void *ptr,size_t size) { TRASH(ptr, size); } + static void operator delete(void *ptr,size_t size, MEM_ROOT *mem_root) + { TRASH(ptr, size); } enum Type {FIELD_ITEM, FUNC_ITEM, SUM_FUNC_ITEM, STRING_ITEM, INT_ITEM, REAL_ITEM, NULL_ITEM, VARBIN_ITEM, @@ -127,7 +130,7 @@ public: PROC_ITEM,COND_ITEM, REF_ITEM, FIELD_STD_ITEM, FIELD_VARIANCE_ITEM, INSERT_VALUE_ITEM, SUBSELECT_ITEM, ROW_ITEM, CACHE_ITEM, TYPE_HOLDER, - PARAM_ITEM}; + PARAM_ITEM, TRIGGER_FIELD_ITEM}; enum cond_result { COND_UNDEF,COND_OK,COND_TRUE,COND_FALSE }; @@ -137,8 +140,11 @@ public: */ String str_value; my_string name; /* Name from select */ + /* Original item name (if it was renamed)*/ + my_string orig_name; Item *next; uint32 max_length; + uint name_length; /* Length of name */ uint8 marker,decimals; my_bool maybe_null; /* If item may be null */ my_bool null_value; /* if item is null */ @@ -158,18 +164,16 @@ public: optimisation changes in prepared statements */ 
Item(THD *thd, Item *item); - virtual ~Item() { name=0; } /*lint -e1509 */ + virtual ~Item() + { + name=0; + } /*lint -e1509 */ void set_name(const char *str,uint length, CHARSET_INFO *cs); + void rename(char *new_name); void init_make_field(Send_field *tmp_field,enum enum_field_types type); - virtual void cleanup() - { - DBUG_ENTER("Item::cleanup"); - DBUG_PRINT("info", ("Type: %d", (int)type())); - fixed=0; - marker= 0; - DBUG_VOID_RETURN; - } + virtual void cleanup(); virtual void make_field(Send_field *field); + Field *make_string_field(TABLE *table); virtual bool fix_fields(THD *, struct st_table_list *, Item **); /* should be used in case where we are sure that we do not need @@ -177,6 +181,7 @@ public: */ inline void quick_fix_field() { fixed= 1; } /* Function returns 1 on overflow and -1 on fatal errors */ + int save_in_field_no_warnings(Field *field, bool no_conversions); virtual int save_in_field(Field *field, bool no_conversions); virtual void save_org_in_field(Field *field) { (void) save_in_field(field, 1); } @@ -188,7 +193,7 @@ public: virtual enum_field_types field_type() const; virtual enum Type type() const =0; /* valXXX methods must return NULL or 0 or 0.0 if null_value is set. */ - virtual double val()=0; + virtual double val_real()=0; virtual longlong val_int()=0; /* Return string representation of this item object. @@ -219,7 +224,7 @@ public: virtual Field *get_tmp_table_field() { return 0; } virtual Field *tmp_table_field(TABLE *t_arg) { return 0; } virtual const char *full_name() const { return name ? name : "???"; } - virtual double val_result() { return val(); } + virtual double val_result() { return val_real(); } virtual longlong val_int_result() { return val_int(); } virtual String *str_result(String* tmp) { return val_str(tmp); } /* bit map of tables used by item */ @@ -305,9 +310,22 @@ public: return (this->*processor)(arg); } + virtual Item* transform(Item_transformer transformer, byte *arg) + { + return (this->*transformer)(arg); + } + virtual bool remove_dependence_processor(byte * arg) { return 0; } virtual bool remove_fixed(byte * arg) { fixed= 0; return 0; } + virtual bool cleanup_processor(byte *arg); + virtual bool collect_item_field_processor(byte * arg) { return 0; } + virtual Item *equal_fields_propagator(byte * arg) { return this; } + virtual Item *set_no_const_sub(byte *arg) { return this; } + virtual bool replace_equal_field_processor(byte * arg) { return 0; } + virtual Item *this_item() { return this; } /* For SPs mostly. */ + virtual Item *this_const_item() const { return const_cast<Item*>(this); } /* For SPs mostly. */ + // Row emulation virtual uint cols() { return 1; } virtual Item* el(uint i) { return this; } @@ -319,6 +337,7 @@ public: virtual void bring_value() {} Field *tmp_table_field_from_field_type(TABLE *table); + virtual Item_field *filed_for_view_update() { return 0; } virtual Item *neg_transformer(THD *thd) { return NULL; } virtual Item *safe_charset_converter(CHARSET_INFO *tocs); @@ -330,6 +349,104 @@ public: }; +// A local SP variable (incl. parameters), used in runtime +class Item_splocal : public Item +{ +private: + + uint m_offset; + LEX_STRING m_name; + +public: + + Item_splocal(LEX_STRING name, uint offset) + : m_offset(offset), m_name(name) + { + Item::maybe_null= TRUE; + } + + Item *this_item(); + Item *this_const_item() const; + + inline uint get_offset() + { + return m_offset; + } + + // Abstract methods inherited from Item. 
Just defer the call to + // the item in the frame + enum Type type() const; + + inline double val_real() + { + Item *it= this_item(); + double ret= it->val_real(); + Item::null_value= it->null_value; + return ret; + } + + inline longlong val_int() + { + Item *it= this_item(); + longlong ret= it->val_int(); + Item::null_value= it->null_value; + return ret; + } + + inline String *val_str(String *sp) + { + Item *it= this_item(); + String *ret= it->val_str(sp); + Item::null_value= it->null_value; + return ret; + } + + inline bool is_null() + { + Item *it= this_item(); + bool ret= it->is_null(); + Item::null_value= it->null_value; + return ret; + } + + inline void make_field(Send_field *field) + { + Item *it= this_item(); + + it->set_name(m_name.str, m_name.length, system_charset_info); + it->make_field(field); + } + + inline Item_result result_type() const + { + return this_const_item()->result_type(); + } + + inline bool const_item() const + { + return TRUE; + } + + inline int save_in_field(Field *field, bool no_conversions) + { + return this_item()->save_in_field(field, no_conversions); + } + + void print(String *str) + { + str->reserve(m_name.length+8); + str->append(m_name.str, m_name.length); + str->append('@'); + str->qs_append(m_offset); + } + + inline bool send(Protocol *protocol, String *str) + { + return this_item()->send(protocol, str); + } +}; + + class Item_num: public Item { public: @@ -355,6 +472,7 @@ public: const char *db_name; const char *table_name; const char *field_name; + bool alias_name_used; /* true if item was resolved against alias */ /* Cached value of index for this field in table->field array, used by prep. stmts for speeding up their re-execution. Holds NO_CACHED_FIELD_INDEX @@ -374,19 +492,37 @@ public: const char *full_name() const; void cleanup(); bool remove_dependence_processor(byte * arg); + void print(String *str); + + friend bool insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, + const char *table_name, List_iterator<Item> *it, + bool any_privileges, bool allocate_view_names); }; +class Item_equal; +class COND_EQUAL; class Item_field :public Item_ident { +protected: void set_field(Field *field); public: Field *field,*result_field; + Item_equal *item_equal; + bool no_const_subst; + /* + if any_privileges set to TRUE then here real effective privileges will + be stored + */ + uint have_privileges; + /* field need any privileges (for VIEW creation) */ + bool any_privileges; Item_field(const char *db_par,const char *table_name_par, const char *field_name_par) :Item_ident(db_par,table_name_par,field_name_par), - field(0), result_field(0) + field(0), result_field(0), item_equal(0), no_const_subst(0), + have_privileges(0), any_privileges(0) { collation.set(DERIVATION_IMPLICIT); } /* Constructor needed to process subselect with temporary tables (see Item) @@ -406,7 +542,7 @@ public: Item_field(Field *field); enum Type type() const { return FIELD_ITEM; } bool eq(const Item *item, bool binary_cmp) const; - double val(); + double val_real(); longlong val_int(); String *val_str(String*); double val_result(); @@ -434,8 +570,15 @@ public: bool get_time(TIME *ltime); bool is_null() { return field->is_null(); } Item *get_tmp_table_item(THD *thd); + bool collect_item_field_processor(byte * arg); void cleanup(); + Item_equal *find_item_equal(COND_EQUAL *cond_equal); + Item *equal_fields_propagator(byte *arg); + Item *set_no_const_sub(byte *arg); + bool replace_equal_field_processor(byte *arg); inline uint32 max_disp_length() { return field->max_length(); } + 
Item_field *filed_for_view_update() { return this; } + Item *safe_charset_converter(CHARSET_INFO *tocs); friend class Item_default_value; friend class Item_insert_value; friend class st_select_lex_unit; @@ -454,7 +597,7 @@ public: } enum Type type() const { return NULL_ITEM; } bool eq(const Item *item, bool binary_cmp) const; - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); int save_in_field(Field *field, bool no_conversions); @@ -543,7 +686,7 @@ public: enum Type type() const { return item_type; } enum_field_types field_type() const { return param_type; } - double val(); + double val_real(); longlong val_int(); String *val_str(String*); bool get_time(TIME *tm); @@ -578,7 +721,7 @@ public: */ virtual table_map used_tables() const { return state != NO_VALUE ? (table_map)0 : PARAM_TABLE_BIT; } - void print(String *str) { str->append('?'); } + void print(String *str); /* parameter never equal to other parameter of other item */ bool eq(const Item *item, bool binary_cmp) const { return 0; } bool is_null() @@ -602,7 +745,7 @@ public: enum Item_result result_type () const { return INT_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_LONGLONG; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } - double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } String *val_str(String*); int save_in_field(Field *field, bool no_conversions); bool basic_const_item() const { return 1; } @@ -614,13 +757,24 @@ public: }; +class Item_static_int_func :public Item_int +{ + const char *func_name; +public: + Item_static_int_func(const char *str_arg, longlong i, uint length) + :Item_int(NullS, i, length), func_name(str_arg) + {} + void print(String *str) { str->append(func_name); } +}; + + class Item_uint :public Item_int { public: Item_uint(const char *str_arg, uint length); Item_uint(uint32 i) :Item_int((longlong) i, 10) { unsigned_flag= 1; } - double val() + double val_real() { DBUG_ASSERT(fixed == 1); return ulonglong2double((ulonglong)value); } String *val_str(String*); Item *new_item() { return new Item_uint(name,max_length); } @@ -632,29 +786,24 @@ public: class Item_real :public Item_num { + char *presentation; public: double value; // Item_real() :value(0) {} - Item_real(const char *str_arg, uint length) :value(my_atof(str_arg)) - { - name=(char*) str_arg; - decimals=(uint8) nr_of_decimals(str_arg); - max_length=length; - fixed= 1; - } + Item_real(const char *str_arg, uint length); Item_real(const char *str,double val_arg,uint decimal_par,uint length) :value(val_arg) { - name=(char*) str; + presentation= name=(char*) str; decimals=(uint8) decimal_par; max_length=length; fixed= 1; } - Item_real(double value_par) :value(value_par) { fixed= 1; } + Item_real(double value_par) :presentation(0), value(value_par) { fixed= 1; } int save_in_field(Field *field, bool no_conversions); enum Type type() const { return REAL_ITEM; } enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } - double val() { DBUG_ASSERT(fixed == 1); return value; } + double val_real() { DBUG_ASSERT(fixed == 1); return value; } longlong val_int() { DBUG_ASSERT(fixed == 1); @@ -666,6 +815,19 @@ public: void cleanup() {} Item *new_item() { return new Item_real(name,value,decimals,max_length); } Item_num *neg() { value= -value; return this; } + void print(String *str); +}; + + +class Item_static_real_func :public Item_real +{ + const char *func_name; +public: + Item_static_real_func(const char 
*str, double val_arg, uint decimal_par, + uint length) + :Item_real(NullS, val_arg, decimal_par, length), func_name(str) + {} + void print(String *str) { str->append(func_name); } }; @@ -712,7 +874,7 @@ public: fixed= 1; } enum Type type() const { return STRING_ITEM; } - double val() + double val_real() { DBUG_ASSERT(fixed == 1); int err; @@ -733,7 +895,7 @@ public: } int save_in_field(Field *field, bool no_conversions); enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } bool basic_const_item() const { return 1; } bool eq(const Item *item, bool binary_cmp) const; Item *new_item() @@ -749,6 +911,20 @@ public: void cleanup() {} }; + +class Item_static_string_func :public Item_string +{ + const char *func_name; +public: + Item_static_string_func(const char *name_par, const char *str, uint length, + CHARSET_INFO *cs, + Derivation dv= DERIVATION_COERCIBLE) + :Item_string(NullS, str, length, cs, dv), func_name(name_par) + {} + void print(String *str) { str->append(func_name); } +}; + + /* for show tables */ class Item_datetime :public Item_string @@ -783,24 +959,31 @@ public: }; -class Item_varbinary :public Item +class Item_hex_string: public Item { public: - Item_varbinary(const char *str,uint str_length); + Item_hex_string(): Item() {} + Item_hex_string(const char *str,uint str_length); enum Type type() const { return VARBIN_ITEM; } - double val() - { DBUG_ASSERT(fixed == 1); return (double) Item_varbinary::val_int(); } + double val_real() + { DBUG_ASSERT(fixed == 1); return (double) Item_hex_string::val_int(); } longlong val_int(); bool basic_const_item() const { return 1; } String *val_str(String*) { DBUG_ASSERT(fixed == 1); return &str_value; } int save_in_field(Field *field, bool no_conversions); enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } // to prevent drop fixed flag (no need parent cleanup call) void cleanup() {} }; +class Item_bin_string: public Item_hex_string +{ +public: + Item_bin_string(const char *str,uint str_length); +}; + class Item_result_field :public Item /* Item with result field */ { public: @@ -834,7 +1017,7 @@ public: Item **ref; Item_ref(const char *db_par, const char *table_name_par, const char *field_name_par) - :Item_ident(db_par, table_name_par, field_name_par), ref(0) {} + :Item_ident(db_par, table_name_par, field_name_par), result_field(0), ref(0) {} /* This constructor is used in two scenarios: A) *item = NULL @@ -850,7 +1033,7 @@ public: with Bar, and if we have a more broader set of problems like this. 
*/ Item_ref(Item **item, const char *table_name_par, const char *field_name_par) - :Item_ident(NullS, table_name_par, field_name_par), ref(item) + :Item_ident(NullS, table_name_par, field_name_par), result_field(0), ref(item) { DBUG_ASSERT(item); if (*item) @@ -858,11 +1041,11 @@ public: } /* Constructor need to process subselect with temporary tables (see Item) */ - Item_ref(THD *thd, Item_ref *item) :Item_ident(thd, item), ref(item->ref) {} + Item_ref(THD *thd, Item_ref *item) :Item_ident(thd, item), result_field(item->result_field), ref(item->ref) {} enum Type type() const { return REF_ITEM; } bool eq(const Item *item, bool binary_cmp) const { return ref && (*ref)->eq(item, binary_cmp); } - double val() + double val_real() { DBUG_ASSERT(fixed); double tmp=(*ref)->val_result(); @@ -894,7 +1077,10 @@ public: DBUG_ASSERT(fixed); return (null_value=(*ref)->get_date_result(ltime,fuzzydate)); } - bool send(Protocol *prot, String *tmp){ return (*ref)->send(prot, tmp); } + double val_result(); + longlong val_int_result(); + String *str_result(String* tmp); + bool send(Protocol *prot, String *tmp); void make_field(Send_field *field) { (*ref)->make_field(field); } bool fix_fields(THD *, struct st_table_list *, Item **); int save_in_field(Field *field, bool no_conversions) @@ -907,13 +1093,17 @@ public: return depended_from ? OUTER_REF_TABLE_BIT : (*ref)->used_tables(); } void set_result_field(Field *field) { result_field= field; } + Field *get_tmp_table_field() { return result_field; } bool is_result_field() { return 1; } void save_in_result_field(bool no_conversions) { (*ref)->save_in_field(result_field, no_conversions); } Item *real_item() { return *ref; } + bool walk(Item_processor processor, byte *arg) + { return (*ref)->walk(processor, arg); } void print(String *str); + void cleanup(); }; @@ -930,9 +1120,9 @@ public: /* Constructor need to process subselect with temporary tables (see Item) */ Item_direct_ref(THD *thd, Item_direct_ref *item) : Item_ref(thd, item) {} - double val() + double val_real() { - double tmp=(*ref)->val(); + double tmp=(*ref)->val_real(); null_value=(*ref)->null_value; return tmp; } @@ -970,7 +1160,7 @@ public: Item_ref_null_helper(Item_in_subselect* master, Item **item, const char *table_name_par, const char *field_name_par): Item_ref(item, table_name_par, field_name_par), owner(master) {} - double val(); + double val_real(); longlong val_int(); String* val_str(String* s); bool get_date(TIME *ltime, uint fuzzydate); @@ -1038,7 +1228,7 @@ public: enum Type type() const { return COPY_STR_ITEM; } enum Item_result result_type () const { return STRING_RESULT; } enum_field_types field_type() const { return cached_field_type; } - double val() + double val_real() { int err; return (null_value ? 0.0 : @@ -1142,6 +1332,19 @@ public: return arg->walk(processor, args) || (this->*processor)(args); } + + /* + This method like the walk method traverses the item tree, but + at the same time it can replace some nodes in the tree + */ + Item *transform(Item_transformer transformer, byte *args) + { + Item *new_item= arg->transform(transformer, args); + if (!new_item) + return 0; + arg= new_item; + return (this->*transformer)(args); + } }; class Item_insert_value : public Item_field @@ -1166,6 +1369,59 @@ public: } }; + +/* + We need this two enums here instead of sql_lex.h because + at least one of them is used by Item_trigger_field interface. + + Time when trigger is invoked (i.e. before or after row actually + inserted/updated/deleted). 
+*/ +enum trg_action_time_type +{ + TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER= 1 +}; + +/* + Event on which trigger is invoked. +*/ +enum trg_event_type +{ + TRG_EVENT_INSERT= 0 , TRG_EVENT_UPDATE= 1, TRG_EVENT_DELETE= 2 +}; + +/* + Represents NEW/OLD version of field of row which is + changed/read in trigger. + + Note: For this item actual binding to Field object happens not during + fix_fields() (like for Item_field) but during parsing of trigger + definition, when table is opened, with special setup_field() call. +*/ +class Item_trigger_field : public Item_field +{ +public: + /* Is this item represents row from NEW or OLD row ? */ + enum row_version_type {OLD_ROW, NEW_ROW}; + row_version_type row_version; + /* Next in list of all Item_trigger_field's in trigger */ + Item_trigger_field *next_trg_field; + + Item_trigger_field(row_version_type row_ver_par, + const char *field_name_par): + Item_field((const char *)NULL, (const char *)NULL, field_name_par), + row_version(row_ver_par) + {} + void setup_field(THD *thd, TABLE *table, enum trg_event_type event); + enum Type type() const { return TRIGGER_FIELD_ITEM; } + bool eq(const Item *item, bool binary_cmp) const; + bool fix_fields(THD *, struct st_table_list *, Item **); + void print(String *str); + table_map used_tables() const { return (table_map)0L; } + void cleanup(); +}; + + class Item_cache: public Item { protected: @@ -1202,7 +1458,7 @@ public: Item_cache_int(): Item_cache(), value(0) {} void store(Item *item); - double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } String* val_str(String *str) { @@ -1220,7 +1476,7 @@ public: Item_cache_real(): Item_cache(), value(0) {} void store(Item *item); - double val() { DBUG_ASSERT(fixed == 1); return value; } + double val_real() { DBUG_ASSERT(fixed == 1); return value; } longlong val_int() { DBUG_ASSERT(fixed == 1); @@ -1242,7 +1498,7 @@ public: Item_cache_str(): Item_cache(), value(0) { } void store(Item *item); - double val(); + double val_real(); longlong val_int(); String* val_str(String *) { DBUG_ASSERT(fixed == 1); return value; } enum Item_result result_type() const { return STRING_RESULT; } @@ -1274,7 +1530,7 @@ public: { illegal_method_call((const char*)"make_field"); }; - double val() + double val_real() { illegal_method_call((const char*)"val"); return 0; @@ -1325,7 +1581,7 @@ public: Item_result result_type () const { return item_type; } enum Type type() const { return TYPE_HOLDER; } - double val(); + double val_real(); longlong val_int(); String *val_str(String*); bool join_types(THD *thd, Item *); diff --git a/sql/item_buff.cc b/sql/item_buff.cc index 1559cfe958e..66de26dba9a 100644 --- a/sql/item_buff.cc +++ b/sql/item_buff.cc @@ -70,7 +70,7 @@ Item_str_buff::~Item_str_buff() bool Item_real_buff::cmp(void) { - double nr=item->val(); + double nr= item->val_real(); if (null_value != item->null_value || nr != value) { null_value= item->null_value; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 3d79c16b5d0..aa98f1860b6 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -53,10 +53,10 @@ static void agg_cmp_type(Item_result *type, Item **items, uint nitems) static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname) { - my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), - c1.collation->name,c1.derivation_name(), - c2.collation->name,c2.derivation_name(), - fname); + 
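As the comment above notes, an Item_trigger_field is bound to a real Field only when the trigger's table is opened; conceptually the item just records whether it names the OLD or the NEW row image and resolves reads against the matching buffer. A toy standalone sketch of that selection (RowImage and TriggerField are illustrative stand-ins, not server types):

    #include <array>
    #include <iostream>
    #include <string>

    enum row_version_type { OLD_ROW, NEW_ROW };   // as in the patch

    // Toy "row image": one value per column, indexed by column number.
    using RowImage = std::array<std::string, 2>;

    // Remembers OLD vs NEW plus a column, and reads from whichever image
    // the trigger runtime hands it.
    struct TriggerField {
      row_version_type row_version;
      int column;
      const std::string &read(const RowImage &old_row,
                              const RowImage &new_row) const {
        return (row_version == NEW_ROW ? new_row : old_row)[column];
      }
    };

    int main() {
      RowImage before = {"alice", "10"};   // OLD row, e.g. prior to an UPDATE
      RowImage after  = {"alice", "25"};   // NEW row
      TriggerField old_qty{OLD_ROW, 1}, new_qty{NEW_ROW, 1};
      std::cout << old_qty.read(before, after) << " -> "
                << new_qty.read(before, after) << "\n";   // 10 -> 25
    }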
my_error(ER_CANT_AGGREGATE_2COLLATIONS, MYF(0), + c1.collation->name,c1.derivation_name(), + c2.collation->name,c2.derivation_name(), + fname); } @@ -104,7 +104,7 @@ Item_bool_func2* Le_creator::create(Item *a, Item *b) const longlong Item_func_not::val_int() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); null_value=args[0]->null_value; return ((!null_value && value == 0) ? 1 : 0); } @@ -116,7 +116,7 @@ longlong Item_func_not::val_int() longlong Item_func_not_all::val_int() { DBUG_ASSERT(fixed == 1); - double value= args[0]->val(); + double value= args[0]->val_real(); /* return TRUE if there was records in underlaying select in max/min @@ -154,7 +154,7 @@ void Item_func_not_all::print(String *str) longlong Item_func_nop_all::val_int() { DBUG_ASSERT(fixed == 1); - double value= args[0]->val(); + longlong value= args[0]->val_int(); /* return FALSE if there was records in underlaying select in max/min @@ -207,12 +207,26 @@ void Item_bool_func2::fix_length_and_dec() if (!args[0] || !args[1]) return; + /* + We allow to convert to Unicode character sets in some cases. + The conditions when conversion is possible are: + - arguments A and B have different charsets + - A wins according to coercibility rules + - character set of A is superset for character set of B + + If all of the above is true, then it's possible to convert + B into the character set of A, and then compare according + to the collation of A. + */ + + DTCollation coll; if (args[0]->result_type() == STRING_RESULT && args[1]->result_type() == STRING_RESULT && agg_arg_charsets(coll, args, 2, MY_COLL_CMP_CONV)) return; - + + // Make a special case of compare with fields to get nicer DATE comparisons if (functype() == LIKE_FUNC) // Disable conversion in case of LIKE function. @@ -234,7 +248,7 @@ void Item_bool_func2::fix_length_and_dec() } } } - if (args[1]->type() == FIELD_ITEM) + if (args[1]->type() == FIELD_ITEM /* && !args[1]->const_item() */) { Field *field=((Item_field*) args[1])->field; if (field->can_be_compared_as_longlong()) @@ -299,6 +313,17 @@ int Arg_comparator::set_compare_func(Item_bool_func2 *item, Item_result type) func= &Arg_comparator::compare_binary_string; else if (func == &Arg_comparator::compare_e_string) func= &Arg_comparator::compare_e_binary_string; + + /* + As this is binary comparsion, mark all fields that they can't be + transformed. Otherwise we would get into trouble with comparisons + like: + WHERE col= 'j' AND col LIKE BINARY 'j' + which would be transformed to: + WHERE col= 'j' + */ + (*a)->transform(&Item::set_no_const_sub, (byte*) 0); + (*b)->transform(&Item::set_no_const_sub, (byte*) 0); } } else if (type == INT_RESULT) @@ -393,10 +418,10 @@ int Arg_comparator::compare_e_binary_string() int Arg_comparator::compare_real() { - double val1= (*a)->val(); + double val1= (*a)->val_real(); if (!(*a)->null_value) { - double val2= (*b)->val(); + double val2= (*b)->val_real(); if (!(*b)->null_value) { owner->null_value= 0; @@ -411,8 +436,8 @@ int Arg_comparator::compare_real() int Arg_comparator::compare_e_real() { - double val1= (*a)->val(); - double val2= (*b)->val(); + double val1= (*a)->val_real(); + double val2= (*b)->val_real(); if ((*a)->null_value || (*b)->null_value) return test((*a)->null_value && (*b)->null_value); return test(val1 == val2); @@ -576,7 +601,7 @@ bool Item_in_optimizer::fix_left(THD *thd, If it is preparation PS only then we do not know values of parameters => cant't get there values and do not need that values. */ - if (! 
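The set_no_const_sub marking above keeps the optimizer from propagating a constant taken from a case-insensitive equality into a BINARY comparison, as in the quoted WHERE col= 'j' AND col LIKE BINARY 'j' example. A small standalone illustration of why that substitution changes results, with strcasecmp/strcmp as rough stand-ins for a case-insensitive collation versus a binary one (strcasecmp is POSIX, from <strings.h>):

    #include <cstdio>
    #include <cstring>
    #include <strings.h>

    int main() {
      const char *col = "J";   // value stored in the column
      const char *lit = "j";   // constant from the case-insensitive equality

      // col = 'j' under a case-insensitive collation: true.
      bool ci_equal = (strcasecmp(col, lit) == 0);

      // col LIKE BINARY 'j' compares byte for byte: false for "J".
      bool binary_on_column = (strcmp(col, lit) == 0);

      // Substituting the constant for the column would turn the binary
      // predicate into 'j' = 'j', which is trivially true.
      bool binary_after_subst = (strcmp(lit, lit) == 0);

      std::printf("ci=%d binary(col)=%d binary(substituted)=%d\n",
                  ci_equal, binary_on_column, binary_after_subst);
    }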
thd->current_arena->is_stmt_prepare()) + if (!thd->only_prepare()) cache->store(args[0]); if (cache->cols() == 1) { @@ -609,17 +634,17 @@ bool Item_in_optimizer::fix_fields(THD *thd, struct st_table_list *tables, { DBUG_ASSERT(fixed == 0); if (fix_left(thd, tables, ref)) - return 1; + return TRUE; if (args[0]->maybe_null) maybe_null=1; if (!args[1]->fixed && args[1]->fix_fields(thd, tables, args+1)) - return 1; + return TRUE; Item_in_subselect * sub= (Item_in_subselect *)args[1]; if (args[0]->cols() != sub->engine->cols()) { my_error(ER_OPERAND_COLUMNS, MYF(0), args[0]->cols()); - return 1; + return TRUE; } if (args[1]->maybe_null) maybe_null=1; @@ -628,7 +653,7 @@ bool Item_in_optimizer::fix_fields(THD *thd, struct st_table_list *tables, not_null_tables_cache|= args[1]->not_null_tables(); const_item_cache&= args[1]->const_item(); fixed= 1; - return 0; + return FALSE; } @@ -764,7 +789,7 @@ void Item_func_interval::fix_length_and_dec() (intervals=(double*) sql_alloc(sizeof(double)*(row->cols()-1)))) { for (uint i=1 ; i < row->cols(); i++) - intervals[i-1]=row->el(i)->val(); + intervals[i-1]= row->el(i)->val_real(); } } maybe_null= 0; @@ -786,7 +811,7 @@ void Item_func_interval::fix_length_and_dec() longlong Item_func_interval::val_int() { DBUG_ASSERT(fixed == 1); - double value= row->el(0)->val(); + double value= row->el(0)->val_real(); uint i; if (row->el(0)->null_value) @@ -809,7 +834,7 @@ longlong Item_func_interval::val_int() for (i=1 ; i < row->cols() ; i++) { - if (row->el(i)->val() > value) + if (row->el(i)->val_real() > value) return i-1; } return i-1; @@ -883,7 +908,7 @@ longlong Item_func_between::val_int() } else if (cmp_type == INT_RESULT) { - longlong value=args[0]->val_int(),a,b; + longlong value=args[0]->val_int(), a, b; if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ a=args[1]->val_int(); @@ -903,11 +928,11 @@ longlong Item_func_between::val_int() } else { - double value=args[0]->val(),a,b; + double value= args[0]->val_real(),a,b; if ((null_value=args[0]->null_value)) return 0; /* purecov: inspected */ - a=args[1]->val(); - b=args[2]->val(); + a= args[1]->val_real(); + b= args[2]->val_real(); if (!args[1]->null_value && !args[2]->null_value) return (value >= a && value <= b) ? 1 : 0; if (args[1]->null_value && args[2]->null_value) @@ -964,16 +989,16 @@ Field *Item_func_ifnull::tmp_table_field(TABLE *table) } double -Item_func_ifnull::val() +Item_func_ifnull::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if (!args[0]->null_value) { null_value=0; return value; } - value=args[1]->val(); + value= args[1]->val_real(); if ((null_value=args[1]->null_value)) return 0.0; return value; @@ -1041,7 +1066,7 @@ Item_func_if::fix_length_and_dec() if (cached_result_type == STRING_RESULT) { if (agg_arg_charsets(collation, args+1, 2, MY_COLL_ALLOW_CONV)) - return; + return; } else { @@ -1052,11 +1077,11 @@ Item_func_if::fix_length_and_dec() double -Item_func_if::val() +Item_func_if::val_real() { DBUG_ASSERT(fixed == 1); Item *arg= args[0]->val_int() ? 
args[1] : args[2]; - double value=arg->val(); + double value= arg->val_real(); null_value=arg->null_value; return value; } @@ -1105,7 +1130,7 @@ Item_func_nullif::fix_length_and_dec() */ double -Item_func_nullif::val() +Item_func_nullif::val_real() { DBUG_ASSERT(fixed == 1); double value; @@ -1114,7 +1139,7 @@ Item_func_nullif::val() null_value=1; return 0.0; } - value=args[0]->val(); + value= args[0]->val_real(); null_value=args[0]->null_value; return value; } @@ -1189,7 +1214,7 @@ Item *Item_func_case::find_item(String *str) return else_expr_num != -1 ? args[else_expr_num] : 0; break; case REAL_RESULT: - first_expr_real= args[first_expr_num]->val(); + first_expr_real= args[first_expr_num]->val_real(); if (args[first_expr_num]->null_value) return else_expr_num != -1 ? args[else_expr_num] : 0; break; @@ -1222,7 +1247,7 @@ Item *Item_func_case::find_item(String *str) return args[i+1]; break; case REAL_RESULT: - if (args[i]->val()==first_expr_real && !args[i]->null_value) + if (args[i]->val_real() == first_expr_real && !args[i]->null_value) return args[i+1]; break; case ROW_RESULT: @@ -1274,7 +1299,7 @@ longlong Item_func_case::val_int() return res; } -double Item_func_case::val() +double Item_func_case::val_real() { DBUG_ASSERT(fixed == 1); char buff[MAX_FIELD_WIDTH]; @@ -1287,7 +1312,7 @@ double Item_func_case::val() null_value=1; return 0; } - res=item->val(); + res= item->val_real(); null_value=item->null_value; return res; } @@ -1300,8 +1325,10 @@ void Item_func_case::fix_length_and_dec() if (!(agg= (Item**) sql_alloc(sizeof(Item*)*(ncases+1)))) return; - // Aggregate all THEN and ELSE expression types - // and collations when string result + /* + Aggregate all THEN and ELSE expression types + and collations when string result + */ for (nagg= 0 ; nagg < ncases/2 ; nagg++) agg[nagg]= args[nagg*2+1]; @@ -1408,13 +1435,13 @@ longlong Item_func_coalesce::val_int() return 0; } -double Item_func_coalesce::val() +double Item_func_coalesce::val_real() { DBUG_ASSERT(fixed == 1); null_value=0; for (uint i=0 ; i < arg_count ; i++) { - double res=args[i]->val(); + double res= args[i]->val_real(); if (!args[i]->null_value) return res; } @@ -1567,12 +1594,12 @@ in_double::in_double(uint elements) void in_double::set(uint pos,Item *item) { - ((double*) base)[pos]=item->val(); + ((double*) base)[pos]= item->val_real(); } byte *in_double::get_value(Item *item) { - tmp= item->val(); + tmp= item->val_real(); if (item->null_value) return 0; /* purecov: inspected */ return (byte*) &tmp; @@ -1621,7 +1648,7 @@ cmp_item* cmp_item_row::make_same() cmp_item_row::~cmp_item_row() { DBUG_ENTER("~cmp_item_row"); - DBUG_PRINT("enter",("this: %lx", this)); + DBUG_PRINT("enter",("this: 0x%lx", this)); if (comparators) { for (uint i= 0; i < n; i++) @@ -1734,7 +1761,7 @@ static int srtcmp_in(CHARSET_INFO *cs, const String *x,const String *y) { return cs->coll->strnncollsp(cs, (uchar *) x->ptr(),x->length(), - (uchar *) y->ptr(),y->length()); + (uchar *) y->ptr(),y->length(), 0); } @@ -1956,7 +1983,7 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) char buff[sizeof(char*)]; // Max local vars in function #endif not_null_tables_cache= used_tables_cache= 0; - const_item_cache= 0; + const_item_cache= 1; /* and_table_cache is the value that Item_cond_or() returns for not_null_tables() @@ -1964,7 +1991,7 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) and_tables_cache= ~(table_map) 0; if (check_stack_overrun(thd, buff)) - return 1; // Fatal error flag is set! 
+ return TRUE; // Fatal error flag is set! while ((item=li++)) { table_map tmp_table_map; @@ -1982,12 +2009,12 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) if ((!item->fixed && item->fix_fields(thd, tables, li.ref())) || (item= *li.ref())->check_cols(1)) - return 1; /* purecov: inspected */ + return TRUE; /* purecov: inspected */ used_tables_cache|= item->used_tables(); tmp_table_map= item->not_null_tables(); not_null_tables_cache|= tmp_table_map; and_tables_cache&= tmp_table_map; - const_item_cache&= item->const_item(); + const_item_cache&= item->const_item(); with_sum_func= with_sum_func || item->with_sum_func; if (item->maybe_null) maybe_null=1; @@ -1995,7 +2022,7 @@ Item_cond::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) thd->lex->current_select->cond_count+= list.elements; fix_length_and_dec(); fixed= 1; - return 0; + return FALSE; } bool Item_cond::walk(Item_processor processor, byte *arg) @@ -2008,13 +2035,51 @@ bool Item_cond::walk(Item_processor processor, byte *arg) return Item_func::walk(processor, arg); } + +/* + Transform an Item_cond object with a transformer callback function + + SYNOPSIS + transform() + transformer the transformer callback function to be applied to the nodes + of the tree of the object + arg parameter to be passed to the transformer + + DESCRIPTION + The function recursively applies the transform method with the + same transformer to each member item of the codition list. + If the call of the method for a member item returns a new item + the old item is substituted for a new one. + After this the transform method is applied to the root node + of the Item_cond object. + + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_cond::transform(Item_transformer transformer, byte *arg) +{ + List_iterator<Item> li(list); + Item *item; + while ((item= li++)) + { + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + if (new_item != item) + li.replace(new_item); + } + return Item_func::transform(transformer, arg); +} + + void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields) { List_iterator<Item> li(list); Item *item; used_tables_cache=0; - const_item_cache=0; + const_item_cache=1; while ((item=li++)) { if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) @@ -2054,7 +2119,7 @@ void Item_cond::update_used_tables() { item->update_used_tables(); used_tables_cache|= item->used_tables(); - const_item_cache&= item->const_item(); + const_item_cache&= item->const_item(); } } @@ -2310,12 +2375,12 @@ bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) DBUG_ASSERT(fixed == 0); if (Item_bool_func2::fix_fields(thd, tlist, ref) || escape_item->fix_fields(thd, tlist, &escape_item)) - return 1; + return TRUE; if (!escape_item->const_during_execution()) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"ESCAPE"); - return 1; + return TRUE; } if (escape_item->const_item()) @@ -2333,7 +2398,7 @@ bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) { String* res2 = args[1]->val_str(&tmp_value2); if (!res2) - return 0; // Null argument + return FALSE; // Null argument const size_t len = res2->length(); const char* first = res2->ptr(); @@ -2366,7 +2431,7 @@ bool Item_func_like::fix_fields(THD *thd, TABLE_LIST *tlist, Item ** ref) } } } - return 0; + return FALSE; } #ifdef USE_REGEX @@ -2379,13 +2444,13 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) args[0]->fix_fields(thd, tables, args)) || 
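Seeding const_item_cache with 1 before the &= folds above matters because true is the identity element of AND: starting from 0, the conjunction could never be reported as constant even when every member is. A tiny sketch of the difference:

    #include <cstdio>
    #include <vector>

    // Fold "is the whole condition constant?" over its members with &=.
    static bool all_const(const std::vector<bool> &members, bool seed) {
      bool cache = seed;
      for (bool c : members)
        cache &= c;      // mirrors const_item_cache&= item->const_item();
      return cache;
    }

    int main() {
      std::vector<bool> members = {true, true, true};  // all conjuncts constant
      std::printf("seed=1 -> %d, seed=0 -> %d\n",
                  (int) all_const(members, true),    // 1: correctly constant
                  (int) all_const(members, false));  // 0: stuck at "not constant"
    }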
args[0]->check_cols(1) || (!args[1]->fixed && args[1]->fix_fields(thd,tables, args + 1)) || args[1]->check_cols(1)) - return 1; /* purecov: inspected */ + return TRUE; /* purecov: inspected */ with_sum_func=args[0]->with_sum_func || args[1]->with_sum_func; max_length= 1; decimals= 0; if (agg_arg_charsets(cmp_collation, args, 2, MY_COLL_CMP_CONV)) - return 1; + return TRUE; used_tables_cache=args[0]->used_tables() | args[1]->used_tables(); not_null_tables_cache= (args[0]->not_null_tables() | @@ -2399,7 +2464,7 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) if (args[1]->null_value) { // Will always return NULL maybe_null=1; - return 0; + return FALSE; } int error; if ((error= regcomp(&preg,res->c_ptr(), @@ -2410,8 +2475,8 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) cmp_collation.collation))) { (void) regerror(error,&preg,buff,sizeof(buff)); - my_printf_error(ER_REGEXP_ERROR,ER(ER_REGEXP_ERROR),MYF(0),buff); - return 1; + my_error(ER_REGEXP_ERROR, MYF(0), buff); + return TRUE; } regex_compiled=regex_is_const=1; maybe_null=args[0]->maybe_null; @@ -2419,7 +2484,7 @@ Item_func_regex::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) else maybe_null=1; fixed= 1; - return 0; + return FALSE; } @@ -2862,3 +2927,290 @@ Item *Item_bool_rowready_func2::negated_item() DBUG_ASSERT(0); return 0; } + +Item_equal::Item_equal(Item_field *f1, Item_field *f2) + : Item_bool_func(), const_item(0), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + fields.push_back(f1); + fields.push_back(f2); +} + +Item_equal::Item_equal(Item *c, Item_field *f) + : Item_bool_func(), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + fields.push_back(f); + const_item= c; +} + +Item_equal::Item_equal(Item_equal *item_equal) + : Item_bool_func(), eval_item(0), cond_false(0) +{ + const_item_cache= 0; + List_iterator_fast<Item_field> li(item_equal->fields); + Item_field *item; + while ((item= li++)) + { + fields.push_back(item); + } + const_item= item_equal->const_item; + cond_false= item_equal->cond_false; +} + +void Item_equal::add(Item *c) +{ + if (cond_false) + return; + if (!const_item) + { + const_item= c; + return; + } + Item_func_eq *func= new Item_func_eq(c, const_item); + func->set_cmp_func(); + func->quick_fix_field(); + cond_false = !(func->val_int()); +} + +void Item_equal::add(Item_field *f) +{ + fields.push_back(f); +} + +uint Item_equal::members() +{ + uint count= 0; + List_iterator_fast<Item_field> li(fields); + Item_field *item; + while ((item= li++)) + count++; + return count; +} + + +/* + Check whether a field is referred in the multiple equality + + SYNOPSIS + contains() + field field whose occurence is to be checked + + DESCRIPTION + The function checks whether field is occured in the Item_equal object + + RETURN VALUES + 1 if nultiple equality contains a reference to field + 0 otherwise +*/ + +bool Item_equal::contains(Field *field) +{ + List_iterator_fast<Item_field> it(fields); + Item_field *item; + while ((item= it++)) + { + if (field->eq(item->field)) + return 1; + } + return 0; +} + + +/* + Join members of another Item_equal object + + SYNOPSIS + merge() + item multiple equality whose members are to be joined + + DESCRIPTION + The function actually merges two multiple equalitis. + After this operation the Item_equal object additionally contains + the field items of another item of the type Item_equal. + If the optional constant items are not equal the cond_false flag is + set to 1. 
+ + RETURN VALUES + none +*/ + +void Item_equal::merge(Item_equal *item) +{ + fields.concat(&item->fields); + Item *c= item->const_item; + if (c) + { + /* + The flag cond_false will be set to 1 after this, if + the multiple equality already contains a constant and its + value is not equal to the value of c. + */ + add(const_item); + } + cond_false|= item->cond_false; +} + + +/* + Order field items in multiple equality according to a sorting criteria + + SYNOPSIS + sort() + cmp function to compare field item + arg context extra parameter for the cmp function + + DESCRIPTION + The function perform ordering of the field items in the Item_equal + object according to the criteria determined by the cmp callback parameter. + If cmp(item_field1,item_field2,arg)<0 than item_field1 must be + placed after item_fiel2. + + IMPLEMENTATION + The function sorts field items by the exchange sort algorithm. + The list of field items is looked through and whenever two neighboring + members follow in a wrong order they are swapped. This is performed + again and again until we get all members in a right order. + + RETURN VALUES + None +*/ + +void Item_equal::sort(Item_field_cmpfunc cmp, void *arg) +{ + bool swap; + List_iterator<Item_field> it(fields); + do + { + Item_field *item1= it++; + Item_field **ref1= it.ref(); + Item_field *item2; + + swap= FALSE; + while ((item2= it++)) + { + Item_field **ref2= it.ref(); + if (cmp(item1, item2, arg) < 0) + { + Item_field *item= *ref1; + *ref1= *ref2; + *ref2= item; + swap= TRUE; + } + else + { + item1= item2; + ref1= ref2; + } + } + it.rewind(); + } while (swap); +} + +bool Item_equal::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) +{ + List_iterator_fast<Item_field> li(fields); + Item *item; + not_null_tables_cache= used_tables_cache= 0; + const_item_cache= 0; + while ((item=li++)) + { + table_map tmp_table_map; + used_tables_cache|= item->used_tables(); + tmp_table_map= item->not_null_tables(); + not_null_tables_cache|= tmp_table_map; + if (item->maybe_null) + maybe_null=1; + } + fix_length_and_dec(); + fixed= 1; + return 0; +} + +void Item_equal::update_used_tables() +{ + List_iterator_fast<Item_field> li(fields); + Item *item; + not_null_tables_cache= used_tables_cache= 0; + if ((const_item_cache= cond_false)) + return; + while ((item=li++)) + { + item->update_used_tables(); + used_tables_cache|= item->used_tables(); + const_item_cache&= item->const_item(); + } +} + +longlong Item_equal::val_int() +{ + if (cond_false) + return 0; + List_iterator_fast<Item_field> it(fields); + Item *item= const_item ? const_item : it++; + if ((null_value= item->null_value)) + return 0; + eval_item->store_value(item); + while ((item= it++)) + { + if ((null_value= item->null_value) || eval_item->cmp(item)) + return 0; + } + return 1; +} + +void Item_equal::fix_length_and_dec() +{ + Item *item= const_item ? 
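Item_equal::sort() above is an exchange (bubble-style) sort driven by a comparator callback plus an opaque argument, where a negative comparator result means the first element must be placed after the second; passes repeat until nothing is swapped. A standalone sketch of the same loop over a plain vector, with a hypothetical ascending criterion standing in for the real one:

    #include <cstdio>
    #include <utility>
    #include <vector>

    // Comparator contract mirrored from the patch: a negative result means
    // the first element has to go after the second one.
    typedef int (*cmp_func)(int a, int b, void *arg);

    static void exchange_sort(std::vector<int> &v, cmp_func cmp, void *arg) {
      bool swapped;
      do {
        swapped = false;
        for (size_t i = 0; i + 1 < v.size(); i++) {
          if (cmp(v[i], v[i + 1], arg) < 0) {   // wrong order: swap neighbours
            std::swap(v[i], v[i + 1]);
            swapped = true;
          }
        }
      } while (swapped);                        // stop after a clean pass
    }

    // Hypothetical criterion: ascending order.
    static int ascending(int a, int b, void *) { return a > b ? -1 : 1; }

    int main() {
      std::vector<int> v = {3, 1, 2};
      exchange_sort(v, ascending, nullptr);
      for (int x : v) std::printf("%d ", x);    // 1 2 3
      std::printf("\n");
    }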
const_item : get_first(); + eval_item= cmp_item::get_comparator(item); + if (item->result_type() == STRING_RESULT) + eval_item->cmp_charset= cmp_collation.collation; +} + +bool Item_equal::walk(Item_processor processor, byte *arg) +{ + List_iterator_fast<Item_field> it(fields); + Item *item; + while ((item= it++)) + if (item->walk(processor, arg)) + return 1; + return Item_func::walk(processor, arg); +} + +Item *Item_equal::transform(Item_transformer transformer, byte *arg) +{ + List_iterator<Item_field> it(fields); + Item *item; + while ((item= it++)) + { + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + if (new_item != item) + it.replace((Item_field *) new_item); + } + return Item_func::transform(transformer, arg); +} + +void Item_equal::print(String *str) +{ + str->append(func_name()); + str->append('('); + List_iterator_fast<Item_field> it(fields); + Item *item; + if (const_item) + const_item->print(str); + else + { + item= it++; + item->print(str); + } + while ((item= it++)) + { + str->append(','); + str->append(' '); + item->print(str); + } + str->append(')'); +} + diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 6834799688d..a6fe9e44a3d 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -27,6 +27,8 @@ class Arg_comparator; typedef int (Arg_comparator::*arg_cmp_func)(); +typedef int (*Item_field_cmpfunc)(Item_field *f1, Item_field *f2, void *arg); + class Arg_comparator: public Sql_alloc { Item **a, **b; @@ -230,6 +232,40 @@ public: }; class Item_maxmin_subselect; + +/* + The class Item_func_trig_cond is used for guarded predicates + which are employed only for internal purposes. + A guarded predicates is an object consisting of an a regular or + a guarded predicate P and a pointer to a boolean guard variable g. + A guarded predicate P/g is evaluated to true if the value of the + guard g is false, otherwise it is evaluated to the same value that + the predicate P: val(P/g)= g ? val(P):true. + Guarded predicates allow us to include predicates into a conjunction + conditionally. Currently they are utilized for pushed down predicates + in queries with outer join operations. + + In the future, probably, it makes sense to extend this class to + the objects consisting of three elements: a predicate P, a pointer + to a variable g and a firing value s with following evaluation + rule: val(P/g,s)= g==s? val(P) : true. It will allow us to build only + one item for the objects of the form P/g1/g2... + + Objects of this class are built only for query execution after + the execution plan has been already selected. That's why this + class needs only val_int out of generic methods. +*/ + +class Item_func_trig_cond: public Item_bool_func +{ + bool *trig_var; +public: + Item_func_trig_cond(Item *a, bool *f) : Item_bool_func(a) { trig_var= f; } + longlong val_int() { return *trig_var ? 
args[0]->val_int() : 1; } + enum Functype functype() const { return TRIG_COND_FUNC; }; + const char *func_name() const { return "trigcond"; }; +}; + class Item_func_not_all :public Item_func_not { /* allow to check presence od values in max/min optimisation */ @@ -406,7 +442,7 @@ public: Item_func_ifnull(Item *a,Item *b) :Item_func(a,b), cached_result_type(INT_RESULT) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); enum Item_result result_type () const { return cached_result_type; } @@ -425,7 +461,7 @@ public: Item_func_if(Item *a,Item *b,Item *c) :Item_func(a,b,c), cached_result_type(INT_RESULT) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); enum Item_result result_type () const { return cached_result_type; } @@ -448,7 +484,7 @@ public: Item_func_nullif(Item *a,Item *b) :Item_bool_func2(a,b), cached_result_type(INT_RESULT) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); enum Item_result result_type () const { return cached_result_type; } @@ -467,7 +503,7 @@ public: Item_func_coalesce(List<Item> &list) :Item_func(list),cached_result_type(INT_RESULT) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *); void fix_length_and_dec(); @@ -503,7 +539,7 @@ public: } set_arguments(list); } - double val(); + double val_real(); longlong val_int(); String *val_str(String *); void fix_length_and_dec(); @@ -660,11 +696,11 @@ class cmp_item_real :public cmp_item public: void store_value(Item *item) { - value= item->val(); + value= item->val_real(); } int cmp(Item *arg) { - return value != arg->val(); + return value != arg->val_real(); } int compare(cmp_item *c) { @@ -837,7 +873,7 @@ public: } const char *func_name() const { return "isnotnull"; } optimize_type select_optimize() const { return OPTIMIZE_NULL; } - table_map not_null_tables() const { return 0; } + table_map not_null_tables() const { return used_tables(); } Item *neg_transformer(THD *thd); void print(String *str); CHARSET_INFO *compare_collation() { return args[0]->collation.collation; } @@ -936,6 +972,7 @@ public: Item_cond(List<Item> &nlist) :Item_bool_func(), list(nlist), abort_on_null(0) {} bool add(Item *item) { return list.push_back(item); } + void add_at_head(List<Item> *nlist) { list.prepand(nlist); } bool fix_fields(THD *, struct st_table_list *, Item **ref); enum Type type() const { return COND_ITEM; } @@ -944,17 +981,156 @@ public: void update_used_tables(); void print(String *str); void split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields); - friend int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds); + friend int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, + COND **conds); void top_level_item() { abort_on_null=1; } void copy_andor_arguments(THD *thd, Item_cond *item); bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); void neg_arguments(THD *thd); }; +/* + The class Item_equal is used to represent conjuctions of equality + predicates of the form field1 = field2, and field=const in where + conditions and on expressions. + + All equality predicates of the form field1=field2 contained in a + conjuction are substituted for a sequence of items of this class. + An item of this class Item_equal(f1,f2,...fk) respresents a + multiple equality f1=f2=...=fk. 
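The guarded predicate Item_func_trig_cond introduced a few hunks earlier evaluates as val(P/g) = g ? val(P) : true, i.e. while the guard is off the wrapped predicate is simply treated as satisfied. A minimal standalone sketch of that rule with toy types (GuardedPredicate is illustrative, not the server's class):

    #include <cstdio>
    #include <functional>

    // A predicate P plus a pointer to its guard flag g.
    struct GuardedPredicate {
      std::function<bool()> pred;
      bool *guard;
      bool eval() const { return *guard ? pred() : true; }  // val(P/g)
    };

    int main() {
      bool guard = false;
      GuardedPredicate gp{[] { return false; }, &guard};   // P itself is false
      std::printf("guard off: %d\n", (int) gp.eval());     // 1: P is bypassed
      guard = true;
      std::printf("guard on:  %d\n", (int) gp.eval());     // 0: P is evaluated
    }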
+ + If a conjunction contains predicates f1=f2 and f2=f3, a new item of + this class is created Item_equal(f1,f2,f3) representing the multiple + equality f1=f2=f3 that substitutes the above equality predicates in + the conjunction. + A conjunction of the predicates f2=f1 and f3=f1 and f3=f2 will be + substituted for the item representing the same multiple equality + f1=f2=f3. + An item Item_equal(f1,f2) can appear instead of a conjunction of + f2=f1 and f1=f2, or instead of just the predicate f1=f2. + + An item of the class Item_equal inherits equalities from outer + conjunctive levels. + + Suppose we have a where condition of the following form: + WHERE f1=f2 AND f3=f4 AND f3=f5 AND ... AND (...OR (f1=f3 AND ...)). + In this case: + f1=f2 will be substituted for Item_equal(f1,f2); + f3=f4 and f3=f5 will be substituted for Item_equal(f3,f4,f5); + f1=f3 will be substituted for Item_equal(f1,f2,f3,f4,f5); + + An object of the class Item_equal can contain an optional constant + item c. Then it represents a multiple equality of the form + c=f1=...=fk. + + Objects of the class Item_equal are used for the following: + + 1. An object Item_equal(t1.f1,...,tk.fk) allows us to consider any + pair of tables ti and tj as joined by an equi-condition. + Thus it provides us with additional access paths from table to table. + + 2. An object Item_equal(t1.f1,...,tk.fk) is applied to deduce new + SARGable predicates: + f1=...=fk AND P(fi) => f1=...=fk AND P(fi) AND P(fj). + It can also give us additional index scans and can allow us to + improve selectivity estimates. + + 3. An object Item_equal(t1.f1,...,tk.fk) is used to optimize the + selected execution plan for the query: if table ti is accessed + before the table tj then in any predicate P in the where condition + the occurrence of tj.fj is substituted for ti.fi. This can allow + an evaluation of the predicate at an earlier step. + + When feature 1 is supported we say that join transitive closure + is employed. + When feature 2 is supported we say that search argument transitive + closure is employed. + Both features are usually supported by preprocessing the original query and + adding additional predicates. + We do not just add predicates; rather, we dynamically replace some + predicates that cannot be used to access tables in the investigated + plan with ones, obtained by substituting some fields with equal fields, + that can be used.
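A toy standalone sketch of the bookkeeping described above: equalities over fields are unioned into one multiple equality, an optional constant may be attached, and forcing two different constants to be equal flips a cond_false flag that makes the whole condition false. The MultipleEquality type below is only an illustration of those rules, not the patch's Item_equal:

    #include <cstdio>
    #include <optional>
    #include <set>
    #include <string>

    struct MultipleEquality {
      std::set<std::string> fields;      // f1=f2=...=fk
      std::optional<int> constant;       // optional c in c=f1=...=fk
      bool cond_false = false;

      void add_const(int c) {
        if (!constant) { constant = c; return; }
        if (*constant != c) cond_false = true;   // c1 = c2 with c1 != c2
      }
      void merge(const MultipleEquality &other) {
        fields.insert(other.fields.begin(), other.fields.end());
        if (other.constant) add_const(*other.constant);
        cond_false |= other.cond_false;
      }
    };

    int main() {
      MultipleEquality e1, e2;
      e1.fields = {"f1", "f2"};                   // from f1 = f2
      e2.fields = {"f2", "f3"}; e2.add_const(7);  // from f2 = f3 AND f3 = 7
      e1.merge(e2);                               // now 7 = f1 = f2 = f3
      std::printf("members=%zu const=%d false=%d\n",
                  e1.fields.size(), *e1.constant, (int) e1.cond_false);
      e1.add_const(8);                            // contradicts the constant 7
      std::printf("false after adding 8: %d\n", (int) e1.cond_false);
    }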
+*/ + +class Item_equal: public Item_bool_func +{ + List<Item_field> fields; /* list of equal field items */ + Item *const_item; /* optional constant item equal to fields items */ + cmp_item *eval_item; + bool cond_false; + DTCollation cmp_collation; +public: + inline Item_equal() + : Item_bool_func(), const_item(0), eval_item(0), cond_false(0) + { const_item_cache=0 ;} + Item_equal(Item_field *f1, Item_field *f2); + Item_equal(Item *c, Item_field *f); + Item_equal(Item_equal *item_equal); + inline Item* get_const() { return const_item; } + void add(Item *c); + void add(Item_field *f); + uint members(); + bool contains(Field *field); + Item_field* get_first() { return fields.head(); } + void merge(Item_equal *item); + enum Functype functype() const { return MULT_EQUAL_FUNC; } + longlong val_int(); + const char *func_name() const { return "multiple equal"; } + optimize_type select_optimize() const { return OPTIMIZE_EQUAL; } + void sort(Item_field_cmpfunc cmp, void *arg); + friend class Item_equal_iterator; + void fix_length_and_dec(); + bool fix_fields(THD *thd, TABLE_LIST *tables, Item **ref); + void update_used_tables(); + bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); + void print(String *str); + CHARSET_INFO *compare_collation() + { return fields.head()->collation.collation; } +}; + +class COND_EQUAL: public Sql_alloc +{ +public: + uint max_members; /* max number of members the current level + list and all lower level lists */ + COND_EQUAL *upper_levels; /* multiple equalities of upper and levels */ + List<Item_equal> current_level; /* list of multiple equalities of + the current and level */ + COND_EQUAL() + { + max_members= 0; + upper_levels= 0; + } +}; + + +class Item_equal_iterator : public List_iterator_fast<Item_field> +{ +public: + inline Item_equal_iterator(Item_equal &item_equal) + :List_iterator_fast<Item_field> (item_equal.fields) + {} + inline Item_field* operator++(int) + { + Item_field *item= (*(List_iterator_fast<Item_field> *) this)++; + return item; + } + inline void rewind(void) + { + List_iterator_fast<Item_field>::rewind(); + } +}; + class Item_cond_and :public Item_cond { public: + COND_EQUAL cond_equal; /* contains list of Item_equal objects for + the current and level and reference + to multiple equalities of upper and levels */ Item_cond_and() :Item_cond() {} Item_cond_and(Item *i1,Item *i2) :Item_cond(i1,i2) {} Item_cond_and(THD *thd, Item_cond_and *item) :Item_cond(thd, item) {} diff --git a/sql/item_create.cc b/sql/item_create.cc index 800489a6a72..cec6de3eede 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -18,10 +18,6 @@ #include "mysql_priv.h" -#ifndef M_PI -#define M_PI 3.14159265358979323846 -#endif - Item *create_func_abs(Item* a) { return new Item_func_abs(a); @@ -77,12 +73,13 @@ Item *create_func_connection_id(void) { THD *thd=current_thd; thd->lex->safe_to_cache_query= 0; - return new Item_int(NullS,(longlong) - ((thd->slave_thread) ? - thd->variables.pseudo_thread_id : - thd->thread_id), - 10); -} + return new Item_static_int_func("connection_id()", + (longlong) + ((thd->slave_thread) ? 
+ thd->variables.pseudo_thread_id : + thd->thread_id), + 10); +} Item *create_func_conv(Item* a, Item *b, Item *c) { @@ -292,7 +289,7 @@ Item *create_func_period_diff(Item* a, Item *b) Item *create_func_pi(void) { - return new Item_real("pi()",M_PI,6,8); + return new Item_static_real_func("pi()", M_PI, 6, 8); } Item *create_func_pow(Item* a, Item *b) @@ -308,13 +305,9 @@ Item *create_func_current_user() length= (uint) (strxmov(buff, thd->priv_user, "@", thd->priv_host, NullS) - buff); - return new Item_string(NullS, thd->memdup(buff, length), length, - system_charset_info); -} - -Item *create_func_quarter(Item* a) -{ - return new Item_func_quarter(a); + return new Item_static_string_func("current_user()", + thd->memdup(buff, length), length, + system_charset_info); } Item *create_func_radians(Item *a) @@ -441,7 +434,7 @@ Item *create_func_uuid(void) Item *create_func_version(void) { - return new Item_string(NullS,server_version, + return new Item_static_string_func("version()", server_version, (uint) strlen(server_version), system_charset_info, DERIVATION_IMPLICIT); } diff --git a/sql/item_create.h b/sql/item_create.h index faff6f45220..1be33fef257 100644 --- a/sql/item_create.h +++ b/sql/item_create.h @@ -72,7 +72,6 @@ Item *create_func_period_diff(Item* a, Item *b); Item *create_func_pi(void); Item *create_func_pow(Item* a, Item *b); Item *create_func_current_user(void); -Item *create_func_quarter(Item* a); Item *create_func_radians(Item *a); Item *create_func_release_lock(Item* a); Item *create_func_repeat(Item* a, Item *b); diff --git a/sql/item_func.cc b/sql/item_func.cc index 7125f4704b8..69ddd21f913 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -28,6 +28,9 @@ #include <time.h> #include <ft_global.h> +#include "sp_head.h" +#include "sp_rcontext.h" +#include "sp.h" bool check_reserved_words(LEX_STRING *name) { @@ -42,10 +45,10 @@ bool check_reserved_words(LEX_STRING *name) static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname) { - my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), - c1.collation->name,c1.derivation_name(), - c2.collation->name,c2.derivation_name(), - fname); + my_error(ER_CANT_AGGREGATE_2COLLATIONS, MYF(0), + c1.collation->name, c1.derivation_name(), + c2.collation->name, c2.derivation_name(), + fname); } static void my_coll_agg_error(DTCollation &c1, @@ -53,11 +56,11 @@ static void my_coll_agg_error(DTCollation &c1, DTCollation &c3, const char *fname) { - my_error(ER_CANT_AGGREGATE_3COLLATIONS,MYF(0), - c1.collation->name,c1.derivation_name(), - c2.collation->name,c2.derivation_name(), - c3.collation->name,c3.derivation_name(), - fname); + my_error(ER_CANT_AGGREGATE_3COLLATIONS, MYF(0), + c1.collation->name, c1.derivation_name(), + c2.collation->name, c2.derivation_name(), + c3.collation->name, c3.derivation_name(), + fname); } @@ -71,7 +74,7 @@ static void my_coll_agg_error(Item** args, uint count, const char *fname) args[2]->collation, fname); else - my_error(ER_CANT_AGGREGATE_NCOLLATIONS,MYF(0),fname); + my_error(ER_CANT_AGGREGATE_NCOLLATIONS, MYF(0), fname); } @@ -189,6 +192,8 @@ bool Item_func::agg_arg_charsets(DTCollation &coll, res= TRUE; break; // we cannot return here, we need to restore "arena". } + if ((*arg)->type() == FIELD_ITEM) + ((Item_field *)(*arg))->no_const_subst= 1; conv->fix_fields(thd, 0, &conv); *arg= conv; } @@ -276,8 +281,8 @@ Item_func::Item_func(THD *thd, Item_func *item) item. RETURN VALUES - 0 ok - 1 Got error. Stored with my_error(). + FALSE ok + TRUE Got error. Stored with my_error(). 
*/ bool @@ -293,7 +298,7 @@ Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) const_item_cache=1; if (check_stack_overrun(thd, buff)) - return 1; // Fatal error if flag is set! + return TRUE; // Fatal error if flag is set! if (arg_count) { // Print purify happy for (arg=args, arg_end=args+arg_count; arg != arg_end ; arg++) @@ -305,7 +310,7 @@ Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) */ if ((!(*arg)->fixed && (*arg)->fix_fields(thd, tables, arg)) || (*arg)->check_cols(allowed_arg_cols)) - return 1; /* purecov: inspected */ + return TRUE; /* purecov: inspected */ item= *arg; if (item->maybe_null) maybe_null=1; @@ -317,10 +322,10 @@ Item_func::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) } } fix_length_and_dec(); - if (thd->net.last_errno) // An error inside fix_length_and_dec occured - return 1; + if (thd->net.report_error) // An error inside fix_length_and_dec occured + return TRUE; fixed= 1; - return 0; + return FALSE; } bool Item_func::walk (Item_processor processor, byte *argument) @@ -337,6 +342,46 @@ bool Item_func::walk (Item_processor processor, byte *argument) return (this->*processor)(argument); } + +/* + Transform an Item_func object with a transformer callback function + + SYNOPSIS + transform() + transformer the transformer callback function to be applied to the nodes + of the tree of the object + argument parameter to be passed to the transformer + + DESCRIPTION + The function recursively applies the transform method with the + same transformer to each argument the function. + If the call of the method for a member item returns a new item + the old item is substituted for a new one. + After this the transform method is applied to the root node + of the Item_func object. + + RETURN VALUES + Item returned as the result of transformation of the root node +*/ + +Item *Item_func::transform(Item_transformer transformer, byte *argument) +{ + if (arg_count) + { + Item **arg,**arg_end; + for (arg= args, arg_end= args+arg_count; arg != arg_end; arg++) + { + Item *new_item= (*arg)->transform(transformer, argument); + if (!new_item) + return 0; + if (*arg != new_item) + current_thd->change_item_tree(arg, new_item); + } + } + return (this->*transformer)(argument); +} + + void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, List<Item> &fields) { @@ -436,6 +481,7 @@ bool Item_func::eq(const Item *item, bool binary_cmp) const return 1; } + Field *Item_func::tmp_table_field(TABLE *t_arg) { Field *res; @@ -454,10 +500,7 @@ Field *Item_func::tmp_table_field(TABLE *t_arg) res= new Field_double(max_length, maybe_null, name, t_arg, decimals); break; case STRING_RESULT: - if (max_length > 255) - res= new Field_blob(max_length, maybe_null, name, t_arg, collation.collation); - else - res= new Field_string(max_length, maybe_null, name, t_arg, collation.collation); + res= make_string_field(t_arg); break; case ROW_RESULT: default: @@ -472,7 +515,7 @@ Field *Item_func::tmp_table_field(TABLE *t_arg) String *Item_real_func::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals, &my_charset_bin); @@ -495,7 +538,7 @@ String *Item_num_func::val_str(String *str) } else { - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals,&my_charset_bin); @@ -512,6 +555,16 @@ void Item_func::fix_num_length_and_dec() max_length=float_length(decimals); } +void Item_func::signal_divide_by_null() +{ + 
THD *thd= current_thd; + if (thd->variables.sql_mode & MODE_ERROR_FOR_DIVISION_BY_ZERO) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_DIVISION_BY_ZERO, + ER(ER_DIVISION_BY_ZERO)); + null_value= 1; +} + + Item *Item_func::get_tmp_table_item(THD *thd) { if (!with_sum_func && !const_item()) @@ -562,7 +615,7 @@ String *Item_num_op::val_str(String *str) } else { - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals,&my_charset_bin); @@ -589,10 +642,10 @@ void Item_func_unsigned::print(String *str) } -double Item_func_plus::val() +double Item_func_plus::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val()+args[1]->val(); + double value= args[0]->val_real() + args[1]->val_real(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; return value; @@ -608,7 +661,7 @@ longlong Item_func_plus::val_int() return 0; return value; } - return (longlong) Item_func_plus::val(); + return (longlong) Item_func_plus::val_real(); } @@ -626,10 +679,10 @@ void Item_func_minus::fix_length_and_dec() } -double Item_func_minus::val() +double Item_func_minus::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val() - args[1]->val(); + double value= args[0]->val_real() - args[1]->val_real(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; return value; @@ -645,14 +698,14 @@ longlong Item_func_minus::val_int() return 0; return value; } - return (longlong) Item_func_minus::val(); + return (longlong) Item_func_minus::val_real(); } -double Item_func_mul::val() +double Item_func_mul::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val()*args[1]->val(); + double value= args[0]->val_real() * args[1]->val_real(); if ((null_value=args[0]->null_value || args[1]->null_value)) return 0.0; /* purecov: inspected */ return value; @@ -668,20 +721,26 @@ longlong Item_func_mul::val_int() return 0; /* purecov: inspected */ return value; } - return (longlong) Item_func_mul::val(); + return (longlong) Item_func_mul::val_real(); } -double Item_func_div::val() +double Item_func_div::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - double val2=args[1]->val(); - if ((null_value= val2 == 0.0 || args[0]->null_value || args[1]->null_value)) + double value= args[0]->val_real(); + double val2= args[1]->val_real(); + if ((null_value= args[0]->null_value || args[1]->null_value)) return 0.0; + if (val2 == 0.0) + { + signal_divide_by_null(); + return 0.0; + } return value/val2; } + longlong Item_func_div::val_int() { DBUG_ASSERT(fixed == 1); @@ -689,13 +748,19 @@ longlong Item_func_div::val_int() { longlong value=args[0]->val_int(); longlong val2=args[1]->val_int(); - if ((null_value= val2 == 0 || args[0]->null_value || args[1]->null_value)) + if ((null_value= args[0]->null_value || args[1]->null_value)) + return 0; + if (val2 == 0) + { + signal_divide_by_null(); return 0; + } return value/val2; } - return (longlong) Item_func_div::val(); + return (longlong) Item_func_div::val_real(); } + void Item_func_div::fix_length_and_dec() { decimals=max(args[0]->decimals,args[1]->decimals)+2; @@ -713,8 +778,13 @@ longlong Item_func_int_div::val_int() DBUG_ASSERT(fixed == 1); longlong value=args[0]->val_int(); longlong val2=args[1]->val_int(); - if ((null_value= val2 == 0 || args[0]->null_value || args[1]->null_value)) + if (args[0]->null_value || args[1]->null_value) return 0; + if (val2 == 0) + { + signal_divide_by_null(); + return 0; + } return (unsigned_flag ? 
(ulonglong) value / (ulonglong) val2 : value / val2); @@ -729,14 +799,19 @@ void Item_func_int_div::fix_length_and_dec() } -double Item_func_mod::val() +double Item_func_mod::val_real() { DBUG_ASSERT(fixed == 1); - double x= args[0]->val(); - double y= args[1]->val(); - if ((null_value= (y == 0.0) || args[0]->null_value || args[1]->null_value)) + double value= args[0]->val_real(); + double val2= args[1]->val_real(); + if ((null_value= args[0]->null_value || args[1]->null_value)) return 0.0; /* purecov: inspected */ - return fmod(x, y); + if (val2 == 0.0) + { + signal_divide_by_null(); + return 0.0; + } + return fmod(value,val2); } longlong Item_func_mod::val_int() @@ -744,8 +819,13 @@ longlong Item_func_mod::val_int() DBUG_ASSERT(fixed == 1); longlong value= args[0]->val_int(); longlong val2= args[1]->val_int(); - if ((null_value=val2 == 0 || args[0]->null_value || args[1]->null_value)) + if ((null_value= args[0]->null_value || args[1]->null_value)) return 0; /* purecov: inspected */ + if (val2 == 0) + { + signal_divide_by_null(); + return 0; + } return value % val2; } @@ -755,10 +835,10 @@ void Item_func_mod::fix_length_and_dec() } -double Item_func_neg::val() +double Item_func_neg::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); null_value=args[0]->null_value; return -value; } @@ -814,10 +894,10 @@ void Item_func_neg::fix_length_and_dec() } -double Item_func_abs::val() +double Item_func_abs::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); null_value=args[0]->null_value; return fabs(value); } @@ -846,10 +926,10 @@ void Item_func_abs::fix_length_and_dec() /* Gateway to natural LOG function */ -double Item_func_ln::val() +double Item_func_ln::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; return log(value); @@ -860,15 +940,15 @@ double Item_func_ln::val() We have to check if all values are > zero and first one is not one as these are the cases then result is not a number. 
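The signal_divide_by_null() path added a few hunks above keeps the SQL result of x/0 (for /, DIV and MOD alike) as NULL, and raises ER_DIVISION_BY_ZERO only when MODE_ERROR_FOR_DIVISION_BY_ZERO is part of the sql_mode. A standalone sketch of that contract, with an empty optional standing in for the NULL result and a counter standing in for the pushed diagnostic:

    #include <cstdio>
    #include <optional>

    // Division by zero never produces a number: it yields "NULL", and may
    // additionally report a diagnostic when the strict mode flag is on.
    static std::optional<double> safe_divide(double a, double b,
                                             bool strict_mode, int *warnings) {
      if (b == 0.0) {
        if (strict_mode)
          ++*warnings;        // stands in for pushing ER_DIVISION_BY_ZERO
        return std::nullopt;  // SQL NULL
      }
      return a / b;
    }

    int main() {
      int warnings = 0;
      auto r1 = safe_divide(1.0, 0.0, /*strict_mode=*/false, &warnings);
      auto r2 = safe_divide(1.0, 0.0, /*strict_mode=*/true, &warnings);
      std::printf("r1 is %s, r2 is %s, warnings=%d\n",
                  r1 ? "a number" : "NULL", r2 ? "a number" : "NULL", warnings);
    }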
*/ -double Item_func_log::val() +double Item_func_log::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; if (arg_count == 2) { - double value2= args[1]->val(); + double value2= args[1]->val_real(); if ((null_value=(args[1]->null_value || value2 <= 0.0 || value == 1.0))) return 0.0; return log(value2) / log(value); @@ -876,47 +956,47 @@ double Item_func_log::val() return log(value); } -double Item_func_log2::val() +double Item_func_log2::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; - return log(value) / log(2.0); + return log(value) / M_LN2; } -double Item_func_log10::val() +double Item_func_log10::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || value <= 0.0))) return 0.0; /* purecov: inspected */ return log10(value); } -double Item_func_exp::val() +double Item_func_exp::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; /* purecov: inspected */ return exp(value); } -double Item_func_sqrt::val() +double Item_func_sqrt::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || value < 0))) return 0.0; /* purecov: inspected */ return sqrt(value); } -double Item_func_pow::val() +double Item_func_pow::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); - double val2=args[1]->val(); + double value= args[0]->val_real(); + double val2= args[1]->val_real(); if ((null_value=(args[0]->null_value || args[1]->null_value))) return 0.0; /* purecov: inspected */ return pow(value,val2); @@ -924,35 +1004,35 @@ double Item_func_pow::val() // Trigonometric functions -double Item_func_acos::val() +double Item_func_acos::val_real() { DBUG_ASSERT(fixed == 1); // the volatile's for BUG #2338 to calm optimizer down (because of gcc's bug) - volatile double value=args[0]->val(); + volatile double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0)))) return 0.0; return fix_result(acos(value)); } -double Item_func_asin::val() +double Item_func_asin::val_real() { DBUG_ASSERT(fixed == 1); // the volatile's for BUG #2338 to calm optimizer down (because of gcc's bug) - volatile double value=args[0]->val(); + volatile double value= args[0]->val_real(); if ((null_value=(args[0]->null_value || (value < -1.0 || value > 1.0)))) return 0.0; return fix_result(asin(value)); } -double Item_func_atan::val() +double Item_func_atan::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; if (arg_count == 2) { - double val2= args[1]->val(); + double val2= args[1]->val_real(); if ((null_value=args[1]->null_value)) return 0.0; return fix_result(atan2(value,val2)); @@ -960,28 +1040,28 @@ double Item_func_atan::val() return fix_result(atan(value)); } -double Item_func_cos::val() +double Item_func_cos::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; return fix_result(cos(value)); } -double Item_func_sin::val() +double 
Item_func_sin::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; return fix_result(sin(value)); } -double Item_func_tan::val() +double Item_func_tan::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0.0; return fix_result(tan(value)); @@ -1045,7 +1125,7 @@ void Item_func_integer::fix_length_and_dec() longlong Item_func_ceiling::val_int() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); null_value=args[0]->null_value; return (longlong) ceil(value); } @@ -1054,7 +1134,7 @@ longlong Item_func_floor::val_int() { DBUG_ASSERT(fixed == 1); // the volatile's for BUG #3051 to calm optimizer down (because of gcc's bug) - volatile double value=args[0]->val(); + volatile double value= args[0]->val_real(); null_value=args[0]->null_value; return (longlong) floor(value); } @@ -1073,10 +1153,10 @@ void Item_func_round::fix_length_and_dec() } } -double Item_func_round::val() +double Item_func_round::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); int dec=(int) args[1]->val_int(); uint abs_dec=abs(dec); double tmp; @@ -1160,7 +1240,7 @@ void Item_func_rand::update_used_tables() } -double Item_func_rand::val() +double Item_func_rand::val_real() { DBUG_ASSERT(fixed == 1); return my_rnd(rand); @@ -1169,16 +1249,16 @@ double Item_func_rand::val() longlong Item_func_sign::val_int() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); null_value=args[0]->null_value; return value < 0.0 ? -1 : (value > 0 ? 1 : 0); } -double Item_func_units::val() +double Item_func_units::val_real() { DBUG_ASSERT(fixed == 1); - double value=args[0]->val(); + double value= args[0]->val_real(); if ((null_value=args[0]->null_value)) return 0; return value*mul+add; @@ -1224,7 +1304,7 @@ String *Item_func_min_max::val_str(String *str) } case REAL_RESULT: { - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals,&my_charset_bin); @@ -1268,7 +1348,7 @@ String *Item_func_min_max::val_str(String *str) } -double Item_func_min_max::val() +double Item_func_min_max::val_real() { DBUG_ASSERT(fixed == 1); double value=0.0; @@ -1277,12 +1357,12 @@ double Item_func_min_max::val() { if (null_value) { - value=args[i]->val(); + value= args[i]->val_real(); null_value=args[i]->null_value; } else { - double tmp=args[i]->val(); + double tmp= args[i]->val_real(); if (!args[i]->null_value && (tmp < value ? cmp_sign : -cmp_sign) > 0) value=tmp; } @@ -1438,10 +1518,10 @@ longlong Item_func_field::val_int() } else { - double val= args[0]->val(); + double val= args[0]->val_real(); for (uint i=1; i < arg_count ; i++) { - if (val == args[i]->val()) + if (val == args[i]->val_real()) return (longlong) (i); } } @@ -1647,15 +1727,14 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, DBUG_ENTER("Item_udf_func::fix_fields"); if (check_stack_overrun(thd, buff)) - DBUG_RETURN(1); // Fatal error flag is set! + DBUG_RETURN(TRUE); // Fatal error flag is set! 
udf_func *tmp_udf=find_udf(u_d->name.str,(uint) u_d->name.length,1); if (!tmp_udf) { - my_printf_error(ER_CANT_FIND_UDF,ER(ER_CANT_FIND_UDF),MYF(0),u_d->name.str, - errno); - DBUG_RETURN(1); + my_error(ER_CANT_FIND_UDF, MYF(0), u_d->name.str, errno); + DBUG_RETURN(TRUE); } u_d=tmp_udf; args=arguments; @@ -1672,7 +1751,7 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, { free_udf(u_d); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } uint i; Item **arg,**arg_end; @@ -1686,7 +1765,7 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, // we can't assign 'item' before, because fix_fields() can change arg Item *item= *arg; if (item->check_cols(1)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); /* TODO: We should think about this. It is not always right way just to set an UDF result to return my_charset_bin @@ -1707,14 +1786,19 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, const_item_cache&=item->const_item(); f_args.arg_type[i]=item->result_type(); } + //TODO: why all folowing memory is not allocated with 1 call of sql_alloc? if (!(buffers=new String[arg_count]) || !(f_args.args= (char**) sql_alloc(arg_count * sizeof(char *))) || - !(f_args.lengths=(ulong*) sql_alloc(arg_count * sizeof(long))) || - !(f_args.maybe_null=(char*) sql_alloc(arg_count * sizeof(char))) || - !(num_buffer= (char*) sql_alloc(ALIGN_SIZE(sizeof(double))*arg_count))) + !(f_args.lengths= (ulong*) sql_alloc(arg_count * sizeof(long))) || + !(f_args.maybe_null= (char*) sql_alloc(arg_count * sizeof(char))) || + !(num_buffer= (char*) sql_alloc(arg_count * + ALIGN_SIZE(sizeof(double)))) || + !(f_args.attributes= (char**) sql_alloc(arg_count * sizeof(char *))) || + !(f_args.attribute_lengths= (ulong*) sql_alloc(arg_count * + sizeof(long)))) { free_udf(u_d); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } func->fix_length_and_dec(); @@ -1730,8 +1814,10 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, for (uint i=0; i < arg_count; i++) { f_args.args[i]=0; - f_args.lengths[i]=arguments[i]->max_length; - f_args.maybe_null[i]=(char) arguments[i]->maybe_null; + f_args.lengths[i]= arguments[i]->max_length; + f_args.maybe_null[i]= (char) arguments[i]->maybe_null; + f_args.attributes[i]= arguments[i]->name; + f_args.attribute_lengths[i]= arguments[i]->name_length; switch(arguments[i]->type()) { case Item::STRING_ITEM: // Constant string ! 
@@ -1751,7 +1837,7 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, } break; case Item::REAL_ITEM: - *((double*) to) = arguments[i]->val(); + *((double*) to)= arguments[i]->val_real(); if (!arguments[i]->null_value) { f_args.args[i]=to; @@ -1768,10 +1854,10 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, u_d->func_init; if ((error=(uchar) init(&initid, &f_args, thd->net.last_error))) { - my_printf_error(ER_CANT_INITIALIZE_UDF,ER(ER_CANT_INITIALIZE_UDF),MYF(0), - u_d->name.str, thd->net.last_error); + my_error(ER_CANT_INITIALIZE_UDF, MYF(0), + u_d->name.str, thd->net.last_error); free_udf(u_d); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } func->max_length=min(initid.max_length,MAX_BLOB_WIDTH); func->maybe_null=initid.maybe_null; @@ -1781,11 +1867,11 @@ udf_handler::fix_fields(THD *thd, TABLE_LIST *tables, Item_result_field *func, initialized=1; if (error) { - my_printf_error(ER_CANT_INITIALIZE_UDF,ER(ER_CANT_INITIALIZE_UDF),MYF(0), - u_d->name.str, ER(ER_UNKNOWN_ERROR)); - DBUG_RETURN(1); + my_error(ER_CANT_INITIALIZE_UDF, MYF(0), + u_d->name.str, ER(ER_UNKNOWN_ERROR)); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -1818,7 +1904,7 @@ bool udf_handler::get_arguments() } break; case REAL_RESULT: - *((double*) to) = args[i]->val(); + *((double*) to)= args[i]->val_real(); if (!args[i]->null_value) { f_args.args[i]=to; @@ -1873,7 +1959,7 @@ String *udf_handler::val_str(String *str,String *save_str) -double Item_func_udf_float::val() +double Item_func_udf_float::val_real() { DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_udf_float::val"); @@ -1886,7 +1972,7 @@ double Item_func_udf_float::val() String *Item_func_udf_float::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals,&my_charset_bin); @@ -2307,7 +2393,7 @@ longlong Item_func_benchmark::val_int() { switch (args[0]->result_type()) { case REAL_RESULT: - (void) args[0]->val(); + (void) args[0]->val_real(); break; case INT_RESULT: (void) args[0]->val_int(); @@ -2396,7 +2482,7 @@ bool Item_func_set_user_var::fix_fields(THD *thd, TABLE_LIST *tables, /* fix_fields will call Item_func_set_user_var::fix_length_and_dec */ if (Item_func::fix_fields(thd, tables, ref) || !(entry= get_variable(&thd->user_vars, name, 1))) - return 1; + return TRUE; /* Remember the last query which updated it, this way a query can later know if this variable is a constant item in the query (it is if update_query_id @@ -2422,7 +2508,7 @@ bool Item_func_set_user_var::fix_fields(THD *thd, TABLE_LIST *tables, entry->collation.set(args[0]->collation); collation.set(entry->collation); cached_result_type= args[0]->result_type(); - return 0; + return FALSE; } @@ -2580,7 +2666,7 @@ String *user_var_entry::val_str(my_bool *null_value, String *str, will be catched by thd->net.report_error check in sql_set_variables(). RETURN - 0 - OK. + FALSE OK. 
*/ bool @@ -2591,7 +2677,7 @@ Item_func_set_user_var::check() switch (cached_result_type) { case REAL_RESULT: { - save_result.vreal= args[0]->val(); + save_result.vreal= args[0]->val_real(); break; } case INT_RESULT: @@ -2610,7 +2696,7 @@ Item_func_set_user_var::check() DBUG_ASSERT(0); break; } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -2672,7 +2758,7 @@ Item_func_set_user_var::update() } -double Item_func_set_user_var::val() +double Item_func_set_user_var::val_real() { DBUG_ASSERT(fixed == 1); check(); @@ -2707,6 +2793,16 @@ void Item_func_set_user_var::print(String *str) } +void Item_func_set_user_var::print_as_stmt(String *str) +{ + str->append("set @", 5); + str->append(name.str, name.length); + str->append(":=", 2); + args[0]->print(str); + str->append(')'); +} + + String * Item_func_get_user_var::val_str(String *str) { @@ -2718,7 +2814,7 @@ Item_func_get_user_var::val_str(String *str) } -double Item_func_get_user_var::val() +double Item_func_get_user_var::val_real() { DBUG_ASSERT(fixed == 1); if (!var_entry) @@ -3063,7 +3159,7 @@ bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) !args[0]->const_during_execution()) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"AGAINST"); - return 1; + return TRUE; } const_item_cache=0; @@ -3086,7 +3182,7 @@ bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) if (key == NO_SUCH_KEY && !(flags & FT_BOOL)) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH"); - return 1; + return TRUE; } table=((Item_field *)item)->field->table; table->fulltext_searched=1; @@ -3168,7 +3264,8 @@ err: key=NO_SUCH_KEY; return 0; } - my_error(ER_FT_MATCHING_KEY_NOT_FOUND,MYF(0)); + my_message(ER_FT_MATCHING_KEY_NOT_FOUND, + ER(ER_FT_MATCHING_KEY_NOT_FOUND), MYF(0)); return 1; } @@ -3189,7 +3286,7 @@ bool Item_func_match::eq(const Item *item, bool binary_cmp) const } -double Item_func_match::val() +double Item_func_match::val_real() { DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_func_match::val"); @@ -3297,7 +3394,7 @@ Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name, { if (!var->is_struct()) { - net_printf(thd, ER_VARIABLE_IS_NOT_STRUCT, base_name->str); + my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), base_name->str); return 0; } } @@ -3405,6 +3502,166 @@ longlong Item_func_is_used_lock::val_int() } +longlong Item_func_row_count::val_int() +{ + DBUG_ASSERT(fixed == 1); + THD *thd= current_thd; + + return thd->row_count_func; +} + + +Item_func_sp::Item_func_sp(sp_name *name) + :Item_func(), m_name(name), m_sp(NULL) +{ + m_name->init_qname(current_thd); +} + +Item_func_sp::Item_func_sp(sp_name *name, List<Item> &list) + :Item_func(list), m_name(name), m_sp(NULL) +{ + m_name->init_qname(current_thd); +} + +const char * +Item_func_sp::func_name() const +{ + THD *thd= current_thd; + /* Calculate length to avoud reallocation of string for sure */ + uint len= ((m_name->m_db.length + + m_name->m_name.length)*2 + //characters*quoting + 2 + // ` and ` + 1 + // . + 1 + // end of string + ALIGN_SIZE(1)); // to avoid String reallocation + String qname((char *)alloc_root(thd->mem_root, len), len, + system_charset_info); + + qname.length(0); + append_identifier(thd, &qname, m_name->m_db.str, m_name->m_db.length); + qname.append('.'); + append_identifier(thd, &qname, m_name->m_name.str, m_name->m_name.length); + return qname.ptr(); +} + + +int +Item_func_sp::execute(Item **itp) +{ + DBUG_ENTER("Item_func_sp::execute"); + THD *thd= current_thd; + int res; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + st_sp_security_context save_ctx; +#endif + + if (! 
m_sp) + m_sp= sp_find_function(thd, m_name); + if (! m_sp) + { + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); + DBUG_RETURN(-1); + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (check_procedure_access(thd, EXECUTE_ACL, + m_sp->m_db.str, m_sp->m_name.str, 0)) + DBUG_RETURN(-1); + sp_change_security_context(thd, m_sp, &save_ctx); + if (save_ctx.changed && + check_procedure_access(thd, EXECUTE_ACL, + m_sp->m_db.str, m_sp->m_name.str, 0)) + { + sp_restore_security_context(thd, m_sp, &save_ctx); + DBUG_RETURN(-1); + } +#endif + + /* + We don't need to suppress sending of the ok packet here (by setting + thd->net.no_send_ok to true), because we are not allowing statements + in functions now. + */ + res= m_sp->execute_function(thd, args, arg_count, itp); + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + sp_restore_security_context(thd, m_sp, &save_ctx); +#endif + + DBUG_RETURN(res); +} + + +enum enum_field_types +Item_func_sp::field_type() const +{ + DBUG_ENTER("Item_func_sp::field_type"); + + if (! m_sp) + m_sp= sp_find_function(current_thd, m_name); + if (m_sp) + { + DBUG_PRINT("info", ("m_returns = %d", m_sp->m_returns)); + DBUG_RETURN(m_sp->m_returns); + } + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); + DBUG_RETURN(MYSQL_TYPE_VARCHAR); +} + + +Item_result +Item_func_sp::result_type() const +{ + DBUG_ENTER("Item_func_sp::result_type"); + DBUG_PRINT("info", ("m_sp = %p", m_sp)); + + if (! m_sp) + m_sp= sp_find_function(current_thd, m_name); + if (m_sp) + { + DBUG_RETURN(m_sp->result()); + } + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); + DBUG_RETURN(STRING_RESULT); +} + +void +Item_func_sp::fix_length_and_dec() +{ + DBUG_ENTER("Item_func_sp::fix_length_and_dec"); + + if (! m_sp) + m_sp= sp_find_function(current_thd, m_name); + if (!
m_sp) + { + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", m_name->m_qname.str); + } + else + { + switch (m_sp->result()) { + case STRING_RESULT: + maybe_null= 1; + max_length= MAX_BLOB_WIDTH; + break; + case REAL_RESULT: + decimals= NOT_FIXED_DEC; + max_length= float_length(decimals); + break; + case INT_RESULT: + decimals= 0; + max_length= 21; + break; + case ROW_RESULT: + default: + // This case should never be choosen + DBUG_ASSERT(0); + break; + } + } + DBUG_VOID_RETURN; +} + + longlong Item_func_found_rows::val_int() { DBUG_ASSERT(fixed == 1); diff --git a/sql/item_func.h b/sql/item_func.h index ce2b34499d6..4657ee81dfb 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -40,15 +40,19 @@ public: enum Functype { UNKNOWN_FUNC,EQ_FUNC,EQUAL_FUNC,NE_FUNC,LT_FUNC,LE_FUNC, GE_FUNC,GT_FUNC,FT_FUNC, LIKE_FUNC,NOTLIKE_FUNC,ISNULL_FUNC,ISNOTNULL_FUNC, - COND_AND_FUNC, COND_OR_FUNC, COND_XOR_FUNC, BETWEEN, IN_FUNC, + COND_AND_FUNC, COND_OR_FUNC, COND_XOR_FUNC, + BETWEEN, IN_FUNC, MULT_EQUAL_FUNC, INTERVAL_FUNC, ISNOTNULLTEST_FUNC, SP_EQUALS_FUNC, SP_DISJOINT_FUNC,SP_INTERSECTS_FUNC, SP_TOUCHES_FUNC,SP_CROSSES_FUNC,SP_WITHIN_FUNC, SP_CONTAINS_FUNC,SP_OVERLAPS_FUNC, SP_STARTPOINT,SP_ENDPOINT,SP_EXTERIORRING, SP_POINTN,SP_GEOMETRYN,SP_INTERIORRINGN, - NOT_FUNC, NOT_ALL_FUNC, NOW_FUNC}; - enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL }; + NOT_FUNC, NOT_ALL_FUNC, + NOW_FUNC, TRIG_COND_FUNC, + GUSERVAR_FUNC}; + enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL, + OPTIMIZE_EQUAL }; enum Type type() const { return FUNC_ITEM; } virtual enum Functype functype() const { return UNKNOWN_FUNC; } Item_func(void): @@ -135,6 +139,7 @@ public: return (null_value=args[0]->get_time(ltime)); } bool is_null() { (void) val_int(); return null_value; } + void signal_divide_by_null(); friend class udf_handler; Field *tmp_table_field() { return result_field; } Field *tmp_table_field(TABLE *t_arg); @@ -148,6 +153,7 @@ public: bool agg_arg_charsets(DTCollation &c, Item **items, uint nitems, uint flags= 0); bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); }; @@ -159,7 +165,8 @@ public: Item_real_func(Item *a,Item *b) :Item_func(a,b) {} Item_real_func(List<Item> &list) :Item_func(list) {} String *val_str(String*str); - longlong val_int() { DBUG_ASSERT(fixed == 1); return (longlong) val(); } + longlong val_int() + { DBUG_ASSERT(fixed == 1); return (longlong) val_real(); } enum Item_result result_type () const { return REAL_RESULT; } void fix_length_and_dec() { decimals=NOT_FIXED_DEC; max_length=float_length(decimals); } }; @@ -173,10 +180,11 @@ public: Item_num_func(Item *a) :Item_func(a),hybrid_type(REAL_RESULT) {} Item_num_func(Item *a,Item *b) :Item_func(a,b),hybrid_type(REAL_RESULT) {} String *val_str(String*str); - longlong val_int() { DBUG_ASSERT(fixed == 1); return (longlong) val(); } + longlong val_int() + { DBUG_ASSERT(fixed == 1); return (longlong) val_real(); } enum Item_result result_type () const { return hybrid_type; } void fix_length_and_dec() { fix_num_length_and_dec(); } - bool is_null() { (void) val(); return null_value; } + bool is_null() { (void) val_real(); return null_value; } }; @@ -191,7 +199,7 @@ class Item_num_op :public Item_func enum Item_result result_type () const { return hybrid_type; } void fix_length_and_dec() { fix_num_length_and_dec(); find_num_type(); } void find_num_type(void); - bool is_null() { (void) val(); return null_value; } + bool is_null() { (void) val_real(); 
return null_value; } }; @@ -204,7 +212,7 @@ public: Item_int_func(Item *a,Item *b,Item *c) :Item_func(a,b,c) { max_length=21; } Item_int_func(List<Item> &list) :Item_func(list) { max_length=21; } Item_int_func(THD *thd, Item_int_func *item) :Item_func(thd, item) {} - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } String *val_str(String*str); enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() {} @@ -216,9 +224,9 @@ class Item_func_signed :public Item_int_func public: Item_func_signed(Item *a) :Item_int_func(a) {} const char *func_name() const { return "cast_as_signed"; } - double val() + double val_real() { - double tmp= args[0]->val(); + double tmp= args[0]->val_real(); null_value= args[0]->null_value; return tmp; } @@ -250,7 +258,7 @@ class Item_func_plus :public Item_num_op public: Item_func_plus(Item *a,Item *b) :Item_num_op(a,b) {} const char *func_name() const { return "+"; } - double val(); + double val_real(); longlong val_int(); }; @@ -259,7 +267,7 @@ class Item_func_minus :public Item_num_op public: Item_func_minus(Item *a,Item *b) :Item_num_op(a,b) {} const char *func_name() const { return "-"; } - double val(); + double val_real(); longlong val_int(); void fix_length_and_dec(); }; @@ -270,7 +278,7 @@ class Item_func_mul :public Item_num_op public: Item_func_mul(Item *a,Item *b) :Item_num_op(a,b) {} const char *func_name() const { return "*"; } - double val(); + double val_real(); longlong val_int(); }; @@ -279,7 +287,7 @@ class Item_func_div :public Item_num_op { public: Item_func_div(Item *a,Item *b) :Item_num_op(a,b) {} - double val(); + double val_real(); longlong val_int(); const char *func_name() const { return "/"; } void fix_length_and_dec(); @@ -291,7 +299,7 @@ class Item_func_int_div :public Item_num_op public: Item_func_int_div(Item *a,Item *b) :Item_num_op(a,b) { hybrid_type=INT_RESULT; } - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } longlong val_int(); const char *func_name() const { return "DIV"; } void fix_length_and_dec(); @@ -302,7 +310,7 @@ class Item_func_mod :public Item_num_op { public: Item_func_mod(Item *a,Item *b) :Item_num_op(a,b) {} - double val(); + double val_real(); longlong val_int(); const char *func_name() const { return "%"; } void fix_length_and_dec(); @@ -313,7 +321,7 @@ class Item_func_neg :public Item_num_func { public: Item_func_neg(Item *a) :Item_num_func(a) {} - double val(); + double val_real(); longlong val_int(); const char *func_name() const { return "-"; } void fix_length_and_dec(); @@ -325,7 +333,7 @@ class Item_func_abs :public Item_num_func public: Item_func_abs(Item *a) :Item_num_func(a) {} const char *func_name() const { return "abs"; } - double val(); + double val_real(); longlong val_int(); enum Item_result result_type () const { return args[0]->result_type() == INT_RESULT ? 
INT_RESULT : REAL_RESULT; } @@ -362,7 +370,7 @@ class Item_func_exp :public Item_dec_func { public: Item_func_exp(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "exp"; } }; @@ -371,7 +379,7 @@ class Item_func_ln :public Item_dec_func { public: Item_func_ln(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "ln"; } }; @@ -381,7 +389,7 @@ class Item_func_log :public Item_dec_func public: Item_func_log(Item *a) :Item_dec_func(a) {} Item_func_log(Item *a,Item *b) :Item_dec_func(a,b) {} - double val(); + double val_real(); const char *func_name() const { return "log"; } }; @@ -390,7 +398,7 @@ class Item_func_log2 :public Item_dec_func { public: Item_func_log2(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "log2"; } }; @@ -399,7 +407,7 @@ class Item_func_log10 :public Item_dec_func { public: Item_func_log10(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "log10"; } }; @@ -408,7 +416,7 @@ class Item_func_sqrt :public Item_dec_func { public: Item_func_sqrt(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "sqrt"; } }; @@ -417,7 +425,7 @@ class Item_func_pow :public Item_dec_func { public: Item_func_pow(Item *a,Item *b) :Item_dec_func(a,b) {} - double val(); + double val_real(); const char *func_name() const { return "pow"; } }; @@ -426,7 +434,7 @@ class Item_func_acos :public Item_dec_func { public: Item_func_acos(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "acos"; } }; @@ -434,7 +442,7 @@ class Item_func_asin :public Item_dec_func { public: Item_func_asin(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "asin"; } }; @@ -443,7 +451,7 @@ class Item_func_atan :public Item_dec_func public: Item_func_atan(Item *a) :Item_dec_func(a) {} Item_func_atan(Item *a,Item *b) :Item_dec_func(a,b) {} - double val(); + double val_real(); const char *func_name() const { return "atan"; } }; @@ -451,7 +459,7 @@ class Item_func_cos :public Item_dec_func { public: Item_func_cos(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "cos"; } }; @@ -459,7 +467,7 @@ class Item_func_sin :public Item_dec_func { public: Item_func_sin(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "sin"; } }; @@ -467,7 +475,7 @@ class Item_func_tan :public Item_dec_func { public: Item_func_tan(Item *a) :Item_dec_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "tan"; } }; @@ -505,7 +513,7 @@ public: Item_func_round(Item *a,Item *b,bool trunc_arg) :Item_real_func(a,b),truncate(trunc_arg) {} const char *func_name() const { return truncate ? 
"truncate" : "round"; } - double val(); + double val_real(); void fix_length_and_dec(); }; @@ -516,7 +524,7 @@ class Item_func_rand :public Item_real_func public: Item_func_rand(Item *a) :Item_real_func(a), rand(0) {} Item_func_rand() :Item_real_func() {} - double val(); + double val_real(); const char *func_name() const { return "rand"; } bool const_item() const { return 0; } void update_used_tables(); @@ -540,7 +548,7 @@ class Item_func_units :public Item_real_func public: Item_func_units(char *name_arg,Item *a,double mul_arg,double add_arg) :Item_real_func(a),name(name_arg),mul(mul_arg),add(add_arg) {} - double val(); + double val_real(); const char *func_name() const { return name; } void fix_length_and_dec() { decimals=NOT_FIXED_DEC; max_length=float_length(decimals); } }; @@ -554,7 +562,7 @@ class Item_func_min_max :public Item_func public: Item_func_min_max(List<Item> &list,int cmp_sign_arg) :Item_func(list), cmp_type(INT_RESULT), cmp_sign(cmp_sign_arg) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *); void fix_length_and_dec(); @@ -793,8 +801,11 @@ class Item_func_udf_float :public Item_udf_func Item_func_udf_float(udf_func *udf_arg, List<Item> &list) :Item_udf_func(udf_arg,list) {} longlong val_int() - { DBUG_ASSERT(fixed == 1); return (longlong) Item_func_udf_float::val(); } - double val(); + { + DBUG_ASSERT(fixed == 1); + return (longlong) Item_func_udf_float::val_real(); + } + double val_real(); String *val_str(String *str); void fix_length_and_dec() { fix_num_length_and_dec(); } }; @@ -807,7 +818,7 @@ public: Item_func_udf_int(udf_func *udf_arg, List<Item> &list) :Item_udf_func(udf_arg,list) {} longlong val_int(); - double val() { return (double) Item_func_udf_int::val_int(); } + double val_real() { return (double) Item_func_udf_int::val_int(); } String *val_str(String *str); enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() { decimals=0; max_length=21; } @@ -821,7 +832,7 @@ public: Item_func_udf_str(udf_func *udf_arg, List<Item> &list) :Item_udf_func(udf_arg,list) {} String *val_str(String *); - double val() + double val_real() { int err; String *res; res=val_str(&str_value); @@ -844,7 +855,7 @@ class Item_func_udf_float :public Item_real_func public: Item_func_udf_float(udf_func *udf_arg) :Item_real_func() {} Item_func_udf_float(udf_func *udf_arg, List<Item> &list) :Item_real_func(list) {} - double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } }; @@ -864,7 +875,7 @@ public: Item_func_udf_str(udf_func *udf_arg, List<Item> &list) :Item_func(list) {} String *val_str(String *) { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } - double val() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); null_value= 1; return 0.0; } longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec() { maybe_null=1; max_length=0; } @@ -939,7 +950,7 @@ public: Item_func_set_user_var(LEX_STRING a,Item *b) :Item_func(b), cached_result_type(INT_RESULT), name(a) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); bool update_hash(void *ptr, uint length, enum Item_result type, @@ -950,6 +961,7 @@ public: bool fix_fields(THD *thd, struct st_table_list *tables, Item **ref); void fix_length_and_dec(); void print(String *str); + void print_as_stmt(String *str); const char *func_name() 
const { return "set_user_var"; } }; @@ -962,7 +974,9 @@ class Item_func_get_user_var :public Item_func public: Item_func_get_user_var(LEX_STRING a): Item_func(), name(a) {} - double val(); + enum Functype functype() const { return GUSERVAR_FUNC; } + LEX_STRING get_name() { return name; } + double val_real(); longlong val_int(); String *val_str(String* str); void fix_length_and_dec(); @@ -972,7 +986,7 @@ public: We must always return variables as strings to guard against selects of type select @t1:=1,@t1,@t:="hello",@t from foo where (@t1:= t2.b) */ - enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } const char *func_name() const { return "get_user_var"; } bool const_item() const; table_map used_tables() const @@ -1021,11 +1035,7 @@ public: table->file->ft_handler=0; table->fulltext_searched=0; } - if (concat) - { - delete concat; - concat= 0; - } + concat= 0; DBUG_VOID_RETURN; } enum Functype functype() const { return FT_FUNC; } @@ -1035,8 +1045,8 @@ public: bool fix_fields(THD *thd, struct st_table_list *tlist, Item **ref); bool eq(const Item *, bool binary_cmp) const; /* The following should be safe, even if we compare doubles */ - longlong val_int() { DBUG_ASSERT(fixed == 1); return val()!=0.0; } - double val(); + longlong val_int() { DBUG_ASSERT(fixed == 1); return val_real() != 0.0; } + double val_real(); void print(String *str); bool fix_index(); @@ -1081,6 +1091,88 @@ enum Cast_target }; +class Item_func_row_count :public Item_int_func +{ +public: + Item_func_row_count() :Item_int_func() {} + longlong val_int(); + const char *func_name() const { return "row_count"; } + void fix_length_and_dec() { decimals= 0; maybe_null=0; } +}; + + +/* + * + * Stored FUNCTIONs + * + */ + +class sp_head; +class sp_name; + +class Item_func_sp :public Item_func +{ +private: + sp_name *m_name; + mutable sp_head *m_sp; + + int execute(Item **itp); + +public: + + Item_func_sp(sp_name *name); + + Item_func_sp(sp_name *name, List<Item> &list); + + virtual ~Item_func_sp() + {} + + const char *func_name() const; + + enum enum_field_types field_type() const; + + Item_result result_type() const; + + longlong val_int() + { + return (longlong)Item_func_sp::val_real(); + } + + double val_real() + { + Item *it; + double d; + + if (execute(&it)) + { + null_value= 1; + return 0.0; + } + d= it->val_real(); + null_value= it->null_value; + return d; + } + + String *val_str(String *str) + { + Item *it; + String *s; + + if (execute(&it)) + { + null_value= 1; + return NULL; + } + s= it->val_str(str); + null_value= it->null_value; + return s; + } + + void fix_length_and_dec(); + +}; + + class Item_func_found_rows :public Item_int_func { public: diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 2f00416bddf..1a8cb50081b 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -319,8 +319,8 @@ err: String *Item_func_point::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double x= args[0]->val(); - double y= args[1]->val(); + double x= args[0]->val_real(); + double y= args[1]->val_real(); if ((null_value= (args[0]->null_value || args[1]->null_value || @@ -627,7 +627,7 @@ longlong Item_func_numpoints::val_int() } -double Item_func_x::val() +double Item_func_x::val_real() { DBUG_ASSERT(fixed == 1); double res= 0.0; // In case of errors @@ -644,7 +644,7 @@ double Item_func_x::val() } -double Item_func_y::val() +double Item_func_y::val_real() { DBUG_ASSERT(fixed == 1); double res= 0; // In case of errors @@ -661,7 +661,7 @@ 
double Item_func_y::val() } -double Item_func_area::val() +double Item_func_area::val_real() { DBUG_ASSERT(fixed == 1); double res= 0; // In case of errors @@ -678,7 +678,7 @@ double Item_func_area::val() return res; } -double Item_func_glength::val() +double Item_func_glength::val_real() { DBUG_ASSERT(fixed == 1); double res= 0; // In case of errors diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 79e4f804a04..e19036cc982 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -263,7 +263,7 @@ class Item_func_x: public Item_real_func String value; public: Item_func_x(Item *a): Item_real_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "x"; } }; @@ -273,7 +273,7 @@ class Item_func_y: public Item_real_func String value; public: Item_func_y(Item *a): Item_real_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "y"; } }; @@ -316,7 +316,7 @@ class Item_func_area: public Item_real_func String value; public: Item_func_area(Item *a): Item_real_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "area"; } }; @@ -326,7 +326,7 @@ class Item_func_glength: public Item_real_func String value; public: Item_func_glength(Item *a): Item_real_func(a) {} - double val(); + double val_real(); const char *func_name() const { return "glength"; } }; diff --git a/sql/item_row.cc b/sql/item_row.cc index 4e4957b980e..62f31186b09 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -62,7 +62,7 @@ bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref) for (arg= items, arg_end= items+arg_count; arg != arg_end ; arg++) { if ((*arg)->fix_fields(thd, tabl, arg)) - return 1; + return TRUE; // we can't assign 'item' before, because fix_fields() can change arg Item *item= *arg; used_tables_cache |= item->used_tables(); @@ -81,7 +81,7 @@ bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref) with_sum_func= with_sum_func || item->with_sum_func; } fixed= 1; - return 0; + return FALSE; } void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array, @@ -148,6 +148,18 @@ bool Item_row::walk(Item_processor processor, byte *arg) return (this->*processor)(arg); } +Item *Item_row::transform(Item_transformer transformer, byte *arg) +{ + for (uint i= 0; i < arg_count; i++) + { + Item *new_item= items[i]->transform(transformer, arg); + if (!new_item) + return 0; + items[i]= new_item; + } + return (this->*transformer)(arg); +} + void Item_row::bring_value() { for (uint i= 0; i < arg_count; i++) diff --git a/sql/item_row.h b/sql/item_row.h index 39bc4513e1e..64bd5cbbb44 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -41,7 +41,7 @@ public: { illegal_method_call((const char*)"make_field"); }; - double val() + double val_real() { illegal_method_call((const char*)"val"); return 0; @@ -65,6 +65,7 @@ public: void print(String *str); bool walk(Item_processor processor, byte *arg); + Item *transform(Item_transformer transformer, byte *arg); uint cols() { return arg_count; } Item* el(uint i) { return items[i]; } diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 2a63c5355a4..3c4ffd3e4f3 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -41,10 +41,10 @@ String my_empty_string("",default_charset_info); static void my_coll_agg_error(DTCollation &c1, DTCollation &c2, const char *fname) { - my_error(ER_CANT_AGGREGATE_2COLLATIONS,MYF(0), - c1.collation->name,c1.derivation_name(), - c2.collation->name,c2.derivation_name(), - fname); + my_error(ER_CANT_AGGREGATE_2COLLATIONS, MYF(0), 
+ c1.collation->name, c1.derivation_name(), + c2.collation->name, c2.derivation_name(), + fname); } uint nr_of_decimals(const char *str) @@ -58,7 +58,7 @@ uint nr_of_decimals(const char *str) return 0; } -double Item_str_func::val() +double Item_str_func::val_real() { DBUG_ASSERT(fixed == 1); int err; @@ -1185,21 +1185,29 @@ String *Item_func_substr_index::val_str(String *str) String *Item_func_ltrim::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - if ((null_value=args[0]->null_value)) - return 0; /* purecov: inspected */ - char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff),res->charset()); - String *remove_str= (arg_count==2) ? args[1]->val_str(&tmp) : &remove; + char buff[MAX_FIELD_WIDTH], *ptr, *end; + String tmp(buff,sizeof(buff),system_charset_info); + String *res, *remove_str; uint remove_length; LINT_INIT(remove_length); - if (!remove_str || (remove_length=remove_str->length()) == 0 || + res= args[0]->val_str(str); + if ((null_value=args[0]->null_value)) + return 0; + remove_str= &remove; /* Default value. */ + if (arg_count == 2) + { + remove_str= args[1]->val_str(&tmp); + if ((null_value= args[1]->null_value)) + return 0; + } + + if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) return res; - char *ptr=(char*) res->ptr(); - char *end=ptr+res->length(); + ptr= (char*) res->ptr(); + end= ptr+res->length(); if (remove_length == 1) { char chr=(*remove_str)[0]; @@ -1224,21 +1232,29 @@ String *Item_func_ltrim::val_str(String *str) String *Item_func_rtrim::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - if ((null_value=args[0]->null_value)) - return 0; /* purecov: inspected */ - char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff),res->charset()); - String *remove_str= (arg_count==2) ? args[1]->val_str(&tmp) : &remove; + char buff[MAX_FIELD_WIDTH], *ptr, *end; + String tmp(buff, sizeof(buff), system_charset_info); + String *res, *remove_str; uint remove_length; LINT_INIT(remove_length); - if (!remove_str || (remove_length=remove_str->length()) == 0 || + res= args[0]->val_str(str); + if ((null_value=args[0]->null_value)) + return 0; + remove_str= &remove; /* Default value. */ + if (arg_count == 2) + { + remove_str= args[1]->val_str(&tmp); + if ((null_value= args[1]->null_value)) + return 0; + } + + if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) return res; - char *ptr=(char*) res->ptr(); - char *end=ptr+res->length(); + ptr= (char*) res->ptr(); + end= ptr+res->length(); #ifdef USE_MB char *p=ptr; register uint32 l; @@ -1297,31 +1313,31 @@ String *Item_func_rtrim::val_str(String *str) String *Item_func_trim::val_str(String *str) { DBUG_ASSERT(fixed == 1); - String *res =args[0]->val_str(str); - if ((null_value=args[0]->null_value)) - return 0; /* purecov: inspected */ - char buff[MAX_FIELD_WIDTH]; - String tmp(buff,sizeof(buff),res->charset()); + char buff[MAX_FIELD_WIDTH], *ptr, *end; + const char *r_ptr; + String tmp(buff, sizeof(buff), system_charset_info); + String *res, *remove_str; uint remove_length; LINT_INIT(remove_length); - String *remove_str; /* The string to remove from res. */ + res= args[0]->val_str(str); + if ((null_value=args[0]->null_value)) + return 0; + remove_str= &remove; /* Default value. */ if (arg_count == 2) { remove_str= args[1]->val_str(&tmp); if ((null_value= args[1]->null_value)) return 0; } - else - remove_str= &remove; /* Default value. 
*/ - if (!remove_str || (remove_length=remove_str->length()) == 0 || + if ((remove_length= remove_str->length()) == 0 || remove_length > res->length()) return res; - char *ptr=(char*) res->ptr(); - char *end=ptr+res->length(); - const char *r_ptr=remove_str->ptr(); + ptr= (char*) res->ptr(); + end= ptr+res->length(); + r_ptr= remove_str->ptr(); while (ptr+remove_length <= end && !memcmp(ptr,r_ptr,remove_length)) ptr+=remove_length; #ifdef USE_MB @@ -1638,7 +1654,7 @@ Item_func_format::Item_func_format(Item *org,int dec) :Item_str_func(org) String *Item_func_format::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double nr =args[0]->val(); + double nr= args[0]->val_real(); uint32 diff,length,str_length; uint dec; if ((null_value=args[0]->null_value)) @@ -1706,14 +1722,14 @@ void Item_func_elt::fix_length_and_dec() } -double Item_func_elt::val() +double Item_func_elt::val_real() { DBUG_ASSERT(fixed == 1); uint tmp; null_value=1; if ((tmp=(uint) args[0]->val_int()) == 0 || tmp >= arg_count) return 0.0; - double result= args[tmp]->val(); + double result= args[tmp]->val_real(); null_value= args[tmp]->null_value; return result; } @@ -2236,8 +2252,8 @@ void Item_func_set_collation::fix_length_and_dec() if (!set_collation || !my_charset_same(args[0]->collation.collation,set_collation)) { - my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), - colname,args[0]->collation.collation->csname); + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + colname, args[0]->collation.collation->csname); return; } collation.set(set_collation, DERIVATION_EXPLICIT); @@ -2265,6 +2281,18 @@ bool Item_func_set_collation::eq(const Item *item, bool binary_cmp) const return 1; } + +void Item_func_set_collation::print(String *str) +{ + str->append('('); + args[0]->print(str); + str->append(" collate ", 9); + DBUG_ASSERT(args[1]->basic_const_item() && + args[1]->type() == Item::STRING_ITEM); + args[1]->str_value.print(str); + str->append(')'); +} + String *Item_func_charset::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -2836,6 +2864,8 @@ String *Item_func_uuid::val_str(String *str) { DBUG_ASSERT(fixed == 1); char *s; + THD *thd= current_thd; + pthread_mutex_lock(&LOCK_uuid_generator); if (! uuid_time) /* first UUID() call. 
initializing data */ { @@ -2850,7 +2880,7 @@ String *Item_func_uuid::val_str(String *str) with a clock_seq value (initialized random below), we use a separate randominit() here */ - randominit(&uuid_rand, tmp + (ulong)current_thd, tmp + query_id); + randominit(&uuid_rand, tmp + (ulong) thd, tmp + query_id); for (i=0; i < (int)sizeof(mac); i++) mac[i]=(uchar)(my_rnd(&uuid_rand)*255); } @@ -2860,7 +2890,8 @@ String *Item_func_uuid::val_str(String *str) *--s=_dig_vec_lower[mac[i] & 15]; *--s=_dig_vec_lower[mac[i] >> 4]; } - randominit(&uuid_rand, tmp + (ulong)start_time, tmp + bytes_sent); + randominit(&uuid_rand, tmp + (ulong)start_time, + tmp + thd->status_var.bytes_sent); set_clock_seq_str(); } diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 698536a61c7..e322e5616a1 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -32,7 +32,7 @@ public: Item_str_func(Item *a,Item *b,Item *c,Item *d, Item* e) :Item_func(a,b,c,d,e) {decimals=NOT_FIXED_DEC; } Item_str_func(List<Item> &list) :Item_func(list) {decimals=NOT_FIXED_DEC; } longlong val_int(); - double val(); + double val_real(); enum Item_result result_type () const { return STRING_RESULT; } void left_right_max_length(); }; @@ -201,7 +201,7 @@ public: Item_func_substr_index(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} String *val_str(String *); void fix_length_and_dec(); - const char *func_name() const { return "substr_index"; } + const char *func_name() const { return "substring_index"; } }; @@ -244,7 +244,7 @@ public: Returns strcat('*', octet2hex(sha1(sha1(password)))). '*' stands for new password format, sha1(sha1(password) is so-called hash_stage2 value. Length of returned string is always 41 byte. To find out how entire - authentification procedure works, see comments in password.c. + authentication procedure works, see comments in password.c. 
*/ class Item_func_password :public Item_str_func { @@ -378,7 +378,7 @@ class Item_func_elt :public Item_str_func { public: Item_func_elt(List<Item> &list) :Item_str_func(list) {} - double val(); + double val_real(); longlong val_int(); String *val_str(String *str); void fix_length_and_dec(); @@ -412,6 +412,14 @@ public: return item->walk(processor, arg) || Item_str_func::walk(processor, arg); } + Item *transform(Item_transformer transformer, byte *arg) + { + Item *new_item= item->transform(transformer, arg); + if (!new_item) + return 0; + item= new_item; + return Item_str_func::transform(transformer, arg); + } void print(String *str); }; @@ -619,7 +627,12 @@ public: void fix_length_and_dec(); bool eq(const Item *item, bool binary_cmp) const; const char *func_name() const { return "collate"; } - void print(String *str) { print_op(str); } + void print(String *str); + Item_field *filed_for_view_update() + { + /* this function is transparent for view updating */ + return args[0]->filed_for_view_update(); + } }; class Item_func_charset :public Item_str_func diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index e1a80941a52..dbf30d7d793 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -66,7 +66,7 @@ void Item_subselect::init(st_select_lex *select_lex, parsing_place= unit->item->parsing_place; unit->item->engine= 0; unit->item= this; - engine->change_item(this, result); + engine->change_result(this, result); } else { @@ -132,14 +132,14 @@ Item_subselect::select_transformer(JOIN *join) bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref) { + char const *save_where= thd_param->where; + bool res; + DBUG_ASSERT(fixed == 0); engine->set_thd((thd= thd_param)); - char const *save_where= thd->where; - int res; - if (check_stack_overrun(thd, (gptr)&res)) - return 1; + return TRUE; res= engine->prepare(); @@ -173,7 +173,7 @@ bool Item_subselect::fix_fields(THD *thd_param, TABLE_LIST *tables, Item **ref) if (engine->cols() > max_columns) { my_error(ER_OPERAND_COLUMNS, MYF(0), 1); - return 1; + return TRUE; } fix_length_and_dec(); } @@ -270,7 +270,8 @@ Item_singlerow_subselect::Item_singlerow_subselect(st_select_lex *select_lex) DBUG_VOID_RETURN; } -Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent, +Item_maxmin_subselect::Item_maxmin_subselect(THD *thd_param, + Item_subselect *parent, st_select_lex *select_lex, bool max_arg) :Item_singlerow_subselect(), was_values(TRUE) @@ -289,6 +290,12 @@ Item_maxmin_subselect::Item_maxmin_subselect(Item_subselect *parent, used_tables_cache= parent->get_used_tables_cache(); const_item_cache= parent->get_const_item_cache(); + /* + this subquery is always created during preparation, so we can assign + thd here + */ + thd= thd_param; + DBUG_VOID_RETURN; } @@ -332,10 +339,8 @@ Item_singlerow_subselect::select_transformer(JOIN *join) return RES_OK; SELECT_LEX *select_lex= join->select_lex; - - /* Juggle with current arena only if we're in prepared statement prepare */ - Item_arena *arena= join->thd->current_arena; - + Item_arena *arena= thd->current_arena; + if (!select_lex->master_unit()->first_select()->next_select() && !select_lex->table_list.elements && select_lex->item_list.elements == 1 && @@ -359,11 +364,11 @@ Item_singlerow_subselect::select_transformer(JOIN *join) { have_to_be_excluded= 1; - if (join->thd->lex->describe) + if (thd->lex->describe) { char warn_buff[MYSQL_ERRMSG_SIZE]; sprintf(warn_buff, ER(ER_SELECT_REDUCED), select_lex->select_number); - push_warning(join->thd,
MYSQL_ERROR::WARN_LEVEL_NOTE, + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SELECT_REDUCED, warn_buff); } substitution= select_lex->item_list.head(); @@ -386,7 +391,7 @@ Item_singlerow_subselect::select_transformer(JOIN *join) if (!(substitution= new Item_func_if(cond, substitution, new Item_null()))) goto err; - } + } return RES_REDUCE; } return RES_OK; @@ -395,6 +400,7 @@ err: return RES_ERROR; } + void Item_singlerow_subselect::store(uint i, Item *item) { row[i]->store(item); @@ -457,13 +463,13 @@ void Item_singlerow_subselect::bring_value() exec(); } -double Item_singlerow_subselect::val() +double Item_singlerow_subselect::val_real() { DBUG_ASSERT(fixed == 1); if (!exec() && !value->null_value) { null_value= 0; - return value->val(); + return value->val_real(); } else { @@ -581,7 +587,7 @@ void Item_exists_subselect::fix_length_and_dec() max_columns= engine->cols(); } -double Item_exists_subselect::val() +double Item_exists_subselect::val_real() { DBUG_ASSERT(fixed == 1); if (exec()) @@ -615,7 +621,7 @@ String *Item_exists_subselect::val_str(String *str) return str; } -double Item_in_subselect::val() +double Item_in_subselect::val_real() { DBUG_ASSERT(fixed == 1); if (exec()) @@ -669,6 +675,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, Comp_creator *func) { const char *save_where= thd->where; + Item_subselect::trans_res result= RES_ERROR; DBUG_ENTER("Item_in_subselect::single_value_transformer"); if (changed) @@ -759,7 +766,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, // remove LIMIT placed by ALL/ANY subquery select_lex->master_unit()->global_parameters->select_limit= HA_POS_ERROR; - subs= item= new Item_maxmin_subselect(this, select_lex, func->l_op()); + subs= item= new Item_maxmin_subselect(thd, this, select_lex, func->l_op()); if (upper_item) upper_item->set_sub_test(item); } @@ -806,9 +813,6 @@ Item_in_subselect::single_value_transformer(JOIN *join, } select_lex->uncacheable|= UNCACHEABLE_DEPENDENT; - Item *item; - - item= (Item*) select_lex->item_list.head(); /* Add the left part of a subselect to a WHERE or HAVING clause of the right part, e.g. 
SELECT 1 IN (SELECT a FROM t1) => @@ -819,11 +823,13 @@ Item_in_subselect::single_value_transformer(JOIN *join, if (join->having || select_lex->with_sum_func || select_lex->group_list.elements) { - item= func->create(expr, - new Item_ref_null_helper(this, - select_lex->ref_pointer_array, - (char *)"<ref>", - this->full_name())); + bool tmp; + Item *item= func->create(expr, + new Item_ref_null_helper(this, + select_lex-> + ref_pointer_array, + (char *)"<ref>", + this->full_name())); if (!abort_on_null && left_expr->maybe_null) item= new Item_cond_or(new Item_func_isnull(left_expr), item); /* @@ -833,21 +839,22 @@ Item_in_subselect::single_value_transformer(JOIN *join, */ select_lex->having= join->having= and_items(join->having, item); select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, 0)) - { - select_lex->having_fix_field= 0; - goto err; - } + tmp= join->having->fix_fields(thd, join->tables_list, 0); select_lex->having_fix_field= 0; + if (tmp) + goto err; } else { + Item *item= (Item*) select_lex->item_list.head(); + select_lex->item_list.empty(); select_lex->item_list.push_back(new Item_int("Not_used", (longlong) 1, 21)); select_lex->ref_pointer_array[0]= select_lex->item_list.head(); if (select_lex->table_list.elements) { + bool tmp; Item *having= item, *orig_item= item; item= func->create(expr, item); if (!abort_on_null && orig_item->maybe_null) @@ -863,12 +870,10 @@ Item_in_subselect::single_value_transformer(JOIN *join, new Item_cond_and(having, join->having) : having); select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, 0)) - { - select_lex->having_fix_field= 0; + tmp= join->having->fix_fields(thd, join->tables_list, 0); + select_lex->having_fix_field= 0; + if (tmp) goto err; - } - select_lex->having_fix_field= 0; item= new Item_cond_or(item, new Item_func_isnull(orig_item)); if (left_expr->maybe_null) @@ -886,6 +891,7 @@ Item_in_subselect::single_value_transformer(JOIN *join, } else { + bool tmp; if (select_lex->master_unit()->first_select()->next_select()) { /* @@ -894,26 +900,23 @@ Item_in_subselect::single_value_transformer(JOIN *join, argument (reference) to fix_fields() */ item= func->create(expr, - new Item_null_helper(this, item, + new Item_null_helper(this, item, (char *)"<no matter>", (char *)"<result>")); if (!abort_on_null && left_expr->maybe_null) item= new Item_cond_or(new Item_func_isnull(left_expr), item); select_lex->having= join->having= item; select_lex->having_fix_field= 1; - if (join->having->fix_fields(thd, join->tables_list, - 0)) - { - select_lex->having_fix_field= 0; + tmp= join->having->fix_fields(thd, join->tables_list, 0); + select_lex->having_fix_field= 0; + if (tmp) goto err; - } - select_lex->having_fix_field= 0; } else { // it is single select without tables => possible optimization item= func->create(left_expr, item); - // fix_field of item will be done in time of substituting + // fix_field of item will be done in time of substituting substitution= item; have_to_be_excluded= 1; if (thd->lex->describe) @@ -923,23 +926,20 @@ Item_in_subselect::single_value_transformer(JOIN *join, push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_SELECT_REDUCED, warn_buff); } - if (arena) - thd->restore_backup_item_arena(arena, &backup); - DBUG_RETURN(RES_REDUCE); + result= RES_REDUCE; + goto err; } } } ok: - if (arena) - thd->restore_backup_item_arena(arena, &backup); thd->where= save_where; - DBUG_RETURN(RES_OK); + result= RES_OK; err: if (arena) thd->restore_backup_item_arena(arena, &backup); - 
DBUG_RETURN(RES_ERROR); + DBUG_RETURN(result); } @@ -947,17 +947,17 @@ Item_subselect::trans_res Item_in_subselect::row_value_transformer(JOIN *join) { const char *save_where= thd->where; + Item *item= 0; + SELECT_LEX *select_lex= join->select_lex; DBUG_ENTER("Item_in_subselect::row_value_transformer"); if (changed) { DBUG_RETURN(RES_OK); } - Item_arena *arena, backup; - Item *item= 0; - SELECT_LEX *select_lex= join->select_lex; - thd->where= "row IN/ALL/ANY subquery"; + + Item_arena *arena, backup; arena= thd->change_arena_if_needed(&backup); if (select_lex->item_list.elements != left_expr->cols()) @@ -1036,9 +1036,9 @@ Item_in_subselect::row_value_transformer(JOIN *join) if (join->conds->fix_fields(thd, join->tables_list, 0)) goto err; } + thd->where= save_where; if (arena) thd->restore_backup_item_arena(arena, &backup); - thd->where= save_where; DBUG_RETURN(RES_OK); err: @@ -1105,13 +1105,7 @@ subselect_single_select_engine(st_select_lex *select, { select_lex= select; SELECT_LEX_UNIT *unit= select_lex->master_unit(); - unit->offset_limit_cnt= unit->global_parameters->offset_limit; - unit->select_limit_cnt= unit->global_parameters->select_limit+ - unit->global_parameters ->offset_limit; - if (unit->select_limit_cnt < unit->global_parameters->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // no limit - if (unit->select_limit_cnt == HA_POS_ERROR) - select_lex->options&= ~OPTION_FOUND_ROWS; + unit->set_limit(unit->global_parameters, select_lex); unit->item= item; this->select_lex= select_lex; } @@ -1250,17 +1244,17 @@ void subselect_uniquesubquery_engine::fix_length_and_dec(Item_cache **row) int subselect_single_select_engine::exec() { DBUG_ENTER("subselect_single_select_engine::exec"); - char const *save_where= join->thd->where; - SELECT_LEX *save_select= join->thd->lex->current_select; - join->thd->lex->current_select= select_lex; + char const *save_where= thd->where; + SELECT_LEX *save_select= thd->lex->current_select; + thd->lex->current_select= select_lex; if (!optimized) { optimized=1; if (join->optimize()) { - join->thd->where= save_where; + thd->where= save_where; executed= 1; - join->thd->lex->current_select= save_select; + thd->lex->current_select= save_select; DBUG_RETURN(join->error ? 
join->error : 1); } if (item->engine_changed) @@ -1272,8 +1266,8 @@ int subselect_single_select_engine::exec() { if (join->reinit()) { - join->thd->where= save_where; - join->thd->lex->current_select= save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(1); } item->reset(); @@ -1284,20 +1278,20 @@ int subselect_single_select_engine::exec() item->reset_value_registration(); join->exec(); executed= 1; - join->thd->where= save_where; - join->thd->lex->current_select= save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(join->error||thd->is_fatal_error); } - join->thd->where= save_where; - join->thd->lex->current_select= save_select; + thd->where= save_where; + thd->lex->current_select= save_select; DBUG_RETURN(0); } int subselect_union_engine::exec() { - char const *save_where= unit->thd->where; + char const *save_where= thd->where; int res= unit->exec(); - unit->thd->where= save_where; + thd->where= save_where; return res; } @@ -1463,7 +1457,7 @@ void subselect_uniquesubquery_engine::exclude() table_map subselect_engine::calc_const_tables(TABLE_LIST *table) { table_map map= 0; - for (; table; table= table->next) + for(; table; table= table->next_leaf) { TABLE *tbl= table->table; if (tbl && tbl->const_table) @@ -1476,14 +1470,13 @@ table_map subselect_engine::calc_const_tables(TABLE_LIST *table) table_map subselect_single_select_engine::upper_select_const_tables() { return calc_const_tables((TABLE_LIST *) select_lex->outer_select()-> - table_list.first); + leaf_tables); } table_map subselect_union_engine::upper_select_const_tables() { - return calc_const_tables((TABLE_LIST *) unit->outer_select()-> - table_list.first); + return calc_const_tables((TABLE_LIST *) unit->outer_select()->leaf_tables); } @@ -1545,12 +1538,12 @@ void subselect_indexsubquery_engine::print(String *str) res new select_result object RETURN - 0 OK - -1 error + FALSE OK + TRUE error */ -int subselect_single_select_engine::change_item(Item_subselect *si, - select_subselect *res) +bool subselect_single_select_engine::change_result(Item_subselect *si, + select_subselect *res) { item= si; result= res; @@ -1567,12 +1560,12 @@ int subselect_single_select_engine::change_item(Item_subselect *si, res new select_result object RETURN - 0 OK - -1 error + FALSE OK + TRUE error */ -int subselect_union_engine::change_item(Item_subselect *si, - select_subselect *res) +bool subselect_union_engine::change_result(Item_subselect *si, + select_subselect *res) { item= si; int rc= unit->change_result(res, result); @@ -1590,14 +1583,15 @@ int subselect_union_engine::change_item(Item_subselect *si, res new select_result object RETURN - -1 error + FALSE OK + TRUE error */ -int subselect_uniquesubquery_engine::change_item(Item_subselect *si, - select_subselect *res) +bool subselect_uniquesubquery_engine::change_result(Item_subselect *si, + select_subselect *res) { DBUG_ASSERT(0); - return -1; + return TRUE; } diff --git a/sql/item_subselect.h b/sql/item_subselect.h index ab2d441ed7a..53fe21a9bb6 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -26,7 +26,7 @@ class JOIN; class select_subselect; class subselect_engine; class Item_bool_func2; -class Statement; +class Item_arena; /* base class for subselects */ @@ -114,6 +114,7 @@ public: mechanism. Engine call this method before rexecution query. 
*/ virtual void reset_value_registration() {} + enum_parsing_place place() { return parsing_place; } friend class select_subselect; friend class Item_in_optimizer; @@ -138,7 +139,7 @@ public: void reset(); trans_res select_transformer(JOIN *join); void store(uint i, Item* item); - double val(); + double val_real(); longlong val_int (); String *val_str (String *); enum Item_result result_type() const; @@ -162,7 +163,7 @@ protected: bool max; bool was_values; // Set if we have found at least one row public: - Item_maxmin_subselect(Item_subselect *parent, + Item_maxmin_subselect(THD *thd, Item_subselect *parent, st_select_lex *select_lex, bool max); void print(String *str); void cleanup(); @@ -190,7 +191,7 @@ public: enum Item_result result_type() const { return INT_RESULT;} longlong val_int(); - double val(); + double val_real(); String *val_str(String*); void fix_length_and_dec(); void print(String *str); @@ -221,7 +222,6 @@ public: Item_in_subselect(Item * left_expr, st_select_lex *select_lex); Item_in_subselect() :Item_exists_subselect(), abort_on_null(0), transformed(0), upper_item(0) - {} subs_type substype() { return IN_SUBS; } @@ -236,7 +236,7 @@ public: Comp_creator *func); trans_res row_value_transformer(JOIN * join); longlong val_int(); - double val(); + double val_real(); String *val_str(String*); void top_level_item() { abort_on_null=1; } bool test_limit(st_select_lex_unit *unit); @@ -302,7 +302,7 @@ public: virtual table_map upper_select_const_tables()= 0; static table_map calc_const_tables(TABLE_LIST *); virtual void print(String *str)= 0; - virtual int change_item(Item_subselect *si, select_subselect *result)= 0; + virtual bool change_result(Item_subselect *si, select_subselect *result)= 0; virtual bool no_tables()= 0; }; @@ -327,7 +327,7 @@ public: void exclude(); table_map upper_select_const_tables(); void print (String *str); - int change_item(Item_subselect *si, select_subselect *result); + bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); }; @@ -348,7 +348,7 @@ public: void exclude(); table_map upper_select_const_tables(); void print (String *str); - int change_item(Item_subselect *si, select_subselect *result); + bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); }; @@ -378,7 +378,7 @@ public: void exclude(); table_map upper_select_const_tables() { return 0; } void print (String *str); - int change_item(Item_subselect *si, select_subselect *result); + bool change_result(Item_subselect *si, select_subselect *result); bool no_tables(); }; diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 029a1fd6c48..1c005cf60a9 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -138,11 +138,12 @@ bool Item_sum::walk (Item_processor processor, byte *argument) return (this->*processor)(argument); } + String * Item_sum_num::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double nr=val(); + double nr= val_real(); if (null_value) return 0; str->set(nr,decimals, &my_charset_bin); @@ -172,8 +173,9 @@ Item_sum_num::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) if (!thd->allow_sum_func) { - my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); - return 1; + my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE), + MYF(0)); + return TRUE; } thd->allow_sum_func=0; // No included group funcs decimals=0; @@ -181,7 +183,7 @@ Item_sum_num::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) for (uint i=0 ; i < arg_count ; i++) { if (args[i]->fix_fields(thd, tables, args + i) || args[i]->check_cols(1)) - return 1; + 
return TRUE; if (decimals < args[i]->decimals) decimals=args[i]->decimals; maybe_null |= args[i]->maybe_null; @@ -192,7 +194,7 @@ Item_sum_num::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) fix_length_and_dec(); thd->allow_sum_func=1; // Allow group functions fixed= 1; - return 0; + return FALSE; } @@ -204,8 +206,9 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) Item *item= args[0]; if (!thd->allow_sum_func) { - my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); - return 1; + my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE), + MYF(0)); + return TRUE; } thd->allow_sum_func=0; // No included group funcs @@ -213,7 +216,7 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) if (!item->fixed && item->fix_fields(thd, tables, args) || (item= args[0])->check_cols(1)) - return 1; + return TRUE; hybrid_type= item->result_type(); if (hybrid_type == INT_RESULT) @@ -241,7 +244,7 @@ Item_sum_hybrid::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) else hybrid_field_type= Item::field_type(); fixed= 1; - return 0; + return FALSE; } @@ -263,20 +266,138 @@ void Item_sum_sum::clear() bool Item_sum_sum::add() { - sum+=args[0]->val(); + sum+= args[0]->val_real(); if (!args[0]->null_value) null_value= 0; return 0; } -double Item_sum_sum::val() +double Item_sum_sum::val_real() { DBUG_ASSERT(fixed == 1); return sum; } +/* Item_sum_sum_distinct */ + +Item_sum_sum_distinct::Item_sum_sum_distinct(Item *item) + :Item_sum_num(item), sum(0.0), tree(0) +{ + /* + quick_group is an optimizer hint, which means that GROUP BY can be + handled with help of index on grouped columns. + By setting quick_group to zero we force creation of temporary table + to perform GROUP BY. + */ + quick_group= 0; +} + + +Item_sum_sum_distinct::Item_sum_sum_distinct(THD *thd, + Item_sum_sum_distinct *original) + :Item_sum_num(thd, original), sum(0.0), tree(0) +{ + quick_group= 0; +} + + +Item * +Item_sum_sum_distinct::copy_or_same(THD *thd) +{ + return new (thd->mem_root) Item_sum_sum_distinct(thd, this); +} + +C_MODE_START + +static int simple_raw_key_cmp(void* arg, const void* key1, const void* key2) +{ + return memcmp(key1, key2, *(uint *) arg); +} + +C_MODE_END + +bool Item_sum_sum_distinct::setup(THD *thd) +{ + SELECT_LEX *select_lex= thd->lex->current_select; + /* what does it mean??? */ + if (select_lex->linkage == GLOBAL_OPTIONS_TYPE) + return 1; + + DBUG_ASSERT(tree == 0); /* setup cannot be called twice */ + + /* + Unique handles all unique elements in a tree until they no longer fit in memory. + Then the tree is dumped to a temporary file. + See class Unique for details. + */ + null_value= maybe_null= 1; + /* + TODO: if underlying item result fits in 4 bytes we can take advantage + of it and have tree of long/ulong.
It gives 10% performance boost + */ + static uint key_length= sizeof(double); + tree= new Unique(simple_raw_key_cmp, &key_length, key_length, + thd->variables.max_heap_table_size); + return tree == 0; +} + +void Item_sum_sum_distinct::clear() +{ + DBUG_ASSERT(tree); /* we always have a tree */ + null_value= 1; + tree->reset(); +} + +void Item_sum_sum_distinct::cleanup() +{ + Item_sum_num::cleanup(); + delete tree; + tree= 0; +} + + +bool Item_sum_sum_distinct::add() +{ + /* args[0]->val_real() may reset args[0]->null_value */ + double val= args[0]->val_real(); + if (!args[0]->null_value) + { + DBUG_ASSERT(tree); + null_value= 0; + if (val) + return tree->unique_add(&val); + } + return 0; +} + +C_MODE_START + +static int sum_sum_distinct(void *element, element_count num_of_dups, + void *item_sum_sum_distinct) +{ + ((Item_sum_sum_distinct *) + (item_sum_sum_distinct))->add(* (double *) element); + return 0; +} + +C_MODE_END + +double Item_sum_sum_distinct::val_real() +{ + /* + We don't have a tree only if 'setup()' hasn't been called; + this is the case of sql_select.cc:return_zero_rows. + */ + sum= 0.0; + if (tree) + tree->walk(sum_sum_distinct, (void *) this); + return sum; +} + +/* end of Item_sum_sum_distinct */ + Item *Item_sum_count::copy_or_same(THD* thd) { return new (thd->mem_root) Item_sum_count(thd, this); @@ -336,7 +457,7 @@ void Item_sum_avg::clear() bool Item_sum_avg::add() { - double nr=args[0]->val(); + double nr= args[0]->val_real(); if (!args[0]->null_value) { sum+=nr; @@ -345,7 +466,7 @@ bool Item_sum_avg::add() return 0; } -double Item_sum_avg::val() +double Item_sum_avg::val_real() { DBUG_ASSERT(fixed == 1); if (!count) @@ -362,10 +483,10 @@ double Item_sum_avg::val() Standard deviation */ -double Item_sum_std::val() +double Item_sum_std::val_real() { DBUG_ASSERT(fixed == 1); - double tmp= Item_sum_variance::val(); + double tmp= Item_sum_variance::val_real(); return tmp <= 0.0 ? 
0.0 : sqrt(tmp); } @@ -393,7 +514,7 @@ void Item_sum_variance::clear() bool Item_sum_variance::add() { - double nr=args[0]->val(); + double nr= args[0]->val_real(); if (!args[0]->null_value) { sum+=nr; @@ -403,7 +524,7 @@ bool Item_sum_variance::add() return 0; } -double Item_sum_variance::val() +double Item_sum_variance::val_real() { DBUG_ASSERT(fixed == 1); if (!count) @@ -420,7 +541,7 @@ double Item_sum_variance::val() void Item_sum_variance::reset_field() { - double nr=args[0]->val(); + double nr= args[0]->val_real(); char *res=result_field->ptr; if (args[0]->null_value) @@ -445,7 +566,7 @@ void Item_sum_variance::update_field() float8get(old_sqr, res+sizeof(double)); field_count=sint8korr(res+sizeof(double)*2); - nr=args[0]->val(); + nr= args[0]->val_real(); if (!args[0]->null_value) { old_nr+=nr; @@ -467,7 +588,7 @@ void Item_sum_hybrid::clear() null_value= 1; } -double Item_sum_hybrid::val() +double Item_sum_hybrid::val_real() { DBUG_ASSERT(fixed == 1); int err; @@ -500,7 +621,7 @@ longlong Item_sum_hybrid::val_int() return 0; if (hybrid_type == INT_RESULT) return sum_int; - return (longlong) Item_sum_hybrid::val(); + return (longlong) Item_sum_hybrid::val_real(); } @@ -591,7 +712,7 @@ bool Item_sum_min::add() break; case REAL_RESULT: { - double nr=args[0]->val(); + double nr= args[0]->val_real(); if (!args[0]->null_value && (null_value || nr < sum)) { sum=nr; @@ -644,7 +765,7 @@ bool Item_sum_max::add() break; case REAL_RESULT: { - double nr=args[0]->val(); + double nr= args[0]->val_real(); if (!args[0]->null_value && (null_value || nr > sum)) { sum=nr; @@ -724,7 +845,7 @@ bool Item_sum_and::add() void Item_sum_num::reset_field() { - double nr=args[0]->val(); + double nr= args[0]->val_real(); char *res=result_field->ptr; if (maybe_null) @@ -778,7 +899,7 @@ void Item_sum_hybrid::reset_field() } else // REAL_RESULT { - double nr=args[0]->val(); + double nr= args[0]->val_real(); if (maybe_null) { @@ -797,7 +918,7 @@ void Item_sum_hybrid::reset_field() void Item_sum_sum::reset_field() { - double nr=args[0]->val(); // Nulls also return 0 + double nr= args[0]->val_real(); // Nulls also return 0 float8store(result_field->ptr,nr); if (args[0]->null_value) result_field->set_null(); @@ -825,7 +946,7 @@ void Item_sum_count::reset_field() void Item_sum_avg::reset_field() { - double nr=args[0]->val(); + double nr= args[0]->val_real(); char *res=result_field->ptr; if (args[0]->null_value) @@ -863,7 +984,7 @@ void Item_sum_sum::update_field() char *res=result_field->ptr; float8get(old_nr,res); - nr=args[0]->val(); + nr= args[0]->val_real(); if (!args[0]->null_value) { old_nr+=nr; @@ -900,7 +1021,7 @@ void Item_sum_avg::update_field() float8get(old_nr,res); field_count=sint8korr(res+sizeof(double)); - nr=args[0]->val(); + nr= args[0]->val_real(); if (!args[0]->null_value) { old_nr+=nr; @@ -946,7 +1067,7 @@ Item_sum_hybrid::min_max_update_real_field() double nr,old_nr; old_nr=result_field->val_real(); - nr=args[0]->val(); + nr= args[0]->val_real(); if (!args[0]->null_value) { if (result_field->is_null(0) || @@ -998,7 +1119,7 @@ Item_avg_field::Item_avg_field(Item_sum_avg *item) } -double Item_avg_field::val() +double Item_avg_field::val_real() { // fix_fields() never calls for this Item double nr; @@ -1019,7 +1140,7 @@ double Item_avg_field::val() String *Item_avg_field::val_str(String *str) { // fix_fields() never calls for this Item - double nr=Item_avg_field::val(); + double nr= Item_avg_field::val_real(); if (null_value) return 0; str->set(nr,decimals, &my_charset_bin); @@ -1031,10 +1152,10 @@ 
Item_std_field::Item_std_field(Item_sum_std *item) { } -double Item_std_field::val() +double Item_std_field::val_real() { // fix_fields() never calls for this Item - double tmp= Item_variance_field::val(); + double tmp= Item_variance_field::val_real(); return tmp <= 0.0 ? 0.0 : sqrt(tmp); } @@ -1047,7 +1168,7 @@ Item_variance_field::Item_variance_field(Item_sum_variance *item) maybe_null=1; } -double Item_variance_field::val() +double Item_variance_field::val_real() { // fix_fields() never calls for this Item double sum,sum_sqr; @@ -1070,7 +1191,7 @@ double Item_variance_field::val() String *Item_variance_field::val_str(String *str) { // fix_fields() never calls for this Item - double nr=val(); + double nr= val_real(); if (null_value) return 0; str->set(nr,decimals, &my_charset_bin); @@ -1083,11 +1204,6 @@ String *Item_variance_field::val_str(String *str) #include "sql_select.h" -int simple_raw_key_cmp(void* arg, byte* key1, byte* key2) -{ - return memcmp(key1, key2, *(uint*) arg); -} - int simple_str_key_cmp(void* arg, byte* key1, byte* key2) { Item_sum_count_distinct* item = (Item_sum_count_distinct*)arg; @@ -1095,7 +1211,7 @@ int simple_str_key_cmp(void* arg, byte* key1, byte* key2) uint len=item->key_length; return cs->coll->strnncollsp(cs, (const uchar*) key1, len, - (const uchar*) key2, len); + (const uchar*) key2, len, 0); } /* @@ -1115,7 +1231,7 @@ int composite_key_cmp(void* arg, byte* key1, byte* key2) { Field* f = *field; int len = *lengths++; - int res = f->key_cmp(key1, key2); + int res = f->cmp(key1, key2); if (res) return res; key1 += len; @@ -1245,10 +1361,9 @@ bool Item_sum_count_distinct::setup(THD *thd) about other fields */ Field* field = table->field[0]; - switch(field->type()) - { - case FIELD_TYPE_STRING: - case FIELD_TYPE_VAR_STRING: + switch (field->type()) { + case MYSQL_TYPE_STRING: + case MYSQL_TYPE_VAR_STRING: if (field->binary()) { compare_key = (qsort_cmp2)simple_raw_key_cmp; @@ -1453,7 +1568,7 @@ Item *Item_sum_udf_float::copy_or_same(THD* thd) return new (thd->mem_root) Item_sum_udf_float(thd, this); } -double Item_sum_udf_float::val() +double Item_sum_udf_float::val_real() { DBUG_ASSERT(fixed == 1); DBUG_ENTER("Item_sum_udf_float::val"); @@ -1465,7 +1580,7 @@ double Item_sum_udf_float::val() String *Item_sum_udf_float::val_str(String *str) { DBUG_ASSERT(fixed == 1); - double nr=val(); + double nr= val_real(); if (null_value) return 0; /* purecov: inspected */ str->set(nr,decimals, &my_charset_bin); @@ -1570,7 +1685,7 @@ int group_concat_key_cmp_with_distinct(void* arg, byte* key1, { int res; uint offset= (uint) (field->ptr - record); - if ((res= field->key_cmp(key1 + offset, key2 + offset))) + if ((res= field->cmp(key1 + offset, key2 + offset))) return res; } } @@ -1604,7 +1719,7 @@ int group_concat_key_cmp_with_order(void* arg, byte* key1, byte* key2) { int res; uint offset= (uint) (field->ptr - record); - if ((res= field->key_cmp(key1 + offset, key2 + offset))) + if ((res= field->cmp(key1 + offset, key2 + offset))) return (*order_item)->asc ? 
res : -res; } } @@ -1904,8 +2019,9 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) if (!thd->allow_sum_func) { - my_error(ER_INVALID_GROUP_FUNC_USE,MYF(0)); - return 1; + my_message(ER_INVALID_GROUP_FUNC_USE, ER(ER_INVALID_GROUP_FUNC_USE), + MYF(0)); + return TRUE; } thd->allow_sum_func= 0; @@ -1921,7 +2037,7 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) if ((!args[i]->fixed && args[i]->fix_fields(thd, tables, args + i)) || args[i]->check_cols(1)) - return 1; + return TRUE; if (i < arg_count_field) maybe_null|= args[i]->maybe_null; } @@ -1929,14 +2045,14 @@ Item_func_group_concat::fix_fields(THD *thd, TABLE_LIST *tables, Item **ref) result_field= 0; null_value= 1; max_length= group_concat_max_len; - thd->allow_sum_func= 1; + thd->allow_sum_func= 1; if (!(tmp_table_param= new TMP_TABLE_PARAM)) - return 1; + return TRUE; /* We'll convert all blobs to varchar fields in the temporary table */ tmp_table_param->convert_blob_length= group_concat_max_len; tables_list= tables; fixed= 1; - return 0; + return FALSE; } @@ -1945,7 +2061,6 @@ bool Item_func_group_concat::setup(THD *thd) List<Item> list; SELECT_LEX *select_lex= thd->lex->current_select; uint const_fields; - byte *record; qsort_cmp2 compare_key; DBUG_ENTER("Item_func_group_concat::setup"); @@ -1954,7 +2069,7 @@ bool Item_func_group_concat::setup(THD *thd) /* push all not constant fields to list and create temp table - */ + */ const_fields= 0; always_null= 0; for (uint i= 0; i < arg_count_field; i++) @@ -1972,15 +2087,15 @@ bool Item_func_group_concat::setup(THD *thd) } if (always_null) DBUG_RETURN(0); - + List<Item> all_fields(list); - if (arg_count_order) + if (arg_count_order) { bool hidden_group_fields; setup_group(thd, args, tables_list, list, all_fields, *order, &hidden_group_fields); } - + count_field_types(tmp_table_param,all_fields,0); if (table) { @@ -2010,7 +2125,6 @@ bool Item_func_group_concat::setup(THD *thd) table->no_rows= 1; key_length= table->reclength; - record= table->record[0]; /* Offset to first result field in table */ field_list_offset= table->fields - (list.elements - const_fields); diff --git a/sql/item_sum.h b/sql/item_sum.h index cec611b8854..68899268b31 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -23,13 +23,15 @@ #include <my_tree.h> +class Item_arena; + class Item_sum :public Item_result_field { public: enum Sumfunctype - { COUNT_FUNC,COUNT_DISTINCT_FUNC,SUM_FUNC,AVG_FUNC,MIN_FUNC, - MAX_FUNC, UNIQUE_USERS_FUNC,STD_FUNC,VARIANCE_FUNC,SUM_BIT_FUNC, - UDF_SUM_FUNC, GROUP_CONCAT_FUNC + { COUNT_FUNC, COUNT_DISTINCT_FUNC, SUM_FUNC, SUM_DISTINCT_FUNC, AVG_FUNC, + MIN_FUNC, MAX_FUNC, UNIQUE_USERS_FUNC, STD_FUNC, VARIANCE_FUNC, + SUM_BIT_FUNC, UDF_SUM_FUNC, GROUP_CONCAT_FUNC }; Item **args, *tmp_args[2]; @@ -105,7 +107,10 @@ public: Item_sum_num(THD *thd, Item_sum_num *item) :Item_sum(thd, item) {} bool fix_fields(THD *, TABLE_LIST *, Item **); longlong val_int() - { DBUG_ASSERT(fixed == 1); return (longlong) val(); } /* Real as default */ + { + DBUG_ASSERT(fixed == 1); + return (longlong) val_real(); /* Real as default */ + } String *val_str(String*str); void reset_field(); }; @@ -117,7 +122,7 @@ public: Item_sum_int(Item *item_par) :Item_sum_num(item_par) {} Item_sum_int(List<Item> &list) :Item_sum_num(list) {} Item_sum_int(THD *thd, Item_sum_int *item) :Item_sum_num(thd, item) {} - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } String 
*val_str(String*str); enum Item_result result_type () const { return INT_RESULT; } void fix_length_and_dec() @@ -137,7 +142,7 @@ class Item_sum_sum :public Item_sum_num enum Sumfunctype sum_func () const {return SUM_FUNC;} void clear(); bool add(); - double val(); + double val_real(); void reset_field(); void update_field(); void no_rows_in_result() {} @@ -146,6 +151,40 @@ class Item_sum_sum :public Item_sum_num }; +/* + Item_sum_sum_distinct - SELECT SUM(DISTINCT expr) FROM ... + support. See also: MySQL manual, chapter 'Adding New Functions To MySQL' + and comments in item_sum.cc. +*/ + +class Unique; + +class Item_sum_sum_distinct :public Item_sum_num +{ + double sum; + Unique *tree; +private: + Item_sum_sum_distinct(THD *thd, Item_sum_sum_distinct *item); +public: + Item_sum_sum_distinct(Item *item_par); + ~Item_sum_sum_distinct() {} + + bool setup(THD *thd); + void clear(); + void cleanup(); + bool add(); + double val_real(); + + inline void add(double val) { sum+= val; } + enum Sumfunctype sum_func () const { return SUM_DISTINCT_FUNC; } + void reset_field() {} // not used + void update_field() {} // not used + const char *func_name() const { return "sum_distinct"; } + Item *copy_or_same(THD* thd); + virtual void no_rows_in_result() {} +}; + + class Item_sum_count :public Item_sum_int { longlong count; @@ -267,8 +306,9 @@ public: Field *field; Item_avg_field(Item_sum_avg *item); enum Type type() const { return FIELD_AVG_ITEM; } - double val(); - longlong val_int() { /* can't be fix_fields()ed */ return (longlong) val(); } + double val_real(); + longlong val_int() + { /* can't be fix_fields()ed */ return (longlong) val_real(); } bool is_null() { (void) val_int(); return null_value; } String *val_str(String*); enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } @@ -294,7 +334,7 @@ class Item_sum_avg :public Item_sum_num enum Sumfunctype sum_func () const {return AVG_FUNC;} void clear(); bool add(); - double val(); + double val_real(); void reset_field(); void update_field(); Item *result_item(Field *field) @@ -312,8 +352,9 @@ public: Field *field; Item_variance_field(Item_sum_variance *item); enum Type type() const {return FIELD_VARIANCE_ITEM; } - double val(); - longlong val_int() { /* can't be fix_fields()ed */ return (longlong) val(); } + double val_real(); + longlong val_int() + { /* can't be fix_fields()ed */ return (longlong) val_real(); } String *val_str(String*); bool is_null() { (void) val_int(); return null_value; } enum_field_types field_type() const { return MYSQL_TYPE_DOUBLE; } @@ -351,7 +392,7 @@ class Item_sum_variance : public Item_sum_num enum Sumfunctype sum_func () const { return VARIANCE_FUNC; } void clear(); bool add(); - double val(); + double val_real(); void reset_field(); void update_field(); Item *result_item(Field *field) @@ -368,7 +409,7 @@ class Item_std_field :public Item_variance_field public: Item_std_field(Item_sum_std *item); enum Type type() const { return FIELD_STD_ITEM; } - double val(); + double val_real(); }; /* @@ -383,7 +424,7 @@ class Item_sum_std :public Item_sum_variance :Item_sum_variance(thd, item) {} enum Sumfunctype sum_func () const { return STD_FUNC; } - double val(); + double val_real(); Item *result_item(Field *field) { return new Item_std_field(this); } const char *func_name() const { return "std"; } @@ -422,7 +463,7 @@ class Item_sum_hybrid :public Item_sum bool const_item() const { return !used_table_cache; } void clear(); - double val(); + double val_real(); longlong val_int(); void reset_field(); String 
*val_str(String *); @@ -562,8 +603,11 @@ class Item_sum_udf_float :public Item_udf_sum Item_sum_udf_float(THD *thd, Item_sum_udf_float *item) :Item_udf_sum(thd, item) {} longlong val_int() - { DBUG_ASSERT(fixed == 1); return (longlong) Item_sum_udf_float::val(); } - double val(); + { + DBUG_ASSERT(fixed == 1); + return (longlong) Item_sum_udf_float::val_real(); + } + double val_real(); String *val_str(String*str); void fix_length_and_dec() { fix_num_length_and_dec(); } Item *copy_or_same(THD* thd); @@ -579,7 +623,7 @@ public: Item_sum_udf_int(THD *thd, Item_sum_udf_int *item) :Item_udf_sum(thd, item) {} longlong val_int(); - double val() + double val_real() { DBUG_ASSERT(fixed == 1); return (double) Item_sum_udf_int::val_int(); } String *val_str(String*str); enum Item_result result_type () const { return INT_RESULT; } @@ -597,7 +641,7 @@ public: Item_sum_udf_str(THD *thd, Item_sum_udf_str *item) :Item_udf_sum(thd, item) {} String *val_str(String *); - double val() + double val_real() { int err; String *res; res=val_str(&str_value); @@ -625,7 +669,7 @@ class Item_sum_udf_float :public Item_sum_num Item_sum_udf_float(THD *thd, Item_sum_udf_float *item) :Item_sum_num(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } - double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } void clear() {} bool add() { return 0; } void update_field() {} @@ -641,7 +685,7 @@ public: :Item_sum_num(thd, item) {} enum Sumfunctype sum_func () const { return UDF_SUM_FUNC; } longlong val_int() { DBUG_ASSERT(fixed == 1); return 0; } - double val() { DBUG_ASSERT(fixed == 1); return 0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0; } void clear() {} bool add() { return 0; } void update_field() {} @@ -657,7 +701,7 @@ public: :Item_sum_num(thd, item) {} String *val_str(String *) { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } - double val() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); null_value=1; return 0.0; } longlong val_int() { DBUG_ASSERT(fixed == 1); null_value=1; return 0; } enum Item_result result_type () const { return STRING_RESULT; } void fix_length_and_dec() { maybe_null=1; max_length=0; } @@ -729,7 +773,7 @@ class Item_func_group_concat : public Item_sum bool setup(THD *thd); void make_unique(); virtual void update_field() {} - double val() + double val_real() { String *res; res=val_str(&str_value); return res ? 
my_atof(res->c_ptr()) : 0.0; diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 0d652a431cb..2d0e5d7632f 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -463,7 +463,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, if (!my_isspace(&my_charset_latin1,*val)) { make_truncated_value_warning(current_thd, val_begin, length, - cached_timestamp_type); + cached_timestamp_type, NullS); break; } } while (++val != val_end); @@ -794,7 +794,7 @@ longlong Item_func_to_days::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day); } @@ -803,7 +803,7 @@ longlong Item_func_dayofyear::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime,TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_daynr(ltime.year,ltime.month,ltime.day) - calc_daynr(ltime.year,1,1) + 1; @@ -813,7 +813,7 @@ longlong Item_func_dayofmonth::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.day; } @@ -821,7 +821,7 @@ longlong Item_func_month::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.month; } @@ -850,7 +850,7 @@ longlong Item_func_quarter::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ((ltime.month+2)/3); } @@ -922,7 +922,7 @@ longlong Item_func_week::val_int() DBUG_ASSERT(fixed == 1); uint year; TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; return (longlong) calc_week(<ime, week_mode((uint) args[1]->val_int()), @@ -935,7 +935,7 @@ longlong Item_func_yearweek::val_int() DBUG_ASSERT(fixed == 1); uint year,week; TIME ltime; - if (get_arg0_date(<ime,0)) + if (get_arg0_date(<ime, TIME_NO_ZERO_DATE)) return 0; week= calc_week(<ime, (week_mode((uint) args[1]->val_int()) | WEEK_YEAR), @@ -976,7 +976,7 @@ longlong Item_func_year::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; - (void) get_arg0_date(<ime,1); + (void) get_arg0_date(<ime, TIME_FUZZY_DATE); return (longlong) ltime.year; } @@ -1075,9 +1075,15 @@ static bool get_interval_value(Item *args,interval_type int_type, case INTERVAL_YEAR: interval->year= (ulong) value; break; + case INTERVAL_QUARTER: + interval->month= (ulong)(value*3); + break; case INTERVAL_MONTH: interval->month= (ulong) value; break; + case INTERVAL_WEEK: + interval->day= (ulong)(value*7); + break; case INTERVAL_DAY: interval->day= (ulong) value; break; @@ -1548,7 +1554,7 @@ String *Item_func_date_format::val_str(String *str) if (!is_time_format) { - if (get_arg0_date(&l_time,1)) + if (get_arg0_date(&l_time, TIME_FUZZY_DATE)) return 0; } else @@ -1664,7 +1670,7 @@ Item_func_convert_tz::fix_fields(THD *thd_arg, TABLE_LIST *tables_arg, Item **re { String str; if (Item_date_func::fix_fields(thd_arg, tables_arg, ref)) - return 1; + return TRUE; tz_tables= thd_arg->lex->time_zone_tables_used; @@ -1674,7 +1680,7 @@ Item_func_convert_tz::fix_fields(THD *thd_arg, TABLE_LIST *tables_arg, Item **re if (args[2]->const_item()) to_tz= my_tz_find(args[2]->val_str(&str), tz_tables); - return 0; + return FALSE; } @@ -1708,7 +1714,7 @@ longlong Item_func_convert_tz::val_int() bool Item_func_convert_tz::get_date(TIME *ltime, - uint fuzzy_date 
__attribute__((unused))) + uint fuzzy_date __attribute__((unused))) { my_time_t my_time_tmp; bool not_used; @@ -1720,7 +1726,7 @@ bool Item_func_convert_tz::get_date(TIME *ltime, if (!args[2]->const_item()) to_tz= my_tz_find(args[2]->val_str(&str), tz_tables); - if (from_tz==0 || to_tz==0 || get_arg0_date(ltime, 0)) + if (from_tz==0 || to_tz==0 || get_arg0_date(ltime, TIME_NO_ZERO_DATE)) { null_value= 1; return 1; @@ -1784,7 +1790,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) INTERVAL interval; ltime->neg= 0; - if (args[0]->get_date(ltime,0) || + if (args[0]->get_date(ltime, TIME_NO_ZERO_DATE) || get_interval_value(args[1],int_type,&value,&interval)) goto null_date; sign= (interval.neg ? -1 : 1); @@ -1844,6 +1850,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) break; } case INTERVAL_DAY: + case INTERVAL_WEEK: period= (calc_daynr(ltime->year,ltime->month,ltime->day) + sign * (long) interval.day); /* Daynumber from year 0 to 9999-12-31 */ @@ -1860,6 +1867,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) ltime->day=28; // Was leap-year break; case INTERVAL_YEAR_MONTH: + case INTERVAL_QUARTER: case INTERVAL_MONTH: period= (ltime->year*12 + sign * (long) interval.year*12 + ltime->month-1 + sign * (long) interval.month); @@ -1891,7 +1899,7 @@ String *Item_date_add_interval::val_str(String *str) TIME ltime; enum date_time_format_types format; - if (Item_date_add_interval::get_date(<ime,0)) + if (Item_date_add_interval::get_date(<ime, TIME_NO_ZERO_DATE)) return 0; if (ltime.time_type == MYSQL_TIMESTAMP_DATE) @@ -1914,7 +1922,7 @@ longlong Item_date_add_interval::val_int() DBUG_ASSERT(fixed == 1); TIME ltime; longlong date; - if (Item_date_add_interval::get_date(<ime,0)) + if (Item_date_add_interval::get_date(<ime, TIME_NO_ZERO_DATE)) return (longlong) 0; date = (ltime.year*100L + ltime.month)*100L + ltime.day; return ltime.time_type == MYSQL_TIMESTAMP_DATE ? 
date : @@ -1923,12 +1931,13 @@ longlong Item_date_add_interval::val_int() static const char *interval_names[]= { - "year", "month", "day", "hour", "minute", - "second", "microsecond", "year_month", - "day_hour", "day_minute", "day_second", - "hour_minute", "hour_second", "minute_second", - "day_microsecond", "hour_microsecond", - "minute_microsecond", "second_microsecond" + "year", "quarter", "month", "day", "hour", + "minute", "week", "second", "microsecond", + "year_month", "day_hour", "day_minute", + "day_second", "hour_minute", "hour_second", + "minute_second", "day_microsecond", + "hour_microsecond", "minute_microsecond", + "second_microsecond" }; void Item_date_add_interval::print(String *str) @@ -1959,7 +1968,9 @@ void Item_extract::fix_length_and_dec() switch (int_type) { case INTERVAL_YEAR: max_length=4; date_value=1; break; case INTERVAL_YEAR_MONTH: max_length=6; date_value=1; break; + case INTERVAL_QUARTER: max_length=2; date_value=1; break; case INTERVAL_MONTH: max_length=2; date_value=1; break; + case INTERVAL_WEEK: max_length=2; date_value=1; break; case INTERVAL_DAY: max_length=2; date_value=1; break; case INTERVAL_DAY_HOUR: max_length=9; date_value=0; break; case INTERVAL_DAY_MINUTE: max_length=11; date_value=0; break; @@ -1983,10 +1994,12 @@ longlong Item_extract::val_int() { DBUG_ASSERT(fixed == 1); TIME ltime; + uint year; + ulong week_format; long neg; if (date_value) { - if (get_arg0_date(<ime,1)) + if (get_arg0_date(<ime, TIME_FUZZY_DATE)) return 0; neg=1; } @@ -2004,7 +2017,13 @@ longlong Item_extract::val_int() switch (int_type) { case INTERVAL_YEAR: return ltime.year; case INTERVAL_YEAR_MONTH: return ltime.year*100L+ltime.month; + case INTERVAL_QUARTER: return ltime.month/3 + 1; case INTERVAL_MONTH: return ltime.month; + case INTERVAL_WEEK: + { + week_format= current_thd->variables.default_week_format; + return calc_week(<ime, week_mode(week_format), &year); + } case INTERVAL_DAY: return ltime.day; case INTERVAL_DAY_HOUR: return (long) (ltime.day*100L+ltime.hour)*neg; case INTERVAL_DAY_MINUTE: return (long) (ltime.day*10000L+ @@ -2104,7 +2123,7 @@ void Item_char_typecast::print(String *str) if (cast_cs) { str->append(" charset ", 9); - str->append(cast_cs->name); + str->append(cast_cs->csname); } str->append(')'); } @@ -2172,7 +2191,7 @@ String *Item_datetime_typecast::val_str(String *str) { DBUG_ASSERT(fixed == 1); TIME ltime; - if (!get_arg0_date(<ime,1) && + if (!get_arg0_date(<ime, TIME_FUZZY_DATE) && !make_datetime(ltime.second_part ? 
DATE_TIME_MICROSECOND : DATE_TIME, <ime, str)) return str; @@ -2213,7 +2232,7 @@ String *Item_time_typecast::val_str(String *str) bool Item_date_typecast::get_date(TIME *ltime, uint fuzzy_date) { - bool res= get_arg0_date(ltime,1); + bool res= get_arg0_date(ltime, TIME_FUZZY_DATE); ltime->hour= ltime->minute= ltime->second= ltime->second_part= 0; ltime->time_type= MYSQL_TIMESTAMP_DATE; return res; @@ -2225,7 +2244,7 @@ String *Item_date_typecast::val_str(String *str) DBUG_ASSERT(fixed == 1); TIME ltime; - if (!get_arg0_date(<ime,1) && !str->alloc(11)) + if (!get_arg0_date(<ime, TIME_FUZZY_DATE) && !str->alloc(11)) { make_date((DATE_TIME_FORMAT *) 0, <ime, str); return str; @@ -2319,7 +2338,7 @@ String *Item_func_add_time::val_str(String *str) l_time3.neg= 0; if (is_date) // TIMESTAMP function { - if (get_arg0_date(&l_time1,1) || + if (get_arg0_date(&l_time1, TIME_FUZZY_DATE) || args[1]->get_time(&l_time2) || l_time1.time_type == MYSQL_TIMESTAMP_TIME || l_time2.time_type != MYSQL_TIMESTAMP_TIME) @@ -2422,6 +2441,79 @@ void Item_func_add_time::print(String *str) /* + SYNOPSIS + calc_time_diff() + l_time1 TIME/DATE/DATETIME value + l_time2 TIME/DATE/DATETIME value + l_sign Can be 1 (operation of addition) + or -1 (substraction) + seconds_out Returns count of seconds bitween + l_time1 and l_time2 + microseconds_out Returns count of microseconds bitween + l_time1 and l_time2. + + DESCRIPTION + Calculates difference in seconds(seconds_out) + and microseconds(microseconds_out) + bitween two TIME/DATE/DATETIME values. + + RETURN VALUES + Rertuns sign of difference. + 1 means negative result + 0 means positive result + +*/ + +bool calc_time_diff(TIME *l_time1,TIME *l_time2, int l_sign, + longlong *seconds_out, long *microseconds_out) +{ + long days; + bool neg; + longlong seconds= *seconds_out; + long microseconds= *microseconds_out; + + /* + We suppose that if first argument is MYSQL_TIMESTAMP_TIME + the second argument should be TIMESTAMP_TIME also. + We should check it before calc_time_diff call. + */ + if (l_time1->time_type == MYSQL_TIMESTAMP_TIME) // Time value + days= l_time1->day - l_sign*l_time2->day; + else // DateTime value + days= (calc_daynr((uint) l_time1->year, + (uint) l_time1->month, + (uint) l_time1->day) - + l_sign*calc_daynr((uint) l_time2->year, + (uint) l_time2->month, + (uint) l_time2->day)); + + microseconds= l_time1->second_part - l_sign*l_time2->second_part; + seconds= ((longlong) days*86400L + l_time1->hour*3600L + + l_time1->minute*60L + l_time1->second + microseconds/1000000L - + (longlong)l_sign*(l_time2->hour*3600L+l_time2->minute*60L+l_time2->second)); + + neg= 0; + if (seconds < 0) + { + seconds= -seconds; + neg= 1; + } + else if (seconds == 0 && microseconds < 0) + { + microseconds= -microseconds; + neg= 1; + } + if (microseconds < 0) + { + microseconds+= 1000000L; + seconds--; + } + *seconds_out= seconds; + *microseconds_out= microseconds; + return neg; +} + +/* TIMEDIFF(t,s) is a time function that calculates the time value between a start and end time. 
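The calc_time_diff() helper added above is easy to misread because the sign, the second count and the microsecond borrow are handled in three separate steps. The following is a minimal standalone sketch of that logic, not the server code: the server's TIME struct is replaced by a hypothetical SimpleTime, calc_daynr() by a plain proleptic-Gregorian day count, and only the DATETIME branch (not the MYSQL_TIMESTAMP_TIME day-field branch) is shown.

/* Sketch of the calc_time_diff() arithmetic, under the assumptions above. */
#include <cstdio>

struct SimpleTime                       // stand-in for the server's TIME
{
  unsigned year, month, day, hour, minute, second;
  long second_part;                     // microseconds
};

/* Day count for a civil date; only differences matter here, so the
   epoch offset of the usual Julian-day formula is dropped. */
static long day_number(unsigned y, unsigned m, unsigned d)
{
  long a= (14 - (long) m) / 12;
  long yy= (long) y + 4800 - a;
  long mm= (long) m + 12 * a - 3;
  return d + (153 * mm + 2) / 5 + 365 * yy + yy / 4 - yy / 100 + yy / 400;
}

/* Returns true if (t1 - sign*t2) is negative; the magnitude comes back
   through seconds_out / microseconds_out, mirroring the patch. */
static bool calc_time_diff_sketch(const SimpleTime &t1, const SimpleTime &t2,
                                  int sign,
                                  long long *seconds_out, long *microseconds_out)
{
  long days= day_number(t1.year, t1.month, t1.day) -
             sign * day_number(t2.year, t2.month, t2.day);
  long microseconds= t1.second_part - sign * t2.second_part;
  long long seconds=
    (long long) days * 86400L +
    t1.hour * 3600L + t1.minute * 60L + t1.second + microseconds / 1000000L -
    (long long) sign * (t2.hour * 3600L + t2.minute * 60L + t2.second);

  bool neg= false;
  if (seconds < 0)
  {
    seconds= -seconds;                  // sign is carried separately
    neg= true;
  }
  else if (seconds == 0 && microseconds < 0)
  {
    microseconds= -microseconds;        // sub-second negative difference
    neg= true;
  }
  if (microseconds < 0)
  {
    microseconds+= 1000000L;            // borrow one second
    seconds--;
  }
  *seconds_out= seconds;
  *microseconds_out= microseconds;
  return neg;
}

int main()
{
  SimpleTime a= {2004, 1, 2, 0, 0, 0, 5};         // 2004-01-02 00:00:00.000005
  SimpleTime b= {2004, 1, 1, 23, 59, 59, 999999}; // 2004-01-01 23:59:59.999999
  long long sec;
  long usec;
  bool neg= calc_time_diff_sketch(a, b, 1, &sec, &usec);
  // Prints "+0 seconds, 6 microseconds": the borrow step turns the raw
  // (1 s, -999994 us) intermediate result into the expected difference.
  printf("%c%lld seconds, %ld microseconds\n", neg ? '-' : '+', sec, usec);
  return 0;
}

Keeping the sign separate from the magnitude is what lets both Item_func_timediff (which only needs l_time3.neg) and Item_func_timestamp_diff (which multiplies by neg) share the same helper.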
@@ -2434,7 +2526,6 @@ String *Item_func_timediff::val_str(String *str) DBUG_ASSERT(fixed == 1); longlong seconds; long microseconds; - long days; int l_sign= 1; TIME l_time1 ,l_time2, l_time3; @@ -2447,40 +2538,16 @@ String *Item_func_timediff::val_str(String *str) if (l_time1.neg != l_time2.neg) l_sign= -l_sign; - if (l_time1.time_type == MYSQL_TIMESTAMP_TIME) // Time value - days= l_time1.day - l_sign*l_time2.day; - else // DateTime value - days= (calc_daynr((uint) l_time1.year, - (uint) l_time1.month, - (uint) l_time1.day) - - l_sign*calc_daynr((uint) l_time2.year, - (uint) l_time2.month, - (uint) l_time2.day)); - - microseconds= l_time1.second_part - l_sign*l_time2.second_part; - seconds= ((longlong) days*86400L + l_time1.hour*3600L + - l_time1.minute*60L + l_time1.second + microseconds/1000000L - - (longlong)l_sign*(l_time2.hour*3600L+l_time2.minute*60L+ - l_time2.second)); + l_time3.neg= calc_time_diff(&l_time1,&l_time2, l_sign, + &seconds, µseconds); - l_time3.neg= 0; - if (seconds < 0) - { - seconds= -seconds; - l_time3.neg= 1; - } - else if (seconds == 0 && microseconds < 0) - { - microseconds= -microseconds; - l_time3.neg= 1; - } - if (microseconds < 0) - { - microseconds+= 1000000L; - seconds--; - } + /* + For MYSQL_TIMESTAMP_TIME only: + If both argumets are negative values and diff between them + is negative we need to swap sign as result should be positive. + */ if ((l_time2.neg == l_time1.neg) && l_time1.neg) - l_time3.neg= l_time3.neg ? 0 : 1; + l_time3.neg= 1-l_time3.neg; // Swap sign of result calc_time_from_sec(&l_time3, (long) seconds, microseconds); @@ -2549,6 +2616,165 @@ longlong Item_func_microsecond::val_int() } +longlong Item_func_timestamp_diff::val_int() +{ + TIME ltime1, ltime2; + longlong seconds; + long microseconds; + long months= 0; + int neg= 1; + + null_value= 0; + if (args[0]->get_date(<ime1, TIME_NO_ZERO_DATE) || + args[1]->get_date(<ime2, TIME_NO_ZERO_DATE)) + goto null_date; + + if (calc_time_diff(<ime2,<ime1, 1, + &seconds, µseconds)) + neg= -1; + + if (int_type == INTERVAL_YEAR || + int_type == INTERVAL_QUARTER || + int_type == INTERVAL_MONTH) + { + uint year; + uint year_beg, year_end, month_beg, month_end; + uint diff_days= (uint) (seconds/86400L); + uint diff_years= 0; + if (neg == -1) + { + year_beg= ltime2.year; + year_end= ltime1.year; + month_beg= ltime2.month; + month_end= ltime1.month; + } + else + { + year_beg= ltime1.year; + year_end= ltime2.year; + month_beg= ltime1.month; + month_end= ltime2.month; + } + /* calc years*/ + for (year= year_beg;year < year_end; year++) + { + uint days=calc_days_in_year(year); + if (days > diff_days) + break; + diff_days-= days; + diff_years++; + } + + /* calc months; Current year is in the 'year' variable */ + month_beg--; /* Change months to be 0-11 for easier calculation */ + month_end--; + + months= 12*diff_years; + while (month_beg != month_end) + { + uint m_days= (uint) days_in_month[month_beg]; + if (month_beg == 1) + { + /* This is only calculated once so there is no reason to cache it*/ + uint leap= (uint) ((year & 3) == 0 && (year%100 || + (year%400 == 0 && year))); + m_days+= leap; + } + if (m_days > diff_days) + break; + diff_days-= m_days; + months++; + if (month_beg++ == 11) /* if we wrap to next year */ + { + month_beg= 0; + year++; + } + } + if (neg == -1) + months= -months; + } + + switch (int_type) { + case INTERVAL_YEAR: + return months/12; + case INTERVAL_QUARTER: + return months/3; + case INTERVAL_MONTH: + return months; + case INTERVAL_WEEK: + return seconds/86400L/7L*neg; + case 
INTERVAL_DAY: + return seconds/86400L*neg; + case INTERVAL_HOUR: + return seconds/3600L*neg; + case INTERVAL_MINUTE: + return seconds/60L*neg; + case INTERVAL_SECOND: + return seconds*neg; + case INTERVAL_MICROSECOND: + { + longlong max_sec= LONGLONG_MAX/1000000; + if (max_sec > seconds || + max_sec == seconds && LONGLONG_MAX%1000000 >= microseconds) + return (longlong) (seconds*1000000L+microseconds)*neg; + goto null_date; + } + default: + break; + } + +null_date: + null_value=1; + return 0; +} + + +void Item_func_timestamp_diff::print(String *str) +{ + str->append(func_name()); + str->append('('); + + switch (int_type) { + case INTERVAL_YEAR: + str->append("YEAR"); + break; + case INTERVAL_QUARTER: + str->append("QUARTER"); + break; + case INTERVAL_MONTH: + str->append("MONTH"); + break; + case INTERVAL_WEEK: + str->append("WEEK"); + break; + case INTERVAL_DAY: + str->append("DAY"); + break; + case INTERVAL_HOUR: + str->append("HOUR"); + break; + case INTERVAL_MINUTE: + str->append("MINUTE"); + break; + case INTERVAL_SECOND: + str->append("SECOND"); + break; + case INTERVAL_MICROSECOND: + str->append("SECOND_FRAC"); + break; + default: + break; + } + + for (uint i=0 ; i < 2 ; i++) + { + str->append(','); + args[i]->print(str); + } + str->append(')'); +} + + String *Item_func_get_format::val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -2655,9 +2881,9 @@ get_date_time_result_type(const char *format, uint length) have all types of date-time components and can end our search. */ return DATE_TIME_MICROSECOND; - } } } + } /* We don't have all three types of date-time components */ if (frac_second_used) diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index df0b05d6d42..dccba7f52b1 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -88,7 +88,7 @@ class Item_func_month :public Item_func public: Item_func_month(Item *a) :Item_func(a) {} longlong val_int(); - double val() + double val_real() { DBUG_ASSERT(fixed == 1); return (double) Item_func_month::val_int(); } String *val_str(String *str) { @@ -250,7 +250,7 @@ public: Item_func_weekday(Item *a,bool type_arg) :Item_func(a), odbc_type(type_arg) {} longlong val_int(); - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } String *val_str(String *str) { DBUG_ASSERT(fixed == 1); @@ -326,7 +326,7 @@ public: enum_field_types field_type() const { return MYSQL_TYPE_DATE; } String *val_str(String *str); longlong val_int(); - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } const char *func_name() const { return "date"; } void fix_length_and_dec() { @@ -369,7 +369,7 @@ public: Item_func_curtime(Item *a) :Item_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } enum_field_types field_type() const { return MYSQL_TYPE_TIME; } - double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); return value; } String *val_str(String *str); void fix_length_and_dec(); @@ -452,7 +452,7 @@ public: Item_func_now() :Item_date_func() {} Item_func_now(Item *a) :Item_date_func(a) {} enum Item_result result_type () const { return STRING_RESULT; } - double val() { DBUG_ASSERT(fixed == 1); return (double) value; } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) value; } longlong val_int() { DBUG_ASSERT(fixed == 1); 
return value; } int save_in_field(Field *to, bool no_conversions); String *val_str(String *str); @@ -512,7 +512,7 @@ class Item_func_from_unixtime :public Item_date_func THD *thd; public: Item_func_from_unixtime(Item *a) :Item_date_func(a) {} - double val() + double val_real() { DBUG_ASSERT(fixed == 1); return (double) Item_func_from_unixtime::val_int(); @@ -552,7 +552,7 @@ class Item_func_convert_tz :public Item_date_func Item_func_convert_tz(Item *a, Item *b, Item *c): Item_date_func(a, b, c) {} longlong val_int(); - double val() { return (double) val_int(); } + double val_real() { return (double) val_int(); } String *val_str(String *str); const char *func_name() const { return "convert_tz"; } bool fix_fields(THD *, struct st_table_list *, Item **); @@ -565,7 +565,7 @@ class Item_func_sec_to_time :public Item_str_func { public: Item_func_sec_to_time(Item *item) :Item_str_func(item) {} - double val() + double val_real() { DBUG_ASSERT(fixed == 1); return (double) Item_func_sec_to_time::val_int(); @@ -593,11 +593,11 @@ public: enum interval_type { - INTERVAL_YEAR, INTERVAL_MONTH, INTERVAL_DAY, INTERVAL_HOUR, INTERVAL_MINUTE, - INTERVAL_SECOND, INTERVAL_MICROSECOND ,INTERVAL_YEAR_MONTH, - INTERVAL_DAY_HOUR, INTERVAL_DAY_MINUTE, INTERVAL_DAY_SECOND, - INTERVAL_HOUR_MINUTE, INTERVAL_HOUR_SECOND, INTERVAL_MINUTE_SECOND, - INTERVAL_DAY_MICROSECOND, INTERVAL_HOUR_MICROSECOND, + INTERVAL_YEAR, INTERVAL_QUARTER, INTERVAL_MONTH, INTERVAL_DAY, INTERVAL_HOUR, + INTERVAL_MINUTE, INTERVAL_WEEK, INTERVAL_SECOND, INTERVAL_MICROSECOND , + INTERVAL_YEAR_MONTH, INTERVAL_DAY_HOUR, INTERVAL_DAY_MINUTE, + INTERVAL_DAY_SECOND, INTERVAL_HOUR_MINUTE, INTERVAL_HOUR_SECOND, + INTERVAL_MINUTE_SECOND, INTERVAL_DAY_MICROSECOND, INTERVAL_HOUR_MICROSECOND, INTERVAL_MINUTE_MICROSECOND, INTERVAL_SECOND_MICROSECOND }; @@ -615,7 +615,7 @@ public: const char *func_name() const { return "date_add_interval"; } void fix_length_and_dec(); enum_field_types field_type() const { return cached_field_type; } - double val() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } + double val_real() { DBUG_ASSERT(fixed == 1); return (double) val_int(); } longlong val_int(); bool get_date(TIME *res, uint fuzzy_date); void print(String *str); @@ -838,6 +838,23 @@ public: }; +class Item_func_timestamp_diff :public Item_int_func +{ + const interval_type int_type; +public: + Item_func_timestamp_diff(Item *a,Item *b,interval_type type_arg) + :Item_int_func(a,b), int_type(type_arg) {} + const char *func_name() const { return "timestamp_diff"; } + longlong val_int(); + void fix_length_and_dec() + { + decimals=0; + maybe_null=1; + } + void print(String *str); +}; + + enum date_time_format { USA_FORMAT, JIS_FORMAT, ISO_FORMAT, EUR_FORMAT, INTERNAL_FORMAT diff --git a/sql/item_uniq.h b/sql/item_uniq.h index 5582537bdbb..e74c09ca3c4 100644 --- a/sql/item_uniq.h +++ b/sql/item_uniq.h @@ -27,7 +27,7 @@ class Item_func_unique_users :public Item_real_func public: Item_func_unique_users(Item *name_arg,int start,int end,List<Item> &list) :Item_real_func(list) {} - double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } void fix_length_and_dec() { decimals=0; max_length=6; } void print(String *str) { str->append("0.0", 3); } }; @@ -40,7 +40,7 @@ public: :Item_sum_num(item_arg) {} Item_sum_unique_users(THD *thd, Item_sum_unique_users *item) :Item_sum_num(thd, item) {} - double val() { DBUG_ASSERT(fixed == 1); return 0.0; } + double val_real() { DBUG_ASSERT(fixed == 1); return 0.0; } enum 
Sumfunctype sum_func () const {return UNIQUE_USERS_FUNC;} void clear() {} bool add() { return 0; } @@ -50,7 +50,7 @@ public: { DBUG_ASSERT(fixed == 0); fixed= 1; - return 0; + return FALSE; } Item *copy_or_same(THD* thd) { diff --git a/sql/key.cc b/sql/key.cc index b1f4c9533a9..d54b8721cab 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -66,94 +66,165 @@ int find_ref_key(TABLE *table,Field *field, uint *key_length) } - /* Copy a key from record to some buffer */ - /* if length == 0 then copy whole key */ +/* + Copy part of a record that forms a key or key prefix to a buffer. + + SYNOPSIS + key_copy() + to_key buffer that will be used as a key + from_record full record to be copied from + key_info descriptor of the index + key_length specifies length of all keyparts that will be copied + + DESCRIPTION + The function takes a complete table record (as e.g. retrieved by + handler::index_read()), and a description of an index on the same table, + and extracts the first key_length bytes of the record which are part of a + key into to_key. If length == 0 then copy all bytes from the record that + form a key. + + RETURN + None +*/ -void key_copy(byte *key,TABLE *table,uint idx,uint key_length) +void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length) { uint length; - KEY *key_info=table->key_info+idx; KEY_PART_INFO *key_part; if (key_length == 0) - key_length=key_info->key_length; - for (key_part=key_info->key_part; - (int) key_length > 0 ; - key_part++) + key_length= key_info->key_length; + for (key_part= key_info->key_part; (int) key_length > 0; key_part++) { if (key_part->null_bit) { - *key++= test(table->record[0][key_part->null_offset] & + *to_key++= test(from_record[key_part->null_offset] & key_part->null_bit); key_length--; } + if (key_part->type == HA_KEYTYPE_BIT) + { + Field_bit *field= (Field_bit *) (key_part->field); + if (field->bit_len) + { + uchar bits= get_rec_bits((uchar*) from_record + + key_part->null_offset + + (key_part->null_bit == 128), + field->bit_ofs, field->bit_len); + *to_key++= bits; + key_length--; + } + } if (key_part->key_part_flag & HA_BLOB_PART) { char *pos; - ulong blob_length=((Field_blob*) key_part->field)->get_length(); - key_length-=2; + ulong blob_length= ((Field_blob*) key_part->field)->get_length(); + key_length-= HA_KEY_BLOB_LENGTH; ((Field_blob*) key_part->field)->get_ptr(&pos); - length=min(key_length,key_part->length); - set_if_smaller(blob_length,length); - int2store(key,(uint) blob_length); - key+=2; // Skip length info - memcpy(key,pos,blob_length); + length=min(key_length, key_part->length); + set_if_smaller(blob_length, length); + int2store(to_key, (uint) blob_length); + to_key+= HA_KEY_BLOB_LENGTH; // Skip length info + memcpy(to_key, pos, blob_length); + } + else if (key_part->key_part_flag & HA_VAR_LENGTH_PART) + { + key_length-= HA_KEY_BLOB_LENGTH; + length= min(key_length, key_part->length); + key_part->field->get_key_image(to_key, length, Field::itRAW); + to_key+= HA_KEY_BLOB_LENGTH; } else { - length=min(key_length,key_part->length); - memcpy(key,table->record[0]+key_part->offset,(size_t) length); + length= min(key_length, key_part->length); + memcpy(to_key, from_record + key_part->offset, (size_t) length); } - key+=length; - key_length-=length; + to_key+= length; + key_length-= length; } -} /* key_copy */ +} - /* restore a key from some buffer to record */ +/* + Restore a key from some buffer to record. 
+ + SYNOPSIS + key_restore() + to_record record buffer where the key will be restored to + from_key buffer that contains a key + key_info descriptor of the index + key_length specifies length of all keyparts that will be restored -void key_restore(TABLE *table,byte *key,uint idx,uint key_length) + DESCRIPTION + This function converts a key into record format. It can be used in cases + when we want to return a key as a result row. + + RETURN + None +*/ + +void key_restore(byte *to_record, byte *from_key, KEY *key_info, + uint key_length) { uint length; - KEY *key_info=table->key_info+idx; KEY_PART_INFO *key_part; if (key_length == 0) { - if (idx == (uint) -1) - return; - key_length=key_info->key_length; + key_length= key_info->key_length; } - for (key_part=key_info->key_part; - (int) key_length > 0 ; - key_part++) + for (key_part= key_info->key_part ; (int) key_length > 0 ; key_part++) { if (key_part->null_bit) { - if (*key++) - table->record[0][key_part->null_offset]|= key_part->null_bit; + if (*from_key++) + to_record[key_part->null_offset]|= key_part->null_bit; else - table->record[0][key_part->null_offset]&= ~key_part->null_bit; + to_record[key_part->null_offset]&= ~key_part->null_bit; key_length--; } + if (key_part->type == HA_KEYTYPE_BIT) + { + Field_bit *field= (Field_bit *) (key_part->field); + if (field->bit_len) + { + uchar bits= *(from_key + key_part->length - field->field_length -1); + set_rec_bits(bits, to_record + key_part->null_offset + + (key_part->null_bit == 128), + field->bit_ofs, field->bit_len); + } + else + { + clr_rec_bits(to_record + key_part->null_offset + + (key_part->null_bit == 128), + field->bit_ofs, field->bit_len); + } + } if (key_part->key_part_flag & HA_BLOB_PART) { - uint blob_length=uint2korr(key); - key+=2; - key_length-=2; + uint blob_length= uint2korr(from_key); + from_key+= HA_KEY_BLOB_LENGTH; + key_length-= HA_KEY_BLOB_LENGTH; ((Field_blob*) key_part->field)->set_ptr((ulong) blob_length, - (char*) key); - length=key_part->length; + (char*) from_key); + length= key_part->length; + } + else if (key_part->key_part_flag & HA_VAR_LENGTH_PART) + { + key_length-= HA_KEY_BLOB_LENGTH; + length= min(key_length, key_part->length); + key_part->field->set_key_image(from_key, length); + from_key+= HA_KEY_BLOB_LENGTH; } else { - length=min(key_length,key_part->length); - memcpy(table->record[0]+key_part->offset,key,(size_t) length); + length= min(key_length, key_part->length); + memcpy(to_record + key_part->offset, from_key, (size_t) length); } - key+=length; - key_length-=length; + from_key+= length; + key_length-= length; } -} /* key_restore */ +} /* @@ -179,54 +250,54 @@ void key_restore(TABLE *table,byte *key,uint idx,uint key_length) bool key_cmp_if_same(TABLE *table,const byte *key,uint idx,uint key_length) { - uint length; + uint store_length; KEY_PART_INFO *key_part; + const byte *key_end= key + key_length;; for (key_part=table->key_info[idx].key_part; - (int) key_length > 0; - key_part++, key+=length, key_length-=length) + key < key_end ; + key_part++, key+= store_length) { + uint length; + store_length= key_part->store_length; + if (key_part->null_bit) { - key_length--; if (*key != test(table->record[0][key_part->null_offset] & key_part->null_bit)) return 1; if (*key) - { - length=key_part->store_length; continue; - } key++; + store_length--; } - if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH)) + if (key_part->key_part_flag & (HA_BLOB_PART | HA_VAR_LENGTH_PART | + HA_BIT_PART)) { - if (key_part->field->key_cmp(key, key_part->length+ 
HA_KEY_BLOB_LENGTH)) + if (key_part->field->key_cmp(key, key_part->length)) return 1; - length=key_part->length+HA_KEY_BLOB_LENGTH; + continue; } - else + length= min((uint) (key_end-key), store_length); + if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ + FIELDFLAG_PACK))) { - length=min(key_length,key_part->length); - if (!(key_part->key_type & (FIELDFLAG_NUMBER+FIELDFLAG_BINARY+ - FIELDFLAG_PACK))) + CHARSET_INFO *cs= key_part->field->charset(); + uint char_length= key_part->length / cs->mbmaxlen; + const byte *pos= table->record[0] + key_part->offset; + if (length > char_length) { - CHARSET_INFO *cs= key_part->field->charset(); - uint char_length= key_part->length / cs->mbmaxlen; - const byte *pos= table->record[0] + key_part->offset; - if (length > char_length) - { - char_length= my_charpos(cs, pos, pos + length, char_length); - set_if_smaller(char_length, length); - } - if (cs->coll->strnncollsp(cs, - (const uchar*) key, length, - (const uchar*) pos, char_length)) - return 1; + char_length= my_charpos(cs, pos, pos + length, char_length); + set_if_smaller(char_length, length); } - else if (memcmp(key,table->record[0]+key_part->offset,length)) - return 1; + if (cs->coll->strnncollsp(cs, + (const uchar*) key, length, + (const uchar*) pos, char_length, 0)) + return 1; + continue; } + if (memcmp(key,table->record[0]+key_part->offset,length)) + return 1; } return 0; } diff --git a/sql/lex.h b/sql/lex.h index 325d052de90..56d824b7bb8 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -48,7 +48,7 @@ SYM_GROUP sym_group_rtree= {"RTree keys", "HAVE_RTREE_KEYS"}; */ static SYMBOL symbols[] = { - { "&&", SYM(AND_SYM)}, + { "&&", SYM(AND_AND_SYM)}, { "<", SYM(LT)}, { "<=", SYM(LE)}, { "<>", SYM(NE)}, @@ -65,6 +65,7 @@ static SYMBOL symbols[] = { { "AGAINST", SYM(AGAINST)}, { "AGGREGATE", SYM(AGGREGATE_SYM)}, { "ALL", SYM(ALL)}, + { "ALGORITHM", SYM(ALGORITHM_SYM)}, { "ALTER", SYM(ALTER)}, { "ANALYZE", SYM(ANALYZE_SYM)}, { "AND", SYM(AND_SYM)}, @@ -72,6 +73,7 @@ static SYMBOL symbols[] = { { "AS", SYM(AS)}, { "ASC", SYM(ASC)}, { "ASCII", SYM(ASCII_SYM)}, + { "ASENSITIVE", SYM(ASENSITIVE_SYM)}, { "AUTO_INCREMENT", SYM(AUTO_INC)}, { "AVG", SYM(AVG_SYM)}, { "AVG_ROW_LENGTH", SYM(AVG_ROW_LENGTH)}, @@ -93,7 +95,9 @@ static SYMBOL symbols[] = { { "BY", SYM(BY)}, { "BYTE", SYM(BYTE_SYM)}, { "CACHE", SYM(CACHE_SYM)}, + { "CALL", SYM(CALL_SYM)}, { "CASCADE", SYM(CASCADE)}, + { "CASCADED", SYM(CASCADED)}, { "CASE", SYM(CASE_SYM)}, { "CHANGE", SYM(CHANGE)}, { "CHANGED", SYM(CHANGED)}, @@ -114,8 +118,12 @@ static SYMBOL symbols[] = { { "COMMITTED", SYM(COMMITTED_SYM)}, { "COMPRESSED", SYM(COMPRESSED_SYM)}, { "CONCURRENT", SYM(CONCURRENT)}, + { "CONDITION", SYM(CONDITION_SYM)}, + { "CONNECTION", SYM(CONNECTION_SYM)}, { "CONSISTENT", SYM(CONSISTENT_SYM)}, { "CONSTRAINT", SYM(CONSTRAINT)}, + { "CONTAINS", SYM(CONTAINS_SYM)}, + { "CONTINUE", SYM(CONTINUE_SYM)}, { "CONVERT", SYM(CONVERT_SYM)}, { "CREATE", SYM(CREATE)}, { "CROSS", SYM(CROSS)}, @@ -124,6 +132,7 @@ static SYMBOL symbols[] = { { "CURRENT_TIME", SYM(CURTIME)}, { "CURRENT_TIMESTAMP", SYM(NOW_SYM)}, { "CURRENT_USER", SYM(CURRENT_USER)}, + { "CURSOR", SYM(CURSOR_SYM)}, { "DATA", SYM(DATA_SYM)}, { "DATABASE", SYM(DATABASE)}, { "DATABASES", SYM(DATABASES)}, @@ -137,13 +146,16 @@ static SYMBOL symbols[] = { { "DEALLOCATE", SYM(DEALLOCATE_SYM)}, { "DEC", SYM(DECIMAL_SYM)}, { "DECIMAL", SYM(DECIMAL_SYM)}, + { "DECLARE", SYM(DECLARE_SYM)}, { "DEFAULT", SYM(DEFAULT)}, + { "DEFINER", SYM(DEFINER_SYM)}, { "DELAYED", SYM(DELAYED_SYM)}, { "DELAY_KEY_WRITE", 
SYM(DELAY_KEY_WRITE_SYM)}, { "DELETE", SYM(DELETE_SYM)}, { "DESC", SYM(DESC)}, { "DESCRIBE", SYM(DESCRIBE)}, { "DES_KEY_FILE", SYM(DES_KEY_FILE)}, + { "DETERMINISTIC", SYM(DETERMINISTIC_SYM)}, { "DIRECTORY", SYM(DIRECTORY_SYM)}, { "DISABLE", SYM(DISABLE_SYM)}, { "DISCARD", SYM(DISCARD)}, @@ -157,7 +169,9 @@ static SYMBOL symbols[] = { { "DUMPFILE", SYM(DUMPFILE)}, { "DUPLICATE", SYM(DUPLICATE_SYM)}, { "DYNAMIC", SYM(DYNAMIC_SYM)}, + { "EACH", SYM(EACH_SYM)}, { "ELSE", SYM(ELSE)}, + { "ELSEIF", SYM(ELSEIF_SYM)}, { "ENABLE", SYM(ENABLE_SYM)}, { "ENCLOSED", SYM(ENCLOSED)}, { "END", SYM(END)}, @@ -170,11 +184,13 @@ static SYMBOL symbols[] = { { "EVENTS", SYM(EVENTS_SYM)}, { "EXECUTE", SYM(EXECUTE_SYM)}, { "EXISTS", SYM(EXISTS)}, + { "EXIT", SYM(EXIT_SYM)}, { "EXPANSION", SYM(EXPANSION_SYM)}, { "EXPLAIN", SYM(DESCRIBE)}, { "EXTENDED", SYM(EXTENDED_SYM)}, { "FALSE", SYM(FALSE_SYM)}, { "FAST", SYM(FAST_SYM)}, + { "FETCH", SYM(FETCH_SYM)}, { "FIELDS", SYM(COLUMNS)}, { "FILE", SYM(FILE_SYM)}, { "FIRST", SYM(FIRST_SYM)}, @@ -186,14 +202,17 @@ static SYMBOL symbols[] = { { "FOR", SYM(FOR_SYM)}, { "FORCE", SYM(FORCE_SYM)}, { "FOREIGN", SYM(FOREIGN)}, + { "FOUND", SYM(FOUND_SYM)}, + { "FRAC_SECOND", SYM(FRAC_SECOND_SYM)}, { "FROM", SYM(FROM)}, { "FULL", SYM(FULL)}, { "FULLTEXT", SYM(FULLTEXT_SYM)}, - { "FUNCTION", SYM(UDF_SYM)}, + { "FUNCTION", SYM(FUNCTION_SYM)}, { "GEOMETRY", SYM(GEOMETRY_SYM)}, { "GEOMETRYCOLLECTION",SYM(GEOMETRYCOLLECTION)}, { "GET_FORMAT", SYM(GET_FORMAT)}, { "GLOBAL", SYM(GLOBAL_SYM)}, + { "GOTO", SYM(GOTO_SYM)}, { "GRANT", SYM(GRANT)}, { "GRANTS", SYM(GRANTS)}, { "GROUP", SYM(GROUP)}, @@ -218,6 +237,8 @@ static SYMBOL symbols[] = { { "INNER", SYM(INNER_SYM)}, { "INNOBASE", SYM(INNOBASE_SYM)}, { "INNODB", SYM(INNOBASE_SYM)}, + { "INOUT", SYM(INOUT_SYM)}, + { "INSENSITIVE", SYM(INSENSITIVE_SYM)}, { "INSERT", SYM(INSERT)}, { "INSERT_METHOD", SYM(INSERT_METHOD)}, { "INT", SYM(INT_SYM)}, @@ -233,12 +254,17 @@ static SYMBOL symbols[] = { { "IS", SYM(IS)}, { "ISOLATION", SYM(ISOLATION)}, { "ISSUER", SYM(ISSUER_SYM)}, + { "ITERATE", SYM(ITERATE_SYM)}, + { "INVOKER", SYM(INVOKER_SYM)}, { "JOIN", SYM(JOIN_SYM)}, { "KEY", SYM(KEY_SYM)}, { "KEYS", SYM(KEYS)}, { "KILL", SYM(KILL_SYM)}, + { "LABEL", SYM(LABEL_SYM)}, + { "LANGUAGE", SYM(LANGUAGE_SYM)}, { "LAST", SYM(LAST_SYM)}, { "LEADING", SYM(LEADING)}, + { "LEAVE", SYM(LEAVE_SYM)}, { "LEAVES", SYM(LEAVES)}, { "LEFT", SYM(LEFT)}, { "LEVEL", SYM(LEVEL_SYM)}, @@ -256,6 +282,7 @@ static SYMBOL symbols[] = { { "LONG", SYM(LONG_SYM)}, { "LONGBLOB", SYM(LONGBLOB)}, { "LONGTEXT", SYM(LONGTEXT)}, + { "LOOP", SYM(LOOP_SYM)}, { "LOW_PRIORITY", SYM(LOW_PRIORITY)}, { "MASTER", SYM(MASTER_SYM)}, { "MASTER_CONNECT_RETRY", SYM(MASTER_CONNECT_RETRY_SYM)}, @@ -277,10 +304,12 @@ static SYMBOL symbols[] = { { "MAX_QUERIES_PER_HOUR", SYM(MAX_QUERIES_PER_HOUR)}, { "MAX_ROWS", SYM(MAX_ROWS)}, { "MAX_UPDATES_PER_HOUR", SYM(MAX_UPDATES_PER_HOUR)}, + { "MAX_USER_CONNECTIONS", SYM(MAX_USER_CONNECTIONS_SYM)}, { "MEDIUM", SYM(MEDIUM_SYM)}, { "MEDIUMBLOB", SYM(MEDIUMBLOB)}, { "MEDIUMINT", SYM(MEDIUMINT)}, { "MEDIUMTEXT", SYM(MEDIUMTEXT)}, + { "MERGE", SYM(MERGE_SYM)}, { "MICROSECOND", SYM(MICROSECOND_SYM)}, { "MIDDLEINT", SYM(MEDIUMINT)}, /* For powerbuilder */ { "MINUTE", SYM(MINUTE_SYM)}, @@ -289,11 +318,14 @@ static SYMBOL symbols[] = { { "MIN_ROWS", SYM(MIN_ROWS)}, { "MOD", SYM(MOD_SYM)}, { "MODE", SYM(MODE_SYM)}, + { "MODIFIES", SYM(MODIFIES_SYM)}, { "MODIFY", SYM(MODIFY_SYM)}, { "MONTH", SYM(MONTH_SYM)}, { "MULTILINESTRING", SYM(MULTILINESTRING)}, { "MULTIPOINT", 
SYM(MULTIPOINT)}, { "MULTIPOLYGON", SYM(MULTIPOLYGON)}, + { "MUTEX", SYM(MUTEX_SYM)}, + { "NAME", SYM(NAME_SYM)}, { "NAMES", SYM(NAMES_SYM)}, { "NATIONAL", SYM(NATIONAL_SYM)}, { "NATURAL", SYM(NATURAL)}, @@ -304,7 +336,7 @@ static SYMBOL symbols[] = { { "NEXT", SYM(NEXT_SYM)}, { "NO", SYM(NO_SYM)}, { "NONE", SYM(NONE_SYM)}, - { "NOT", SYM(NOT)}, + { "NOT", SYM(NOT_SYM)}, { "NO_WRITE_TO_BINLOG", SYM(NO_WRITE_TO_BINLOG)}, { "NULL", SYM(NULL_SYM)}, { "NUMERIC", SYM(NUMERIC_SYM)}, @@ -319,6 +351,7 @@ static SYMBOL symbols[] = { { "OPTIONALLY", SYM(OPTIONALLY)}, { "OR", SYM(OR_SYM)}, { "ORDER", SYM(ORDER_SYM)}, + { "OUT", SYM(OUT_SYM)}, { "OUTER", SYM(OUTER)}, { "OUTFILE", SYM(OUTFILE)}, { "PACK_KEYS", SYM(PACK_KEYS_SYM)}, @@ -335,6 +368,7 @@ static SYMBOL symbols[] = { { "PROCESS" , SYM(PROCESS)}, { "PROCESSLIST", SYM(PROCESSLIST_SYM)}, { "PURGE", SYM(PURGE)}, + { "QUARTER", SYM(QUARTER_SYM)}, { "QUERY", SYM(QUERY_SYM)}, { "QUICK", SYM(QUICK)}, { "RAID0", SYM(RAID_0_SYM)}, @@ -342,6 +376,7 @@ static SYMBOL symbols[] = { { "RAID_CHUNKSIZE", SYM(RAID_CHUNKSIZE)}, { "RAID_TYPE", SYM(RAID_TYPE)}, { "READ", SYM(READ_SYM)}, + { "READS", SYM(READS_SYM)}, { "REAL", SYM(REAL)}, { "REFERENCES", SYM(REFERENCES)}, { "REGEXP", SYM(REGEXP)}, @@ -354,24 +389,31 @@ static SYMBOL symbols[] = { { "REPEATABLE", SYM(REPEATABLE_SYM)}, { "REPLACE", SYM(REPLACE)}, { "REPLICATION", SYM(REPLICATION)}, + { "REPEAT", SYM(REPEAT_SYM)}, { "REQUIRE", SYM(REQUIRE_SYM)}, { "RESET", SYM(RESET_SYM)}, { "RESTORE", SYM(RESTORE_SYM)}, { "RESTRICT", SYM(RESTRICT)}, - { "RETURNS", SYM(UDF_RETURNS_SYM)}, + { "RETURN", SYM(RETURN_SYM)}, + { "RETURNS", SYM(RETURNS_SYM)}, { "REVOKE", SYM(REVOKE)}, { "RIGHT", SYM(RIGHT)}, { "RLIKE", SYM(REGEXP)}, /* Like in mSQL2 */ { "ROLLBACK", SYM(ROLLBACK_SYM)}, { "ROLLUP", SYM(ROLLUP_SYM)}, + { "ROUTINE", SYM(ROUTINE_SYM)}, { "ROW", SYM(ROW_SYM)}, { "ROWS", SYM(ROWS_SYM)}, { "ROW_FORMAT", SYM(ROW_FORMAT_SYM)}, { "RTREE", SYM(RTREE_SYM)}, { "SAVEPOINT", SYM(SAVEPOINT_SYM)}, + { "SCHEMA", SYM(DATABASE)}, + { "SCHEMAS", SYM(DATABASES)}, { "SECOND", SYM(SECOND_SYM)}, { "SECOND_MICROSECOND", SYM(SECOND_MICROSECOND_SYM)}, + { "SECURITY", SYM(SECURITY_SYM)}, { "SELECT", SYM(SELECT_SYM)}, + { "SENSITIVE", SYM(SENSITIVE_SYM)}, { "SEPARATOR", SYM(SEPARATOR_SYM)}, { "SERIAL", SYM(SERIAL_SYM)}, { "SERIALIZABLE", SYM(SERIALIZABLE_SYM)}, @@ -389,6 +431,11 @@ static SYMBOL symbols[] = { { "SONAME", SYM(UDF_SONAME_SYM)}, { "SOUNDS", SYM(SOUNDS_SYM)}, { "SPATIAL", SYM(SPATIAL_SYM)}, + { "SPECIFIC", SYM(SPECIFIC_SYM)}, + { "SQL", SYM(SQL_SYM)}, + { "SQLEXCEPTION", SYM(SQLEXCEPTION_SYM)}, + { "SQLSTATE", SYM(SQLSTATE_SYM)}, + { "SQLWARNING", SYM(SQLWARNING_SYM)}, { "SQL_BIG_RESULT", SYM(SQL_BIG_RESULT)}, { "SQL_BUFFER_RESULT", SYM(SQL_BUFFER_RESULT)}, { "SQL_CACHE", SYM(SQL_CACHE_SYM)}, @@ -396,6 +443,15 @@ static SYMBOL symbols[] = { { "SQL_NO_CACHE", SYM(SQL_NO_CACHE_SYM)}, { "SQL_SMALL_RESULT", SYM(SQL_SMALL_RESULT)}, { "SQL_THREAD", SYM(SQL_THREAD)}, + { "SQL_TSI_FRAC_SECOND", SYM(FRAC_SECOND_SYM)}, + { "SQL_TSI_SECOND", SYM(SECOND_SYM)}, + { "SQL_TSI_MINUTE", SYM(MINUTE_SYM)}, + { "SQL_TSI_HOUR", SYM(HOUR_SYM)}, + { "SQL_TSI_DAY", SYM(DAY_SYM)}, + { "SQL_TSI_WEEK", SYM(WEEK_SYM)}, + { "SQL_TSI_MONTH", SYM(MONTH_SYM)}, + { "SQL_TSI_QUARTER", SYM(QUARTER_SYM)}, + { "SQL_TSI_YEAR", SYM(YEAR_SYM)}, { "SSL", SYM(SSL_SYM)}, { "START", SYM(START_SYM)}, { "STARTING", SYM(STARTING)}, @@ -411,25 +467,32 @@ static SYMBOL symbols[] = { { "TABLES", SYM(TABLES)}, { "TABLESPACE", SYM(TABLESPACE)}, { "TEMPORARY", 
SYM(TEMPORARY)}, + { "TEMPTABLE", SYM(TEMPTABLE_SYM)}, { "TERMINATED", SYM(TERMINATED)}, { "TEXT", SYM(TEXT_SYM)}, { "THEN", SYM(THEN_SYM)}, { "TIME", SYM(TIME_SYM)}, { "TIMESTAMP", SYM(TIMESTAMP)}, + { "TIMESTAMPADD", SYM(TIMESTAMP_ADD)}, + { "TIMESTAMPDIFF", SYM(TIMESTAMP_DIFF)}, { "TINYBLOB", SYM(TINYBLOB)}, { "TINYINT", SYM(TINYINT)}, { "TINYTEXT", SYM(TINYTEXT)}, { "TO", SYM(TO_SYM)}, { "TRAILING", SYM(TRAILING)}, { "TRANSACTION", SYM(TRANSACTION_SYM)}, + { "TRIGGER", SYM(TRIGGER_SYM)}, { "TRUE", SYM(TRUE_SYM)}, { "TRUNCATE", SYM(TRUNCATE_SYM)}, { "TYPE", SYM(TYPE_SYM)}, { "TYPES", SYM(TYPES_SYM)}, { "UNCOMMITTED", SYM(UNCOMMITTED_SYM)}, + { "UNDEFINED", SYM(UNDEFINED_SYM)}, + { "UNDO", SYM(UNDO_SYM)}, { "UNICODE", SYM(UNICODE_SYM)}, { "UNION", SYM(UNION_SYM)}, { "UNIQUE", SYM(UNIQUE_SYM)}, + { "UNKNOWN", SYM(UNKNOWN_SYM)}, { "UNLOCK", SYM(UNLOCK_SYM)}, { "UNSIGNED", SYM(UNSIGNED)}, { "UNTIL", SYM(UNTIL_SYM)}, @@ -451,8 +514,11 @@ static SYMBOL symbols[] = { { "VARIABLES", SYM(VARIABLES)}, { "VARYING", SYM(VARYING)}, { "WARNINGS", SYM(WARNINGS)}, + { "WEEK", SYM(WEEK_SYM)}, { "WHEN", SYM(WHEN_SYM)}, { "WHERE", SYM(WHERE)}, + { "WHILE", SYM(WHILE_SYM)}, + { "VIEW", SYM(VIEW_SYM)}, { "WITH", SYM(WITH)}, { "WORK", SYM(WORK_SYM)}, { "WRITE", SYM(WRITE_SYM)}, @@ -461,7 +527,7 @@ static SYMBOL symbols[] = { { "YEAR", SYM(YEAR_SYM)}, { "YEAR_MONTH", SYM(YEAR_MONTH_SYM)}, { "ZEROFILL", SYM(ZEROFILL)}, - { "||", SYM(OR_OR_CONCAT)} + { "||", SYM(OR_OR_SYM)} }; @@ -499,7 +565,6 @@ static SYMBOL sql_functions[] = { { "CONCAT", SYM(CONCAT)}, { "CONCAT_WS", SYM(CONCAT_WS)}, { "CONNECTION_ID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_connection_id)}, - { "CONTAINS", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_contains)}, { "CONV", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_conv)}, { "CONVERT_TZ", SYM(CONVERT_TZ_SYM)}, { "COUNT", SYM(COUNT_SYM)}, @@ -636,14 +701,13 @@ static SYMBOL sql_functions[] = { { "POSITION", SYM(POSITION_SYM)}, { "POW", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)}, { "POWER", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_pow)}, - { "QUARTER", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quarter)}, { "QUOTE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_quote)}, { "RADIANS", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_radians)}, { "RAND", SYM(RAND)}, { "RELEASE_LOCK", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_release_lock)}, - { "REPEAT", F_SYM(FUNC_ARG2),0,CREATE_FUNC(create_func_repeat)}, { "REVERSE", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_reverse)}, { "ROUND", SYM(ROUND)}, + { "ROW_COUNT", SYM(ROW_COUNT_SYM)}, { "RPAD", F_SYM(FUNC_ARG3),0,CREATE_FUNC(create_func_rpad)}, { "RTRIM", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_rtrim)}, { "SEC_TO_TIME", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_sec_to_time)}, @@ -686,7 +750,6 @@ static SYMBOL sql_functions[] = { { "UUID", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_uuid)}, { "VARIANCE", SYM(VARIANCE_SYM)}, { "VERSION", F_SYM(FUNC_ARG0),0,CREATE_FUNC(create_func_version)}, - { "WEEK", SYM(WEEK_SYM)}, { "WEEKDAY", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekday)}, { "WEEKOFYEAR", F_SYM(FUNC_ARG1),0,CREATE_FUNC(create_func_weekofyear)}, { "WITHIN", F_SYM(FUNC_ARG2),0,CREATE_FUNC_GEOM(create_func_within)}, diff --git a/sql/lock.cc b/sql/lock.cc index 7cfa2aebe7b..973e82b7b10 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -79,7 +79,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table,uint count, bool unlock, TABLE **write_locked); static int lock_external(THD *thd, TABLE **table,uint count); static int 
unlock_external(THD *thd, TABLE **table,uint count); -static void print_lock_error(int error); +static void print_lock_error(int error, const char *); MYSQL_LOCK *mysql_lock_tables(THD *thd,TABLE **tables,uint count) @@ -152,7 +152,7 @@ retry: thd->proc_info=0; if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); if (sql_lock) { mysql_unlock_tables(thd,sql_lock); @@ -187,7 +187,7 @@ static int lock_external(THD *thd, TABLE **tables, uint count) (*tables)->file->external_lock(thd, F_UNLCK); (*tables)->current_lock=F_UNLCK; } - print_lock_error(error); + print_lock_error(error, (*tables)->file->table_type()); DBUG_RETURN(error); } else @@ -380,7 +380,7 @@ static int unlock_external(THD *thd, TABLE **table,uint count) table++; } while (--count); if (error_code) - print_lock_error(error_code); + print_lock_error(error_code, (*table)->file->table_type()); DBUG_RETURN(error_code); } @@ -430,7 +430,7 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, *write_lock_used=table; if (table->db_stat & HA_READ_ONLY) { - my_error(ER_OPEN_AS_READONLY,MYF(0),table->table_name); + my_error(ER_OPEN_AS_READONLY, MYF(0), table->table_name); my_free((gptr) sql_lock,MYF(0)); return 0; } @@ -578,7 +578,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list) static bool locked_named_table(THD *thd, TABLE_LIST *table_list) { - for (; table_list ; table_list=table_list->next) + for (; table_list ; table_list=table_list->next_local) { if (table_list->table && table_is_used(table_list->table,0)) return 1; @@ -632,7 +632,7 @@ bool lock_table_names(THD *thd, TABLE_LIST *table_list) bool got_all_locks=1; TABLE_LIST *lock_table; - for (lock_table=table_list ; lock_table ; lock_table=lock_table->next) + for (lock_table= table_list; lock_table; lock_table= lock_table->next_local) { int got_lock; if ((got_lock=lock_table_name(thd,lock_table)) < 0) @@ -675,13 +675,15 @@ end: void unlock_table_names(THD *thd, TABLE_LIST *table_list, TABLE_LIST *last_table) { - for (TABLE_LIST *table=table_list ; table != last_table ; table=table->next) + for (TABLE_LIST *table= table_list; + table != last_table; + table= table->next_local) unlock_table_name(thd,table); pthread_cond_broadcast(&COND_refresh); } -static void print_lock_error(int error) +static void print_lock_error(int error, const char *table) { int textno; DBUG_ENTER("print_lock_error"); @@ -693,11 +695,22 @@ static void print_lock_error(int error) case HA_ERR_READ_ONLY_TRANSACTION: textno=ER_READ_ONLY_TRANSACTION; break; + case HA_ERR_LOCK_DEADLOCK: + textno=ER_LOCK_DEADLOCK; + break; + case HA_ERR_WRONG_COMMAND: + textno=ER_ILLEGAL_HA; + break; default: textno=ER_CANT_LOCK; break; } - my_error(textno,MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG),error); + + if ( textno == ER_ILLEGAL_HA ) + my_error(textno, MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG), table); + else + my_error(textno, MYF(ME_BELL+ME_OLDWIN+ME_WAITTANG), error); + DBUG_VOID_RETURN; } @@ -848,7 +861,8 @@ bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, if (thd->global_read_lock) // This thread had the read locks { if (is_not_commit) - my_error(ER_CANT_UPDATE_WITH_READLOCK,MYF(0)); + my_message(ER_CANT_UPDATE_WITH_READLOCK, + ER(ER_CANT_UPDATE_WITH_READLOCK), MYF(0)); (void) pthread_mutex_unlock(&LOCK_open); /* We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does. 
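
Editor's aside: the print_lock_error() change above threads the handler's table_type() down so the new HA_ERR_WRONG_COMMAND case can say which engine rejected the command (ER_ILLEGAL_HA takes a name, the other codes take the numeric handler error). A minimal stand-alone sketch of that dispatch, with hypothetical error/message ids and a plain printf in place of my_error():

  #include <cstdio>

  // Hypothetical stand-ins for the handler error codes and message ids.
  enum { HA_ERR_READ_ONLY_TRANSACTION = 1, HA_ERR_LOCK_DEADLOCK, HA_ERR_WRONG_COMMAND };
  enum { ER_READ_ONLY_TRANSACTION = 100, ER_LOCK_DEADLOCK, ER_ILLEGAL_HA, ER_CANT_LOCK };

  // Map a handler error to a message id; ER_ILLEGAL_HA is the only case whose
  // message argument is the table/engine name rather than the error number.
  static void print_lock_error_sketch(int error, const char *table)
  {
    int textno;
    switch (error) {
    case HA_ERR_READ_ONLY_TRANSACTION: textno= ER_READ_ONLY_TRANSACTION; break;
    case HA_ERR_LOCK_DEADLOCK:         textno= ER_LOCK_DEADLOCK;         break;
    case HA_ERR_WRONG_COMMAND:         textno= ER_ILLEGAL_HA;            break;
    default:                           textno= ER_CANT_LOCK;             break;
    }
    if (textno == ER_ILLEGAL_HA)
      std::printf("error %d: operation not supported by handler '%s'\n", textno, table);
    else
      std::printf("error %d: could not lock (handler error %d)\n", textno, error);
  }
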
@@ -894,19 +908,34 @@ void start_waiting_global_read_lock(THD *thd) } -void make_global_read_lock_block_commit(THD *thd) +bool make_global_read_lock_block_commit(THD *thd) { + bool error; + const char *old_message; + DBUG_ENTER("make_global_read_lock_block_commit"); /* If we didn't succeed lock_global_read_lock(), or if we already suceeded make_global_read_lock_block_commit(), do nothing. */ if (thd->global_read_lock != GOT_GLOBAL_READ_LOCK) - return; + DBUG_RETURN(1); pthread_mutex_lock(&LOCK_open); /* increment this BEFORE waiting on cond (otherwise race cond) */ global_read_lock_blocks_commit++; - while (protect_against_global_read_lock) + /* For testing we set up some blocking, to see if we can be killed */ + DBUG_EXECUTE_IF("make_global_read_lock_block_commit_loop", + protect_against_global_read_lock++;); + old_message= thd->enter_cond(&COND_refresh, &LOCK_open, + "Waiting for all running commits to finish"); + while (protect_against_global_read_lock && !thd->killed) pthread_cond_wait(&COND_refresh, &LOCK_open); - pthread_mutex_unlock(&LOCK_open); - thd->global_read_lock= MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT; + DBUG_EXECUTE_IF("make_global_read_lock_block_commit_loop", + protect_against_global_read_lock--;); + if (error= thd->killed) + global_read_lock_blocks_commit--; // undo what we did + else + thd->global_read_lock= MADE_GLOBAL_READ_LOCK_BLOCK_COMMIT; + thd->exit_cond(old_message); + DBUG_RETURN(error); } + diff --git a/sql/log.cc b/sql/log.cc index ab0cb823f15..00c39acec09 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -34,7 +34,7 @@ #include "message.h" #endif -MYSQL_LOG mysql_log,mysql_update_log,mysql_slow_log,mysql_bin_log; +MYSQL_LOG mysql_log, mysql_slow_log, mysql_bin_log; ulong sync_binlog_counter= 0; static bool test_if_number(const char *str, @@ -128,7 +128,8 @@ static int find_uniq_filename(char *name) MYSQL_LOG::MYSQL_LOG() :bytes_written(0), last_time(0), query_start(0), name(0), file_id(1), open_count(1), log_type(LOG_CLOSED), write_error(0), inited(0), - need_start_event(1) + need_start_event(1), description_event_for_exec(0), + description_event_for_queue(0) { /* We don't want to initialize LOCK_Log here as such initialization depends on @@ -156,6 +157,8 @@ void MYSQL_LOG::cleanup() { inited= 0; close(LOG_CLOSE_INDEX); + delete description_event_for_queue; + delete description_event_for_exec; (void) pthread_mutex_destroy(&LOCK_log); (void) pthread_mutex_destroy(&LOCK_index); (void) pthread_cond_destroy(&update_cond); @@ -225,7 +228,8 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, const char *new_name, const char *index_file_name_arg, enum cache_type io_cache_type_arg, bool no_auto_events_arg, - ulong max_size_arg) + ulong max_size_arg, + bool null_created_arg) { char buff[512]; File file= -1, index_file_nr= -1; @@ -323,8 +327,8 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, if (my_b_safe_write(&log_file, (byte*) BINLOG_MAGIC, BIN_LOG_HEADER_SIZE)) goto err; - bytes_written += BIN_LOG_HEADER_SIZE; - write_file_name_to_index_file=1; + bytes_written+= BIN_LOG_HEADER_SIZE; + write_file_name_to_index_file= 1; } if (!my_b_inited(&index_file)) @@ -354,10 +358,49 @@ bool MYSQL_LOG::open(const char *log_name, enum_log_type log_type_arg, } if (need_start_event && !no_auto_events) { - need_start_event=0; - Start_log_event s; - s.set_log_pos(this); - s.write(&log_file); + /* + In 4.x we set need_start_event=0 here, but in 5.0 we want a Start event + even if this is not the very first binlog. 
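
Editor's aside: the make_global_read_lock_block_commit() hunk above now returns whether the waiter was killed, and the wait runs inside thd->enter_cond()/exit_cond() with a loop that re-checks thd->killed; on kill the counter increment is undone. The same shape, sketched with standard threading primitives (blocked_count and killed are stand-ins for protect_against_global_read_lock and THD::killed):

  #include <atomic>
  #include <condition_variable>
  #include <mutex>

  static std::mutex              lock_open;      // plays the role of LOCK_open
  static std::condition_variable cond_refresh;   // plays the role of COND_refresh
  static int                     blocked_count= 0;
  static std::atomic<bool>       killed{false};

  // Returns true on error (the waiter was killed), false on success,
  // mirroring the bool return the patch gives the real function.
  static bool block_commit_sketch(int &blocks_commit)
  {
    std::unique_lock<std::mutex> lk(lock_open);
    blocks_commit++;                              // announce intent before waiting
    cond_refresh.wait(lk, [] { return blocked_count == 0 || killed.load(); });
    if (killed.load())
    {
      blocks_commit--;                            // undo what we did
      return true;
    }
    return false;
  }
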
+ */ + Format_description_log_event s(BINLOG_VERSION); + if (!s.is_valid()) + goto err; + if (null_created_arg) + s.created= 0; + if (s.write(&log_file)) + goto err; + bytes_written+= s.data_written; + } + if (description_event_for_queue && + description_event_for_queue->binlog_version>=4) + { + /* + This is a relay log written to by the I/O slave thread. + Write the event so that others can later know the format of this relay + log. + Note that this event is very close to the original event from the + master (it has binlog version of the master, event types of the + master), so this is suitable to parse the next relay log's event. It + has been produced by + Format_description_log_event::Format_description_log_event(char* buf,). + Why don't we want to write the description_event_for_queue if this + event is for format<4 (3.23 or 4.x): this is because in that case, the + description_event_for_queue describes the data received from the + master, but not the data written to the relay log (*conversion*), + which is in format 4 (slave's). + */ + /* + Set 'created' to 0, so that in next relay logs this event does not + trigger cleaning actions on the slave in + Format_description_log_event::exec_event(). + */ + description_event_for_queue->created= 0; + /* Don't set log_pos in event header */ + description_event_for_queue->artificial_event=1; + + if (description_event_for_queue->write(&log_file)) + goto err; + bytes_written+= description_event_for_queue->data_written; } if (flush_io_cache(&log_file) || my_sync(log_file.file, MYF(MY_WME))) @@ -653,7 +696,7 @@ bool MYSQL_LOG::reset_logs(THD* thd) if (!thd->slave_thread) need_start_event=1; open(save_name, save_log_type, 0, index_file_name, - io_cache_type, no_auto_events, max_size); + io_cache_type, no_auto_events, max_size, 0); my_free((gptr) save_name, MYF(0)); err: @@ -835,22 +878,18 @@ int MYSQL_LOG::purge_logs(const char *to_log, while ((strcmp(to_log,log_info.log_file_name) || (exit_loop=included)) && !log_in_use(log_info.log_file_name)) { - ulong tmp; - LINT_INIT(tmp); + ulong file_size= 0; if (decrease_log_space) //stat the file we want to delete { MY_STAT s; + + /* + If we could not stat, we can't know the amount + of space that deletion will free. In most cases, + deletion won't work either, so it's not a problem. + */ if (my_stat(log_info.log_file_name,&s,MYF(0))) - tmp= s.st_size; - else - { - /* - If we could not stat, we can't know the amount - of space that deletion will free. In most cases, - deletion won't work either, so it's not a problem. - */ - tmp= 0; - } + file_size= s.st_size; } /* It's not fatal if we can't delete a log file ; @@ -858,7 +897,7 @@ int MYSQL_LOG::purge_logs(const char *to_log, */ DBUG_PRINT("info",("purging %s",log_info.log_file_name)); if (!my_delete(log_info.log_file_name, MYF(0)) && decrease_log_space) - *decrease_log_space-= tmp; + *decrease_log_space-= file_size; if (find_next_log(&log_info, 0) || exit_loop) break; } @@ -1023,9 +1062,8 @@ void MYSQL_LOG::new_file(bool need_lock) */ THD *thd = current_thd; /* may be 0 if we are reacting to SIGHUP */ Rotate_log_event r(thd,new_name+dirname_length(new_name)); - r.set_log_pos(this); r.write(&log_file); - bytes_written += r.get_event_len(); + bytes_written += r.data_written; } /* Update needs to be signalled even if there is no rotate event @@ -1043,8 +1081,17 @@ void MYSQL_LOG::new_file(bool need_lock) Note that at this point, log_type != LOG_CLOSED (important for is_open()). 
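
Editor's aside: in purge_logs() above the LINT_INIT'ed temporary becomes an explicit file_size initialised to 0, so that when my_stat() fails the purge simply frees no accounted space instead of subtracting an uninitialised value. The accounting pattern, sketched with plain POSIX calls instead of my_stat()/my_delete():

  #include <sys/stat.h>
  #include <cstdio>

  // Subtract the size of a purged log file from *decrease_log_space, but only
  // when we could stat it; a failed stat usually means the delete fails too.
  static void purge_one_log_sketch(const char *log_file_name,
                                   unsigned long *decrease_log_space)
  {
    unsigned long file_size= 0;
    struct stat s;
    if (decrease_log_space && stat(log_file_name, &s) == 0)
      file_size= (unsigned long) s.st_size;

    if (std::remove(log_file_name) == 0 && decrease_log_space)
      *decrease_log_space-= file_size;
  }
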
*/ + /* + new_file() is only used for rotation (in FLUSH LOGS or because size > + max_binlog_size or max_relay_log_size). + If this is a binary log, the Format_description_log_event at the beginning of + the new file should have created=0 (to distinguish with the + Format_description_log_event written at server startup, which should + trigger temp tables deletion on slaves. + */ + open(old_name, save_log_type, new_name_ptr, index_file_name, io_cache_type, - no_auto_events, max_size); + no_auto_events, max_size, 1); if (this == &mysql_bin_log) report_pos_in_innodb(); my_free(old_name,MYF(0)); @@ -1075,7 +1122,7 @@ bool MYSQL_LOG::append(Log_event* ev) error=1; goto err; } - bytes_written += ev->get_event_len(); + bytes_written+= ev->data_written; DBUG_PRINT("info",("max_size: %lu",max_size)); if ((uint) my_b_append_tell(&log_file) > max_size) { @@ -1128,7 +1175,7 @@ err: /* Write to normal (not rotable) log - This is the format for the 'normal', 'slow' and 'update' logs. + This is the format for the 'normal' log. */ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, @@ -1219,11 +1266,15 @@ bool MYSQL_LOG::write(THD *thd,enum enum_server_command command, inline bool sync_binlog(IO_CACHE *cache) { - return (sync_binlog_period && - (sync_binlog_period == ++sync_binlog_counter) && - (sync_binlog_counter= 0, my_sync(cache->file, MYF(MY_WME)))); + if (sync_binlog_period == ++sync_binlog_counter && sync_binlog_period) + { + sync_binlog_counter= 0; + return my_sync(cache->file, MYF(MY_WME)); + } + return 0; } + /* Write an event to the binary log */ @@ -1291,7 +1342,7 @@ bool MYSQL_LOG::write(Log_event* event_info) if (thd) { -#if MYSQL_VERSION_ID < 50000 + /* NOTE: CHARSET AND TZ REPL WILL BE REWRITTEN SHORTLY */ /* To make replication of charsets working in 4.1 we are writing values of charset related variables before every statement in the binlog, @@ -1317,7 +1368,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u", (uint) thd->variables.collation_database->number, (uint) thd->variables.collation_server->number); Query_log_event e(thd, buf, written, 0, FALSE); - e.set_log_pos(this); if (e.write(file)) goto err; } @@ -1333,31 +1383,26 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u", thd->variables.time_zone->get_name()->ptr(), "'", NullS); Query_log_event e(thd, buf, buf_end - buf, 0, FALSE); - e.set_log_pos(this); if (e.write(file)) goto err; } -#endif if (thd->last_insert_id_used) { Intvar_log_event e(thd,(uchar) LAST_INSERT_ID_EVENT, thd->current_insert_id); - e.set_log_pos(this); if (e.write(file)) goto err; } if (thd->insert_id_used) { Intvar_log_event e(thd,(uchar) INSERT_ID_EVENT,thd->last_insert_id); - e.set_log_pos(this); if (e.write(file)) goto err; } if (thd->rand_used) { Rand_log_event e(thd,thd->rand_saved_seed1,thd->rand_saved_seed2); - e.set_log_pos(this); if (e.write(file)) goto err; } @@ -1373,7 +1418,6 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u", user_var_event->length, user_var_event->type, user_var_event->charset_number); - e.set_log_pos(this); if (e.write(file)) goto err; } @@ -1384,49 +1428,18 @@ COLLATION_CONNECTION=%u,COLLATION_DATABASE=%u,COLLATION_SERVER=%u", char buf[256], *p; p= strmov(strmov(buf, "SET CHARACTER SET "), thd->variables.convert_set->name); - Query_log_event e(thd, buf, (ulong) (p - buf), 0); - e.set_log_pos(this); + Query_log_event e(thd, buf, (ulong) (p - buf), 0, FALSE); if (e.write(file)) goto err; } #endif - - /* - If the user has set FOREIGN_KEY_CHECKS=0 we wrap every 
SQL - command in the binlog inside: - SET FOREIGN_KEY_CHECKS=0; - <command>; - SET FOREIGN_KEY_CHECKS=1; - */ - - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) - { - Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=0", 24, 0, FALSE); - e.set_log_pos(this); - if (e.write(file)) - goto err; - } } /* Write the SQL command */ - event_info->set_log_pos(this); if (event_info->write(file)) goto err; - /* Write log events to reset the 'run environment' of the SQL command */ - - if (thd) - { - if (thd->options & OPTION_NO_FOREIGN_KEY_CHECKS) - { - Query_log_event e(thd, "SET FOREIGN_KEY_CHECKS=1", 24, 0, FALSE); - e.set_log_pos(this); - if (e.write(file)) - goto err; - } - } - /* Tell for transactional table handlers up to which position in the binlog file we wrote. The table handler can store this info, and @@ -1501,7 +1514,7 @@ err: if (error) { if (my_errno == EFBIG) - my_error(ER_TRANS_CACHE_FULL, MYF(0)); + my_message(ER_TRANS_CACHE_FULL, ER(ER_TRANS_CACHE_FULL), MYF(0)); else my_error(ER_ERROR_ON_WRITE, MYF(0), name, errno); write_error=1; @@ -1612,7 +1625,6 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) master's binlog, which would result in wrong positions being shown to the user, MASTER_POS_WAIT undue waiting etc. */ - qinfo.set_log_pos(this); if (qinfo.write(&log_file)) goto err; } @@ -1639,7 +1651,6 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, bool commit_or_rollback) commit_or_rollback ? 6 : 8, TRUE, FALSE); qinfo.error_code= 0; - qinfo.set_log_pos(this); if (qinfo.write(&log_file) || flush_io_cache(&log_file) || sync_binlog(&log_file)) goto err; @@ -1701,8 +1712,7 @@ err: /* - Write update log in a format suitable for incremental backup - This is also used by the slow query log. + Write to the slow query log. */ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length, @@ -1886,8 +1896,8 @@ void MYSQL_LOG::close(uint exiting) (exiting & LOG_CLOSE_STOP_EVENT)) { Stop_log_event s; - s.set_log_pos(this); s.write(&log_file); + bytes_written+= s.data_written; signal_update(); } #endif /* HAVE_REPLICATION */ diff --git a/sql/log_event.cc b/sql/log_event.cc index 04395f121fd..2791f482ad6 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -180,10 +180,12 @@ static void cleanup_load_tmpdir() write_str() */ -static bool write_str(IO_CACHE *file, char *str, byte length) +static bool write_str(IO_CACHE *file, char *str, uint length) { - return (my_b_safe_write(file, &length, 1) || - my_b_safe_write(file, (byte*) str, (int) length)); + byte tmp[1]; + tmp[0]= (byte) length; + return (my_b_safe_write(file, tmp, sizeof(tmp)) || + my_b_safe_write(file, (byte*) str, length)); } @@ -191,17 +193,18 @@ static bool write_str(IO_CACHE *file, char *str, byte length) read_str() */ -static inline int read_str(char * &buf, char *buf_end, char * &str, - uint8 &len) +static inline int read_str(char **buf, char *buf_end, char **str, + uint8 *len) { - if (buf + (uint) (uchar) *buf >= buf_end) + if (*buf + ((uint) (uchar) **buf) >= buf_end) return 1; - len = (uint8) *buf; - str= buf+1; - buf+= (uint) len+1; + *len= (uint8) **buf; + *str= (*buf)+1; + (*buf)+= (uint) *len+1; return 0; } + /* Transforms a string into "" or its expression in 0x... form. */ @@ -225,9 +228,25 @@ static char *str_to_hex(char *to, char *from, uint len) return p; // pointer to end 0 of 'to' } +/* + Prints a "session_var=value" string. Used by mysqlbinlog to print some SET + commands just before it prints a query. 
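
Editor's aside: the reworked write_str()/read_str() pair above stores a string as a one-byte length followed by the bytes, and read_str() now takes explicit out-parameters plus a buffer end so a corrupt length cannot read past the event. The same framing, sketched against a plain memory buffer rather than an IO_CACHE:

  #include <cstring>
  #include <cstdint>

  // Append "<1-byte length><bytes>" at *pos; returns 1 on overflow, 0 on success.
  static int write_str_sketch(char **pos, char *end, const char *str, uint8_t len)
  {
    if (*pos + 1 + len > end)
      return 1;
    *(*pos)++= (char) len;
    std::memcpy(*pos, str, len);
    *pos+= len;
    return 0;
  }

  // Parse the same framing; rejects a length that would run past buf_end.
  static int read_str_sketch(const char **buf, const char *buf_end,
                             const char **str, uint8_t *len)
  {
    if (*buf + (uint8_t) **buf >= buf_end)
      return 1;
    *len= (uint8_t) **buf;
    *str= *buf + 1;
    *buf+= 1 + *len;
    return 0;
  }
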
+*/ + +static void print_set_option(FILE* file, uint32 bits_changed, uint32 option, + uint32 flags, const char* name, bool* need_comma) +{ + if (bits_changed & option) + { + if (*need_comma) + fprintf(file,", "); + fprintf(file,"%s=%d", name, (bool)(flags & option)); + *need_comma= 1; + } +} /************************************************************************** - Log_event methods + Log_event methods (= the parent class of all events) **************************************************************************/ /* @@ -237,7 +256,7 @@ static char *str_to_hex(char *to, char *from, uint len) const char* Log_event::get_type_str() { switch(get_type_code()) { - case START_EVENT: return "Start"; + case START_EVENT_V3: return "Start_v3"; case STOP_EVENT: return "Stop"; case QUERY_EVENT: return "Query"; case ROTATE_EVENT: return "Rotate"; @@ -251,6 +270,7 @@ const char* Log_event::get_type_str() case EXEC_LOAD_EVENT: return "Exec_load"; case RAND_EVENT: return "RAND"; case USER_VAR_EVENT: return "User var"; + case FORMAT_DESCRIPTION_EVENT: return "Format_desc"; default: return "Unknown"; /* impossible */ } } @@ -262,8 +282,7 @@ const char* Log_event::get_type_str() #ifndef MYSQL_CLIENT Log_event::Log_event(THD* thd_arg, uint16 flags_arg, bool using_trans) - :log_pos(0), temp_buf(0), exec_time(0), cached_event_len(0), - flags(flags_arg), thd(thd_arg) + :log_pos(0), temp_buf(0), exec_time(0), flags(flags_arg), thd(thd_arg) { server_id= thd->server_id; when= thd->start_time; @@ -280,7 +299,7 @@ Log_event::Log_event(THD* thd_arg, uint16 flags_arg, bool using_trans) */ Log_event::Log_event() - :temp_buf(0), exec_time(0), cached_event_len(0), flags(0), cache_stmt(0), + :temp_buf(0), exec_time(0), flags(0), cache_stmt(0), thd(0) { server_id= ::server_id; @@ -294,24 +313,70 @@ Log_event::Log_event() Log_event::Log_event() */ -Log_event::Log_event(const char* buf, bool old_format) - :temp_buf(0), cached_event_len(0), cache_stmt(0) +Log_event::Log_event(const char* buf, + const Format_description_log_event* description_event) + :temp_buf(0), cache_stmt(0) { +#ifndef MYSQL_CLIENT + thd = 0; +#endif when = uint4korr(buf); server_id = uint4korr(buf + SERVER_ID_OFFSET); - if (old_format) + if (description_event->binlog_version==1) { - log_pos=0; - flags=0; + log_pos= 0; + flags= 0; + return; } - else + /* 4.0 or newer */ + log_pos= uint4korr(buf + LOG_POS_OFFSET); + /* + If the log is 4.0 (so here it can only be a 4.0 relay log read by the SQL + thread or a 4.0 master binlog read by the I/O thread), log_pos is the + beginning of the event: we transform it into the end of the event, which is + more useful. + But how do you know that the log is 4.0: you know it if description_event + is version 3 *and* you are not reading a Format_desc (remember that + mysqlbinlog starts by assuming that 5.0 logs are in 4.0 format, until it + finds a Format_desc). + */ + if (description_event->binlog_version==3 && + buf[EVENT_TYPE_OFFSET]<FORMAT_DESCRIPTION_EVENT && log_pos) { - log_pos = uint4korr(buf + LOG_POS_OFFSET); - flags = uint2korr(buf + FLAGS_OFFSET); + /* + If log_pos=0, don't change it. log_pos==0 is a marker to mean + "don't change rli->group_master_log_pos" (see + inc_group_relay_log_pos()). As it is unreal log_pos, adding the event + len's is nonsense. 
For example, a fake Rotate event should + not have its log_pos (which is 0) changed or it will modify + Exec_master_log_pos in SHOW SLAVE STATUS, displaying a nonsense value + of (a non-zero offset which does not exist in the master's binlog, so + which will cause problems if the user uses this value in + CHANGE MASTER). + */ + log_pos+= uint4korr(buf + EVENT_LEN_OFFSET); } -#ifndef MYSQL_CLIENT - thd = 0; -#endif + DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); + + flags= uint2korr(buf + FLAGS_OFFSET); + if ((buf[EVENT_TYPE_OFFSET] == FORMAT_DESCRIPTION_EVENT) || + (buf[EVENT_TYPE_OFFSET] == ROTATE_EVENT)) + { + /* + These events always have a header which stops here (i.e. their header is + FROZEN). + */ + /* + Initialization to zero of all other Log_event members as they're not + specified. Currently there are no such members; in the future there will + be an event UID (but Format_description and Rotate don't need this UID, + as they are not propagated through --log-slave-updates (remember the UID + is used to not play a query twice when you have two masters which are + slaves of a 3rd master). Then we are done. + */ + return; + } + /* otherwise, go on with reading the header from buf (nothing now) */ } #ifndef MYSQL_CLIENT @@ -364,10 +429,10 @@ int Log_event::exec_event(struct st_relay_log_info* rli) has already been updated. */ if ((thd->options & OPTION_BEGIN) && opt_using_transactions) - rli->inc_event_relay_log_pos(get_event_len()); + rli->inc_event_relay_log_pos(); else { - rli->inc_group_relay_log_pos(get_event_len(),log_pos); + rli->inc_group_relay_log_pos(log_pos); flush_relay_log_info(rli); /* Note that Rotate_log_event::exec_event() does not call this function, @@ -432,49 +497,95 @@ void Log_event::init_show_field_list(List<Item>* field_list) field_list->push_back(new Item_empty_string("Event_type", 20)); field_list->push_back(new Item_return_int("Server_id", 10, MYSQL_TYPE_LONG)); - field_list->push_back(new Item_return_int("Orig_log_pos", 11, + field_list->push_back(new Item_return_int("End_log_pos", 11, MYSQL_TYPE_LONGLONG)); field_list->push_back(new Item_empty_string("Info", 20)); } #endif /* !MYSQL_CLIENT */ + /* Log_event::write() */ -int Log_event::write(IO_CACHE* file) +bool Log_event::write_header(IO_CACHE* file, ulong event_data_length) { - return (write_header(file) || write_data(file)) ? -1 : 0; -} + byte header[LOG_EVENT_HEADER_LEN]; + DBUG_ENTER("Log_event::write_header"); + /* Store number of bytes that will be written by this event */ + data_written= event_data_length + sizeof(header); -/* - Log_event::write_header() -*/ + /* + log_pos != 0 if this is relay-log event. In this case we should not + change the position + */ -int Log_event::write_header(IO_CACHE* file) -{ - char buf[LOG_EVENT_HEADER_LEN]; - char* pos = buf; - int4store(pos, (ulong) when); // timestamp - pos += 4; - *pos++ = get_type_code(); // event type code - int4store(pos, server_id); - pos += 4; - long tmp=get_data_size() + LOG_EVENT_HEADER_LEN; - int4store(pos, tmp); - pos += 4; - int4store(pos, log_pos); - pos += 4; - int2store(pos, flags); - pos += 2; - return (my_b_safe_write(file, (byte*) buf, (uint) (pos - buf))); + if (is_artificial_event()) + { + /* + We should not do any cleanup on slave when reading this. We + mark this by setting log_pos to 0. Start_log_event_v3() will + detect this on reading and set artificial_event=1 for the event. 
+ */ + log_pos= 0; + } + else if (!log_pos) + { + /* + Calculate position of end of event + + Note that with a SEQ_READ_APPEND cache, my_b_tell() does not + work well. So this will give slightly wrong positions for the + Format_desc/Rotate/Stop events which the slave writes to its + relay log. For example, the initial Format_desc will have + end_log_pos=91 instead of 95. Because after writing the first 4 + bytes of the relay log, my_b_tell() still reports 0. Because + my_b_append() does not update the counter which my_b_tell() + later uses (one should probably use my_b_append_tell() to work + around this). To get right positions even when writing to the + relay log, we use the (new) my_b_safe_tell(). + + Note that this raises a question on the correctness of all these + DBUG_ASSERT(my_b_tell()=rli->event_relay_log_pos). + + If in a transaction, the log_pos which we calculate below is not + very good (because then my_b_safe_tell() returns start position + of the BEGIN, so it's like the statement was at the BEGIN's + place), but it's not a very serious problem (as the slave, when + it is in a transaction, does not take those end_log_pos into + account (as it calls inc_event_relay_log_pos()). To be fixed + later, so that it looks less strange. But not bug. + */ + + log_pos= my_b_safe_tell(file)+data_written; + } + + /* + Header will be of size LOG_EVENT_HEADER_LEN for all events, except for + FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT, where it will be + LOG_EVENT_MINIMAL_HEADER_LEN (remember these 2 have a frozen header, + because we read them before knowing the format). + */ + + int4store(header, (ulong) when); // timestamp + header[EVENT_TYPE_OFFSET]= get_type_code(); + int4store(header+ SERVER_ID_OFFSET, server_id); + int4store(header+ EVENT_LEN_OFFSET, data_written); + int4store(header+ LOG_POS_OFFSET, log_pos); + int2store(header+ FLAGS_OFFSET, flags); + + DBUG_RETURN(my_b_safe_write(file, header, sizeof(header)) != 0); } /* Log_event::read_log_event() + + This needn't be format-tolerant, because we only read + LOG_EVENT_MINIMAL_HEADER_LEN (we just want to read the event's length). + */ #ifndef MYSQL_CLIENT @@ -483,7 +594,7 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, { ulong data_len; int result=0; - char buf[LOG_EVENT_HEADER_LEN]; + char buf[LOG_EVENT_MINIMAL_HEADER_LEN]; DBUG_ENTER("read_log_event"); if (log_lock) @@ -503,24 +614,25 @@ int Log_event::read_log_event(IO_CACHE* file, String* packet, goto end; } data_len= uint4korr(buf + EVENT_LEN_OFFSET); - if (data_len < LOG_EVENT_HEADER_LEN || + if (data_len < LOG_EVENT_MINIMAL_HEADER_LEN || data_len > current_thd->variables.max_allowed_packet) { DBUG_PRINT("error",("data_len: %ld", data_len)); - result= ((data_len < LOG_EVENT_HEADER_LEN) ? LOG_READ_BOGUS : + result= ((data_len < LOG_EVENT_MINIMAL_HEADER_LEN) ? LOG_READ_BOGUS : LOG_READ_TOO_LARGE); goto end; } packet->append(buf, sizeof(buf)); - data_len-= LOG_EVENT_HEADER_LEN; + data_len-= LOG_EVENT_MINIMAL_HEADER_LEN; if (data_len) { if (packet->append(file, data_len)) { /* - Here we should never hit EOF in a non-error condition. + Here if we hit EOF it's really an error: as data_len is >=0 + there's supposed to be more bytes available. EOF means we are reading the event partially, which should - never happen. + never happen: either we read badly or the binlog is truncated. */ result= file->error >= 0 ? 
LOG_READ_TRUNC: LOG_READ_IO; /* Implicit goto end; */ @@ -548,24 +660,42 @@ end: Log_event::read_log_event() NOTE: - Allocates memory; The caller is responsible for clean-up + Allocates memory; The caller is responsible for clean-up. */ #ifndef MYSQL_CLIENT Log_event* Log_event::read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock, - bool old_format) + const Format_description_log_event *description_event) #else -Log_event* Log_event::read_log_event(IO_CACHE* file, bool old_format) +Log_event* Log_event::read_log_event(IO_CACHE* file, + const Format_description_log_event *description_event) #endif { - char head[LOG_EVENT_HEADER_LEN]; - uint header_size= old_format ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + DBUG_ASSERT(description_event); + char head[LOG_EVENT_MINIMAL_HEADER_LEN]; + /* + First we only want to read at most LOG_EVENT_MINIMAL_HEADER_LEN, just to + check the event for sanity and to know its length; no need to really parse + it. We say "at most" because this could be a 3.23 master, which has header + of 13 bytes, whereas LOG_EVENT_MINIMAL_HEADER_LEN is 19 bytes (it's "minimal" + over the set {MySQL >=4.0}). + */ + uint header_size= min(description_event->common_header_len, + LOG_EVENT_MINIMAL_HEADER_LEN); LOCK_MUTEX; + DBUG_PRINT("info", ("my_b_tell=%lu", my_b_tell(file))); if (my_b_read(file, (byte *) head, header_size)) { + DBUG_PRINT("info", ("Log_event::read_log_event(IO_CACHE*,Format_desc*) \ +failed my_b_read")); UNLOCK_MUTEX; + /* + No error here; it could be that we are at the file's end. However if the + next my_b_read() fails (below), it will be an error as we were able to + read the first bytes. + */ return 0; } @@ -599,7 +729,8 @@ Log_event* Log_event::read_log_event(IO_CACHE* file, bool old_format) error = "read error"; goto err; } - if ((res = read_log_event(buf, data_len, &error, old_format))) + if ((res= read_log_event(buf, data_len, &error, + description_event))) res->register_temp_buf(buf); err: @@ -626,13 +757,18 @@ Error in Log_event::read_log_event(): '%s', data_len: %d, event_type: %d", /* Log_event::read_log_event() + Binlog format tolerance is in (buf, event_len, description_event) + constructors. 
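
Editor's aside: Log_event::write_header() above fills the fixed 19-byte common header (timestamp, type, server_id, total event length, end_log_pos, flags) and, when log_pos was not preset, computes end_log_pos as the current write position plus the bytes the event is about to write. A compact sketch of that layout; the offsets mirror the *_OFFSET constants, the stores assume a little-endian host in place of int4store/int2store, and my_b_safe_tell() is replaced by a caller-supplied position:

  #include <cstdint>
  #include <cstring>

  static const int LOG_EVENT_HEADER_LEN_SKETCH= 19;

  static void store4(unsigned char *p, uint32_t v) { std::memcpy(p, &v, 4); } // little-endian host assumed
  static void store2(unsigned char *p, uint16_t v) { std::memcpy(p, &v, 2); }

  // Build the common header; data_len is the body size, cur_pos the file
  // position before the header is written, so end_log_pos points past the event.
  static void write_header_sketch(unsigned char header[LOG_EVENT_HEADER_LEN_SKETCH],
                                  uint32_t when, unsigned char type_code,
                                  uint32_t server_id, uint32_t data_len,
                                  uint32_t cur_pos, uint16_t flags)
  {
    uint32_t event_len= data_len + LOG_EVENT_HEADER_LEN_SKETCH;
    store4(header + 0,  when);                 // timestamp
    header[4]= type_code;                      // EVENT_TYPE_OFFSET
    store4(header + 5,  server_id);            // SERVER_ID_OFFSET
    store4(header + 9,  event_len);            // EVENT_LEN_OFFSET
    store4(header + 13, cur_pos + event_len);  // LOG_POS_OFFSET = end_log_pos
    store2(header + 17, flags);                // FLAGS_OFFSET
  }
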
*/ -Log_event* Log_event::read_log_event(const char* buf, int event_len, - const char **error, bool old_format) +Log_event* Log_event::read_log_event(const char* buf, uint event_len, + const char **error, + const Format_description_log_event *description_event) { - DBUG_ENTER("Log_event::read_log_event"); - + Log_event* ev; + DBUG_ENTER("Log_event::read_log_event(char*,...)"); + DBUG_ASSERT(description_event); + DBUG_PRINT("info", ("binlog_version: %d", description_event->binlog_version)); if (event_len < EVENT_LEN_OFFSET || (uint) event_len != uint4korr(buf+EVENT_LEN_OFFSET)) { @@ -640,74 +776,87 @@ Log_event* Log_event::read_log_event(const char* buf, int event_len, DBUG_RETURN(NULL); // general sanity check - will fail on a partial read } - Log_event* ev = NULL; - switch(buf[EVENT_TYPE_OFFSET]) { case QUERY_EVENT: - ev = new Query_log_event(buf, event_len, old_format); + ev = new Query_log_event(buf, event_len, description_event); break; case LOAD_EVENT: - ev = new Create_file_log_event(buf, event_len, old_format); + ev = new Create_file_log_event(buf, event_len, description_event); break; case NEW_LOAD_EVENT: - ev = new Load_log_event(buf, event_len, old_format); + ev = new Load_log_event(buf, event_len, description_event); break; case ROTATE_EVENT: - ev = new Rotate_log_event(buf, event_len, old_format); + ev = new Rotate_log_event(buf, event_len, description_event); break; #ifdef HAVE_REPLICATION - case SLAVE_EVENT: + case SLAVE_EVENT: /* can never happen (unused event) */ ev = new Slave_log_event(buf, event_len); break; #endif /* HAVE_REPLICATION */ case CREATE_FILE_EVENT: - ev = new Create_file_log_event(buf, event_len, old_format); + ev = new Create_file_log_event(buf, event_len, description_event); break; case APPEND_BLOCK_EVENT: - ev = new Append_block_log_event(buf, event_len); + ev = new Append_block_log_event(buf, event_len, description_event); break; case DELETE_FILE_EVENT: - ev = new Delete_file_log_event(buf, event_len); + ev = new Delete_file_log_event(buf, event_len, description_event); break; case EXEC_LOAD_EVENT: - ev = new Execute_load_log_event(buf, event_len); + ev = new Execute_load_log_event(buf, event_len, description_event); break; - case START_EVENT: - ev = new Start_log_event(buf, old_format); + case START_EVENT_V3: /* this is sent only by MySQL <=4.x */ + ev = new Start_log_event_v3(buf, description_event); break; #ifdef HAVE_REPLICATION case STOP_EVENT: - ev = new Stop_log_event(buf, old_format); + ev = new Stop_log_event(buf, description_event); break; #endif /* HAVE_REPLICATION */ case INTVAR_EVENT: - ev = new Intvar_log_event(buf, old_format); + ev = new Intvar_log_event(buf, description_event); break; case RAND_EVENT: - ev = new Rand_log_event(buf, old_format); + ev = new Rand_log_event(buf, description_event); break; case USER_VAR_EVENT: - ev = new User_var_log_event(buf, old_format); + ev = new User_var_log_event(buf, description_event); + break; + case FORMAT_DESCRIPTION_EVENT: + ev = new Format_description_log_event(buf, event_len, description_event); break; default: + DBUG_PRINT("error",("Unknown evernt code: %d",(int) buf[EVENT_TYPE_OFFSET])); + ev= NULL; break; } + /* + is_valid() are small event-specific sanity tests which are important; for + example there are some my_malloc() in constructors + (e.g. Query_log_event::Query_log_event(char*...)); when these my_malloc() + fail we can't return an error out of the constructor (because constructor + is "void") ; so instead we leave the pointer we wanted to allocate + (e.g. 
'query') to 0 and we test it in is_valid(). Same for + Format_description_log_event, member 'post_header_len'. + */ if (!ev || !ev->is_valid()) { + DBUG_PRINT("error",("Found invalid event in binary log")); + delete ev; #ifdef MYSQL_CLIENT - if (!force_opt) + if (!force_opt) /* then mysqlbinlog dies */ { *error= "Found invalid event in binary log"; DBUG_RETURN(0); } - ev= new Unknown_log_event(buf, old_format); + ev= new Unknown_log_event(buf, description_event); #else *error= "Found invalid event in binary log"; DBUG_RETURN(0); #endif } - ev->cached_event_len = event_len; DBUG_RETURN(ev); } @@ -722,7 +871,7 @@ void Log_event::print_header(FILE* file) char llbuff[22]; fputc('#', file); print_timestamp(file); - fprintf(file, " server id %d log_pos %s ", server_id, + fprintf(file, " server id %d end_log_pos %s ", server_id, llstr(log_pos,llbuff)); } @@ -754,19 +903,6 @@ void Log_event::print_timestamp(FILE* file, time_t* ts) #endif /* MYSQL_CLIENT */ -/* - Log_event::set_log_pos() -*/ - -#ifndef MYSQL_CLIENT -void Log_event::set_log_pos(MYSQL_LOG* log) -{ - if (!log_pos) - log_pos = my_b_tell(&log->log_file); -} -#endif /* !MYSQL_CLIENT */ - - /************************************************************************** Query_log_event methods **************************************************************************/ @@ -775,10 +911,15 @@ void Log_event::set_log_pos(MYSQL_LOG* log) /* Query_log_event::pack_info() + This (which is used only for SHOW BINLOG EVENTS) could be updated to + print SET @@session_var=. But this is not urgent, as SHOW BINLOG EVENTS is + only an information, it does not produce suitable queries to replay (for + example it does not print LOAD DATA INFILE). */ void Query_log_event::pack_info(Protocol *protocol) { + // TODO: show the catalog ?? char *buf, *pos; if (!(buf= my_malloc(9 + db_len + q_len, MYF(MY_WME)))) return; @@ -803,31 +944,27 @@ void Query_log_event::pack_info(Protocol *protocol) /* Query_log_event::write() -*/ - -int Query_log_event::write(IO_CACHE* file) -{ - return query ? Log_event::write(file) : -1; -} - -/* - Query_log_event::write_data() + NOTES: + In this event we have to modify the header to have the correct + EVENT_LEN_OFFSET as we don't yet know how many status variables we + will print! */ -int Query_log_event::write_data(IO_CACHE* file) +bool Query_log_event::write(IO_CACHE* file) { - char buf[QUERY_HEADER_LEN]; + uchar buf[QUERY_HEADER_LEN+1+4+1+8+1+1+FN_REFLEN+5], *start, *start_of_status; + ulong event_length; if (!query) - return -1; - + return 1; // Something wrong with event + /* We want to store the thread id: (- as an information for the user when he reads the binlog) - if the query uses temporary table: for the slave SQL thread to know to which master connection the temp table belongs. - Now imagine we (write_data()) are called by the slave SQL thread (we are + Now imagine we (write()) are called by the slave SQL thread (we are logging a query executed by this thread; the slave runs with --log-slave-updates). Then this query will be logged with thread_id=the_thread_id_of_the_SQL_thread. Imagine that 2 temp tables of @@ -864,9 +1001,77 @@ int Query_log_event::write_data(IO_CACHE* file) buf[Q_DB_LEN_OFFSET] = (char) db_len; int2store(buf + Q_ERR_CODE_OFFSET, error_code); - return (my_b_safe_write(file, (byte*) buf, QUERY_HEADER_LEN) || - my_b_safe_write(file, (db) ? (byte*) db : (byte*)"", db_len + 1) || - my_b_safe_write(file, (byte*) query, q_len)) ? -1 : 0; + /* + You MUST always write status vars in increasing order of code. 
This + guarantees that a slightly older slave will be able to parse those he + knows. + */ + start_of_status= start= buf+QUERY_HEADER_LEN; + if (flags2_inited) + { + *(start++)= Q_FLAGS2_CODE; + int4store(start, flags2); + start+= 4; + } + if (sql_mode_inited) + { + *(start++)= Q_SQL_MODE_CODE; + int8store(start, sql_mode); + start+= 8; + } + if (catalog_len >= 0) // i.e. "catalog inited" (false for 4.0 events) + { + *(start++)= Q_CATALOG_CODE; + *(start++)= (uchar) catalog_len; + bmove(start, catalog, catalog_len); + start+= catalog_len; + /* + We write a \0 at the end. As we also have written the length, it's + apparently useless; but in fact it enables us to just do + catalog= a_pointer_to_the_buffer_of_the_read_event + later in the slave SQL thread. + If we didn't have the \0, we would need to memdup to build the catalog in + the slave SQL thread. + And still the interest of having the length too is that in the slave SQL + thread we immediately know at which position the catalog ends (no need to + search for '\0'. In other words: length saves search, \0 saves mem alloc, + at the cost of 1 redundant byte on the disk. + Note that this is only a fix until we change 'catalog' to LEX_STRING + (then we won't need the \0). + */ + *(start++)= '\0'; + } + if (auto_increment_increment != 1) + { + *start++= Q_AUTO_INCREMENT; + int2store(start, auto_increment_increment); + int2store(start+2, auto_increment_offset); + start+= 4; + } + /* + Here there could be code like + if (command-line-option-which-says-"log_this_variable") + { + *(start++)= Q_THIS_VARIABLE_CODE; + int4store(start, this_variable); + start+= 4; + } + */ + + /* Store length of status variables */ + status_vars_len= (uint) (start-start_of_status); + int2store(buf + Q_STATUS_VARS_LEN_OFFSET, status_vars_len); + + /* + Calculate length of whole event + The "1" below is the \0 in the db's length + */ + event_length= (uint) (start-buf) + db_len + 1 + q_len; + + return (write_header(file, event_length) || + my_b_safe_write(file, (byte*) buf, (uint) (start-buf)) || + my_b_safe_write(file, (db) ? (byte*) db : (byte*)"", db_len + 1) || + my_b_safe_write(file, (byte*) query, q_len)) ? 1 : 0; } @@ -882,60 +1087,162 @@ Query_log_event::Query_log_event(THD* thd_arg, const char* query_arg, ((thd_arg->tmp_table_used ? LOG_EVENT_THREAD_SPECIFIC_F : 0) | (suppress_use ? LOG_EVENT_SUPPRESS_USE_F : 0)), using_trans), - data_buf(0), query(query_arg), + data_buf(0), query(query_arg), catalog(thd_arg->catalog), db(thd_arg->db), q_len((uint32) query_length), - error_code(thd_arg->killed ? + error_code((thd_arg->killed != THD::NOT_KILLED) ? ((thd_arg->system_thread & SYSTEM_THREAD_DELAYED_INSERT) ? - 0 : ER_SERVER_SHUTDOWN) : thd_arg->net.last_errno), + 0 : thd->killed_errno()) : thd_arg->net.last_errno), thread_id(thd_arg->thread_id), /* save the original thread id; we already know the server id */ - slave_proxy_id(thd_arg->variables.pseudo_thread_id) + slave_proxy_id(thd_arg->variables.pseudo_thread_id), + flags2_inited(1), sql_mode_inited(1), flags2(0), + sql_mode(thd_arg->variables.sql_mode), + auto_increment_increment(thd_arg->variables.auto_increment_increment), + auto_increment_offset(thd_arg->variables.auto_increment_offset) { time_t end_time; time(&end_time); exec_time = (ulong) (end_time - thd->start_time); + catalog_len = (catalog) ? (uint32) strlen(catalog) : 0; + status_vars_len= 1+4+1+8+1+1+catalog_len+1; db_len = (db) ? 
(uint32) strlen(db) : 0; + /* + If we don't use flags2 for anything else than options contained in + thd->options, it would be more efficient to flags2=thd_arg->options + (OPTIONS_WRITTEN_TO_BINLOG would be used only at reading time). + But it's likely that we don't want to use 32 bits for 3 bits; in the future + we will probably want to reclaim the 29 bits. So we need the &. + */ + flags2= thd_arg->options & OPTIONS_WRITTEN_TO_BIN_LOG; + + DBUG_PRINT("info",("Query_log_event has flags2=%lu sql_mode=%lu",flags2,sql_mode)); } #endif /* MYSQL_CLIENT */ /* Query_log_event::Query_log_event() + This is used by the SQL slave thread to prepare the event before execution. */ -Query_log_event::Query_log_event(const char* buf, int event_len, - bool old_format) - :Log_event(buf, old_format),data_buf(0), query(NULL), db(NULL) +Query_log_event::Query_log_event(const char* buf, uint event_len, + const Format_description_log_event *description_event) + :Log_event(buf, description_event), data_buf(0), query(NullS), catalog(NullS), + db(NullS), catalog_len(0), status_vars_len(0), + flags2_inited(0), sql_mode_inited(0) { ulong data_len; - if (old_format) + uint32 tmp; + uint8 common_header_len, post_header_len; + const char *start, *end; + DBUG_ENTER("Query_log_event::Query_log_event(char*,...)"); + + common_header_len= description_event->common_header_len; + post_header_len= description_event->post_header_len[QUERY_EVENT-1]; + DBUG_PRINT("info",("event_len=%ld, common_header_len=%d, post_header_len=%d", + event_len, common_header_len, post_header_len)); + + /* + We test if the event's length is sensible, and if so we compute data_len. + We cannot rely on QUERY_HEADER_LEN here as it would not be format-tolerant. + We use QUERY_HEADER_MINIMAL_LEN which is the same for 3.23, 4.0 & 5.0. + */ + if (event_len < (uint)(common_header_len + post_header_len)) + DBUG_VOID_RETURN; + data_len = event_len - (common_header_len + post_header_len); + buf+= common_header_len; + + slave_proxy_id= thread_id = uint4korr(buf + Q_THREAD_ID_OFFSET); + exec_time = uint4korr(buf + Q_EXEC_TIME_OFFSET); + db_len = (uint)buf[Q_DB_LEN_OFFSET]; + error_code = uint2korr(buf + Q_ERR_CODE_OFFSET); + /* If auto_increment is not set by query_event, they should not be used */ + auto_increment_increment= auto_increment_offset= 1; + + /* + 5.0 format starts here. 
+ Depending on the format, we may or not have affected/warnings etc + The remnent post-header to be parsed has length: + */ + tmp= post_header_len - QUERY_HEADER_MINIMAL_LEN; + if (tmp) { - if ((uint)event_len < OLD_HEADER_LEN + QUERY_HEADER_LEN) - return; - data_len = event_len - (QUERY_HEADER_LEN + OLD_HEADER_LEN); - buf += OLD_HEADER_LEN; + status_vars_len= uint2korr(buf + Q_STATUS_VARS_LEN_OFFSET); + data_len-= status_vars_len; + DBUG_PRINT("info", ("Query_log_event has status_vars_len: %u", + (uint) status_vars_len)); + tmp-= 2; } - else + /* we have parsed everything we know in the post header */ +#ifndef DBUG_OFF + if (tmp) /* this is probably a master newer than us */ + DBUG_PRINT("info", ("Query_log_event has longer post header than we know\ + (%d more bytes)", tmp)); +#endif + + /* variable-part: the status vars; only in MySQL 5.0 */ + + start= (char*) (buf+post_header_len); + end= (char*) (start+status_vars_len); + for (const uchar* pos= (const uchar*) start; pos < (const uchar*) end;) { - if ((uint)event_len < QUERY_EVENT_OVERHEAD) - return; - data_len = event_len - QUERY_EVENT_OVERHEAD; - buf += LOG_EVENT_HEADER_LEN; + switch (*pos++) { + case Q_FLAGS2_CODE: + flags2_inited= 1; + flags2= uint4korr(pos); + DBUG_PRINT("info",("In Query_log_event, read flags2: %lu", flags2)); + pos+= 4; + break; + case Q_SQL_MODE_CODE: + { +#ifndef DBUG_OFF + char buff[22]; +#endif + sql_mode_inited= 1; + sql_mode= (ulong) uint8korr(pos); // QQ: Fix when sql_mode is ulonglong + DBUG_PRINT("info",("In Query_log_event, read sql_mode: %s", + llstr(sql_mode, buff))); + pos+= 8; + break; + } + case Q_CATALOG_CODE: + catalog_len= *pos; + if (catalog_len) + catalog= (char*) pos+1; // Will be copied later + pos+= catalog_len+2; + break; + case Q_AUTO_INCREMENT: + auto_increment_increment= uint2korr(pos); + auto_increment_offset= uint2korr(pos+2); + pos+= 4; + break; + default: + /* That's why you must write status vars in growing order of code */ + DBUG_PRINT("info",("Query_log_event has unknown status vars (first has\ + code: %u), skipping the rest of them", (uint) *(pos-1))); + pos= (const uchar*) end; // Break look + } } - - exec_time = uint4korr(buf + Q_EXEC_TIME_OFFSET); - error_code = uint2korr(buf + Q_ERR_CODE_OFFSET); - - if (!(data_buf = (char*) my_malloc(data_len + 1, MYF(MY_WME)))) - return; - - memcpy(data_buf, buf + Q_DATA_OFFSET, data_len); - slave_proxy_id= thread_id= uint4korr(buf + Q_THREAD_ID_OFFSET); - db = data_buf; - db_len = (uint)buf[Q_DB_LEN_OFFSET]; - query=data_buf + db_len + 1; - q_len = data_len - 1 - db_len; - *((char*)query+q_len) = 0; + + /* A 2nd variable part; this is common to all versions */ + + if (!(start= data_buf = (char*) my_malloc(catalog_len + data_len +2, MYF(MY_WME)))) + DBUG_VOID_RETURN; + if (catalog) // If catalog is given + { + memcpy((char*) start, catalog, catalog_len+1); // Copy name and end \0 + catalog= start; + start+= catalog_len+1; + } + memcpy((char*) start, end, data_len); // Copy db and query + ((char*) start)[data_len]= '\0'; // End query with \0 (For safetly) + db= start; + query= start + db_len + 1; + q_len= data_len - db_len -1; + /* This is used to detect wrong parsing. Could be removed in the future. 
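
Editor's aside: the status-variable block parsed above is why the writer must emit codes in increasing order: a reader that meets a code it does not know simply stops consuming the rest of the block, which is safe because a newer master only appends newer codes after the ones an older slave understands. A stripped-down sketch of that tolerant reader over a byte range (the code values are placeholders; the payload sizes match Q_FLAGS2_CODE at 4 bytes and Q_SQL_MODE_CODE at 8 bytes):

  #include <cstdint>
  #include <cstring>

  enum { Q_FLAGS2_CODE_SKETCH= 0, Q_SQL_MODE_CODE_SKETCH= 1 };

  // Walk the (code, fixed-size payload) entries; an unknown code ends the walk,
  // which is correct only because codes are written in increasing order.
  static void read_status_vars_sketch(const unsigned char *pos, const unsigned char *end,
                                      uint32_t *flags2, uint64_t *sql_mode)
  {
    while (pos < end)
    {
      switch (*pos++)
      {
      case Q_FLAGS2_CODE_SKETCH:
        std::memcpy(flags2, pos, 4);   pos+= 4; break;
      case Q_SQL_MODE_CODE_SKETCH:
        std::memcpy(sql_mode, pos, 8); pos+= 8; break;
      default:
        pos= end;                      // unknown (newer) code: skip the rest
      }
    }
  }
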
*/ + DBUG_PRINT("info", ("catalog: '%s' len: %u db: '%s' len: %u q_len: %lu", + catalog, (uint) catalog_len, db, (uint) db_len,q_len)); + DBUG_VOID_RETURN; } @@ -944,9 +1251,14 @@ Query_log_event::Query_log_event(const char* buf, int event_len, */ #ifdef MYSQL_CLIENT -void Query_log_event::print(FILE* file, bool short_form, char* last_db) +void Query_log_event::print(FILE* file, bool short_form, + LAST_EVENT_INFO* last_event_info) { + // TODO: print the catalog ?? char buff[40],*end; // Enough for SET TIMESTAMP + bool different_db= 1; + uint32 tmp; + if (!short_form) { print_header(file); @@ -954,20 +1266,12 @@ void Query_log_event::print(FILE* file, bool short_form, char* last_db) (ulong) thread_id, (ulong) exec_time, error_code); } - bool different_db= 1; - - if (!(flags & LOG_EVENT_SUPPRESS_USE_F)) + if (!(flags & LOG_EVENT_SUPPRESS_USE_F) && db) { - if (db && last_db) - { - if (different_db= memcmp(last_db, db, db_len + 1)) - memcpy(last_db, db, db_len + 1); - } - - if (db && db[0] && different_db) - { + if (different_db= memcmp(last_event_info->db, db, db_len + 1)) + memcpy(last_event_info->db, db, db_len + 1); + if (db[0] && different_db) fprintf(file, "use %s;\n", db); - } } end=int10_to_str((long) when, strmov(buff,"SET TIMESTAMP="),10); @@ -976,8 +1280,76 @@ void Query_log_event::print(FILE* file, bool short_form, char* last_db) my_fwrite(file, (byte*) buff, (uint) (end-buff),MYF(MY_NABP | MY_WME)); if (flags & LOG_EVENT_THREAD_SPECIFIC_F) fprintf(file,"SET @@session.pseudo_thread_id=%lu;\n",(ulong)thread_id); + /* + Now the session variables; + it's more efficient to pass SQL_MODE as a number instead of a + comma-separated list. + FOREIGN_KEY_CHECKS, SQL_AUTO_IS_NULL, UNIQUE_CHECKS are session-only + variables (they have no global version; they're not listed in sql_class.h), + The tests below work for pure binlogs or pure relay logs. Won't work for + mixed relay logs but we don't create mixed relay logs (that is, there is no + relay log with a format change except within the 3 first events, which + mysqlbinlog handles gracefully). So this code should always be good. + */ + + if (likely(flags2_inited)) /* likely as this will mainly read 5.0 logs */ + { + /* tmp is a bitmask of bits which have changed. */ + if (likely(last_event_info->flags2_inited)) + /* All bits which have changed */ + tmp= (last_event_info->flags2) ^ flags2; + else /* that's the first Query event we read */ + { + last_event_info->flags2_inited= 1; + tmp= ~((uint32)0); /* all bits have changed */ + } + + if (unlikely(tmp)) /* some bits have changed */ + { + bool need_comma= 0; + fprintf(file, "SET "); + print_set_option(file, tmp, OPTION_NO_FOREIGN_KEY_CHECKS, ~flags2, + "@@session.foreign_key_checks", &need_comma); + print_set_option(file, tmp, OPTION_AUTO_IS_NULL, flags2, + "@@session.sql_auto_is_null", &need_comma); + print_set_option(file, tmp, OPTION_RELAXED_UNIQUE_CHECKS, ~flags2, + "@@session.unique_checks", &need_comma); + fprintf(file,";\n"); + last_event_info->flags2= flags2; + } + } + + /* + If flags2_inited==0, this is an event from 3.23 or 4.0; nothing to print + (remember we don't produce mixed relay logs so there cannot be 5.0 events + before that one so there is nothing to reset). 
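
Editor's aside: Query_log_event::print() above only emits a SET line for option bits that actually changed since the previous printed event; the changed set is the XOR of the old and new flags2, and print_set_option() then reports each tracked bit's new value. A sketch of that delta printing (the option masks are placeholders, not the server's OPTION_* values; 'inverted' mirrors how foreign_key_checks and unique_checks are stored as NO_*/RELAXED_* bits and printed negated):

  #include <cstdio>
  #include <cstdint>

  struct OptBit { uint32_t mask; const char *name; bool inverted; };

  // Print "SET name=0/1, ..." for the bits that differ between old_flags and new_flags.
  static void print_changed_options_sketch(uint32_t old_flags, uint32_t new_flags,
                                           const OptBit *opts, int n_opts)
  {
    uint32_t changed= old_flags ^ new_flags;
    if (!changed)
      return;
    bool need_comma= false;
    std::printf("SET ");
    for (int i= 0; i < n_opts; i++)
    {
      if (!(changed & opts[i].mask))
        continue;
      bool value= (new_flags & opts[i].mask) != 0;
      std::printf("%s%s=%d", need_comma ? ", " : "", opts[i].name,
                  (int) (opts[i].inverted ? !value : value));
      need_comma= true;
    }
    std::printf(";\n");
  }
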
+ */ + + if (likely(sql_mode_inited)) + { + if (unlikely(!last_event_info->sql_mode_inited)) /* first Query event */ + { + last_event_info->sql_mode_inited= 1; + /* force a difference to force write */ + last_event_info->sql_mode= ~sql_mode; + } + if (unlikely(last_event_info->sql_mode != sql_mode)) + { + fprintf(file,"SET @@session.sql_mode=%lu;\n",(ulong)sql_mode); + last_event_info->sql_mode= sql_mode; + } + } + if (last_event_info->auto_increment_increment != auto_increment_increment || + last_event_info->auto_increment_offset != auto_increment_offset) + { + fprintf(file,"SET @@session.auto_increment_increment=%lu, @@session.auto_increment_offset=%lu;\n", + auto_increment_increment,auto_increment_offset); + last_event_info->auto_increment_increment= auto_increment_increment; + last_event_info->auto_increment_offset= auto_increment_offset; + } + my_fwrite(file, (byte*) query, q_len, MYF(MY_NABP | MY_WME)); - fprintf(file, ";\n"); + fputs(";\n", file); } #endif /* MYSQL_CLIENT */ @@ -990,20 +1362,31 @@ void Query_log_event::print(FILE* file, bool short_form, char* last_db) int Query_log_event::exec_event(struct st_relay_log_info* rli) { int expected_error,actual_error= 0; + /* + Colleagues: please never free(thd->catalog) in MySQL. This would lead to + bugs as here thd->catalog is a part of an alloced block, not an entire + alloced block (see Query_log_event::exec_event()). Same for thd->db. + Thank you. + */ + thd->catalog= (char*) catalog; thd->db_length= db_len; thd->db= (char*) rewrite_db(db, &thd->db_length); + thd->variables.auto_increment_increment= auto_increment_increment; + thd->variables.auto_increment_offset= auto_increment_offset; /* - InnoDB internally stores the master log position it has processed so far; - position to store is of the END of the current log event. + InnoDB internally stores the master log position it has executed so far, + i.e. the position just after the COMMIT event. + When InnoDB will want to store, the positions in rli won't have + been updated yet, so group_master_log_* will point to old BEGIN + and event_master_log* will point to the beginning of current COMMIT. + But log_pos of the COMMIT Query event is what we want, i.e. the pos of the + END of the current log event (COMMIT). We save it in rli so that InnoDB can + access it. */ -#if MYSQL_VERSION_ID < 50000 - rli->future_group_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else - /* In 5.0 we store the end_log_pos in the relay log so no problem */ rli->future_group_master_log_pos= log_pos; -#endif + DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); + clear_all_errors(thd, rli); if (db_ok(thd->db, replicate_do_db, replicate_ignore_db)) @@ -1015,9 +1398,33 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli) thd->query_id = query_id++; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->variables.pseudo_thread_id= thread_id; // for temp tables - mysql_log.write(thd,COM_QUERY,"%s",thd->query); DBUG_PRINT("query",("%s",thd->query)); + + if (flags2_inited) + /* + all bits of thd->options which are 1 in OPTIONS_WRITTEN_TO_BIN_LOG must + take their value from flags2. + */ + thd->options= flags2|(thd->options & ~(ulong)OPTIONS_WRITTEN_TO_BIN_LOG); + /* + else, we are in a 3.23/4.0 binlog; we previously received a + Rotate_log_event which reset thd->options and sql_mode, so nothing to do. + */ + + /* + We do not replicate IGNORE_DIR_IN_CREATE. 
That is, if the master is a + slave which runs with SQL_MODE=IGNORE_DIR_IN_CREATE, this should not + force us to ignore the dir too. Imagine you are a ring of machines, and + one has a disk problem so that you temporarily need IGNORE_DIR_IN_CREATE + on this machine; you don't want it to propagate elsewhere (you don't want + all slaves to start ignoring the dirs). + */ + if (sql_mode_inited) + thd->variables.sql_mode= + (ulong) ((thd->variables.sql_mode & MODE_NO_DIR_IN_CREATE) | + (sql_mode & ~(ulong) MODE_NO_DIR_IN_CREATE)); + if (ignored_error_code((expected_error= error_code)) || !check_expected_error(thd,rli,expected_error)) mysql_parse(thd, thd->query, q_len); @@ -1045,20 +1452,20 @@ START SLAVE; . Query: '%s'", expected_error, thd->query); } goto end; } - + /* If we expected a non-zero error code, and we don't get the same error code, and none of them should be ignored. */ DBUG_PRINT("info",("expected_error: %d last_errno: %d", - expected_error, thd->net.last_errno)); + expected_error, thd->net.last_errno)); if ((expected_error != (actual_error= thd->net.last_errno)) && - expected_error && - !ignored_error_code(actual_error) && - !ignored_error_code(expected_error)) + expected_error && + !ignored_error_code(actual_error) && + !ignored_error_code(expected_error)) { slave_print_error(rli, 0, - "\ + "\ Query caused different errors on master and slave. \ Error on master: '%s' (%d), Error on slave: '%s' (%d). \ Default database: '%s'. Query: '%s'", @@ -1066,14 +1473,14 @@ Default database: '%s'. Query: '%s'", expected_error, actual_error ? thd->net.last_error: "no error", actual_error, - print_slave_db_safe(thd->db), query); + print_slave_db_safe(db), query); thd->query_error= 1; } /* If we get the same error code as expected, or they should be ignored. */ else if (expected_error == actual_error || - ignored_error_code(actual_error)) + ignored_error_code(actual_error)) { DBUG_PRINT("info",("error ignored")); clear_all_errors(thd, rli); @@ -1090,11 +1497,43 @@ Default database: '%s'. Query: '%s'", print_slave_db_safe(thd->db), query); thd->query_error= 1; } + + /* + TODO: compare the values of "affected rows" around here. Something + like: + if ((uint32) affected_in_event != (uint32) affected_on_slave) + { + sql_print_error("Slave: did not get the expected number of affected \ + rows running query from master - expected %d, got %d (this numbers \ + should have matched modulo 4294967296).", 0, ...); + thd->query_error = 1; + } + We may also want an option to tell the slave to ignore "affected" + mismatch. This mismatch could be implemented with a new ER_ code, and + to ignore it you would use --slave-skip-errors... + + To do the comparison we need to know the value of "affected" which the + above mysql_parse() computed. And we need to know the value of + "affected" in the master's binlog. Both will be implemented later. The + important thing is that we now have the format ready to log the values + of "affected" in the binlog. So we can release 5.0.0 before effectively + logging "affected" and effectively comparing it. + */ } /* End of if (db_ok(... */ end: VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->db= 0; // prevent db from being freed + /* + Probably we have set thd->query, thd->db, thd->catalog to point to places + in the data_buf of this event. Now the event is going to be deleted + probably, so data_buf will be freed, so the thd->... listed above will be + pointers to freed memory. + So we must set them to 0, so that those bad pointers values are not later + used. 
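
Editor's aside: two small bit-merge idioms above are worth calling out: thd->options takes every bit named in OPTIONS_WRITTEN_TO_BIN_LOG from the event's flags2 while keeping the slave's other option bits, and sql_mode is applied the other way round, keeping only the slave's local MODE_NO_DIR_IN_CREATE. The general pattern, with placeholder mask values:

  #include <cstdint>

  // Take the bits selected by 'mask' from 'incoming' and every other bit from 'local'.
  static inline uint64_t merge_bits(uint64_t local, uint64_t incoming, uint64_t mask)
  {
    return (incoming & mask) | (local & ~mask);
  }

  // Placeholder masks, standing in for OPTIONS_WRITTEN_TO_BIN_LOG and
  // MODE_NO_DIR_IN_CREATE respectively.
  static const uint64_t REPLICATED_OPTION_BITS= 0x0E;
  static const uint64_t LOCAL_ONLY_MODE_BIT   = 0x200000;

  static void apply_event_state_sketch(uint64_t &options, uint64_t &sql_mode,
                                       uint64_t ev_flags2, uint64_t ev_sql_mode)
  {
    // Replicated option bits come from the event, everything else stays local.
    options=  merge_bits(options, ev_flags2, REPLICATED_OPTION_BITS);
    // sql_mode comes from the event, except the slave keeps its own
    // NO_DIR_IN_CREATE setting, which is never replicated.
    sql_mode= merge_bits(sql_mode, ev_sql_mode, ~LOCAL_ONLY_MODE_BIT);
  }
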
Note that "cleanup" queries (automatic DO RELEASE_LOCK() and DROP + TEMPORARY TABLE don't suffer from these assignments to 0 as DROP TEMPORARY + TABLE uses the db.table syntax). + */ + thd->db= thd->catalog= 0; // prevent db from being freed thd->query= 0; // just to be sure thd->query_length= thd->db_length =0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); @@ -1107,22 +1546,30 @@ end: updating query. */ return (thd->query_error ? thd->query_error : - (thd->one_shot_set ? (rli->inc_event_relay_log_pos(get_event_len()),0) : + (thd->one_shot_set ? (rli->inc_event_relay_log_pos(),0) : Log_event::exec_event(rli))); } #endif /************************************************************************** - Start_log_event methods + Start_log_event_v3 methods **************************************************************************/ +#ifndef MYSQL_CLIENT +Start_log_event_v3::Start_log_event_v3() :Log_event(), binlog_version(BINLOG_VERSION), artificial_event(0) +{ + created= when; + memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); +} +#endif + /* - Start_log_event::pack_info() + Start_log_event_v3::pack_info() */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -void Start_log_event::pack_info(Protocol *protocol) +void Start_log_event_v3::pack_info(Protocol *protocol) { char buf[12 + ST_SERVER_VER_LEN + 14 + 22], *pos; pos= strmov(buf, "Server ver: "); @@ -1135,57 +1582,69 @@ void Start_log_event::pack_info(Protocol *protocol) /* - Start_log_event::print() + Start_log_event_v3::print() */ #ifdef MYSQL_CLIENT -void Start_log_event::print(FILE* file, bool short_form, char* last_db) +void Start_log_event_v3::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info) { - if (short_form) - return; - - print_header(file); - fprintf(file, "\tStart: binlog v %d, server v %s created ", binlog_version, - server_version); - print_timestamp(file); - if (created) - fprintf(file," at startup"); - fputc('\n', file); + if (!short_form) + { + print_header(file); + fprintf(file, "\tStart: binlog v %d, server v %s created ", binlog_version, + server_version); + print_timestamp(file); + if (created) + fprintf(file," at startup"); + fputc('\n', file); + } +#ifdef WHEN_WE_HAVE_THE_RESET_CONNECTION_SQL_COMMAND + /* + This is for mysqlbinlog: like in replication, we want to delete the stale + tmp files left by an unclean shutdown of mysqld (temporary tables). Probably + this can be done with RESET CONNECTION (syntax to be defined). + */ + fprintf(file,"RESET CONNECTION;\n"); +#endif fflush(file); } #endif /* MYSQL_CLIENT */ /* - Start_log_event::Start_log_event() + Start_log_event_v3::Start_log_event_v3() */ -Start_log_event::Start_log_event(const char* buf, - bool old_format) - :Log_event(buf, old_format) +Start_log_event_v3::Start_log_event_v3(const char* buf, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) { - buf += (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - binlog_version = uint2korr(buf+ST_BINLOG_VER_OFFSET); + buf+= description_event->common_header_len; + binlog_version= uint2korr(buf+ST_BINLOG_VER_OFFSET); memcpy(server_version, buf+ST_SERVER_VER_OFFSET, ST_SERVER_VER_LEN); - created = uint4korr(buf+ST_CREATED_OFFSET); + created= uint4korr(buf+ST_CREATED_OFFSET); + /* We use log_pos to mark if this was an artificial event or not */ + artificial_event= (log_pos == 0); } /* - Start_log_event::write_data() + Start_log_event_v3::write() */ -int Start_log_event::write_data(IO_CACHE* file) +bool Start_log_event_v3::write(IO_CACHE* file) { - char buff[START_HEADER_LEN]; + char buff[START_V3_HEADER_LEN]; int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version); memcpy(buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN); int4store(buff + ST_CREATED_OFFSET,created); - return (my_b_safe_write(file, (byte*) buff, sizeof(buff)) ? -1 : 0); + return (write_header(file, sizeof(buff)) || + my_b_safe_write(file, (byte*) buff, sizeof(buff))); } + /* - Start_log_event::exec_event() + Start_log_event_v3::exec_event() The master started @@ -1204,23 +1663,29 @@ int Start_log_event::write_data(IO_CACHE* file) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) -int Start_log_event::exec_event(struct st_relay_log_info* rli) +int Start_log_event_v3::exec_event(struct st_relay_log_info* rli) { - DBUG_ENTER("Start_log_event::exec_event"); - + DBUG_ENTER("Start_log_event_v3::exec_event"); /* If the I/O thread has not started, mi->old_format is BINLOG_FORMAT_CURRENT (that's what the MASTER_INFO constructor does), so the test below is not perfect at all. */ - switch (rli->mi->old_format) { - case BINLOG_FORMAT_CURRENT: - /* - This is 4.x, so a Start_log_event is only at master startup, - so we are sure the master has restarted and cleared his temp tables. + switch (rli->relay_log.description_event_for_exec->binlog_version) + { + case 3: + case 4: + /* + This can either be 4.x (then a Start_log_event_v3 is only at master + startup so we are sure the master has restarted and cleared his temp + tables; the event always has 'created'>0) or 5.0 (then we have to test + 'created'). */ - close_temporary_tables(thd); - cleanup_load_tmpdir(); + if (created) + { + close_temporary_tables(thd); + cleanup_load_tmpdir(); + } /* As a transaction NEVER spans on 2 or more binlogs: if we have an active transaction at this point, the master died while @@ -1228,8 +1693,12 @@ int Start_log_event::exec_event(struct st_relay_log_info* rli) cache to the binlog. As the write was started, the transaction had been committed on the master, so we lack of information to replay this transaction on the slave; all we can do is stop with error. + Note: this event could be sent by the master to inform us of the format + of its binlog; in other words maybe it is not at its original place when + it comes to us; we'll know this by checking log_pos ("artificial" events + have log_pos == 0). */ - if (thd->options & OPTION_BEGIN) + if (!artificial_event && (thd->options & OPTION_BEGIN)) { slave_print_error(rli, 0, "\ Rolling back unfinished transaction (no COMMIT or ROLLBACK) from relay log. \ @@ -1243,33 +1712,274 @@ binary log."); Now the older formats; in that case load_tmpdir is cleaned up by the I/O thread. */ - case BINLOG_FORMAT_323_LESS_57: - /* - Cannot distinguish a Start_log_event generated at master startup and - one generated by master FLUSH LOGS, so cannot be sure temp tables - have to be dropped. So do nothing. 
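As a stand-alone summary of the decision this switch implements (a sketch with plain types, not the server code; the version/'created' rules are the ones stated in the comments above):

    #include <cstring>

    /* true when the arriving Start/Format_description event means the master
       really restarted, so the slave should drop its temporary tables and
       clean its load tmpdir */
    bool master_really_restarted(int binlog_version, unsigned long created,
                                 const char *master_server_version)
    {
      switch (binlog_version) {
      case 3:
      case 4:
        return created != 0;       /* non-zero only when the binlog was created
                                      at server startup */
      case 1:
        /* only 3.23.57 and later let us tell a startup apart from FLUSH LOGS */
        return std::strncmp(master_server_version, "3.23.57", 7) >= 0 && created != 0;
      default:
        return false;              /* binlog version 2 (early 4.0) is not supported */
      }
    }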
- */ - break; - case BINLOG_FORMAT_323_GEQ_57: + case 1: + if (strncmp(rli->relay_log.description_event_for_exec->server_version, + "3.23.57",7) >= 0 && created) + { + /* + Can distinguish, based on the value of 'created': this event was + generated at master startup. + */ + close_temporary_tables(thd); + } /* - Can distinguish, based on the value of 'created', - which was generated at master startup. + Otherwise, can't distinguish a Start_log_event generated at + master startup and one generated by master FLUSH LOGS, so cannot + be sure temp tables have to be dropped. So do nothing. */ - if (created) - close_temporary_tables(thd); break; default: /* this case is impossible */ - return 1; + DBUG_RETURN(1); } - DBUG_RETURN(Log_event::exec_event(rli)); } #endif /* defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) */ -/************************************************************************** - Load_log_event methods -**************************************************************************/ +/*************************************************************************** + Format_description_log_event methods +****************************************************************************/ + +/* + Format_description_log_event 1st ctor. + + SYNOPSIS + Format_description_log_event::Format_description_log_event + binlog_version the binlog version for which we want to build + an event. Can be 1 (=MySQL 3.23), 3 (=4.0.x + x>=2 and 4.1) or 4 (MySQL 5.0). Note that the + old 4.0 (binlog version 2) is not supported; + it should not be used for replication with + 5.0. + + DESCRIPTION + Ctor. Can be used to create the event to write to the binary log (when the + server starts or when FLUSH LOGS), or to create artificial events to parse + binlogs from MySQL 3.23 or 4.x. + When in a client, only the 2nd use is possible. + + TODO + Update this code with the new event for LOAD DATA, once they are pushed (in + 4.1 or 5.0). If it's in 5.0, only the "case 4" block should be updated. + +*/ + +Format_description_log_event:: +Format_description_log_event(uint8 binlog_ver, + const char* server_ver) + :Start_log_event_v3() +{ + created= when; + binlog_version= binlog_ver; + switch (binlog_ver) { + case 4: /* MySQL 5.0 */ + memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); + common_header_len= LOG_EVENT_HEADER_LEN; + number_of_event_types= LOG_EVENT_TYPES; + /* we'll catch my_malloc() error in is_valid() */ + post_header_len=(uint8*) my_malloc(number_of_event_types*sizeof(uint8), + MYF(0)); + /* + This long list of assignments is not beautiful, but I see no way to + make it nicer, as the right members are #defines, not array members, so + it's impossible to write a loop. 
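For orientation, here is how such a table is consumed when an event is later decoded (member names follow this patch, but the struct is a stand-in, not the real class):

    #include <stdint.h>

    struct format_info_stub            /* stand-in for Format_description_log_event */
    {
      uint8_t        common_header_len;   /* 19 in 5.0, 13 in 3.23 */
      const uint8_t *post_header_len;     /* indexed with event_type - 1 */
    };

    /* first byte of an event's variable-length part */
    inline const char *event_variable_part(const char *event_buf, int event_type,
                                           const format_info_stub &fd)
    {
      return event_buf + fd.common_header_len + fd.post_header_len[event_type - 1];
    }

This is why the constructors in this patch take a description_event argument instead of the old old_format flag: the sizes are looked up, not hard-coded.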
+ */ + if (post_header_len) + { + post_header_len[START_EVENT_V3-1]= START_V3_HEADER_LEN; + post_header_len[QUERY_EVENT-1]= QUERY_HEADER_LEN; + post_header_len[STOP_EVENT-1]= 0; + post_header_len[ROTATE_EVENT-1]= ROTATE_HEADER_LEN; + post_header_len[INTVAR_EVENT-1]= 0; + post_header_len[LOAD_EVENT-1]= LOAD_HEADER_LEN; + post_header_len[SLAVE_EVENT-1]= 0; + post_header_len[CREATE_FILE_EVENT-1]= CREATE_FILE_HEADER_LEN; + post_header_len[APPEND_BLOCK_EVENT-1]= APPEND_BLOCK_HEADER_LEN; + post_header_len[EXEC_LOAD_EVENT-1]= EXEC_LOAD_HEADER_LEN; + post_header_len[DELETE_FILE_EVENT-1]= DELETE_FILE_HEADER_LEN; + post_header_len[NEW_LOAD_EVENT-1]= post_header_len[LOAD_EVENT-1]; + post_header_len[RAND_EVENT-1]= 0; + post_header_len[USER_VAR_EVENT-1]= 0; + post_header_len[FORMAT_DESCRIPTION_EVENT-1]= FORMAT_DESCRIPTION_HEADER_LEN; + } + break; + + case 1: /* 3.23 */ + case 3: /* 4.0.x x>=2 */ + /* + We build an artificial (i.e. not sent by the master) event, which + describes what those old master versions send. + */ + if (binlog_ver==1) + strmov(server_version, server_ver ? server_ver : "3.23"); + else + strmov(server_version, server_ver ? server_ver : "4.0"); + common_header_len= binlog_ver==1 ? OLD_HEADER_LEN : + LOG_EVENT_MINIMAL_HEADER_LEN; + /* + The first new event in binlog version 4 is Format_desc. So any event type + after that does not exist in older versions. We use the events known by + version 3, even if version 1 had only a subset of them (this is not a + problem: it uses a few bytes for nothing but unifies code; it does not + make the slave detect less corruptions). + */ + number_of_event_types= FORMAT_DESCRIPTION_EVENT - 1; + post_header_len=(uint8*) my_malloc(number_of_event_types*sizeof(uint8), + MYF(0)); + if (post_header_len) + { + post_header_len[START_EVENT_V3-1]= START_V3_HEADER_LEN; + post_header_len[QUERY_EVENT-1]= QUERY_HEADER_MINIMAL_LEN; + post_header_len[STOP_EVENT-1]= 0; + post_header_len[ROTATE_EVENT-1]= (binlog_ver==1) ? 0 : ROTATE_HEADER_LEN; + post_header_len[INTVAR_EVENT-1]= 0; + post_header_len[LOAD_EVENT-1]= LOAD_HEADER_LEN; + post_header_len[SLAVE_EVENT-1]= 0; + post_header_len[CREATE_FILE_EVENT-1]= CREATE_FILE_HEADER_LEN; + post_header_len[APPEND_BLOCK_EVENT-1]= APPEND_BLOCK_HEADER_LEN; + post_header_len[EXEC_LOAD_EVENT-1]= EXEC_LOAD_HEADER_LEN; + post_header_len[DELETE_FILE_EVENT-1]= DELETE_FILE_HEADER_LEN; + post_header_len[NEW_LOAD_EVENT-1]= post_header_len[LOAD_EVENT-1]; + post_header_len[RAND_EVENT-1]= 0; + post_header_len[USER_VAR_EVENT-1]= 0; + } + break; + default: /* Includes binlog version 2 i.e. 4.0.x x<=1 */ + post_header_len= 0; /* will make is_valid() fail */ + break; + } +} + + +/* + The problem with this constructor is that the fixed header may have a + length different from this version, but we don't know this length as we + have not read the Format_description_log_event which says it, yet. This + length is in the post-header of the event, but we don't know where the + post-header starts. + So this type of event HAS to: + - either have the header's length at the beginning (in the header, at a + fixed position which will never be changed), not in the post-header. That + would make the header be "shifted" compared to other events. + - or have a header of size LOG_EVENT_MINIMAL_HEADER_LEN (19), in all future + versions, so that we know for sure. + I (Guilhem) chose the 2nd solution. Rotate has the same constraint (because + it is sent before Format_description_log_event). 
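A sketch of the bootstrap this paragraph describes (constants copied from log_event.h below, helper name invented): any reader can skip the fixed 19 bytes, and the Format_description event itself then announces the common header length used by every later event.

    /* LOG_EVENT_MINIMAL_HEADER_LEN = 19
       ST_COMMON_HEADER_LEN_OFFSET  = 2 (binlog ver) + 50 (server ver) + 4 (created) */
    unsigned common_header_len_of_binlog(const unsigned char *format_desc_event)
    {
      return format_desc_event[19 + (2 + 50 + 4)];
    }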
+*/ + +Format_description_log_event:: +Format_description_log_event(const char* buf, + uint event_len, + const + Format_description_log_event* + description_event) + :Start_log_event_v3(buf, description_event) +{ + DBUG_ENTER("Format_description_log_event::Format_description_log_event(char*,...)"); + buf+= LOG_EVENT_MINIMAL_HEADER_LEN; + if ((common_header_len=buf[ST_COMMON_HEADER_LEN_OFFSET]) < OLD_HEADER_LEN) + DBUG_VOID_RETURN; /* sanity check */ + number_of_event_types= + event_len-(LOG_EVENT_MINIMAL_HEADER_LEN+ST_COMMON_HEADER_LEN_OFFSET+1); + DBUG_PRINT("info", ("common_header_len=%d number_of_event_types=%d", + common_header_len, number_of_event_types)); + /* If alloc fails, we'll detect it in is_valid() */ + post_header_len= (uint8*) my_memdup((byte*)buf+ST_COMMON_HEADER_LEN_OFFSET+1, + number_of_event_types* + sizeof(*post_header_len), + MYF(0)); + DBUG_VOID_RETURN; +} + + +bool Format_description_log_event::write(IO_CACHE* file) +{ + /* + We don't call Start_log_event_v3::write() because this would make 2 + my_b_safe_write(). + */ + byte buff[FORMAT_DESCRIPTION_HEADER_LEN]; + int2store(buff + ST_BINLOG_VER_OFFSET,binlog_version); + memcpy((char*) buff + ST_SERVER_VER_OFFSET,server_version,ST_SERVER_VER_LEN); + int4store(buff + ST_CREATED_OFFSET,created); + buff[ST_COMMON_HEADER_LEN_OFFSET]= LOG_EVENT_HEADER_LEN; + memcpy((char*) buff+ST_COMMON_HEADER_LEN_OFFSET+1, (byte*) post_header_len, + LOG_EVENT_TYPES); + return (write_header(file, sizeof(buff)) || + my_b_safe_write(file, buff, sizeof(buff))); +} + +/* + SYNOPSIS + Format_description_log_event::exec_event() + + IMPLEMENTATION + Save the information which describes the binlog's format, to be able to + read all coming events. + Call Start_log_event_v3::exec_event(). +*/ + +#if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +int Format_description_log_event::exec_event(struct st_relay_log_info* rli) +{ + DBUG_ENTER("Format_description_log_event::exec_event"); + + /* save the information describing this binlog */ + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= this; + + /* + If this event comes from ourselves, there is no cleaning task to perform, + we don't call Start_log_event_v3::exec_event() (this was just to update the + log's description event). + */ + if (server_id == (uint32) ::server_id) + { + /* + Do not modify rli->group_master_log_pos, as this event did not exist on + the master. That is, just update the *relay log* coordinates; this is + done by passing log_pos=0 to inc_group_relay_log_pos, like we do in + Stop_log_event::exec_event(). + If in a transaction, don't touch group_* coordinates. + */ + if (thd->options & OPTION_BEGIN) + rli->inc_event_relay_log_pos(); + else + { + rli->inc_group_relay_log_pos(0); + flush_relay_log_info(rli); + } + DBUG_RETURN(0); + } + + /* + If the event was not requested by the slave i.e. the master sent it while + the slave asked for a position >4, the event will make + rli->group_master_log_pos advance. Say that the slave asked for position + 1000, and the Format_desc event's end is 95. Then in the beginning of + replication rli->group_master_log_pos will be 0, then 95, then jump to first + really asked event (which is >95). So this is ok. 
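The heart of this method is the hand-off at its top; in isolation it looks like this (stub types, not the real st_relay_log_info/MYSQL_LOG classes):

    struct format_description_stub { unsigned common_header_len; /* ... */ };
    struct relay_log_stub { format_description_stub *description_event_for_exec; };

    void install_format_description(relay_log_stub *log, format_description_stub *new_desc)
    {
      delete log->description_event_for_exec;       /* drop the previous description     */
      log->description_event_for_exec = new_desc;   /* all following events use this one */
    }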
+ */ + DBUG_RETURN(Start_log_event_v3::exec_event(rli)); +} +#endif + + /************************************************************************** + Load_log_event methods + General note about Load_log_event: the binlogging of LOAD DATA INFILE is + going to be changed in 5.0 (or maybe in 4.1; not decided yet). + However, the 5.0 slave could still have to read such events (from a 4.x + master), convert them (which just means maybe expand the header, when 5.0 + servers have a UID in events) (remember that whatever is after the header + will be like in 4.x, as this event's format is not modified in 5.0 as we + will use new types of events to log the new LOAD DATA INFILE features). + To be able to read/convert, we just need to not assume that the common + header is of length LOG_EVENT_HEADER_LEN (we must use the description + event). + Note that I (Guilhem) manually tested replication of a big LOAD DATA INFILE + between 3.23 and 5.0, and between 4.0 and 5.0, and it works fine (and the + positions displayed in SHOW SLAVE STATUS then are fine too). + **************************************************************************/ /* Load_log_event::pack_info() @@ -1376,7 +2086,7 @@ void Load_log_event::pack_info(Protocol *protocol) Load_log_event::write_data_header() */ -int Load_log_event::write_data_header(IO_CACHE* file) +bool Load_log_event::write_data_header(IO_CACHE* file) { char buf[LOAD_HEADER_LEN]; int4store(buf + L_THREAD_ID_OFFSET, slave_proxy_id); @@ -1385,7 +2095,7 @@ int Load_log_event::write_data_header(IO_CACHE* file) buf[L_TBL_LEN_OFFSET] = (char)table_name_len; buf[L_DB_LEN_OFFSET] = (char)db_len; int4store(buf + L_NUM_FIELDS_OFFSET, num_fields); - return my_b_safe_write(file, (byte*)buf, LOAD_HEADER_LEN); + return my_b_safe_write(file, (byte*)buf, LOAD_HEADER_LEN) != 0; } @@ -1393,7 +2103,7 @@ int Load_log_event::write_data_header(IO_CACHE* file) Load_log_event::write_data_body() */ -int Load_log_event::write_data_body(IO_CACHE* file) +bool Load_log_event::write_data_body(IO_CACHE* file) { if (sql_ex.write_data(file)) return 1; @@ -1497,6 +2207,7 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, } #endif /* !MYSQL_CLIENT */ + /* Load_log_event::Load_log_event() @@ -1505,15 +2216,25 @@ Load_log_event::Load_log_event(THD *thd_arg, sql_exchange *ex, constructed event. */ -Load_log_event::Load_log_event(const char *buf, int event_len, - bool old_format) - :Log_event(buf, old_format), num_fields(0), fields(0), - field_lens(0), field_block_len(0), +Load_log_event::Load_log_event(const char *buf, uint event_len, + const Format_description_log_event *description_event) + :Log_event(buf, description_event), num_fields(0), fields(0), + field_lens(0),field_block_len(0), table_name(0), db(0), fname(0), local_fname(FALSE) { DBUG_ENTER("Load_log_event"); - if (event_len) // derived class, will call copy_log_event() itself - copy_log_event(buf, event_len, old_format); + /* + I (Guilhem) manually tested replication of LOAD DATA INFILE for 3.23->5.0, + 4.0->5.0 and 5.0->5.0 and it works. + */ + if (event_len) + copy_log_event(buf, event_len, + ((buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ? 
+ LOAD_HEADER_LEN + + description_event->common_header_len : + LOAD_HEADER_LEN + LOG_EVENT_HEADER_LEN), + description_event); + /* otherwise it's a derived class, will call copy_log_event() itself */ DBUG_VOID_RETURN; } @@ -1523,14 +2244,14 @@ Load_log_event::Load_log_event(const char *buf, int event_len, */ int Load_log_event::copy_log_event(const char *buf, ulong event_len, - bool old_format) + int body_offset, + const Format_description_log_event *description_event) { + DBUG_ENTER("Load_log_event::copy_log_event"); uint data_len; char* buf_end = (char*)buf + event_len; - uint header_len= old_format ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - const char* data_head = buf + header_len; - DBUG_ENTER("Load_log_event::copy_log_event"); - + /* this is the beginning of the post-header */ + const char* data_head = buf + description_event->common_header_len; slave_proxy_id= thread_id= uint4korr(data_head + L_THREAD_ID_OFFSET); exec_time = uint4korr(data_head + L_EXEC_TIME_OFFSET); skip_lines = uint4korr(data_head + L_SKIP_LINES_OFFSET); @@ -1538,21 +2259,17 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, db_len = (uint)data_head[L_DB_LEN_OFFSET]; num_fields = uint4korr(data_head + L_NUM_FIELDS_OFFSET); - int body_offset = ((buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ? - LOAD_HEADER_LEN + header_len : - get_data_body_offset()); - if ((int) event_len < body_offset) DBUG_RETURN(1); /* Sql_ex.init() on success returns the pointer to the first byte after the sql_ex structure, which is the start of field lengths array. */ - if (!(field_lens=(uchar*)sql_ex.init((char*)buf + body_offset, - buf_end, - buf[EVENT_TYPE_OFFSET] != LOAD_EVENT))) + if (!(field_lens= (uchar*)sql_ex.init((char*)buf + body_offset, + buf_end, + buf[EVENT_TYPE_OFFSET] != LOAD_EVENT))) DBUG_RETURN(1); - + data_len = event_len - body_offset; if (num_fields > data_len) // simple sanity check against corruption DBUG_RETURN(1); @@ -1565,6 +2282,12 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, fname = db + db_len + 1; fname_len = strlen(fname); // null termination is accomplished by the caller doing buf[event_len]=0 + + /* + In 5.0 this event will have the same format, as we are planning to log LOAD + DATA INFILE in a completely different way (as a plain-text query) since 4.1 + or 5.0 (Dmitri's WL#874) + */ DBUG_RETURN(0); } @@ -1574,13 +2297,13 @@ int Load_log_event::copy_log_event(const char *buf, ulong event_len, */ #ifdef MYSQL_CLIENT -void Load_log_event::print(FILE* file, bool short_form, char* last_db) +void Load_log_event::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info) { - print(file, short_form, last_db, 0); + print(file, short_form, last_event_info, 0); } -void Load_log_event::print(FILE* file, bool short_form, char* last_db, +void Load_log_event::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info, bool commented) { DBUG_ENTER("Load_log_event::print"); @@ -1592,7 +2315,7 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db, } bool different_db= 1; - if (db && last_db) + if (db) { /* If the database is different from the one of the previous statement, we @@ -1600,9 +2323,9 @@ void Load_log_event::print(FILE* file, bool short_form, char* last_db, But if commented, the "use" is going to be commented so we should not update the last_db. 
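In compressed form, the "emit USE only when the database changes" rule explained here (plain stand-ins rather than the mysqlbinlog structures; the commented form of the statement is left out):

    #include <cstdio>
    #include <string>

    struct last_info_stub { std::string db; };      /* stand-in for LAST_EVENT_INFO::db */

    void maybe_print_use(std::FILE *out, last_info_stub *last,
                         const std::string &db, bool commented)
    {
      const bool different_db = (last->db != db);
      if (different_db && !commented)
        last->db = db;              /* remember it only if the USE really takes effect */
      if (!db.empty() && different_db)
        std::fprintf(out, "use %s;\n", db.c_str());  /* the real code comments this
                                                        line out when 'commented' is set */
    }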
*/ - if ((different_db= memcmp(last_db, db, db_len + 1)) && + if ((different_db= memcmp(last_event_info->db, db, db_len + 1)) && !commented) - memcpy(last_db, db, db_len + 1); + memcpy(last_event_info->db, db, db_len + 1); } if (db && db[0] && different_db) @@ -1738,15 +2461,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, mysql_init_query(thd, 0, 0); if (!use_rli_only_for_errors) { -#if MYSQL_VERSION_ID < 50000 - rli->future_group_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else + /* Saved for InnoDB, see comment in Query_log_event::exec_event() */ rli->future_group_master_log_pos= log_pos; -#endif + DBUG_PRINT("info", ("log_pos: %lu", (ulong) log_pos)); } - - /* + + /* We test replicate_*_db rules. Note that we have already prepared the file to load, even if we are going to ignore and delete it now. So it is possible that we did a lot of disk writes for nothing. In other words, a @@ -1859,7 +2579,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, thd->net.pkt_nr = net->pkt_nr; } if (mysql_load(thd, &ex, &tables, field_list, handle_dup, ignore, net != 0, - TL_WRITE)) + TL_WRITE, 0)) thd->query_error = 1; if (thd->cuted_fields) { @@ -1890,7 +2610,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, thd->net.vio = 0; char *save_db= thd->db; VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->db= 0; + thd->db= thd->catalog= 0; thd->query= 0; thd->query_length= thd->db_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); @@ -1899,7 +2619,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, my_afree(load_data_query); if (thd->query_error) { - /* this err/sql_errno code is copy-paste from send_error() */ + /* this err/sql_errno code is copy-paste from net_send_error() */ const char *err; int sql_errno; if ((err=thd->net.last_error)[0]) @@ -1957,17 +2677,17 @@ void Rotate_log_event::pack_info(Protocol *protocol) */ #ifdef MYSQL_CLIENT -void Rotate_log_event::print(FILE* file, bool short_form, char* last_db) +void Rotate_log_event::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info) { char buf[22]; + if (short_form) return; - print_header(file); fprintf(file, "\tRotate to "); if (new_log_ident) my_fwrite(file, (byte*) new_log_ident, (uint)ident_len, - MYF(MY_NABP | MY_WME)); + MYF(MY_NABP | MY_WME)); fprintf(file, " pos: %s", llstr(pos, buf)); fputc('\n', file); fflush(file); @@ -1979,31 +2699,22 @@ void Rotate_log_event::print(FILE* file, bool short_form, char* last_db) Rotate_log_event::Rotate_log_event() */ -Rotate_log_event::Rotate_log_event(const char* buf, int event_len, - bool old_format) - :Log_event(buf, old_format),new_log_ident(NULL),alloced(0) +Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) ,new_log_ident(NULL),alloced(0) { + DBUG_ENTER("Rotate_log_event::Rotate_log_event(char*,...)"); // The caller will ensure that event_len is what we have at EVENT_LEN_OFFSET - int header_size = (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + uint8 header_size= description_event->common_header_len; + uint8 post_header_len= description_event->post_header_len[ROTATE_EVENT-1]; uint ident_offset; - DBUG_ENTER("Rotate_log_event"); - if (event_len < header_size) DBUG_VOID_RETURN; - buf += header_size; - if (old_format) - { - ident_len = (uint)(event_len - OLD_HEADER_LEN); - pos = 4; - ident_offset = 0; - } - else - { - ident_len = (uint)(event_len - ROTATE_EVENT_OVERHEAD); - pos = uint8korr(buf + R_POS_OFFSET); - ident_offset = ROTATE_HEADER_LEN; - } + pos = post_header_len ? uint8korr(buf + R_POS_OFFSET) : 4; + ident_len = (uint)(event_len - + (header_size+post_header_len)); + ident_offset = post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); if (!(new_log_ident= my_strdup_with_length((byte*) buf + ident_offset, @@ -2016,29 +2727,31 @@ Rotate_log_event::Rotate_log_event(const char* buf, int event_len, /* - Rotate_log_event::write_data() + Rotate_log_event::write() */ -int Rotate_log_event::write_data(IO_CACHE* file) +bool Rotate_log_event::write(IO_CACHE* file) { char buf[ROTATE_HEADER_LEN]; int8store(buf + R_POS_OFFSET, pos); - return (my_b_safe_write(file, (byte*)buf, ROTATE_HEADER_LEN) || - my_b_safe_write(file, (byte*)new_log_ident, (uint) ident_len)); + return (write_header(file, ROTATE_HEADER_LEN + ident_len) || + my_b_safe_write(file, (byte*)buf, ROTATE_HEADER_LEN) || + my_b_safe_write(file, (byte*)new_log_ident, (uint) ident_len)); } /* Rotate_log_event::exec_event() - Got a rotate log even from the master + Got a rotate log event from the master IMPLEMENTATION This is mainly used so that we can later figure out the logname and position for the master. - We can't rotate the slave as this will cause infinitive rotations + We can't rotate the slave's BINlog as this will cause infinitive rotations in a A -> B -> A setup. + The NOTES below is a wrong comment which will disappear when 4.1 is merged. RETURN VALUES 0 ok @@ -2050,7 +2763,7 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli) DBUG_ENTER("Rotate_log_event::exec_event"); pthread_mutex_lock(&rli->data_lock); - rli->event_relay_log_pos += get_event_len(); + rli->event_relay_log_pos= my_b_tell(rli->cur_log); /* If we are in a transaction: the only normal case is when the I/O thread was copying a big transaction, then it was stopped and restarted: we have this @@ -2062,15 +2775,28 @@ int Rotate_log_event::exec_event(struct st_relay_log_info* rli) COMMIT or ROLLBACK In that case, we don't want to touch the coordinates which correspond to the beginning of the transaction. + Starting from 5.0.0, there also are some rotates from the slave itself, in + the relay log. */ if (!(thd->options & OPTION_BEGIN)) { memcpy(rli->group_master_log_name, new_log_ident, ident_len+1); rli->notify_group_master_log_name_update(); - rli->group_master_log_pos = pos; - rli->group_relay_log_pos = rli->event_relay_log_pos; - DBUG_PRINT("info", ("group_master_log_pos: %lu", + rli->group_master_log_pos= pos; + rli->group_relay_log_pos= rli->event_relay_log_pos; + DBUG_PRINT("info", ("group_master_log_name: '%s' group_master_log_pos:\ +%lu", + rli->group_master_log_name, (ulong) rli->group_master_log_pos)); + /* + Reset thd->options and sql_mode, because this could be the signal of a + master's downgrade from 5.0 to 4.0. + However, no need to reset description_event_for_exec: indeed, if the next + master is 5.0 (even 5.0.1) we will soon get a Format_desc; if the next + master is 4.0 then the events are in the slave's format (conversion). 
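In outline, the effect of a Rotate event on the master coordinates (stub struct only; the real code in this hunk also resets thd->options and sql_mode for the downgrade case this comment describes):

    #include <string>

    struct master_coords_stub   /* stand-in for the group_master_* fields of st_relay_log_info */
    {
      std::string        group_master_log_name;
      unsigned long long group_master_log_pos;
    };

    void apply_rotate(master_coords_stub *rli, bool inside_transaction,
                      const std::string &new_log_ident, unsigned long long pos)
    {
      if (inside_transaction)
        return;                                    /* keep the coordinates of the BEGIN   */
      rli->group_master_log_name = new_log_ident;  /* the master switched to a new binlog */
      rli->group_master_log_pos  = pos;            /* position inside that new file       */
    }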
+ */ + set_slave_thread_options(thd); + thd->variables.sql_mode= global_system_variables.sql_mode; } pthread_mutex_unlock(&rli->data_lock); pthread_cond_broadcast(&rli->data_cond); @@ -2104,12 +2830,13 @@ void Intvar_log_event::pack_info(Protocol *protocol) Intvar_log_event::Intvar_log_event() */ -Intvar_log_event::Intvar_log_event(const char* buf, bool old_format) - :Log_event(buf, old_format) +Intvar_log_event::Intvar_log_event(const char* buf, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) { - buf += (old_format) ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - type = buf[I_TYPE_OFFSET]; - val = uint8korr(buf+I_VAL_OFFSET); + buf+= description_event->common_header_len; + type= buf[I_TYPE_OFFSET]; + val= uint8korr(buf+I_VAL_OFFSET); } @@ -2128,15 +2855,16 @@ const char* Intvar_log_event::get_var_type_name() /* - Intvar_log_event::write_data() + Intvar_log_event::write() */ -int Intvar_log_event::write_data(IO_CACHE* file) +bool Intvar_log_event::write(IO_CACHE* file) { - char buf[9]; - buf[I_TYPE_OFFSET] = type; + byte buf[9]; + buf[I_TYPE_OFFSET]= (byte) type; int8store(buf + I_VAL_OFFSET, val); - return my_b_safe_write(file, (byte*) buf, sizeof(buf)); + return (write_header(file, sizeof(buf)) || + my_b_safe_write(file, buf, sizeof(buf))); } @@ -2145,7 +2873,8 @@ int Intvar_log_event::write_data(IO_CACHE* file) */ #ifdef MYSQL_CLIENT -void Intvar_log_event::print(FILE* file, bool short_form, char* last_db) +void Intvar_log_event::print(FILE* file, bool short_form, + LAST_EVENT_INFO* last_event_info) { char llbuff[22]; const char *msg; @@ -2188,7 +2917,7 @@ int Intvar_log_event::exec_event(struct st_relay_log_info* rli) thd->next_insert_id = val; break; } - rli->inc_event_relay_log_pos(get_event_len()); + rli->inc_event_relay_log_pos(); return 0; } #endif @@ -2211,26 +2940,28 @@ void Rand_log_event::pack_info(Protocol *protocol) #endif -Rand_log_event::Rand_log_event(const char* buf, bool old_format) - :Log_event(buf, old_format) +Rand_log_event::Rand_log_event(const char* buf, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) { - buf += (old_format) ? 
OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; - seed1 = uint8korr(buf+RAND_SEED1_OFFSET); - seed2 = uint8korr(buf+RAND_SEED2_OFFSET); + buf+= description_event->common_header_len; + seed1= uint8korr(buf+RAND_SEED1_OFFSET); + seed2= uint8korr(buf+RAND_SEED2_OFFSET); } -int Rand_log_event::write_data(IO_CACHE* file) +bool Rand_log_event::write(IO_CACHE* file) { - char buf[16]; + byte buf[16]; int8store(buf + RAND_SEED1_OFFSET, seed1); int8store(buf + RAND_SEED2_OFFSET, seed2); - return my_b_safe_write(file, (byte*) buf, sizeof(buf)); + return (write_header(file, sizeof(buf)) || + my_b_safe_write(file, buf, sizeof(buf))); } #ifdef MYSQL_CLIENT -void Rand_log_event::print(FILE* file, bool short_form, char* last_db) +void Rand_log_event::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info) { char llbuff[22],llbuff2[22]; if (!short_form) @@ -2250,7 +2981,7 @@ int Rand_log_event::exec_event(struct st_relay_log_info* rli) { thd->rand.seed1= (ulong) seed1; thd->rand.seed2= (ulong) seed2; - rli->inc_event_relay_log_pos(get_event_len()); + rli->inc_event_relay_log_pos(); return 0; } #endif /* !MYSQL_CLIENT */ @@ -2321,10 +3052,12 @@ void User_var_log_event::pack_info(Protocol* protocol) #endif /* !MYSQL_CLIENT */ -User_var_log_event::User_var_log_event(const char* buf, bool old_format) - :Log_event(buf, old_format) +User_var_log_event:: +User_var_log_event(const char* buf, + const Format_description_log_event* description_event) + :Log_event(buf, description_event) { - buf+= (old_format) ? OLD_HEADER_LEN : LOG_EVENT_HEADER_LEN; + buf+= description_event->common_header_len; name_len= uint4korr(buf); name= (char *) buf + UV_NAME_LEN_SIZE; buf+= UV_NAME_LEN_SIZE + name_len; @@ -2348,13 +3081,14 @@ User_var_log_event::User_var_log_event(const char* buf, bool old_format) } -int User_var_log_event::write_data(IO_CACHE* file) +bool User_var_log_event::write(IO_CACHE* file) { char buf[UV_NAME_LEN_SIZE]; char buf1[UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE]; char buf2[8], *pos= buf2; uint buf1_length; + ulong event_length; int4store(buf, name_len); @@ -2386,7 +3120,12 @@ int User_var_log_event::write_data(IO_CACHE* file) return 0; } } - return (my_b_safe_write(file, (byte*) buf, sizeof(buf)) || + + /* Length of the whole event */ + event_length= sizeof(buf)+ name_len + buf1_length + val_len; + + return (write_header(file, event_length) || + my_b_safe_write(file, (byte*) buf, sizeof(buf)) || my_b_safe_write(file, (byte*) name, name_len) || my_b_safe_write(file, (byte*) buf1, buf1_length) || my_b_safe_write(file, (byte*) pos, val_len)); @@ -2398,7 +3137,7 @@ int User_var_log_event::write_data(IO_CACHE* file) */ #ifdef MYSQL_CLIENT -void User_var_log_event::print(FILE* file, bool short_form, char* last_db) +void User_var_log_event::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info) { if (!short_form) { @@ -2531,7 +3270,7 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) e.update_hash(val, val_len, type, charset, DERIVATION_NONE); free_root(thd->mem_root,0); - rli->inc_event_relay_log_pos(get_event_len()); + rli->inc_event_relay_log_pos(); return 0; } #endif /* !MYSQL_CLIENT */ @@ -2543,7 +3282,7 @@ int User_var_log_event::exec_event(struct st_relay_log_info* rli) #ifdef HAVE_REPLICATION #ifdef MYSQL_CLIENT -void Unknown_log_event::print(FILE* file, bool short_form, char* last_db) +void Unknown_log_event::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info) { if (short_form) return; @@ -2573,7 +3312,7 @@ void 
Slave_log_event::pack_info(Protocol *protocol) #ifndef MYSQL_CLIENT Slave_log_event::Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli) - :Log_event(thd_arg, 0, 0), mem_pool(0), master_host(0) + :Log_event(thd_arg, 0, 0) , mem_pool(0), master_host(0) { DBUG_ENTER("Slave_log_event"); if (!rli->inited) // QQ When can this happen ? @@ -2614,7 +3353,7 @@ Slave_log_event::~Slave_log_event() #ifdef MYSQL_CLIENT -void Slave_log_event::print(FILE* file, bool short_form, char* last_db) +void Slave_log_event::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info) { char llbuff[22]; if (short_form) @@ -2634,12 +3373,15 @@ int Slave_log_event::get_data_size() } -int Slave_log_event::write_data(IO_CACHE* file) +bool Slave_log_event::write(IO_CACHE* file) { + ulong event_length= get_data_size(); int8store(mem_pool + SL_MASTER_POS_OFFSET, master_pos); int2store(mem_pool + SL_MASTER_PORT_OFFSET, master_port); // log and host are already there - return my_b_safe_write(file, (byte*)mem_pool, get_data_size()); + + return (write_header(file, event_length) || + my_b_safe_write(file, (byte*) mem_pool, event_length)); } @@ -2660,12 +3402,13 @@ void Slave_log_event::init_from_mem_pool(int data_size) } -Slave_log_event::Slave_log_event(const char* buf, int event_len) - :Log_event(buf,0),mem_pool(0),master_host(0) +/* This code is not used, so has not been updated to be format-tolerant */ +Slave_log_event::Slave_log_event(const char* buf, uint event_len) + :Log_event(buf,0) /*unused event*/ ,mem_pool(0),master_host(0) { - event_len -= LOG_EVENT_HEADER_LEN; - if (event_len < 0) + if (event_len < LOG_EVENT_HEADER_LEN) return; + event_len -= LOG_EVENT_HEADER_LEN; if (!(mem_pool = (char*) my_malloc(event_len + 1, MYF(MY_WME)))) return; memcpy(mem_pool, buf + LOG_EVENT_HEADER_LEN, event_len); @@ -2693,7 +3436,7 @@ int Slave_log_event::exec_event(struct st_relay_log_info* rli) */ #ifdef MYSQL_CLIENT -void Stop_log_event::print(FILE* file, bool short_form, char* last_db) +void Stop_log_event::print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info) { if (short_form) return; @@ -2715,7 +3458,7 @@ void Stop_log_event::print(FILE* file, bool short_form, char* last_db) We used to clean up slave_load_tmpdir, but this is useless as it has been cleared at the end of LOAD DATA INFILE. So we have nothing to do here. - The place were we must do this cleaning is in Start_log_event::exec_event(), + The place were we must do this cleaning is in Start_log_event_v3::exec_event(), not here. Because if we come here, the master was sane. */ @@ -2729,8 +3472,13 @@ int Stop_log_event::exec_event(struct st_relay_log_info* rli) could give false triggers in MASTER_POS_WAIT() that we have reached the target position when in fact we have not. 
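One detail worth spelling out from the Slave_log_event constructor above: event_len is now unsigned, so the length test has to happen before the subtraction. A minimal illustration of why (plain C++, nothing server-specific):

    #include <cstdio>

    void check_event_len(unsigned event_len)       /* fixed header is 19 bytes */
    {
      /* Wrong with unsigned arithmetic: 'event_len -= 19; if (event_len < 0)'
         can never be true, the value just wraps around, so a truncated event
         would slip through.  Compare first, subtract afterwards: */
      if (event_len < 19)
      {
        std::puts("event too short");
        return;
      }
      unsigned body_len = event_len - 19;
      std::printf("body is %u bytes\n", body_len);
    }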
*/ - rli->inc_group_relay_log_pos(get_event_len(), 0); - flush_relay_log_info(rli); + if (thd->options & OPTION_BEGIN) + rli->inc_event_relay_log_pos(); + else + { + rli->inc_group_relay_log_pos(0); + flush_relay_log_info(rli); + } return 0; } #endif /* !MYSQL_CLIENT */ @@ -2768,13 +3516,13 @@ Create_file_log_event(THD* thd_arg, sql_exchange* ex, Create_file_log_event::write_data_body() */ -int Create_file_log_event::write_data_body(IO_CACHE* file) +bool Create_file_log_event::write_data_body(IO_CACHE* file) { - int res; - if ((res = Load_log_event::write_data_body(file)) || fake_base) + bool res; + if ((res= Load_log_event::write_data_body(file)) || fake_base) return res; return (my_b_safe_write(file, (byte*) "", 1) || - my_b_safe_write(file, (byte*) block, block_len)); + my_b_safe_write(file, (byte*) block, block_len)); } @@ -2782,14 +3530,14 @@ int Create_file_log_event::write_data_body(IO_CACHE* file) Create_file_log_event::write_data_header() */ -int Create_file_log_event::write_data_header(IO_CACHE* file) +bool Create_file_log_event::write_data_header(IO_CACHE* file) { - int res; - if ((res = Load_log_event::write_data_header(file)) || fake_base) - return res; + bool res; byte buf[CREATE_FILE_HEADER_LEN]; + if ((res= Load_log_event::write_data_header(file)) || fake_base) + return res; int4store(buf + CF_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, CREATE_FILE_HEADER_LEN); + return my_b_safe_write(file, buf, CREATE_FILE_HEADER_LEN) != 0; } @@ -2797,12 +3545,12 @@ int Create_file_log_event::write_data_header(IO_CACHE* file) Create_file_log_event::write_base() */ -int Create_file_log_event::write_base(IO_CACHE* file) +bool Create_file_log_event::write_base(IO_CACHE* file) { - int res; - fake_base = 1; // pretend we are Load event - res = write(file); - fake_base = 0; + bool res; + fake_base= 1; // pretend we are Load event + res= write(file); + fake_base= 0; return res; } @@ -2811,28 +3559,43 @@ int Create_file_log_event::write_base(IO_CACHE* file) Create_file_log_event ctor */ -Create_file_log_event::Create_file_log_event(const char* buf, int len, - bool old_format) - :Load_log_event(buf,0,old_format),fake_base(0),block(0),inited_from_old(0) +Create_file_log_event::Create_file_log_event(const char* buf, uint len, + const Format_description_log_event* description_event) + :Load_log_event(buf,0,description_event),fake_base(0),block(0),inited_from_old(0) { - int block_offset; - DBUG_ENTER("Create_file_log_event"); - - /* - We must make copy of 'buf' as this event may have to live over a - rotate log entry when used in mysqlbinlog - */ + DBUG_ENTER("Create_file_log_event::Create_file_log_event(char*,...)"); + uint block_offset; + uint header_len= description_event->common_header_len; + uint8 load_header_len= description_event->post_header_len[LOAD_EVENT-1]; + uint8 create_file_header_len= description_event->post_header_len[CREATE_FILE_EVENT-1]; if (!(event_buf= my_memdup((byte*) buf, len, MYF(MY_WME))) || - (copy_log_event(event_buf, len, old_format))) + copy_log_event(event_buf,len, + ((buf[EVENT_TYPE_OFFSET] == LOAD_EVENT) ? + load_header_len + header_len : + (fake_base ? 
(header_len+load_header_len) : + (header_len+load_header_len) + + create_file_header_len)), + description_event)) DBUG_VOID_RETURN; - - if (!old_format) + if (description_event->binlog_version!=1) { - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + - + LOAD_HEADER_LEN + CF_FILE_ID_OFFSET); - // + 1 for \0 terminating fname - block_offset = (LOG_EVENT_HEADER_LEN + Load_log_event::get_data_size() + - CREATE_FILE_HEADER_LEN + 1); + file_id= uint4korr(buf + + header_len + + load_header_len + CF_FILE_ID_OFFSET); + /* + Note that it's ok to use get_data_size() below, because it is computed + with values we have already read from this event (because we called + copy_log_event()); we are not using slave's format info to decode + master's format, we are really using master's format info. + Anyway, both formats should be identical (except the common_header_len) + as these Load events are not changed between 4.0 and 5.0 (as logging of + LOAD DATA INFILE does not use Load_log_event in 5.0). + + The + 1 is for \0 terminating fname + */ + block_offset= (description_event->common_header_len + + Load_log_event::get_data_size() + + create_file_header_len + 1); if (len < block_offset) return; block = (char*)buf + block_offset; @@ -2853,18 +3616,18 @@ Create_file_log_event::Create_file_log_event(const char* buf, int len, #ifdef MYSQL_CLIENT void Create_file_log_event::print(FILE* file, bool short_form, - char* last_db, bool enable_local) + LAST_EVENT_INFO* last_event_info, bool enable_local) { if (short_form) { if (enable_local && check_fname_outside_temp_buf()) - Load_log_event::print(file, 1, last_db); + Load_log_event::print(file, 1, last_event_info); return; } if (enable_local) { - Load_log_event::print(file, short_form, last_db, !check_fname_outside_temp_buf()); + Load_log_event::print(file, short_form, last_event_info, !check_fname_outside_temp_buf()); /* That one is for "file_id: etc" below: in mysqlbinlog we want the #, in SHOW BINLOG EVENTS we don't. 
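To summarise the layout the constructor above walks through (stand-in code; 'load_data_size' stands for what Load_log_event::get_data_size() returns, and the int4korr() read is simplified to a memcpy on an assumed little-endian host):

    #include <stdint.h>
    #include <string.h>

    struct create_file_view { uint32_t file_id; const char *block; unsigned block_len; };

    create_file_view parse_create_file(const char *buf, unsigned len,
                                       unsigned common_header_len,
                                       unsigned load_header_len,
                                       unsigned create_file_header_len,
                                       unsigned load_data_size)
    {
      create_file_view v;
      /* file_id sits at the start of the Create_file post-header,
         right after the embedded Load post-header */
      memcpy(&v.file_id, buf + common_header_len + load_header_len, 4);
      /* the raw data block follows the Load body, the Create_file post-header
         and the '\0' that terminates fname */
      unsigned block_offset = common_header_len + load_data_size
                              + create_file_header_len + 1;
      v.block     = buf + block_offset;
      v.block_len = len - block_offset;
      return v;
    }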
@@ -2877,9 +3640,9 @@ void Create_file_log_event::print(FILE* file, bool short_form, void Create_file_log_event::print(FILE* file, bool short_form, - char* last_db) + LAST_EVENT_INFO* last_event_info) { - print(file,short_form,last_db,0); + print(file,short_form,last_event_info,0); } #endif /* MYSQL_CLIENT */ @@ -2996,28 +3759,34 @@ Append_block_log_event::Append_block_log_event(THD* thd_arg, const char* db_arg, Append_block_log_event ctor */ -Append_block_log_event::Append_block_log_event(const char* buf, int len) - :Log_event(buf, 0),block(0) +Append_block_log_event::Append_block_log_event(const char* buf, uint len, + const Format_description_log_event* description_event) + :Log_event(buf, description_event),block(0) { - DBUG_ENTER("Append_block_log_event"); - if ((uint)len < APPEND_BLOCK_EVENT_OVERHEAD) + DBUG_ENTER("Append_block_log_event::Append_block_log_event(char*,...)"); + uint8 common_header_len= description_event->common_header_len; + uint8 append_block_header_len= + description_event->post_header_len[APPEND_BLOCK_EVENT-1]; + uint total_header_len= common_header_len+append_block_header_len; + if (len < total_header_len) DBUG_VOID_RETURN; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + AB_FILE_ID_OFFSET); - block = (char*)buf + APPEND_BLOCK_EVENT_OVERHEAD; - block_len = len - APPEND_BLOCK_EVENT_OVERHEAD; + file_id= uint4korr(buf + common_header_len + AB_FILE_ID_OFFSET); + block= (char*)buf + total_header_len; + block_len= len - total_header_len; DBUG_VOID_RETURN; } /* - Append_block_log_event::write_data() + Append_block_log_event::write() */ -int Append_block_log_event::write_data(IO_CACHE* file) +bool Append_block_log_event::write(IO_CACHE* file) { byte buf[APPEND_BLOCK_HEADER_LEN]; int4store(buf + AB_FILE_ID_OFFSET, file_id); - return (my_b_safe_write(file, buf, APPEND_BLOCK_HEADER_LEN) || + return (write_header(file, APPEND_BLOCK_HEADER_LEN + block_len) || + my_b_safe_write(file, buf, APPEND_BLOCK_HEADER_LEN) || my_b_safe_write(file, (byte*) block, block_len)); } @@ -3028,7 +3797,7 @@ int Append_block_log_event::write_data(IO_CACHE* file) #ifdef MYSQL_CLIENT void Append_block_log_event::print(FILE* file, bool short_form, - char* last_db) + LAST_EVENT_INFO* last_event_info) { if (short_form) return; @@ -3114,24 +3883,28 @@ Delete_file_log_event::Delete_file_log_event(THD *thd_arg, const char* db_arg, Delete_file_log_event ctor */ -Delete_file_log_event::Delete_file_log_event(const char* buf, int len) - :Log_event(buf, 0),file_id(0) +Delete_file_log_event::Delete_file_log_event(const char* buf, uint len, + const Format_description_log_event* description_event) + :Log_event(buf, description_event),file_id(0) { - if ((uint)len < DELETE_FILE_EVENT_OVERHEAD) + uint8 common_header_len= description_event->common_header_len; + uint8 delete_file_header_len= description_event->post_header_len[DELETE_FILE_EVENT-1]; + if (len < (uint)(common_header_len + delete_file_header_len)) return; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + AB_FILE_ID_OFFSET); + file_id= uint4korr(buf + common_header_len + DF_FILE_ID_OFFSET); } /* - Delete_file_log_event::write_data() + Delete_file_log_event::write() */ -int Delete_file_log_event::write_data(IO_CACHE* file) +bool Delete_file_log_event::write(IO_CACHE* file) { byte buf[DELETE_FILE_HEADER_LEN]; int4store(buf + DF_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, DELETE_FILE_HEADER_LEN); + return (write_header(file, sizeof(buf)) || + my_b_safe_write(file, buf, sizeof(buf))); } @@ -3141,7 +3914,7 @@ int 
Delete_file_log_event::write_data(IO_CACHE* file) #ifdef MYSQL_CLIENT void Delete_file_log_event::print(FILE* file, bool short_form, - char* last_db) + LAST_EVENT_INFO* last_event_info) { if (short_form) return; @@ -3204,24 +3977,28 @@ Execute_load_log_event::Execute_load_log_event(THD *thd_arg, const char* db_arg, Execute_load_log_event ctor */ -Execute_load_log_event::Execute_load_log_event(const char* buf, int len) - :Log_event(buf, 0), file_id(0) +Execute_load_log_event::Execute_load_log_event(const char* buf, uint len, + const Format_description_log_event* description_event) + :Log_event(buf, description_event), file_id(0) { - if ((uint)len < EXEC_LOAD_EVENT_OVERHEAD) + uint8 common_header_len= description_event->common_header_len; + uint8 exec_load_header_len= description_event->post_header_len[EXEC_LOAD_EVENT-1]; + if (len < (uint)(common_header_len+exec_load_header_len)) return; - file_id = uint4korr(buf + LOG_EVENT_HEADER_LEN + EL_FILE_ID_OFFSET); + file_id= uint4korr(buf + common_header_len + EL_FILE_ID_OFFSET); } /* - Execute_load_log_event::write_data() + Execute_load_log_event::write() */ -int Execute_load_log_event::write_data(IO_CACHE* file) +bool Execute_load_log_event::write(IO_CACHE* file) { byte buf[EXEC_LOAD_HEADER_LEN]; int4store(buf + EL_FILE_ID_OFFSET, file_id); - return my_b_safe_write(file, buf, EXEC_LOAD_HEADER_LEN); + return (write_header(file, sizeof(buf)) || + my_b_safe_write(file, buf, sizeof(buf))); } @@ -3231,7 +4008,7 @@ int Execute_load_log_event::write_data(IO_CACHE* file) #ifdef MYSQL_CLIENT void Execute_load_log_event::print(FILE* file, bool short_form, - char* last_db) + LAST_EVENT_INFO* last_event_info) { if (short_form) return; @@ -3278,8 +4055,8 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) goto err; } if (!(lev = (Load_log_event*)Log_event::read_log_event(&file, - (pthread_mutex_t*)0, - (bool)0)) || + (pthread_mutex_t*)0, + rli->relay_log.description_event_for_exec)) || lev->get_type_code() != NEW_LOAD_EVENT) { slave_print_error(rli,0, "Error in Exec_load event: file '%s' appears corrupted", fname); @@ -3294,15 +4071,7 @@ int Execute_load_log_event::exec_event(struct st_relay_log_info* rli) mysql_load()). */ -#if MYSQL_VERSION_ID < 40100 - rli->future_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? (LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#elif MYSQL_VERSION_ID < 50000 - rli->future_group_master_log_pos= log_pos + get_event_len() - - (rli->mi->old_format ? 
(LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else - rli->future_group_master_log_pos= log_pos; -#endif + rli->future_group_master_log_pos= log_pos; if (lev->exec_event(0,rli,1)) { /* @@ -3360,15 +4129,15 @@ err: sql_ex_info::write_data() */ -int sql_ex_info::write_data(IO_CACHE* file) +bool sql_ex_info::write_data(IO_CACHE* file) { if (new_format()) { - return (write_str(file, field_term, field_term_len) || - write_str(file, enclosed, enclosed_len) || - write_str(file, line_term, line_term_len) || - write_str(file, line_start, line_start_len) || - write_str(file, escaped, escaped_len) || + return (write_str(file, field_term, (uint) field_term_len) || + write_str(file, enclosed, (uint) enclosed_len) || + write_str(file, line_term, (uint) line_term_len) || + write_str(file, line_start, (uint) line_start_len) || + write_str(file, escaped, (uint) escaped_len) || my_b_safe_write(file,(byte*) &opt_flags,1)); } else @@ -3381,7 +4150,7 @@ int sql_ex_info::write_data(IO_CACHE* file) old_ex.escaped= *escaped; old_ex.opt_flags= opt_flags; old_ex.empty_flags=empty_flags; - return my_b_safe_write(file, (byte*) &old_ex, sizeof(old_ex)); + return my_b_safe_write(file, (byte*) &old_ex, sizeof(old_ex)) != 0; } } @@ -3403,11 +4172,11 @@ char* sql_ex_info::init(char* buf,char* buf_end,bool use_new_format) the case when we have old format because we will be reusing net buffer to read the actual file before we write out the Create_file event. */ - if (read_str(buf, buf_end, field_term, field_term_len) || - read_str(buf, buf_end, enclosed, enclosed_len) || - read_str(buf, buf_end, line_term, line_term_len) || - read_str(buf, buf_end, line_start, line_start_len) || - read_str(buf, buf_end, escaped, escaped_len)) + if (read_str(&buf, buf_end, &field_term, &field_term_len) || + read_str(&buf, buf_end, &enclosed, &enclosed_len) || + read_str(&buf, buf_end, &line_term, &line_term_len) || + read_str(&buf, buf_end, &line_start, &line_start_len) || + read_str(&buf, buf_end, &escaped, &escaped_len)) return 0; opt_flags = *buf++; } diff --git a/sql/log_event.h b/sql/log_event.h index f848f2ae1b9..64bb9d502e9 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1,15 +1,15 @@ /* Copyright (C) 2000 MySQL AB & MySQL Finland AB & TCX DataKonsult AB - + This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. - + This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - + You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ @@ -35,12 +35,42 @@ #define LOG_EVENT_OFFSET 4 -#define BINLOG_VERSION 3 +/* + 3 is MySQL 4.x; 4 is MySQL 5.0.0. + Compared to version 3, version 4 has: + - a different Start_log_event, which includes info about the binary log + (sizes of headers); this info is included for better compatibility if the + master's MySQL version is different from the slave's. 
+ - all events have a unique ID (the triplet (server_id, timestamp at server + start, other) to be sure an event is not executed more than once in a + multimaster setup, example: + M1 + / \ + v v + M2 M3 + \ / + v v + S + if a query is run on M1, it will arrive twice on S, so we need that S + remembers the last unique ID it has processed, to compare and know if the + event should be skipped or not. Example of ID: we already have the server id + (4 bytes), plus: + timestamp_when_the_master_started (4 bytes), a counter (a sequence number + which increments every time we write an event to the binlog) (3 bytes). + Q: how do we handle when the counter is overflowed and restarts from 0 ? + + - Query and Load (Create or Execute) events may have a more precise timestamp + (with microseconds), number of matched/affected/warnings rows + and fields of session variables: SQL_MODE, + FOREIGN_KEY_CHECKS, UNIQUE_CHECKS, SQL_AUTO_IS_NULL, the collations and + charsets, the PASSWORD() version (old/new/...). +*/ +#define BINLOG_VERSION 4 /* We could have used SERVER_VERSION_LENGTH, but this introduces an obscure dependency - if somebody decided to change SERVER_VERSION_LENGTH - this would have broken the replication protocol + this would break the replication protocol */ #define ST_SERVER_VER_LEN 50 @@ -49,6 +79,12 @@ TERMINATED etc). */ +/* + These are flags and structs to handle all the LOAD DATA INFILE options (LINES + TERMINATED etc). + DUMPFILE_FLAG is probably useless (DUMPFILE is a clause of SELECT, not of LOAD + DATA). +*/ #define DUMPFILE_FLAG 0x1 #define OPT_ENCLOSED_FLAG 0x2 #define REPLACE_FLAG 0x4 @@ -92,18 +128,18 @@ struct sql_ex_info char* escaped; int cached_new_format; uint8 field_term_len,enclosed_len,line_term_len,line_start_len, escaped_len; - char opt_flags; + char opt_flags; char empty_flags; - + // store in new format even if old is possible - void force_new_format() { cached_new_format = 1;} + void force_new_format() { cached_new_format = 1;} int data_size() { return (new_format() ? field_term_len + enclosed_len + line_term_len + line_start_len + escaped_len + 6 : 7); } - int write_data(IO_CACHE* file); + bool write_data(IO_CACHE* file); char* init(char* buf,char* buf_end,bool use_new_format); bool new_format() { @@ -136,19 +172,31 @@ struct sql_ex_info #define LOG_EVENT_HEADER_LEN 19 /* the fixed header length */ #define OLD_HEADER_LEN 13 /* the fixed header length in 3.23 */ +/* + Fixed header length, where 4.x and 5.0 agree. That is, 5.0 may have a longer + header (it will for sure when we have the unique event's ID), but at least + the first 19 bytes are the same in 4.x and 5.0. So when we have the unique + event's ID, LOG_EVENT_HEADER_LEN will be something like 26, but + LOG_EVENT_MINIMAL_HEADER_LEN will remain 19. +*/ +#define LOG_EVENT_MINIMAL_HEADER_LEN 19 /* event-specific post-header sizes */ -#define QUERY_HEADER_LEN (4 + 4 + 1 + 2) +// where 3.23, 4.x and 5.0 agree +#define QUERY_HEADER_MINIMAL_LEN (4 + 4 + 1 + 2) +// where 5.0 differs: 2 for len of N-bytes vars. 
+#define QUERY_HEADER_LEN (QUERY_HEADER_MINIMAL_LEN + 2) #define LOAD_HEADER_LEN (4 + 4 + 4 + 1 +1 + 4) -#define START_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4) -#define ROTATE_HEADER_LEN 8 +#define START_V3_HEADER_LEN (2 + ST_SERVER_VER_LEN + 4) +#define ROTATE_HEADER_LEN 8 // this is FROZEN (the Rotate post-header is frozen) #define CREATE_FILE_HEADER_LEN 4 #define APPEND_BLOCK_HEADER_LEN 4 #define EXEC_LOAD_HEADER_LEN 4 #define DELETE_FILE_HEADER_LEN 4 +#define FORMAT_DESCRIPTION_HEADER_LEN (START_V3_HEADER_LEN+1+LOG_EVENT_TYPES) -/* - Event header offsets; +/* + Event header offsets; these point to places inside the fixed header. */ @@ -158,11 +206,12 @@ struct sql_ex_info #define LOG_POS_OFFSET 13 #define FLAGS_OFFSET 17 -/* start event post-header */ +/* start event post-header (for v3 and v4) */ #define ST_BINLOG_VER_OFFSET 0 #define ST_SERVER_VER_OFFSET 2 #define ST_CREATED_OFFSET (ST_SERVER_VER_OFFSET + ST_SERVER_VER_LEN) +#define ST_COMMON_HEADER_LEN_OFFSET (ST_CREATED_OFFSET + 4) /* slave event post-header (this event is never written) */ @@ -176,7 +225,13 @@ struct sql_ex_info #define Q_EXEC_TIME_OFFSET 4 #define Q_DB_LEN_OFFSET 8 #define Q_ERR_CODE_OFFSET 9 +#define Q_STATUS_VARS_LEN_OFFSET 11 #define Q_DATA_OFFSET QUERY_HEADER_LEN +/* these are codes, not offsets; not more than 256 values (1 byte). */ +#define Q_FLAGS2_CODE 0 +#define Q_SQL_MODE_CODE 1 +#define Q_CATALOG_CODE 2 +#define Q_AUTO_INCREMENT 3 /* Intvar event post-header */ @@ -228,16 +283,6 @@ struct sql_ex_info /* DF = "Delete File" */ #define DF_FILE_ID_OFFSET 0 -#define QUERY_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN) -#define QUERY_DATA_OFFSET (LOG_EVENT_HEADER_LEN+QUERY_HEADER_LEN) -#define ROTATE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+ROTATE_HEADER_LEN) -#define LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+LOAD_HEADER_LEN) -#define CREATE_FILE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+\ - +LOAD_HEADER_LEN+CREATE_FILE_HEADER_LEN) -#define DELETE_FILE_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+DELETE_FILE_HEADER_LEN) -#define EXEC_LOAD_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+EXEC_LOAD_HEADER_LEN) -#define APPEND_BLOCK_EVENT_OVERHEAD (LOG_EVENT_HEADER_LEN+APPEND_BLOCK_HEADER_LEN) - /* 4 bytes which all binlogs should begin with */ #define BINLOG_MAGIC "\xfe\x62\x69\x6e" @@ -249,20 +294,40 @@ struct sql_ex_info So they are now removed and their place may later be reused for other flags. Then one must remember that Rotate events in 4.x have LOG_EVENT_FORCED_ROTATE_F set, so one should not rely on the value of the - replacing flag when reading a Rotate event. + replacing flag when reading a Rotate event. I keep the defines here just to remember what they were. */ #ifdef TO_BE_REMOVED #define LOG_EVENT_TIME_F 0x1 -#define LOG_EVENT_FORCED_ROTATE_F 0x2 +#define LOG_EVENT_FORCED_ROTATE_F 0x2 #endif -/* +/* If the query depends on the thread (for example: TEMPORARY TABLE). Currently this is used by mysqlbinlog to know it must print SET @@PSEUDO_THREAD_ID=xx; before the query (it would not hurt to print it for every query but this would be slow). */ -#define LOG_EVENT_THREAD_SPECIFIC_F 0x4 +#define LOG_EVENT_THREAD_SPECIFIC_F 0x4 + +/* + OPTIONS_WRITTEN_TO_BIN_LOG are the bits of thd->options which must be written + to the binlog. OPTIONS_WRITTEN_TO_BINLOG could be written into the + Format_description_log_event, so that if later we don't want to replicate a + variable we did replicate, or the contrary, it's doable. 
But it should not be + too hard to decide once for all of what we replicate and what we don't, among + the fixed 32 bits of thd->options. + I (Guilhem) have read through every option's usage, and it looks like + OPTION_AUTO_IS_NULL and OPTION_NO_FOREIGN_KEYS are the only ones which alter + how the query modifies the table. It's good to replicate + OPTION_RELAXED_UNIQUE_CHECKS too because otherwise, the slave may insert data + slower than the master, in InnoDB. + OPTION_BIG_SELECTS is not needed (the slave thread runs with + max_join_size=HA_POS_ERROR) and OPTION_BIG_TABLES is not needed either, as + the manual says (because a too big in-memory temp table is automatically + written to disk). +*/ +#define OPTIONS_WRITTEN_TO_BIN_LOG (OPTION_AUTO_IS_NULL | \ +OPTION_NO_FOREIGN_KEY_CHECKS | OPTION_RELAXED_UNIQUE_CHECKS) /* Suppress the generation of 'USE' statements before the actual @@ -279,13 +344,32 @@ struct sql_ex_info enum Log_event_type { - UNKNOWN_EVENT= 0, START_EVENT= 1, QUERY_EVENT= 2, STOP_EVENT= 3, - ROTATE_EVENT= 4, INTVAR_EVENT= 5, LOAD_EVENT=6, SLAVE_EVENT= 7, - CREATE_FILE_EVENT= 8, APPEND_BLOCK_EVENT= 9, EXEC_LOAD_EVENT= 10, - DELETE_FILE_EVENT= 11, NEW_LOAD_EVENT= 12, RAND_EVENT= 13, - USER_VAR_EVENT= 14 + /* + Every time you update this enum (when you add a type), you have to + update the code of Format_description_log_event::Format_description_log_event(). + Make sure you always insert new types ***BEFORE*** ENUM_END_EVENT. + */ + UNKNOWN_EVENT= 0, START_EVENT_V3, QUERY_EVENT, STOP_EVENT, ROTATE_EVENT, + INTVAR_EVENT, LOAD_EVENT, SLAVE_EVENT, CREATE_FILE_EVENT, + APPEND_BLOCK_EVENT, EXEC_LOAD_EVENT, DELETE_FILE_EVENT, + /* + NEW_LOAD_EVENT is like LOAD_EVENT except that it has a longer sql_ex, + allowing multibyte TERMINATED BY etc; both types share the same class + (Load_log_event) + */ + NEW_LOAD_EVENT, + RAND_EVENT, USER_VAR_EVENT, + FORMAT_DESCRIPTION_EVENT, + ENUM_END_EVENT /* end marker */ }; +/* + The number of types we handle in Format_description_log_event (UNKNOWN_EVENT + is not to be handled, it does not exist in binlogs, it does not have a + format). +*/ +#define LOG_EVENT_TYPES (ENUM_END_EVENT-1) + enum Int_event_type { INVALID_INT_EVENT = 0, LAST_INSERT_ID_EVENT = 1, INSERT_ID_EVENT = 2 @@ -298,8 +382,35 @@ class MYSQL_LOG; class THD; #endif +class Format_description_log_event; + struct st_relay_log_info; +#ifdef MYSQL_CLIENT +/* + A structure for mysqlbinlog to remember the last db, flags2, sql_mode etc; it + is passed to events' print() methods, so that they print only the necessary + USE and SET commands. +*/ +typedef struct st_last_event_info +{ + // TODO: have the last catalog here ?? + char db[FN_REFLEN+1]; // TODO: make this a LEX_STRING when thd->db is + bool flags2_inited; + uint32 flags2; + bool sql_mode_inited; + ulong sql_mode; /* must be same as THD.variables.sql_mode */ + ulong auto_increment_increment, auto_increment_offset; + st_last_event_info() + :flags2_inited(0), flags2(0), sql_mode_inited(0), sql_mode(0), + auto_increment_increment(1),auto_increment_offset(1) + { + db[0]= 0; /* initially, the db is unknown */ + } +} LAST_EVENT_INFO; +#endif + + /***************************************************************************** Log_event class @@ -310,37 +421,41 @@ struct st_relay_log_info; class Log_event { public: - /* - The offset in the log where this event originally appeared (it is preserved - in relay logs, making SHOW SLAVE STATUS able to print coordinates of the - event in the master's binlog). 
Note: when a transaction is written by the - master to its binlog (wrapped in BEGIN/COMMIT) the log_pos of all the - queries it contains is the one of the BEGIN (this way, when one does SHOW - SLAVE STATUS it sees the offset of the BEGIN, which is logical as rollback - may occur), except the COMMIT query which has its real offset. + /* + The offset in the log where this event originally appeared (it is + preserved in relay logs, making SHOW SLAVE STATUS able to print + coordinates of the event in the master's binlog). Note: when a + transaction is written by the master to its binlog (wrapped in + BEGIN/COMMIT) the log_pos of all the queries it contains is the + one of the BEGIN (this way, when one does SHOW SLAVE STATUS it + sees the offset of the BEGIN, which is logical as rollback may + occur), except the COMMIT query which has its real offset. */ my_off_t log_pos; - /* + /* A temp buffer for read_log_event; it is later analysed according to the event's type, and its content is distributed in the event-specific fields. */ - char *temp_buf; + char *temp_buf; /* - Timestamp on the master(for debugging and replication of NOW()/TIMESTAMP). - It is important for queries and LOAD DATA INFILE. This is set at the event's - creation time, except for Query and Load (et al.) events where this is set - at the query's execution time, which guarantees good replication (otherwise, - we could have a query and its event with different timestamps). + Timestamp on the master(for debugging and replication of + NOW()/TIMESTAMP). It is important for queries and LOAD DATA + INFILE. This is set at the event's creation time, except for Query + and Load (et al.) events where this is set at the query's + execution time, which guarantees good replication (otherwise, we + could have a query and its event with different timestamps). */ time_t when; /* The number of seconds the query took to run on the master. */ ulong exec_time; - /* - The master's server id (is preserved in the relay log; used to prevent from - infinite loops in circular replication). + /* Number of bytes written by write() function */ + ulong data_written; + + /* + The master's server id (is preserved in the relay log; used to prevent from + infinite loops in circular replication). */ uint32 server_id; - uint cached_event_len; /* Some 16 flags. Only one is really used now; look above for @@ -351,28 +466,32 @@ public: uint16 flags; bool cache_stmt; + #ifndef MYSQL_CLIENT THD* thd; - Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt); Log_event(); + Log_event(THD* thd_arg, uint16 flags_arg, bool cache_stmt); /* - read_log_event() functions read an event from a binlog or relay log; used by - SHOW BINLOG EVENTS, the binlog_dump thread on the master (reads master's - binlog), the slave IO thread (reads the event sent by binlog_dump), the - slave SQL thread (reads the event from the relay log). + read_log_event() functions read an event from a binlog or relay + log; used by SHOW BINLOG EVENTS, the binlog_dump thread on the + master (reads master's binlog), the slave IO thread (reads the + event sent by binlog_dump), the slave SQL thread (reads the event + from the relay log). If mutex is 0, the read will proceed without + mutex. We need the description_event to be able to parse the + event (to know the post-header's size); in fact in read_log_event + we detect the event's type, then call the specific event's + constructor and pass description_event as an argument. 
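A rough, hypothetical sketch of that dependency (names invented for illustration): given the common header length and the per-type post-header lengths carried by a description event, a reader can compute where any known event's variable-length part starts, without hard-coding sizes per server version.

#include <cstdint>

/* Illustrative stand-in for the information a Format_description event carries. */
struct Format_description_info {
  uint8_t common_header_len;        /* e.g. 19 for servers at this version */
  const uint8_t *post_header_len;   /* one entry per known event type */
  uint8_t number_of_event_types;    /* how many entries are valid */
};

/*
  Offset of the variable-length body of an event, or -1 if the type is unknown
  to this description (for example, written by a newer server).
  post_header_len is assumed to be indexed by type_code - 1, since
  UNKNOWN_EVENT (0) has no on-disk format and therefore no entry.
*/
long variable_part_offset(const Format_description_info &desc, uint8_t type_code)
{
  if (type_code == 0 || type_code > desc.number_of_event_types)
    return -1;
  return (long) desc.common_header_len + desc.post_header_len[type_code - 1];
}
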
*/ - // if mutex is 0, the read will proceed without mutex static Log_event* read_log_event(IO_CACHE* file, pthread_mutex_t* log_lock, - bool old_format); + const Format_description_log_event *description_event); static int read_log_event(IO_CACHE* file, String* packet, pthread_mutex_t* log_lock); - /* set_log_pos() is used to fill log_pos with tell(log). */ - void set_log_pos(MYSQL_LOG* log); /* - init_show_field_list() prepares the column names and types for the output of - SHOW BINLOG EVENTS; it is used only by SHOW BINLOG EVENTS. + init_show_field_list() prepares the column names and types for the + output of SHOW BINLOG EVENTS; it is used only by SHOW BINLOG + EVENTS. */ static void init_show_field_list(List<Item>* field_list); #ifdef HAVE_REPLICATION @@ -393,13 +512,15 @@ public: return thd ? thd->db : 0; } #else - // avoid having to link mysqlbinlog against libpthread - static Log_event* read_log_event(IO_CACHE* file, bool old_format); + Log_event() : temp_buf(0) {} + /* avoid having to link mysqlbinlog against libpthread */ + static Log_event* read_log_event(IO_CACHE* file, + const Format_description_log_event *description_event); /* print*() functions are used by mysqlbinlog */ - virtual void print(FILE* file, bool short_form = 0, char* last_db = 0) = 0; + virtual void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0) = 0; void print_timestamp(FILE* file, time_t *ts = 0); void print_header(FILE* file); -#endif +#endif static void *operator new(size_t size) { @@ -409,19 +530,23 @@ public: { my_free((gptr) ptr, MYF(MY_WME|MY_ALLOW_ZERO_PTR)); } - - int write(IO_CACHE* file); - int write_header(IO_CACHE* file); - virtual int write_data(IO_CACHE* file) - { return write_data_header(file) || write_data_body(file); } - virtual int write_data_header(IO_CACHE* file __attribute__((unused))) + + bool write_header(IO_CACHE* file, ulong data_length); + virtual bool write(IO_CACHE* file) + { + return (write_header(file, get_data_size()) || + write_data_header(file) || + write_data_body(file)); + } + virtual bool is_artificial_event() { return 0; } + virtual bool write_data_header(IO_CACHE* file) { return 0; } - virtual int write_data_body(IO_CACHE* file __attribute__((unused))) + virtual bool write_data_body(IO_CACHE* file __attribute__((unused))) { return 0; } virtual Log_event_type get_type_code() = 0; - virtual bool is_valid() = 0; + virtual bool is_valid() const = 0; inline bool get_cache_stmt() { return cache_stmt; } - Log_event(const char* buf, bool old_format); + Log_event(const char* buf, const Format_description_log_event* description_event); virtual ~Log_event() { free_temp_buf();} void register_temp_buf(char* buf) { temp_buf = buf; } void free_temp_buf() @@ -433,18 +558,30 @@ public: } } virtual int get_data_size() { return 0;} - virtual int get_data_body_offset() { return 0; } - int get_event_len() - { - return (cached_event_len ? cached_event_len : - (cached_event_len = LOG_EVENT_HEADER_LEN + get_data_size())); - } - static Log_event* read_log_event(const char* buf, int event_len, - const char **error, bool old_format); + /* + Get event length for simple events. For complicated events the length + is calculated during write() + */ + static Log_event* read_log_event(const char* buf, uint event_len, + const char **error, + const Format_description_log_event + *description_event); /* returns the human readable name of the event's type */ const char* get_type_str(); }; +/* + One class for each type of event. 
+ Two constructors for each class: + - one to create the event for logging (when the server acts as a master), + called after an update to the database is done, + which accepts parameters like the query, the database, the options for LOAD + DATA INFILE... + - one to create the event from a packet (when the server acts as a slave), + called before reproducing the update, which accepts parameters (like a + buffer). Used to read from the master, from the relay log, and in + mysqlbinlog. This constructor must be format-tolerant. +*/ /***************************************************************************** @@ -459,6 +596,7 @@ protected: char* data_buf; public: const char* query; + const char* catalog; const char* db; /* If we already know the length of the query string @@ -469,13 +607,60 @@ public: uint32 db_len; uint16 error_code; ulong thread_id; - /* - For events created by Query_log_event::exec_event (and - Load_log_event::exec_event()) we need the *original* thread id, to be able - to log the event with the original (=master's) thread id (fix for - BUG#1686). + /* + For events created by Query_log_event::exec_event (and + Load_log_event::exec_event()) we need the *original* thread id, to be able + to log the event with the original (=master's) thread id (fix for + BUG#1686). */ ulong slave_proxy_id; + + /* + Binlog format 3 and 4 start to differ (as far as class members are + concerned) from here. + */ + + int catalog_len; // <= 255 char; -1 means uninited + + /* + We want to be able to store a variable number of N-bit status vars: + (generally N=32; but N=64 for SQL_MODE) a user may want to log the number + of affected rows (for debugging) while another does not want to lose 4 + bytes in this. + The storage on disk is the following: + status_vars_len is part of the post-header, + status_vars are in the variable-length part, after the post-header, before + the db & query. + status_vars on disk is a sequence of pairs (code, value) where 'code' means + 'sql_mode', 'affected' etc. Sometimes 'value' must be a short string, so + its first byte is its length. For now the order of status vars is: + flags2 - sql_mode - catalog. + We should add the same thing to Load_log_event, but in fact + LOAD DATA INFILE is going to be logged with a new type of event (logging of + the plain text query), so Load_log_event would be frozen, so no need. The + new way of logging LOAD DATA INFILE would use a derived class of + Query_log_event, so automatically benefit from the work already done for + status variables in Query_log_event. + */ + uint16 status_vars_len; + + /* + 'flags2' is a second set of flags (on top of those in Log_event), for + session variables. These are thd->options which is & against a mask + (OPTIONS_WRITTEN_TO_BINLOG). + flags2_inited helps make a difference between flags2==0 (3.23 or 4.x + master, we don't know flags2, so use the slave server's global options) and + flags2==0 (5.0 master, we know this has a meaning of flags all down which + must influence the query). 
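As a concrete reading of the (code, value) layout just described, here is a hypothetical decoder sketch. The value widths are assumptions inferred from the comments in this patch (a 32-bit flags2 mask, a 64-bit SQL_MODE, a length-prefixed catalog string, two 16-bit auto_increment values) and are not verified against the writer; the names are illustrative only.

#include <cstdint>
#include <cstddef>
#include <string>

/* Status-var codes as defined earlier (Q_FLAGS2_CODE .. Q_AUTO_INCREMENT). */
enum { CODE_FLAGS2= 0, CODE_SQL_MODE= 1, CODE_CATALOG= 2, CODE_AUTO_INCREMENT= 3 };

struct Query_status_vars {
  bool     flags2_inited= false;
  uint32_t flags2= 0;
  bool     sql_mode_inited= false;
  uint64_t sql_mode= 0;
  std::string catalog;
  uint16_t auto_increment_increment= 1;
  uint16_t auto_increment_offset= 1;
};

static uint32_t le32(const unsigned char *p)
{ return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t) p[3] << 24); }

/*
  Walk the (code, value) pairs occupying status_vars_len bytes.
  Returns false on a truncated block or an unknown code (we cannot know the
  width of a value we do not recognize, so we stop rather than guess).
*/
bool read_status_vars(const unsigned char *p, size_t status_vars_len,
                      Query_status_vars *out)
{
  const unsigned char *end= p + status_vars_len;
  while (p < end)
  {
    switch (*p++)
    {
    case CODE_FLAGS2:
      if (end - p < 4) return false;
      out->flags2= le32(p); out->flags2_inited= true; p+= 4;
      break;
    case CODE_SQL_MODE:
      if (end - p < 8) return false;
      out->sql_mode= (uint64_t) le32(p) | ((uint64_t) le32(p + 4) << 32);
      out->sql_mode_inited= true; p+= 8;
      break;
    case CODE_CATALOG: {
      if (end - p < 1) return false;
      size_t len= *p++;                    /* first byte is the length */
      if ((size_t) (end - p) < len) return false;
      out->catalog.assign((const char *) p, len); p+= len;
      break;
    }
    case CODE_AUTO_INCREMENT:
      if (end - p < 4) return false;
      out->auto_increment_increment= (uint16_t) (p[0] | (p[1] << 8));
      out->auto_increment_offset=    (uint16_t) (p[2] | (p[3] << 8));
      p+= 4;
      break;
    default:
      return false;
    }
  }
  return true;
}
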
+ */ + bool flags2_inited; + bool sql_mode_inited; + + uint32 flags2; + /* In connections sql_mode is 32 bits now but will be 64 bits soon */ + ulong sql_mode; + ulong auto_increment_increment, auto_increment_offset; + #ifndef MYSQL_CLIENT Query_log_event(THD* thd_arg, const char* query_arg, ulong query_length, @@ -486,10 +671,11 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - Query_log_event(const char* buf, int event_len, bool old_format); + Query_log_event(const char* buf, uint event_len, + const Format_description_log_event *description_event); ~Query_log_event() { if (data_buf) @@ -498,17 +684,8 @@ public: } } Log_event_type get_type_code() { return QUERY_EVENT; } - int write(IO_CACHE* file); - int write_data(IO_CACHE* file); // returns 0 on success, -1 on error - bool is_valid() { return query != 0; } - int get_data_size() - { - return (q_len + db_len + 2 - + 4 // thread_id - + 4 // exec_time - + 2 // error_code - ); - } + bool write(IO_CACHE* file); + bool is_valid() const { return query != 0; } }; #ifdef HAVE_REPLICATION @@ -518,6 +695,7 @@ public: Slave Log Event class Note that this class is currently not used at all; no code writes a Slave_log_event (though some code in repl_failsafe.cc reads Slave_log_event). + So it's not a problem if this code is not maintained. ****************************************************************************/ class Slave_log_event: public Log_event @@ -533,20 +711,20 @@ public: int master_log_len; uint16 master_port; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Slave_log_event(THD* thd_arg, struct st_relay_log_info* rli); void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif - Slave_log_event(const char* buf, int event_len); + Slave_log_event(const char* buf, uint event_len); ~Slave_log_event(); int get_data_size(); - bool is_valid() { return master_host != 0; } + bool is_valid() const { return master_host != 0; } Log_event_type get_type_code() { return SLAVE_EVENT; } - int write_data(IO_CACHE* file ); + bool write(IO_CACHE* file); }; #endif /* HAVE_REPLICATION */ @@ -560,12 +738,18 @@ public: class Load_log_event: public Log_event { protected: - int copy_log_event(const char *buf, ulong event_len, bool old_format); + int copy_log_event(const char *buf, ulong event_len, + int body_offset, const Format_description_log_event* description_event); public: ulong thread_id; ulong slave_proxy_id; uint32 table_name_len; + /* + No need to have a catalog, as these events can only come from 4.x. + TODO: this may become false if Dmitri pushes his new LOAD DATA INFILE in + 5.0 only (not in 4.x). 
+ */ uint32 db_len; uint32 fname_len; uint32 num_fields; @@ -596,7 +780,7 @@ public: #ifndef MYSQL_CLIENT String field_lens_buf; String fields_buf; - + Load_log_event(THD* thd, sql_exchange* ex, const char* db_arg, const char* table_name_arg, List<Item>& fields_arg, enum enum_duplicates handle_dup, bool ignore, @@ -609,60 +793,74 @@ public: { return exec_event(thd->slave_net,rli,0); } - int exec_event(NET* net, struct st_relay_log_info* rli, + int exec_event(NET* net, struct st_relay_log_info* rli, bool use_rli_only_for_errors); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool commented); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info = 0); + void print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info, bool commented); #endif - Load_log_event(const char* buf, int event_len, bool old_format); + /* + Note that for all the events related to LOAD DATA (Load_log_event, + Create_file/Append/Exec/Delete, we pass description_event; however as + logging of LOAD DATA is going to be changed in 4.1 or 5.0, this is only used + for the common_header_len (post_header_len will not be changed). + */ + Load_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Load_log_event() {} Log_event_type get_type_code() { return sql_ex.new_format() ? NEW_LOAD_EVENT: LOAD_EVENT; } - int write_data_header(IO_CACHE* file); - int write_data_body(IO_CACHE* file); - bool is_valid() { return table_name != 0; } + bool write_data_header(IO_CACHE* file); + bool write_data_body(IO_CACHE* file); + bool is_valid() const { return table_name != 0; } int get_data_size() { - return (table_name_len + 2 + db_len + 2 + fname_len - + 4 // thread_id - + 4 // exec_time - + 4 // skip_lines - + 4 // field block len + return (table_name_len + db_len + 2 + fname_len + + LOAD_HEADER_LEN + sql_ex.data_size() + field_block_len + num_fields); } - int get_data_body_offset() { return LOAD_EVENT_OVERHEAD; } }; extern char server_version[SERVER_VERSION_LENGTH]; /***************************************************************************** - Start Log Event class + Start Log Event_v3 class + + Start_log_event_v3 is the Start_log_event of binlog format 3 (MySQL 3.23 and + 4.x). + Format_description_log_event derives from Start_log_event_v3; it is the + Start_log_event of binlog format 4 (MySQL 5.0), that is, the event that + describes the other events' header/postheader lengths. This event is sent by + MySQL 5.0 whenever it starts sending a new binlog if the requested position + is >4 (otherwise if ==4 the event will be sent naturally). ****************************************************************************/ -class Start_log_event: public Log_event + +class Start_log_event_v3: public Log_event { public: - /* - If this event is at the start of the first binary log since server startup - 'created' should be the timestamp when the event (and the binary log) was - created. - In the other case (i.e. this event is at the start of a binary log created - by FLUSH LOGS or automatic rotation), 'created' should be 0. - This "trick" is used by MySQL >=4.0.14 slaves to know if they must drop the - stale temporary tables or not. - Note that when 'created'!=0, it is always equal to the event's timestamp; - indeed Start_log_event is written only in log.cc where the first - constructor below is called, in which 'created' is set to 'when'. 
- So in fact 'created' is a useless variable. When it is 0 - we can read the actual value from timestamp ('when') and when it is - non-zero we can read the same value from timestamp ('when'). Conclusion: + /* + If this event is at the start of the first binary log since server + startup 'created' should be the timestamp when the event (and the + binary log) was created. In the other case (i.e. this event is at + the start of a binary log created by FLUSH LOGS or automatic + rotation), 'created' should be 0. This "trick" is used by MySQL + >=4.0.14 slaves to know if they must drop the stale temporary + tables or not. + + Note that when 'created'!=0, it is always equal to the event's + timestamp; indeed Start_log_event is written only in log.cc where + the first constructor below is called, in which 'created' is set + to 'when'. So in fact 'created' is a useless variable. When it is + 0 we can read the actual value from timestamp ('when') and when it + is non-zero we can read the same value from timestamp + ('when'). Conclusion: - we use timestamp to print when the binlog was created. - we use 'created' only to know if this is a first binlog or not. In 3.23.57 we did not pay attention to this identity, so mysqlbinlog in @@ -672,29 +870,85 @@ public: time_t created; uint16 binlog_version; char server_version[ST_SERVER_VER_LEN]; + /* + artifical_event is 1 in the case where this is a generated event that + should not case any cleanup actions. We handle this in the log by + setting log_event == 0 (for now). + */ + bool artificial_event; #ifndef MYSQL_CLIENT - Start_log_event() :Log_event(), binlog_version(BINLOG_VERSION) - { - created = (time_t) when; - memcpy(server_version, ::server_version, ST_SERVER_VER_LEN); - } + Start_log_event_v3(); #ifdef HAVE_REPLICATION void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif - - Start_log_event(const char* buf, bool old_format); - ~Start_log_event() {} - Log_event_type get_type_code() { return START_EVENT;} - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + Start_log_event_v3() {} + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif + + Start_log_event_v3(const char* buf, + const Format_description_log_event* description_event); + ~Start_log_event_v3() {} + Log_event_type get_type_code() { return START_EVENT_V3;} + bool write(IO_CACHE* file); + bool is_valid() const { return 1; } + int get_data_size() + { + return START_V3_HEADER_LEN; //no variable-sized part + } + virtual bool is_artificial_event() { return artificial_event; } +}; + + +/* + For binlog version 4. + This event is saved by threads which read it, as they need it for future + use (to decode the ordinary events). +*/ + +class Format_description_log_event: public Start_log_event_v3 +{ +public: + /* + The size of the fixed header which _all_ events have + (for binlogs written by this version, this is equal to + LOG_EVENT_HEADER_LEN), except FORMAT_DESCRIPTION_EVENT and ROTATE_EVENT + (those have a header of size LOG_EVENT_MINIMAL_HEADER_LEN). 
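To make the size arithmetic of the new event concrete, a small hypothetical sketch using the defines above. The validity check is simplified to the v3/v4 case; the real is_valid() below also accepts the shorter 13-byte header when binlog_version is 1.

#include <cstdint>
#include <cstddef>

const size_t st_server_ver_len=   50;                        /* ST_SERVER_VER_LEN */
const size_t start_v3_header_len= 2 + st_server_ver_len + 4; /* binlog_version, server_version, created */
const size_t minimal_header_len=  19;                        /* LOG_EVENT_MINIMAL_HEADER_LEN */

/*
  Expected post-header size of a Format_description event for a server that
  knows 'log_event_types' event types: the v3 fields, plus one byte for
  common_header_len, plus one post-header length per type.
  This mirrors FORMAT_DESCRIPTION_HEADER_LEN above.
*/
size_t format_description_header_len(size_t log_event_types)
{
  return start_v3_header_len + 1 + log_event_types;
}

/* Simplified mirror of the sanity check in is_valid() below. */
bool plausible_common_header_len(uint8_t common_header_len)
{
  return common_header_len >= minimal_header_len;
}
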
+ */ + uint8 common_header_len; + uint8 number_of_event_types; + /* The list of post-headers' lengthes */ + uint8 *post_header_len; + + Format_description_log_event(uint8 binlog_ver, const char* server_ver=0); + +#ifndef MYSQL_CLIENT +#ifdef HAVE_REPLICATION + int exec_event(struct st_relay_log_info* rli); +#endif /* HAVE_REPLICATION */ +#endif + + Format_description_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); + ~Format_description_log_event() { my_free((gptr)post_header_len, MYF(0)); } + Log_event_type get_type_code() { return FORMAT_DESCRIPTION_EVENT;} + bool write(IO_CACHE* file); + bool is_valid() const + { + return ((common_header_len >= ((binlog_version==1) ? OLD_HEADER_LEN : + LOG_EVENT_MINIMAL_HEADER_LEN)) && + (post_header_len != NULL)); + } int get_data_size() { - return START_HEADER_LEN; + /* + The vector of post-header lengths is considered as part of the + post-header, because in a given version it never changes (contrary to the + query in a Query_log_event). + */ + return FORMAT_DESCRIPTION_HEADER_LEN; } }; @@ -706,13 +960,14 @@ public: Logs special variables such as auto_increment values ****************************************************************************/ + class Intvar_log_event: public Log_event { public: ulonglong val; uchar type; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Intvar_log_event(THD* thd_arg,uchar type_arg, ulonglong val_arg) :Log_event(thd_arg,0,0),val(val_arg),type(type_arg) {} @@ -721,25 +976,30 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif - Intvar_log_event(const char* buf, bool old_format); + Intvar_log_event(const char* buf, const Format_description_log_event* description_event); ~Intvar_log_event() {} Log_event_type get_type_code() { return INTVAR_EVENT;} const char* get_var_type_name(); int get_data_size() { return 9; /* sizeof(type) + sizeof(val) */;} - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + bool write(IO_CACHE* file); + bool is_valid() const { return 1; } }; + /***************************************************************************** Rand Log Event class - Logs random seed used by the next RAND(), and by PASSWORD() in 4.1. + Logs random seed used by the next RAND(), and by PASSWORD() in 4.1.0. + 4.1.1 does not need it (it's repeatable again) so this event needn't be + written in 4.1.1 for PASSWORD() (but the fact that it is written is just a + waste, it does not cause bugs). 
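The two small events declared here and just below have fixed-size bodies: Intvar is one type byte plus an 8-byte value (its get_data_size() is 9), and the Rand event that follows stores the two 8-byte seeds the next RAND() will use (get_data_size() is 16). A hypothetical encoding sketch, assuming little-endian storage; these helpers are illustrative and not part of the patch.

#include <cstdint>
#include <cstddef>

static void store_le64(unsigned char *p, uint64_t v)
{
  for (int i= 0; i < 8; i++)
    p[i]= (unsigned char) (v >> (8 * i));
}

/* Intvar body: 1 type byte (LAST_INSERT_ID_EVENT or INSERT_ID_EVENT) plus the
   8-byte value; matches get_data_size() == 9. */
size_t encode_intvar_body(unsigned char *buf, uint8_t type, uint64_t val)
{
  buf[0]= type;
  store_le64(buf + 1, val);
  return 9;
}

/* Rand body: the two 8-byte seeds to restore on the slave; matches
   get_data_size() == 16. */
size_t encode_rand_body(unsigned char *buf, uint64_t seed1, uint64_t seed2)
{
  store_le64(buf, seed1);
  store_le64(buf + 8, seed2);
  return 16;
}
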
****************************************************************************/ + class Rand_log_event: public Log_event { public: @@ -755,17 +1015,18 @@ class Rand_log_event: public Log_event int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - Rand_log_event(const char* buf, bool old_format); + Rand_log_event(const char* buf, const Format_description_log_event* description_event); ~Rand_log_event() {} Log_event_type get_type_code() { return RAND_EVENT;} int get_data_size() { return 16; /* sizeof(ulonglong) * 2*/ } - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + bool write(IO_CACHE* file); + bool is_valid() const { return 1; } }; + /***************************************************************************** User var Log Event class @@ -773,7 +1034,11 @@ class Rand_log_event: public Log_event Every time a query uses the value of a user variable, a User_var_log_event is written before the Query_log_event, to set the user variable. + Every time a query uses the value of a user variable, a User_var_log_event is + written before the Query_log_event, to set the user variable. + ****************************************************************************/ + class User_var_log_event: public Log_event { public: @@ -794,22 +1059,17 @@ public: void pack_info(Protocol* protocol); int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - User_var_log_event(const char* buf, bool old_format); + User_var_log_event(const char* buf, const Format_description_log_event* description_event); ~User_var_log_event() {} Log_event_type get_type_code() { return USER_VAR_EVENT;} - int get_data_size() - { - return (is_null ? UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL : - UV_NAME_LEN_SIZE + name_len + UV_VAL_IS_NULL + UV_VAL_TYPE_SIZE + - UV_CHARSET_NUMBER_SIZE + UV_VAL_LEN_SIZE + val_len); - } - int write_data(IO_CACHE* file); - bool is_valid() { return 1; } + bool write(IO_CACHE* file); + bool is_valid() const { return 1; } }; + /***************************************************************************** Stop Log Event class @@ -825,15 +1085,15 @@ public: {} int exec_event(struct st_relay_log_info* rli); #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif - Stop_log_event(const char* buf, bool old_format): - Log_event(buf, old_format) + Stop_log_event(const char* buf, const Format_description_log_event* description_event): + Log_event(buf, description_event) {} ~Stop_log_event() {} Log_event_type get_type_code() { return STOP_EVENT;} - bool is_valid() { return 1; } + bool is_valid() const { return 1; } }; #endif /* HAVE_REPLICATION */ @@ -846,6 +1106,7 @@ public: This will be depricated when we move to using sequence ids. 
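A hypothetical sketch of the Rotate body implied by the sizes in the class below: an 8-byte position (ROTATE_HEADER_LEN, noted earlier as frozen) followed by the new binlog file name. That the name's length is simply implied by the event length rather than terminated is an assumption consistent with get_data_size() == ident_len + ROTATE_HEADER_LEN; the helper below is illustrative only.

#include <cstdint>
#include <cstddef>
#include <cstring>
#include <string>

static void store_le64(unsigned char *p, uint64_t v)
{
  for (int i= 0; i < 8; i++)
    p[i]= (unsigned char) (v >> (8 * i));
}

/*
  Rotate body: 8-byte offset at which the slave should start in the new file,
  then the file name (ident_len bytes, no terminator assumed). Returns the
  number of bytes written, or 0 if the caller's buffer is too small.
*/
size_t encode_rotate_body(unsigned char *buf, size_t buf_len,
                          uint64_t pos, const std::string &new_log_ident)
{
  if (buf_len < 8 + new_log_ident.size())
    return 0;
  store_le64(buf, pos);
  memcpy(buf + 8, new_log_ident.data(), new_log_ident.size());
  return 8 + new_log_ident.size();
}
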
****************************************************************************/ + class Rotate_log_event: public Log_event { public: @@ -853,7 +1114,7 @@ public: ulonglong pos; uint ident_len; bool alloced; -#ifndef MYSQL_CLIENT +#ifndef MYSQL_CLIENT Rotate_log_event(THD* thd_arg, const char* new_log_ident_arg, uint ident_len_arg = 0, ulonglong pos_arg = LOG_EVENT_OFFSET) @@ -866,10 +1127,11 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - Rotate_log_event(const char* buf, int event_len, bool old_format); + Rotate_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Rotate_log_event() { if (alloced) @@ -877,17 +1139,17 @@ public: } Log_event_type get_type_code() { return ROTATE_EVENT;} int get_data_size() { return ident_len + ROTATE_HEADER_LEN;} - bool is_valid() { return new_log_ident != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return new_log_ident != 0; } + bool write(IO_CACHE* file); }; + /* the classes below are for the new LOAD DATA INFILE logging */ /***************************************************************************** - Create File Log Event class - ****************************************************************************/ + class Create_file_log_event: public Load_log_event { protected: @@ -896,7 +1158,7 @@ protected: our Load part - used on the slave when writing event out to SQL_LOAD-*.info file */ - bool fake_base; + bool fake_base; public: char* block; const char *event_buf; @@ -916,11 +1178,12 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool enable_local); -#endif - - Create_file_log_event(const char* buf, int event_len, bool old_format); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); + void print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info, bool enable_local); +#endif + + Create_file_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Create_file_log_event() { my_free((char*) event_buf, MYF(MY_ALLOW_ZERO_PTR)); @@ -936,19 +1199,14 @@ public: Load_log_event::get_data_size() + 4 + 1 + block_len); } - int get_data_body_offset() - { - return (fake_base ? LOAD_EVENT_OVERHEAD: - LOAD_EVENT_OVERHEAD + CREATE_FILE_HEADER_LEN); - } - bool is_valid() { return inited_from_old || block != 0; } - int write_data_header(IO_CACHE* file); - int write_data_body(IO_CACHE* file); + bool is_valid() const { return inited_from_old || block != 0; } + bool write_data_header(IO_CACHE* file); + bool write_data_body(IO_CACHE* file); /* Cut out Create_file extentions and write it as Load event - used on the slave */ - int write_base(IO_CACHE* file); + bool write_base(IO_CACHE* file); }; @@ -957,6 +1215,7 @@ public: Append Block Log Event class ****************************************************************************/ + class Append_block_log_event: public Log_event { public: @@ -964,14 +1223,15 @@ public: uint block_len; uint file_id; /* - 'db' is filled when the event is created in mysql_load() (the event needs to - have a 'db' member to be well filtered by binlog-*-db rules). 
'db' is not - written to the binlog (it's not used by Append_block_log_event::write()), so - it can't be read in the Append_block_log_event(const char* buf, int - event_len) constructor. - In other words, 'db' is used only for filtering by binlog-*-db rules. - Create_file_log_event is different: its 'db' (which is inherited from - Load_log_event) is written to the binlog and can be re-read. + 'db' is filled when the event is created in mysql_load() (the + event needs to have a 'db' member to be well filtered by + binlog-*-db rules). 'db' is not written to the binlog (it's not + used by Append_block_log_event::write()), so it can't be read in + the Append_block_log_event(const char* buf, int event_len) + constructor. In other words, 'db' is used only for filtering by + binlog-*-db rules. Create_file_log_event is different: it's 'db' + (which is inherited from Load_log_event) is written to the binlog + and can be re-read. */ const char* db; @@ -983,29 +1243,32 @@ public: void pack_info(Protocol* protocol); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); #endif - - Append_block_log_event(const char* buf, int event_len); + + Append_block_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Append_block_log_event() {} Log_event_type get_type_code() { return APPEND_BLOCK_EVENT;} int get_data_size() { return block_len + APPEND_BLOCK_HEADER_LEN ;} - bool is_valid() { return block != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return block != 0; } + bool write(IO_CACHE* file); const char* get_db() { return db; } }; + /***************************************************************************** Delete File Log Event class ****************************************************************************/ + class Delete_file_log_event: public Log_event { public: uint file_id; const char* db; /* see comment in Append_block_log_event */ - + #ifndef MYSQL_CLIENT Delete_file_log_event(THD* thd, const char* db_arg, bool using_trans); #ifdef HAVE_REPLICATION @@ -1013,29 +1276,32 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); - void print(FILE* file, bool short_form, char* last_db, bool enable_local); -#endif - - Delete_file_log_event(const char* buf, int event_len); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); + void print(FILE* file, bool short_form, LAST_EVENT_INFO* last_event_info, bool enable_local); +#endif + + Delete_file_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Delete_file_log_event() {} Log_event_type get_type_code() { return DELETE_FILE_EVENT;} int get_data_size() { return DELETE_FILE_HEADER_LEN ;} - bool is_valid() { return file_id != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return file_id != 0; } + bool write(IO_CACHE* file); const char* get_db() { return db; } }; + /***************************************************************************** Execute Load Log Event class ****************************************************************************/ + class Execute_load_log_event: public Log_event { public: uint file_id; - const char* db; /* see comment in Append_block_log_event */ + const char* db; /* see comment in Append_block_log_event */ #ifndef MYSQL_CLIENT 
Execute_load_log_event(THD* thd, const char* db_arg, bool using_trans); @@ -1044,30 +1310,37 @@ public: int exec_event(struct st_relay_log_info* rli); #endif /* HAVE_REPLICATION */ #else - void print(FILE* file, bool short_form = 0, char* last_db = 0); -#endif - - Execute_load_log_event(const char* buf, int event_len); + void print(FILE* file, bool short_form = 0, LAST_EVENT_INFO* last_event_info= 0); +#endif + + Execute_load_log_event(const char* buf, uint event_len, + const Format_description_log_event* description_event); ~Execute_load_log_event() {} Log_event_type get_type_code() { return EXEC_LOAD_EVENT;} int get_data_size() { return EXEC_LOAD_HEADER_LEN ;} - bool is_valid() { return file_id != 0; } - int write_data(IO_CACHE* file); + bool is_valid() const { return file_id != 0; } + bool write(IO_CACHE* file); const char* get_db() { return db; } }; + #ifdef MYSQL_CLIENT class Unknown_log_event: public Log_event { public: - Unknown_log_event(const char* buf, bool old_format): - Log_event(buf, old_format) + /* + Even if this is an unknown event, we still pass description_event to + Log_event's ctor, this way we can extract maximum information from the + event's header (the unique ID for example). + */ + Unknown_log_event(const char* buf, const Format_description_log_event* description_event): + Log_event(buf, description_event) {} ~Unknown_log_event() {} - void print(FILE* file, bool short_form= 0, char* last_db= 0); + void print(FILE* file, bool short_form= 0, LAST_EVENT_INFO* last_event_info= 0); Log_event_type get_type_code() { return UNKNOWN_EVENT;} - bool is_valid() { return 1; } + bool is_valid() const { return 1; } }; -#endif +#endif #endif /* _log_event_h */ diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 204b319138e..01e2510328d 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -14,6 +14,15 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* + Mostly this file is used in the server. But a little part of it is used in + mysqlbinlog too (definition of SELECT_DISTINCT and others). + The consequence is that 90% of the file is wrapped in #ifndef MYSQL_CLIENT, + except the part which must be in the server and in the client. +*/ + +#ifndef MYSQL_CLIENT + #include <my_global.h> #include <mysql_version.h> #include <mysql_embed.h> @@ -55,7 +64,7 @@ char *sql_strmake_with_convert(const char *str, uint32 arg_length, CHARSET_INFO *from_cs, uint32 max_res_length, CHARSET_INFO *to_cs, uint32 *result_length); -void kill_one_thread(THD *thd, ulong id); +void kill_one_thread(THD *thd, ulong id, bool only_kill_query); bool net_request_file(NET* net, const char* fname); char* query_table_status(THD *thd,const char *db,const char *table_name); @@ -112,6 +121,26 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; #define TIME_FOR_COMPARE 5 // 5 compares == one read /* + Number of comparisons of table rowids equivalent to reading one row from a + table. +*/ +#define TIME_FOR_COMPARE_ROWID (TIME_FOR_COMPARE*2) + +/* + For sequential disk seeks the cost formula is: + DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST * #blocks_to_skip + + The cost of average seek + DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST*BLOCKS_IN_AVG_SEEK =1.0. +*/ +#define DISK_SEEK_BASE_COST ((double)0.5) + +#define BLOCKS_IN_AVG_SEEK 128 + +#define DISK_SEEK_PROP_COST ((double)0.5/BLOCKS_IN_AVG_SEEK) + + +/* Number of rows in a reference table when refereed through a not unique key. 
This value is only used when we don't know anything about the key distribution. @@ -173,8 +202,15 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; #define TEST_SIGINT 1024 /* Allow sigint on threads */ #define TEST_SYNCHRONIZATION 2048 /* get server to do sleep in some places */ +#endif + +/* + This is included in the server and in the client. + Options for select set by the yacc parser (stored in lex->options). + None of the 32 defines below should have its value changed, or this will + break replication. +*/ -/* options for select set by the yacc parser (stored in lex->options) */ #define SELECT_DISTINCT (1L << 0) #define SELECT_STRAIGHT_JOIN (1L << 1) #define SELECT_DESCRIBE (1L << 2) @@ -211,6 +247,12 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; key checks in some cases */ #define OPTION_RELAXED_UNIQUE_CHECKS (1L << 27) #define SELECT_NO_UNLOCK (1L << 28) +#define OPTION_SCHEMA_TABLE (1L << 29) +/* Flag set if setup_tables already done */ +#define OPTION_SETUP_TABLES_DONE (1L << 30) + +/* The rest of the file is included in the server only */ +#ifndef MYSQL_CLIENT /* Bits for different SQL modes modes (including ANSI mode) */ #define MODE_REAL_AS_FLOAT 1 @@ -227,12 +269,22 @@ extern CHARSET_INFO *national_charset_info, *table_alias_charset; #define MODE_DB2 2048 #define MODE_MAXDB 4096 #define MODE_NO_KEY_OPTIONS 8192 -#define MODE_NO_TABLE_OPTIONS 16384 -#define MODE_NO_FIELD_OPTIONS 32768 -#define MODE_MYSQL323 65536 -#define MODE_MYSQL40 (MODE_MYSQL323*2) -#define MODE_ANSI (MODE_MYSQL40*2) -#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2) +#define MODE_NO_TABLE_OPTIONS 16384 +#define MODE_NO_FIELD_OPTIONS 32768 +#define MODE_MYSQL323 65536 +#define MODE_MYSQL40 (MODE_MYSQL323*2) +#define MODE_ANSI (MODE_MYSQL40*2) +#define MODE_NO_AUTO_VALUE_ON_ZERO (MODE_ANSI*2) +#define MODE_NO_BACKSLASH_ESCAPES (MODE_NO_AUTO_VALUE_ON_ZERO*2) +#define MODE_STRICT_TRANS_TABLES (MODE_NO_BACKSLASH_ESCAPES*2) +#define MODE_STRICT_ALL_TABLES (MODE_STRICT_TRANS_TABLES*2) +#define MODE_NO_ZERO_IN_DATE (MODE_STRICT_ALL_TABLES*2) +#define MODE_NO_ZERO_DATE (MODE_NO_ZERO_IN_DATE*2) +#define MODE_INVALID_DATES (MODE_NO_ZERO_DATE*2) +#define MODE_ERROR_FOR_DIVISION_BY_ZERO (MODE_INVALID_DATES*2) +#define MODE_TRADITIONAL (MODE_ERROR_FOR_DIVISION_BY_ZERO*2) +#define MODE_NO_AUTO_CREATE_USER (MODE_TRADITIONAL*2) +#define MODE_HIGH_NOT_PRECEDENCE (MODE_NO_AUTO_CREATE_USER*2) #define RAID_BLOCK_SIZE 1024 @@ -328,6 +380,15 @@ typedef struct st_sql_list { first= save->first; elements+= save->elements; } + inline void push_back(struct st_sql_list *save) + { + if (save->first) + { + *next= save->first; + next= save->next; + elements+= save->elements; + } + } } SQL_LIST; @@ -344,6 +405,7 @@ inline THD *_current_thd(void) #include "sql_list.h" #include "sql_map.h" #include "handler.h" +#include "parse_file.h" #include "table.h" #include "field.h" /* Field definitions */ #include "protocol.h" @@ -355,18 +417,25 @@ typedef Comp_creator* (*chooser_compare_func_creator)(bool invert); void free_items(Item *item); void cleanup_items(Item *item); class THD; -void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0); -int check_one_table_access(THD *thd, ulong privilege, +void close_thread_tables(THD *thd, bool locked=0, bool skip_derived=0, + TABLE *stopper= 0); +bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables); +bool check_procedure_access(THD *thd,ulong want_access,char *db,char *name, + bool no_errors); +bool check_some_access(THD *thd, 
ulong want_access, TABLE_LIST *table); bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list); -int multi_update_precheck(THD *thd, TABLE_LIST *tables); -int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count); -int update_precheck(THD *thd, TABLE_LIST *tables); -int delete_precheck(THD *thd, TABLE_LIST *tables); -int insert_precheck(THD *thd, TABLE_LIST *tables); -int create_table_precheck(THD *thd, TABLE_LIST *tables, - TABLE_LIST *create_table); +bool multi_update_precheck(THD *thd, TABLE_LIST *tables); +bool multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count); +bool mysql_multi_update_prepare(THD *thd); +bool mysql_multi_delete_prepare(THD *thd); +bool mysql_insert_select_prepare(THD *thd); +bool update_precheck(THD *thd, TABLE_LIST *tables); +bool delete_precheck(THD *thd, TABLE_LIST *tables); +bool insert_precheck(THD *thd, TABLE_LIST *tables); +bool create_table_precheck(THD *thd, TABLE_LIST *tables, + TABLE_LIST *create_table); Item *negate_expression(THD *thd, Item *expr); #include "sql_class.h" #include "sql_acl.h" @@ -418,19 +487,20 @@ struct Query_cache_query_flags #define prepare_execute(A) ((A)->command == COM_EXECUTE) -int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent); -int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create); -int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent); +bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent); +bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create); +bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent); void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, ushort flags); -int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, - my_bool drop_temporary); +bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, + my_bool drop_temporary); int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool log_query); + bool drop_temporary, bool drop_view, bool log_query); int mysql_rm_table_part2_with_lock(THD *thd, TABLE_LIST *tables, bool if_exists, bool drop_temporary, bool log_query); int quick_rm_table(enum db_type base,const char *db, const char *table_name); +void close_cached_table(THD *thd, TABLE *table); bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list); bool mysql_change_db(THD *thd,const char *name); void mysql_parse(THD *thd,char *inBuf,uint length); @@ -438,8 +508,8 @@ bool mysql_test_parse_for_slave(THD *thd,char *inBuf,uint length); bool is_update_query(enum enum_sql_command command); bool alloc_query(THD *thd, char *packet, ulong packet_length); void mysql_init_select(LEX *lex); -void mysql_init_query(THD *thd, uchar *buf, uint length); void mysql_reset_thd_for_next_command(THD *thd); +void mysql_init_query(THD *thd, uchar *buf, uint length); bool mysql_new_select(LEX *lex, bool move_down); void create_select_for_variable(const char *var_name); void mysql_init_multi_delete(LEX *lex); @@ -450,7 +520,7 @@ extern "C" pthread_handler_decl(handle_one_connection,arg); extern "C" pthread_handler_decl(handle_bootstrap,arg); void end_thread(THD *thd,bool put_in_cache); void flush_thread_cache(); -void mysql_execute_command(THD *thd); +bool mysql_execute_command(THD *thd); bool do_command(THD *thd); bool dispatch_command(enum enum_server_command command, THD *thd, char* packet, uint packet_length); @@ -469,22 +539,22 @@ bool check_table_access(THD *thd, ulong want_access, TABLE_LIST *tables, bool 
no_errors); bool check_global_access(THD *thd, ulong want_access); -int mysql_backup_table(THD* thd, TABLE_LIST* table_list); -int mysql_restore_table(THD* thd, TABLE_LIST* table_list); - -int mysql_checksum_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_check_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_repair_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_analyze_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_optimize_table(THD* thd, TABLE_LIST* table_list, - HA_CHECK_OPT* check_opt); -int mysql_assign_to_keycache(THD* thd, TABLE_LIST* table_list, - LEX_STRING *key_cache_name); -int mysql_preload_keys(THD* thd, TABLE_LIST* table_list); +bool mysql_backup_table(THD* thd, TABLE_LIST* table_list); +bool mysql_restore_table(THD* thd, TABLE_LIST* table_list); + +bool mysql_checksum_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_check_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_repair_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_analyze_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_optimize_table(THD* thd, TABLE_LIST* table_list, + HA_CHECK_OPT* check_opt); +bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* table_list, + LEX_STRING *key_cache_name); +bool mysql_preload_keys(THD* thd, TABLE_LIST* table_list); int reassign_keycache_tables(THD* thd, KEY_CACHE *src_cache, KEY_CACHE *dst_cache); @@ -497,21 +567,26 @@ int setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, List<Item> &fields, List<Item> &all_fields, ORDER *order, bool *hidden_group_fields); -int handle_select(THD *thd, LEX *lex, select_result *result); -int mysql_select(THD *thd, Item ***rref_pointer_array, - TABLE_LIST *tables, uint wild_num, List<Item> &list, - COND *conds, uint og_num, ORDER *order, ORDER *group, - Item *having, ORDER *proc_param, ulong select_type, - select_result *result, SELECT_LEX_UNIT *unit, - SELECT_LEX *select_lex); +bool handle_select(THD *thd, LEX *lex, select_result *result, + ulong setup_tables_done_option); +bool mysql_select(THD *thd, Item ***rref_pointer_array, + TABLE_LIST *tables, uint wild_num, List<Item> &list, + COND *conds, uint og_num, ORDER *order, ORDER *group, + Item *having, ORDER *proc_param, ulong select_type, + select_result *result, SELECT_LEX_UNIT *unit, + SELECT_LEX *select_lex); void free_underlaid_joins(THD *thd, SELECT_LEX *select); -int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, - select_result *result); +bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, + select_result *result); int mysql_explain_select(THD *thd, SELECT_LEX *sl, char const *type, select_result *result); -int mysql_union(THD *thd, LEX *lex, select_result *result, - SELECT_LEX_UNIT *unit); -int mysql_handle_derived(LEX *lex); +bool mysql_union(THD *thd, LEX *lex, select_result *result, + SELECT_LEX_UNIT *unit, ulong setup_tables_done_option); +int mysql_handle_derived(LEX *lex, int (*processor)(THD *thd, + LEX *lex, + TABLE_LIST *table)); +int mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *t); +int mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *t); Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, Item ***copy_func, Field **from_field, bool group, bool modify_item, uint convert_blob_length); @@ -520,64 +595,66 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, 
List<Key> &keys, uint &db_options, handler *file, KEY *&key_info_buffer, uint &key_count, int select_field_count); -int mysql_create_table(THD *thd,const char *db, const char *table_name, - HA_CREATE_INFO *create_info, - List<create_field> &fields, List<Key> &keys, - bool tmp_table, uint select_field_count); +bool mysql_create_table(THD *thd,const char *db, const char *table_name, + HA_CREATE_INFO *create_info, + List<create_field> &fields, List<Key> &keys, + bool tmp_table, uint select_field_count); TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, - const char *db, const char *name, + TABLE_LIST *create_table, List<create_field> *extra_fields, List<Key> *keys, List<Item> *items, - MYSQL_LOCK **lock); -int mysql_alter_table(THD *thd, char *new_db, char *new_name, - HA_CREATE_INFO *create_info, - TABLE_LIST *table_list, - List<create_field> &fields, - List<Key> &keys, - uint order_num, ORDER *order, - enum enum_duplicates handle_duplicates, - bool ignore, - ALTER_INFO *alter_info, bool do_send_ok=1); -int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok); -int mysql_create_like_table(THD *thd, TABLE_LIST *table, - HA_CREATE_INFO *create_info, - Table_ident *src_table); + MYSQL_LOCK **lock); +bool mysql_alter_table(THD *thd, char *new_db, char *new_name, + HA_CREATE_INFO *create_info, + TABLE_LIST *table_list, + List<create_field> &fields, + List<Key> &keys, + uint order_num, ORDER *order, + enum enum_duplicates handle_duplicates, + bool ignore, + ALTER_INFO *alter_info, bool do_send_ok=1); +bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, bool do_send_ok); +bool mysql_create_like_table(THD *thd, TABLE_LIST *table, + HA_CREATE_INFO *create_info, + Table_ident *src_table); bool mysql_rename_table(enum db_type base, const char *old_db, const char * old_name, const char *new_db, const char * new_name); -int mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys); -int mysql_drop_index(THD *thd, TABLE_LIST *table_list, - ALTER_INFO *alter_info); -int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *update_table_list, - Item **conds, uint order_num, ORDER *order); +bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys); +bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, + ALTER_INFO *alter_info); +bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, + Item **conds, uint order_num, ORDER *order); int mysql_update(THD *thd,TABLE_LIST *tables,List<Item> &fields, List<Item> &values,COND *conds, - uint order_num, ORDER *order, ha_rows limit, + uint order_num, ORDER *order, ha_rows limit, enum enum_duplicates handle_duplicates, bool ignore); -int mysql_multi_update(THD *thd, TABLE_LIST *table_list, - List<Item> *fields, List<Item> *values, - COND *conds, ulong options, - enum enum_duplicates handle_duplicates, bool ignore, - SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex); -int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *insert_table_list, TABLE *table, - List<Item> &fields, List_item *values, - List<Item> &update_fields, - List<Item> &update_values, enum_duplicates duplic); -int mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, - List<List_item> &values, List<Item> &update_fields, - List<Item> &update_values, enum_duplicates flag, bool ignore); -int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); -int mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order, - ha_rows rows, ulong options); -int mysql_truncate(THD 
*thd, TABLE_LIST *table_list, bool dont_send_ok); +bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, + List<Item> *fields, List<Item> *values, + COND *conds, ulong options, + enum enum_duplicates handle_duplicates, bool ignore, + SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex); +bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, + List<Item> &fields, List_item *values, + List<Item> &update_fields, + List<Item> &update_values, enum_duplicates duplic, + COND **where, bool select_insert); +bool mysql_insert(THD *thd,TABLE_LIST *table,List<Item> &fields, + List<List_item> &values, List<Item> &update_fields, + List<Item> &update_values, enum_duplicates flag, + bool ignore); +int check_that_all_fields_are_given_values(THD *thd, TABLE *entry); +bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds); +bool mysql_delete(THD *thd, TABLE_LIST *table, COND *conds, SQL_LIST *order, + ha_rows rows, ulong options); +bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok); +bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create); TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type update); -TABLE *open_table(THD *thd,const char *db,const char *table,const char *alias, +TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT* mem, bool *refresh); TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table); TABLE *find_locked_table(THD *thd, const char *db,const char *table_name); @@ -592,12 +669,28 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name); void abort_locked_tables(THD *thd,const char *db, const char *table_name); void execute_init_command(THD *thd, sys_var_str *init_command_var, rw_lock_t *var_mutex); -extern const Field *not_found_field; +extern Field *not_found_field; +extern Field *view_ref_found; + +enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, + IGNORE_ERRORS, REPORT_EXCEPT_NON_UNIQUE, + IGNORE_EXCEPT_NON_UNIQUE}; Field *find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, - TABLE_LIST **where, bool report_error); -Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, - bool check_grant,bool allow_rowid, - uint *cached_field_index_ptr); + Item **ref, + find_item_error_report_type report_error, + bool check_privileges); +Field * +find_field_in_table(THD *thd, TABLE_LIST *table_list, + const char *name, const char *item_name, + uint length, Item **ref, + bool check_grants_table, bool check_grants_view, + bool allow_rowid, + uint *cached_field_index_ptr, + bool register_tree_change); +Field * +find_field_in_real_table(THD *thd, TABLE *table, const char *name, + uint length, bool check_grants, bool allow_rowid, + uint *cached_field_index_ptr); #ifdef HAVE_OPENSSL #include <openssl/des.h> struct st_des_keyblock @@ -617,50 +710,60 @@ void free_des_key_file(); #endif /* HAVE_OPENSSL */ /* sql_do.cc */ -int mysql_do(THD *thd, List<Item> &values); +bool mysql_do(THD *thd, List<Item> &values); /* sql_show.cc */ -int mysqld_show_dbs(THD *thd,const char *wild); -int mysqld_show_open_tables(THD *thd,const char *wild); -int mysqld_show_tables(THD *thd,const char *db,const char *wild); -int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild); -int mysqld_show_fields(THD *thd,TABLE_LIST *table, const char *wild, - bool verbose); -int mysqld_show_keys(THD *thd, TABLE_LIST *table); -int mysqld_show_logs(THD *thd); +bool mysqld_show_open_tables(THD *thd,const char *wild); +bool 
mysqld_show_logs(THD *thd); void append_identifier(THD *thd, String *packet, const char *name, uint length); int get_quote_char_for_identifier(THD *thd, const char *name, uint length); void mysqld_list_fields(THD *thd,TABLE_LIST *table, const char *wild); -int mysqld_dump_create_info(THD *thd, TABLE *table, int fd = -1); -int mysqld_show_create(THD *thd, TABLE_LIST *table_list); -int mysqld_show_create_db(THD *thd, char *dbname, HA_CREATE_INFO *create); +int mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd = -1); +bool mysqld_show_create(THD *thd, TABLE_LIST *table_list); +bool mysqld_show_create_db(THD *thd, char *dbname, HA_CREATE_INFO *create); void mysqld_list_processes(THD *thd,const char *user,bool verbose); int mysqld_show_status(THD *thd); int mysqld_show_variables(THD *thd,const char *wild); -int mysqld_show(THD *thd, const char *wild, show_var_st *variables, - enum enum_var_type value_type, - pthread_mutex_t *mutex); int mysql_find_files(THD *thd,List<char> *files, const char *db, const char *path, const char *wild, bool dir); -int mysqld_show_charsets(THD *thd,const char *wild); -int mysqld_show_collations(THD *thd,const char *wild); -int mysqld_show_storage_engines(THD *thd); -int mysqld_show_privileges(THD *thd); -int mysqld_show_column_types(THD *thd); -int mysqld_help (THD *thd, const char *text); +bool mysqld_show_storage_engines(THD *thd); +bool mysqld_show_privileges(THD *thd); +bool mysqld_show_column_types(THD *thd); +bool mysqld_help (THD *thd, const char *text); +void calc_sum_of_all_status(STATUS_VAR *to); + + + +/* information schema */ +extern LEX_STRING information_schema_name; +LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str, + const char* str, uint length, + bool allocate_lex_string= 0); +ST_SCHEMA_TABLE *find_schema_table(THD *thd, const char* table_name); +ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx); +int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, + enum enum_schema_tables schema_table_idx); +int make_schema_select(THD *thd, SELECT_LEX *sel, + enum enum_schema_tables schema_table_idx); +int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list); +int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond); +int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond); +int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond); +int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond); +bool get_schema_tables_result(JOIN *join); /* sql_prepare.cc */ -int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, - LEX_STRING *name=NULL); +bool mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, + LEX_STRING *name=NULL); void mysql_stmt_execute(THD *thd, char *packet, uint packet_length); void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name); +void mysql_stmt_fetch(THD *thd, char *packet, uint packet_length); void mysql_stmt_free(THD *thd, char *packet); void mysql_stmt_reset(THD *thd, char *packet); void mysql_stmt_get_longdata(THD *thd, char *pos, ulong packet_length); -int check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, - List<Item> &values, ulong counter); +void reset_stmt_for_execute(THD *thd, LEX *lex); /* sql_error.cc */ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, uint code, @@ -668,13 +771,13 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, uint void push_warning_printf(THD *thd, 
MYSQL_ERROR::enum_warning_level level, uint code, const char *format, ...); void mysql_reset_errors(THD *thd); -my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show); +bool mysqld_show_warnings(THD *thd, ulong levels_to_show); /* sql_handler.cc */ -int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen= 0); -int mysql_ha_close(THD *thd, TABLE_LIST *tables); -int mysql_ha_read(THD *, TABLE_LIST *,enum enum_ha_read_modes,char *, - List<Item> *,enum ha_rkey_function,Item *,ha_rows,ha_rows); +bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen= 0); +bool mysql_ha_close(THD *thd, TABLE_LIST *tables); +bool mysql_ha_read(THD *, TABLE_LIST *,enum enum_ha_read_modes,char *, + List<Item> *,enum ha_rkey_function,Item *,ha_rows,ha_rows); int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags); /* mysql_ha_flush mode_flags bits */ #define MYSQL_HA_CLOSE_FINAL 0x00 @@ -700,10 +803,9 @@ bool add_proc_to_list(THD *thd, Item *item); TABLE *unlink_open_table(THD *thd,TABLE *list,TABLE *find); SQL_SELECT *make_select(TABLE *head, table_map const_tables, - table_map read_tables, COND *conds, int *error); -enum find_item_error_report_type {REPORT_ALL_ERRORS, REPORT_EXCEPT_NOT_FOUND, - IGNORE_ERRORS}; -extern const Item **not_found_item; + table_map read_tables, COND *conds, int *error, + bool allow_null_cond= false); +extern Item **not_found_item; Item ** find_item_in_list(Item *item, List<Item> &items, uint *counter, find_item_error_report_type report_error, bool *unaliased); @@ -711,21 +813,24 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, List<String> *index_list); bool insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, const char *table_name, - List_iterator<Item> *it); -bool setup_tables(TABLE_LIST *tables); + List_iterator<Item> *it, bool any_privileges, + bool allocate_view_names); +bool setup_tables(THD *thd, TABLE_LIST *tables, Item **conds, + TABLE_LIST **leaves, bool refresh_only, + bool select_insert); int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, List<Item> *sum_func_list, uint wild_num); -int setup_fields(THD *thd, Item** ref_pointer_array, TABLE_LIST *tables, - List<Item> &item, bool set_query_id, - List<Item> *sum_func_list, bool allow_sum_func); -int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds); +bool setup_fields(THD *thd, Item** ref_pointer_array, TABLE_LIST *tables, + List<Item> &item, bool set_query_id, + List<Item> *sum_func_list, bool allow_sum_func); +int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, + COND **conds); int setup_ftfuncs(SELECT_LEX* select); int init_ftfuncs(THD *thd, SELECT_LEX* select, bool no_order); void wait_for_refresh(THD *thd); int open_tables(THD *thd, TABLE_LIST *tables, uint *counter); int simple_open_n_lock_tables(THD *thd,TABLE_LIST *tables); -int open_and_lock_tables(THD *thd,TABLE_LIST *tables); -void relink_tables_for_derived(THD *thd); +bool open_and_lock_tables(THD *thd,TABLE_LIST *tables); int lock_tables(THD *thd, TABLE_LIST *tables, uint counter); TABLE *open_temporary_table(THD *thd, const char *path, const char *db, const char *table_name, bool link_in_list); @@ -734,11 +839,11 @@ void free_io_cache(TABLE *entry); void intern_close_table(TABLE *entry); bool close_thread_table(THD *thd, TABLE **table_ptr); void close_temporary_tables(THD *thd); -TABLE_LIST * find_table_in_list(TABLE_LIST *table, - const char *db_name, const char *table_name); -TABLE_LIST * find_real_table_in_list(TABLE_LIST *table, - const char *db_name, - const char *table_name); 
+TABLE_LIST *find_table_in_list(TABLE_LIST *table, + uint offset_to_list, + const char *db_name, + const char *table_name); +TABLE_LIST *unique_table(TABLE_LIST *table, TABLE_LIST *table_list); TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name); bool close_temporary_table(THD *thd, const char *db, const char *table_name); void close_temporary(TABLE *table, bool delete_table=1); @@ -750,19 +855,37 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table, bool return_if_owned_by_thd=0); bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables); void copy_field_from_tmp_record(Field *field,int offset); -int fill_record(List<Item> &fields,List<Item> &values, bool ignore_errors); -int fill_record(Field **field,List<Item> &values, bool ignore_errors); +bool fill_record(THD *thd, List<Item> &fields, List<Item> &values, + bool ignore_errors); +bool fill_record(THD *thd, Field **field, List<Item> &values, + bool ignore_errors); OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild); +inline TABLE_LIST *find_table_in_global_list(TABLE_LIST *table, + const char *db_name, + const char *table_name) +{ + return find_table_in_list(table, offsetof(TABLE_LIST, next_global), + db_name, table_name); +} + +inline TABLE_LIST *find_table_in_local_list(TABLE_LIST *table, + const char *db_name, + const char *table_name) +{ + return find_table_in_list(table, offsetof(TABLE_LIST, next_local), + db_name, table_name); +} + + /* sql_calc.cc */ bool eval_const_cond(COND *cond); /* sql_load.cc */ -int mysql_load(THD *thd,sql_exchange *ex, TABLE_LIST *table_list, - List<Item> &fields, enum enum_duplicates handle_duplicates, - bool ignore, - bool local_file,thr_lock_type lock_type); -int write_record(TABLE *table,COPY_INFO *info); +bool mysql_load(THD *thd, sql_exchange *ex, TABLE_LIST *table_list, + List<Item> &fields, enum enum_duplicates handle_duplicates, + bool ignore, bool local_file, thr_lock_type lock_type); +int write_record(THD *thd, TABLE *table, COPY_INFO *info); /* sql_manager.cc */ /* bits set in manager_status */ @@ -777,12 +900,15 @@ extern "C" pthread_handler_decl(handle_manager, arg); void print_where(COND *cond,const char *info); void print_cached_tables(void); void TEST_filesort(SORT_FIELD *sortorder,uint s_length); +void print_plan(JOIN* join, double read_time, double record_count, + uint idx, const char *info); #endif void mysql_print_status(THD *thd); /* key.cc */ int find_ref_key(TABLE *form,Field *field, uint *offset); -void key_copy(byte *key,TABLE *form,uint index,uint key_length); -void key_restore(TABLE *form,byte *key,uint index,uint key_length); +void key_copy(byte *to_key, byte *from_record, KEY *key_info, uint key_length); +void key_restore(byte *to_record, byte *from_key, KEY *key_info, + uint key_length); bool key_cmp_if_same(TABLE *form,const byte *key,uint index,uint key_length); void key_unpack(String *to,TABLE *form,uint index); bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields); @@ -855,42 +981,36 @@ extern char language[LIBLEN],reg_ext[FN_EXTLEN]; extern char glob_hostname[FN_REFLEN], mysql_home[FN_REFLEN]; extern char pidfile_name[FN_REFLEN], system_time_zone[30], *opt_init_file; extern char log_error_file[FN_REFLEN]; +extern double last_query_cost; extern double log_10[32]; extern ulonglong log_10_int[20]; extern ulonglong keybuff_size; -extern ulong refresh_version,flush_version, thread_id,query_id,opened_tables; -extern ulong created_tmp_tables, created_tmp_disk_tables, bytes_sent; +extern 
ulong refresh_version,flush_version, thread_id,query_id; extern ulong binlog_cache_use, binlog_cache_disk_use; extern ulong aborted_threads,aborted_connects; extern ulong delayed_insert_timeout; extern ulong delayed_insert_limit, delayed_queue_size; extern ulong delayed_insert_threads, delayed_insert_writes; extern ulong delayed_rows_in_use,delayed_insert_errors; -extern ulong filesort_rows, filesort_range_count, filesort_scan_count; -extern ulong filesort_merge_passes; -extern ulong select_range_check_count, select_range_count, select_scan_count; -extern ulong select_full_range_join_count,select_full_join_count; extern ulong slave_open_temp_tables; extern ulong query_cache_size, query_cache_min_res_unit; extern ulong thd_startup_options, slow_launch_threads, slow_launch_time; extern ulong server_id, concurrency; -extern ulong ha_read_count, ha_write_count, ha_delete_count, ha_update_count; -extern ulong ha_read_key_count, ha_read_next_count, ha_read_prev_count; -extern ulong ha_read_first_count, ha_read_last_count; -extern ulong ha_read_rnd_count, ha_read_rnd_next_count, ha_discover_count; -extern ulong ha_commit_count, ha_rollback_count,table_cache_size; +extern ulong ha_read_count, ha_discover_count; +extern ulong table_cache_size; extern ulong max_connections,max_connect_errors, connect_timeout; extern ulong slave_net_timeout; extern ulong max_user_connections; -extern ulong long_query_count, what_to_log,flush_time; +extern my_bool timed_mutexes; +extern ulong what_to_log,flush_time; extern ulong query_buff_size, thread_stack,thread_stack_min; extern ulong binlog_cache_size, max_binlog_cache_size, open_files_limit; extern ulong max_binlog_size, max_relay_log_size; extern ulong rpl_recovery_rank, thread_cache_size; -extern ulong com_stat[(uint) SQLCOM_END], com_other, back_log; +extern ulong back_log; extern ulong specialflag, current_pid; extern ulong expire_logs_days, sync_binlog_period, sync_binlog_counter; -extern my_bool relay_log_purge, opt_innodb_safe_binlog; +extern my_bool relay_log_purge, opt_innodb_safe_binlog, opt_innodb; extern uint test_flags,select_errors,ha_open_options; extern uint protocol_version, mysqld_port, dropping_tables; extern uint delay_key_write_options, lower_case_table_names; @@ -900,6 +1020,7 @@ extern bool using_update_log, opt_large_files, server_id_supplied; extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log; extern bool opt_disable_networking, opt_skip_show_db; extern bool volatile abort_loop, shutdown_in_progress, grant_option; +extern bool mysql_proc_table_exists; extern uint volatile thread_count, thread_running, global_read_lock; extern my_bool opt_sql_bin_update, opt_safe_user_create, opt_no_mix_types; extern my_bool opt_safe_show_db, opt_local_infile; @@ -907,12 +1028,16 @@ extern my_bool opt_slave_compressed_protocol, use_temp_pool; extern my_bool opt_readonly, lower_case_file_system; extern my_bool opt_enable_named_pipe, opt_sync_frm; extern my_bool opt_secure_auth; +extern my_bool sp_automatic_privileges; +extern my_bool opt_old_style_user_limits; extern uint opt_crash_binlog_innodb; extern char *shared_memory_base_name, *mysqld_unix_port; extern bool opt_enable_shared_memory; extern char *default_tz_name; +extern my_bool opt_large_pages; +extern uint opt_large_page_size; -extern MYSQL_LOG mysql_log,mysql_update_log,mysql_slow_log,mysql_bin_log; +extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log; extern FILE *bootstrap_file; extern pthread_key(MEM_ROOT**,THR_MALLOC); extern pthread_mutex_t 
LOCK_mysql_create_db,LOCK_Acl,LOCK_open, @@ -936,6 +1061,7 @@ extern SHOW_COMP_OPTION have_berkeley_db; extern SHOW_COMP_OPTION have_ndbcluster; extern struct system_variables global_system_variables; extern struct system_variables max_system_variables; +extern struct system_status_var global_status_var; extern struct rand_struct sql_rand; extern KEY_CACHE *sql_key_cache; @@ -953,6 +1079,7 @@ extern struct my_option my_long_options[]; extern SHOW_COMP_OPTION have_isam, have_innodb, have_berkeley_db; extern SHOW_COMP_OPTION have_example_db, have_archive_db, have_csv_db; +extern SHOW_COMP_OPTION have_federated_db; extern SHOW_COMP_OPTION have_raid, have_openssl, have_symlink; extern SHOW_COMP_OPTION have_query_cache, have_berkeley_db, have_innodb; extern SHOW_COMP_OPTION have_geometry, have_rtree_keys; @@ -977,9 +1104,10 @@ void mysql_lock_abort_for_thread(THD *thd, TABLE *table); MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b); bool lock_global_read_lock(THD *thd); void unlock_global_read_lock(THD *thd); -bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, bool is_not_commit); +bool wait_if_global_read_lock(THD *thd, bool abort_on_refresh, + bool is_not_commit); void start_waiting_global_read_lock(THD *thd); -void make_global_read_lock_block_commit(THD *thd); +bool make_global_read_lock_block_commit(THD *thd); /* Lock based on name */ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list); @@ -1004,8 +1132,8 @@ int rea_create_table(THD *thd, my_string file_name,HA_CREATE_INFO *create_info, uint key_count,KEY *key_info); int format_number(uint inputflag,uint max_length,my_string pos,uint length, my_string *errpos); -int openfrm(const char *name,const char *alias,uint filestat,uint prgflag, - uint ha_open_flags, TABLE *outparam); +int openfrm(THD *thd, const char *name,const char *alias,uint filestat, + uint prgflag, uint ha_open_flags, TABLE *outparam); int readfrm(const char *name, const void** data, uint* length); int writefrm(const char* name, const void* data, uint len); int closefrm(TABLE *table); @@ -1015,20 +1143,18 @@ void free_blobs(TABLE *table); int set_zone(int nr,int min_zone,int max_zone); ulong convert_period_to_month(ulong period); ulong convert_month_to_period(ulong month); -uint calc_days_in_year(uint year); void get_date_from_daynr(long daynr,uint *year, uint *month, uint *day); my_time_t TIME_to_timestamp(THD *thd, const TIME *t, bool *not_exist); bool str_to_time_with_warn(const char *str,uint length,TIME *l_time); timestamp_type str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, uint flags); -longlong number_to_TIME(longlong nr, TIME *time_res, bool fuzzy_date, - int *was_cut); void localtime_to_TIME(TIME *to, struct tm *from); void calc_time_from_sec(TIME *to, long seconds, long microseconds); void make_truncated_value_warning(THD *thd, const char *str_val, - uint str_length, timestamp_type time_type); + uint str_length, timestamp_type time_type, + const char *field_name); extern DATE_TIME_FORMAT *date_time_format_make(timestamp_type format_type, const char *format_str, uint format_length); @@ -1044,10 +1170,6 @@ void make_date(const DATE_TIME_FORMAT *format, const TIME *l_time, String *str); void make_time(const DATE_TIME_FORMAT *format, const TIME *l_time, String *str); -ulonglong TIME_to_ulonglong_datetime(const TIME *time); -ulonglong TIME_to_ulonglong_date(const TIME *time); -ulonglong TIME_to_ulonglong_time(const TIME *time); -ulonglong TIME_to_ulonglong(const TIME *time); int test_if_number(char *str,int 
*res,bool allow_wildcards); void change_byte(byte *,uint,char,char); @@ -1119,6 +1241,8 @@ extern int yyparse(void *thd); SQL_CRYPT *get_crypt_for_frm(void); #endif +#include "sql_view.h" + /* Some inline functions for more speed */ inline bool add_item_to_list(THD *thd, Item *item) @@ -1234,3 +1358,5 @@ bool check_stack_overrun(THD *thd,char *dummy); inline void kill_delayed_threads(void) {} #define check_stack_overrun(A, B) 0 #endif + +#endif /* MYSQL_CLIENT */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 476d7e440e9..e9d4a088d16 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -22,6 +22,7 @@ #include "repl_failsafe.h" #include "stacktrace.h" #include "mysqld_suffix.h" +#include "mysys_err.h" #ifdef HAVE_BERKELEY_DB #include "ha_berkeley.h" #endif @@ -66,6 +67,8 @@ #include <thr_alarm.h> #include <ft_global.h> #include <errmsg.h> +#include "sp_rcontext.h" +#include "sp_cache.h" #define mysqld_charset &my_charset_latin1 @@ -79,10 +82,6 @@ #define IF_PURIFY(A,B) (B) #endif -#ifndef INADDR_NONE -#define INADDR_NONE -1 // Error value from inet_addr -#endif - /* stack traces are only supported on linux intel */ #if defined(__linux__) && defined(__i386__) && defined(USE_PSTACK) #define HAVE_STACK_TRACE_ON_SEGV @@ -112,10 +111,11 @@ extern "C" { // Because of SCO 3.2V4.2 #ifdef HAVE_GRP_H #include <grp.h> #endif +#include <my_net.h> #if defined(OS2) # include <sys/un.h> -#elif !defined( __WIN__) +#elif !defined(__WIN__) # ifndef __NETWARE__ #include <sys/resource.h> # endif /* __NETWARE__ */ @@ -234,7 +234,10 @@ const char *sql_mode_names[] = "NO_DIR_IN_CREATE", "POSTGRESQL", "ORACLE", "MSSQL", "DB2", "MAXDB", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "MYSQL323", "MYSQL40", "ANSI", - "NO_AUTO_VALUE_ON_ZERO", NullS + "NO_AUTO_VALUE_ON_ZERO", "NO_BACKSLASH_ESCAPES", "STRICT_TRANS_TABLES", "STRICT_ALL_TABLES", + "NO_ZERO_IN_DATE", "NO_ZERO_DATE", "ALLOW_INVALID_DATES", "ERROR_FOR_DIVISION_BY_ZERO", + "TRADITIONAL", "NO_AUTO_CREATE_USER", "HIGH_NOT_PRECEDENCE", + NullS }; TYPELIB sql_mode_typelib= { array_elements(sql_mode_names)-1,"", sql_mode_names, NULL }; @@ -292,7 +295,16 @@ my_bool opt_short_log_format= 0; my_bool opt_log_queries_not_using_indexes= 0; my_bool lower_case_file_system= 0; my_bool opt_innodb_safe_binlog= 0; +my_bool opt_large_pages= 0; +uint opt_large_page_size= 0; +my_bool opt_old_style_user_limits= 0; +/* + True if there is at least one per-hour limit for some user, so we should check + them before each query (and possibly reset counters when hour is changed). + False otherwise. 
+*/ volatile bool mqh_used = 0; +my_bool sp_automatic_privileges= 1; uint mysqld_port, test_flags, select_errors, dropping_tables, ha_open_options; uint delay_key_write_options, protocol_version; @@ -308,29 +320,24 @@ ulong open_files_limit, max_binlog_size, max_relay_log_size; ulong slave_net_timeout; ulong thread_cache_size=0, binlog_cache_size=0, max_binlog_cache_size=0; ulong query_cache_size=0; -ulong com_stat[(uint) SQLCOM_END], com_other; -ulong bytes_sent, bytes_received, net_big_packet_count; ulong refresh_version, flush_version; /* Increments on each reload */ -ulong query_id, long_query_count; +ulong query_id; ulong aborted_threads, killed_threads, aborted_connects; ulong delayed_insert_timeout, delayed_insert_limit, delayed_queue_size; ulong delayed_insert_threads, delayed_insert_writes, delayed_rows_in_use; ulong delayed_insert_errors,flush_time, thread_created; -ulong filesort_rows, filesort_range_count, filesort_scan_count; -ulong filesort_merge_passes; -ulong select_range_check_count, select_range_count, select_scan_count; -ulong select_full_range_join_count,select_full_join_count; -ulong specialflag=0,opened_tables=0,created_tmp_tables=0, - created_tmp_disk_tables=0; +ulong specialflag=0; ulong binlog_cache_use= 0, binlog_cache_disk_use= 0; ulong max_connections,max_used_connections, max_connect_errors, max_user_connections = 0; ulong thread_id=1L,current_pid; +my_bool timed_mutexes= 0; ulong slow_launch_threads = 0, sync_binlog_period; ulong expire_logs_days = 0; ulong rpl_recovery_rank=0; ulong my_bind_addr; /* the address we bind to */ volatile ulong cached_thread_count= 0; +double last_query_cost= -1; /* -1 denotes that no query was compiled yet */ double log_10[32]; /* 10 potences */ time_t start_time; @@ -380,6 +387,7 @@ I_List<NAMED_LIST> key_caches; struct system_variables global_system_variables; struct system_variables max_system_variables; +struct system_status_var global_status_var; MY_TMPDIR mysql_tmpdir_list; MY_BITMAP temp_pool; @@ -390,6 +398,7 @@ CHARSET_INFO *national_charset_info, *table_alias_charset; SHOW_COMP_OPTION have_berkeley_db, have_innodb, have_isam, have_ndbcluster, have_example_db, have_archive_db, have_csv_db; +SHOW_COMP_OPTION have_federated_db; SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache; SHOW_COMP_OPTION have_geometry, have_rtree_keys; SHOW_COMP_OPTION have_crypt, have_compress; @@ -538,6 +547,8 @@ static void close_connections(void) #ifdef EXTRA_DEBUG int count=0; #endif + THD *thd= current_thd; + DBUG_ENTER("close_connections"); /* Clear thread cache */ @@ -548,14 +559,14 @@ static void close_connections(void) (void) pthread_mutex_lock(&LOCK_manager); if (manager_thread_in_use) { - DBUG_PRINT("quit",("killing manager thread: %lx",manager_thread)); + DBUG_PRINT("quit",("killing manager thread: 0x%lx",manager_thread)); (void) pthread_cond_signal(&COND_manager); } (void) pthread_mutex_unlock(&LOCK_manager); /* kill connection thread */ #if !defined(__WIN__) && !defined(__EMX__) && !defined(OS2) && !defined(__NETWARE__) - DBUG_PRINT("quit",("waiting for select thread: %lx",select_thread)); + DBUG_PRINT("quit",("waiting for select thread: 0x%lx",select_thread)); (void) pthread_mutex_lock(&LOCK_thread_count); while (select_thread_in_use) @@ -589,7 +600,7 @@ static void close_connections(void) /* Abort listening to new connections */ DBUG_PRINT("quit",("Closing sockets")); - if ( !opt_disable_networking ) + if (!opt_disable_networking ) { if (ip_sock != INVALID_SOCKET) { @@ -602,7 +613,7 @@ static void 
close_connections(void) if (hPipe != INVALID_HANDLE_VALUE && opt_enable_named_pipe) { HANDLE temp; - DBUG_PRINT( "quit", ("Closing named pipes") ); + DBUG_PRINT("quit", ("Closing named pipes") ); /* Create connection to the handle named pipe handler to break the loop */ if ((temp = CreateFile(pipe_name, @@ -644,7 +655,7 @@ static void close_connections(void) { DBUG_PRINT("quit",("Informing thread %ld that it's time to die", tmp->thread_id)); - tmp->killed=1; + tmp->killed= THD::KILL_CONNECTION; if (tmp->mysys_var) { tmp->mysys_var->abort=1; @@ -777,7 +788,7 @@ void kill_mysql(void) #endif #endif #elif defined(OS2) - pthread_cond_signal( &eventShutdown); // post semaphore + pthread_cond_signal(&eventShutdown); // post semaphore #elif defined(HAVE_PTHREAD_KILL) if (pthread_kill(signal_thread, MYSQL_KILL_SIGNAL)) { @@ -836,9 +847,10 @@ static void __cdecl kill_server(int sig_ptr) unireg_abort(1); /* purecov: inspected */ else unireg_end(); + #ifdef __NETWARE__ - if (!event_flag) - pthread_join(select_thread, NULL); // wait for main thread + if(!event_flag) + pthread_join(select_thread, NULL); // wait for main thread #endif /* __NETWARE__ */ pthread_exit(0); /* purecov: deadcode */ @@ -928,7 +940,6 @@ void clean_up(bool print_message) mysql_log.cleanup(); mysql_slow_log.cleanup(); - mysql_update_log.cleanup(); mysql_bin_log.cleanup(); #ifdef HAVE_REPLICATION @@ -1000,7 +1011,9 @@ void clean_up(bool print_message) if (!opt_bootstrap) (void) my_delete(pidfile_name,MYF(0)); // This may not always exist #endif - x_free((gptr) my_errmsg[ERRMAPP]); /* Free messages */ + finish_client_errs(); + const char **errmsgs= my_error_unregister(ER_ERROR_FIRST, ER_ERROR_LAST); + x_free((gptr) errmsgs); /* Free messages */ DBUG_PRINT("quit", ("Error messages freed")); /* Tell main we are ready */ (void) pthread_mutex_lock(&LOCK_thread_count); @@ -1197,12 +1210,13 @@ static void server_init(void) DBUG_ENTER("server_init"); #ifdef __WIN__ - if ( !opt_disable_networking ) + if (!opt_disable_networking) { WSADATA WsaData; if (SOCKET_ERROR == WSAStartup (0x0101, &WsaData)) { - my_message(0,"WSAStartup Failed\n",MYF(0)); + /* errors are not read yet, so we use test here */ + my_message(ER_WSAS_FAILED, "WSAStartup Failed", MYF(0)); unireg_abort(1); } } @@ -1281,7 +1295,7 @@ static void server_init(void) sql_perror("Can't start server : Set security descriptor"); unireg_abort(1); } - saPipeSecurity.nLength = sizeof( SECURITY_ATTRIBUTES ); + saPipeSecurity.nLength = sizeof(SECURITY_ATTRIBUTES); saPipeSecurity.lpSecurityDescriptor = &sdPipeDescriptor; saPipeSecurity.bInheritHandle = FALSE; if ((hPipe= CreateNamedPipe(pipe_name, @@ -1301,9 +1315,9 @@ static void server_init(void) FORMAT_MESSAGE_FROM_SYSTEM, NULL, error, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPTSTR) &lpMsgBuf, 0, NULL ); - MessageBox( NULL, (LPTSTR) lpMsgBuf, "Error from CreateNamedPipe", - MB_OK|MB_ICONINFORMATION ); - LocalFree( lpMsgBuf ); + MessageBox(NULL, (LPTSTR) lpMsgBuf, "Error from CreateNamedPipe", + MB_OK|MB_ICONINFORMATION); + LocalFree(lpMsgBuf); unireg_abort(1); } } @@ -1364,8 +1378,9 @@ void yyerror(const char *s) /* "parse error" changed into "syntax error" between bison 1.75 and 1.875 */ if (strcmp(s,"parse error") == 0 || strcmp(s,"syntax error") == 0) s=ER(ER_SYNTAX_ERROR); - net_printf(thd,ER_PARSE_ERROR, s, yytext ? (char*) yytext : "", - thd->lex->yylineno); + my_printf_error(ER_PARSE_ERROR, ER(ER_PARSE_ERROR), MYF(0), s, + (yytext ? 
(char*) yytext : ""), + thd->lex->yylineno); } @@ -1396,7 +1411,7 @@ void close_connection(THD *thd, uint errcode, bool lock) if ((vio=thd->net.vio) != 0) { if (errcode) - send_error(thd, errcode, ER(errcode)); /* purecov: inspected */ + net_send_error(thd, errcode, ER(errcode)); /* purecov: inspected */ vio_close(vio); /* vio is freed in delete thd */ } if (lock) @@ -1508,7 +1523,7 @@ extern "C" sig_handler abort_thread(int sig __attribute__((unused))) THD *thd=current_thd; DBUG_ENTER("abort_thread"); if (thd) - thd->killed=1; + thd->killed= THD::KILL_CONNECTION; DBUG_VOID_RETURN; } #endif @@ -1524,7 +1539,7 @@ static void init_signals(void) { int signals[] = {SIGINT,SIGILL,SIGFPE,SIGSEGV,SIGTERM,SIGABRT } ; for (uint i=0 ; i < sizeof(signals)/sizeof(int) ; i++) - signal( signals[i], kill_server) ; + signal(signals[i], kill_server) ; #if defined(__WIN__) signal(SIGBREAK,SIG_IGN); //ignore SIGBREAK for NT #else @@ -1548,7 +1563,7 @@ static void check_data_home(const char *path) // down server event callback void mysql_down_server_cb(void *, void *) { - event_flag = TRUE; + event_flag= TRUE; kill_server(0); } @@ -2110,7 +2125,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) if (!(opt_specialflag & SPECIAL_NO_PRIOR)) my_pthread_attr_setprio(&connection_attrib,INTERRUPT_PRIOR); if (pthread_create(&tmp,&connection_attrib, kill_server_thread, - (void*) sig)) + (void*) &sig)) sql_print_error("Can't create thread to kill server"); #else kill_server((void*) sig); // MIT THREAD has a alarm thread @@ -2122,7 +2137,7 @@ extern "C" void *signal_hand(void *arg __attribute__((unused))) { reload_acl_and_cache((THD*) 0, (REFRESH_LOG | REFRESH_TABLES | REFRESH_FAST | - REFRESH_STATUS | REFRESH_GRANT | + REFRESH_GRANT | REFRESH_THREADS | REFRESH_HOSTS), (TABLE_LIST*) 0, NULL); // Flush logs mysql_print_status((THD*) 0); // Send debug some info @@ -2161,9 +2176,22 @@ extern "C" int my_message_sql(uint error, const char *str, myf MyFlags) { THD *thd; DBUG_ENTER("my_message_sql"); - DBUG_PRINT("error", ("Message: '%s'", str)); + DBUG_PRINT("error", ("error: %u message: '%s'", error, str)); + /* + Put here following assertion when situation with EE_* error codes + will be fixed + DBUG_ASSERT(error != 0); + */ if ((thd= current_thd)) { + if (thd->spcont && + thd->spcont->find_handler(error, MYSQL_ERROR::WARN_LEVEL_ERROR)) + { + DBUG_RETURN(0); + } + + thd->query_error= 1; // needed to catch query errors during replication + /* thd->lex->current_select == 0 if lex structure is not inited (not query command (COM_QUERY)) @@ -2182,6 +2210,9 @@ extern "C" int my_message_sql(uint error, const char *str, myf MyFlags) { NET *net= &thd->net; net->report_error= 1; +#ifndef EMBEDDED_LIBRARY /* TODO query cache in embedded library*/ + query_cache_abort(net); +#endif if (!net->last_error[0]) // Return only first message { strmake(net->last_error, str, sizeof(net->last_error)-1); @@ -2242,10 +2273,10 @@ extern "C" pthread_handler_decl(handle_shutdown,arg) my_thread_init(); // wait semaphore - pthread_cond_wait( &eventShutdown, NULL); + pthread_cond_wait(&eventShutdown, NULL); // close semaphore and kill server - pthread_cond_destroy( &eventShutdown); + pthread_cond_destroy(&eventShutdown); /* Exit main loop on main thread, so kill will be done from @@ -2297,7 +2328,7 @@ bool open_log(MYSQL_LOG *log, const char *hostname, } return log->open(opt_name, type, 0, index_file_name, (read_append) ? 
SEQ_READ_APPEND : WRITE_CACHE, - no_auto_events, max_size); + no_auto_events, max_size, 0); } @@ -2385,7 +2416,6 @@ static int init_common_variables(const char *conf_file_name, int argc, before MY_INIT(). So we do it here. */ mysql_log.init_pthread_objects(); - mysql_update_log.init_pthread_objects(); mysql_slow_log.init_pthread_objects(); mysql_bin_log.init_pthread_objects(); @@ -2402,6 +2432,19 @@ static int init_common_variables(const char *conf_file_name, int argc, DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname, server_version, SYSTEM_TYPE,MACHINE_TYPE)); +#ifdef HAVE_LARGE_PAGES + /* Initialize large page size */ + if (opt_large_pages && (opt_large_page_size= my_get_large_page_size())) + { + my_use_large_pages= 1; + my_large_page_size= opt_large_page_size; +#ifdef HAVE_INNOBASE_DB + innobase_use_large_pages= 1; + innobase_large_page_size= opt_large_page_size; +#endif + } +#endif /* HAVE_LARGE_PAGES */ + /* connections and databases needs lots of files */ { uint files, wanted_files; @@ -2523,6 +2566,7 @@ static int init_thread_environment() (void) pthread_mutex_init(&LOCK_rpl_status, MY_MUTEX_INIT_FAST); (void) pthread_cond_init(&COND_rpl_status, NULL); #endif + sp_cache_init(); /* Parameter for threads created for connections */ (void) pthread_attr_init(&connection_attrib); (void) pthread_attr_setdetachstate(&connection_attrib, @@ -2550,7 +2594,7 @@ static void init_ssl() ssl_acceptor_fd= new_VioSSLAcceptorFd(opt_ssl_key, opt_ssl_cert, opt_ssl_ca, opt_ssl_capath, opt_ssl_cipher); - DBUG_PRINT("info",("ssl_acceptor_fd: %lx", (long) ssl_acceptor_fd)); + DBUG_PRINT("info",("ssl_acceptor_fd: 0x%lx", (long) ssl_acceptor_fd)); if (!ssl_acceptor_fd) opt_use_ssl = 0; } @@ -2581,9 +2625,55 @@ static int init_server_components() LOG_NORMAL, 0, 0, 0); if (opt_update_log) { - open_log(&mysql_update_log, glob_hostname, opt_update_logname, "", - NullS, LOG_NEW, 0, 0, 0); - using_update_log=1; + /* + Update log is removed since 5.0. But we still accept the option. + The idea is if the user already uses the binlog and the update log, + we completely ignore any option/variable related to the update log, like + if the update log did not exist. But if the user uses only the update log, + then we translate everything into binlog for him (with warnings). + Implementation of the above : + - If mysqld is started with --log-update and --log-bin, + ignore --log-update (print a warning), push a warning when SQL_LOG_UPDATE + is used, and turn off --sql-bin-update-same. + This will completely ignore SQL_LOG_UPDATE + - If mysqld is started with --log-update only, + change it to --log-bin (with the filename passed to log-update, + plus '-bin') (print a warning), push a warning when SQL_LOG_UPDATE is + used, and turn on --sql-bin-update-same. + This will translate SQL_LOG_UPDATE to SQL_LOG_BIN. + + Note that we tell the user that --sql-bin-update-same is deprecated and + does nothing, and we don't take into account if he used this option or + not; but internally we give this variable a value to have the behaviour we + want (i.e. have SQL_LOG_UPDATE influence SQL_LOG_BIN or not). + As sql-bin-update-same, log-update and log-bin cannot be changed by the + user after starting the server (they are not variables), the user will not + later interfere with the settings we do here. + */ + if (opt_bin_log) + { + opt_sql_bin_update= 0; + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. 
It is replaced by the binary log."); + } + else + { + opt_sql_bin_update= 1; + opt_bin_log= 1; + if (opt_update_logname) + { + // as opt_bin_log==0, no need to free opt_bin_logname + if (!(opt_bin_logname= my_strdup(opt_update_logname, MYF(MY_WME)))) + exit(EXIT_OUT_OF_MEMORY); + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. It is replaced by the binary log. Now starting MySQL \ +with --log-bin='%s' instead.",opt_bin_logname); + } + else + sql_print_error("The update log is no longer supported by MySQL in \ +version 5.0 and above. It is replaced by the binary log. Now starting MySQL \ +with --log-bin instead."); + } } if (opt_slow_log) open_log(&mysql_slow_log, glob_hostname, opt_slow_logname, "-slow.log", @@ -2853,7 +2943,7 @@ int main(int argc, char **argv) if (_cust_check_startup()) { / * _cust_check_startup will report startup failure error * / - exit( 1 ); + exit(1); } #endif @@ -3030,7 +3120,7 @@ we force server id to 2, but this MySQL server will not act as a slave."); handle_connections_methods(); #else #ifdef __WIN__ - if ( !have_tcpip || opt_disable_networking) + if (!have_tcpip || opt_disable_networking) { sql_print_error("TCP/IP unavailable or disabled with --skip-networking; no available interfaces"); unireg_abort(1); @@ -3147,7 +3237,8 @@ default_service_handling(char **argv, const char *servicename, const char *displayname, const char *file_path, - const char *extra_opt) + const char *extra_opt, + const char *account_name) { char path_and_service[FN_REFLEN+FN_REFLEN+32], *pos, *end; end= path_and_service + sizeof(path_and_service)-3; @@ -3166,12 +3257,12 @@ default_service_handling(char **argv, if (Service.got_service_option(argv, "install")) { - Service.Install(1, servicename, displayname, path_and_service); + Service.Install(1, servicename, displayname, path_and_service, account_name); return 0; } if (Service.got_service_option(argv, "install-manual")) { - Service.Install(0, servicename, displayname, path_and_service); + Service.Install(0, servicename, displayname, path_and_service, account_name); return 0; } if (Service.got_service_option(argv, "remove")) @@ -3206,7 +3297,7 @@ int main(int argc, char **argv) if (argc == 2) { if (!default_service_handling(argv, MYSQL_SERVICENAME, MYSQL_SERVICENAME, - file_path, "")) + file_path, "", NULL)) return 0; if (Service.IsService(argv[1])) /* Start an optional service */ { @@ -3225,7 +3316,7 @@ int main(int argc, char **argv) } else if (argc == 3) /* install or remove any optional service */ { - if (!default_service_handling(argv, argv[2], argv[2], file_path, "")) + if (!default_service_handling(argv, argv[2], argv[2], file_path, "", NULL)) return 0; if (Service.IsService(argv[2])) { @@ -3243,15 +3334,39 @@ int main(int argc, char **argv) return 0; } } - else if (argc == 4) + else if (argc >= 4) { - /* - Install an optional service with optional config file - mysqld --install-manual mysqldopt --defaults-file=c:\miguel\my.ini - */ - if (!default_service_handling(argv, argv[2], argv[2], file_path, - argv[3])) - return 0; + const char *defaults_file = "--defaults-file"; + const char *service = "--local-service"; + char extra_opt[FN_REFLEN] = ""; + char *account_name = NULL; + char *option; + int index; + for (index = 3; index < argc; index++) + { + option= argv[index]; + /* + Install an optional service with optional config file + mysqld --install-manual mysqldopt --defaults-file=c:\miguel\my.ini + */ + if (strncmp(option, defaults_file, strlen(defaults_file)) == 0) + { + 
strmov(extra_opt, option); + } + else + /* + Install an optional service as local service + mysqld --install-manual mysqldopt --local-service + */ + if (strncmp(option, service, strlen(service)) == 0) + { + account_name=(char*)malloc(27); + strmov(account_name, "NT AUTHORITY\\LocalService\0"); + } + } + + if (!default_service_handling(argv, argv[2], argv[2], file_path, extra_opt, account_name)) + return 0; } else if (argc == 1 && Service.IsService(MYSQL_SERVICENAME)) { @@ -3392,10 +3507,10 @@ static void create_new_thread(THD *thd) ("Can't create thread to handle request (error %d)", error)); thread_count--; - thd->killed=1; // Safety + thd->killed= THD::KILL_CONNECTION; // Safety (void) pthread_mutex_unlock(&LOCK_thread_count); statistic_increment(aborted_connects,&LOCK_status); - net_printf(thd,ER_CANT_CREATE_THREAD,error); + net_printf_error(thd, ER_CANT_CREATE_THREAD, error); (void) pthread_mutex_lock(&LOCK_thread_count); close_connection(thd,0,0); delete thd; @@ -3666,25 +3781,27 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) while (!abort_loop) { /* wait for named pipe connection */ - fConnected = ConnectNamedPipe( hPipe, NULL ); + fConnected = ConnectNamedPipe(hPipe, NULL); if (abort_loop) break; if (!fConnected) fConnected = GetLastError() == ERROR_PIPE_CONNECTED; if (!fConnected) { - CloseHandle( hPipe ); - if ((hPipe = CreateNamedPipe(pipe_name, - PIPE_ACCESS_DUPLEX, - PIPE_TYPE_BYTE | - PIPE_READMODE_BYTE | - PIPE_WAIT, - PIPE_UNLIMITED_INSTANCES, - (int) global_system_variables.net_buffer_length, - (int) global_system_variables.net_buffer_length, - NMPWAIT_USE_DEFAULT_WAIT, - &saPipeSecurity )) == - INVALID_HANDLE_VALUE ) + CloseHandle(hPipe); + if ((hPipe= CreateNamedPipe(pipe_name, + PIPE_ACCESS_DUPLEX, + PIPE_TYPE_BYTE | + PIPE_READMODE_BYTE | + PIPE_WAIT, + PIPE_UNLIMITED_INSTANCES, + (int) global_system_variables. + net_buffer_length, + (int) global_system_variables. 
+ net_buffer_length, + NMPWAIT_USE_DEFAULT_WAIT, + &saPipeSecurity)) == + INVALID_HANDLE_VALUE) { sql_perror("Can't create new named pipe!"); break; // Abort @@ -3711,8 +3828,8 @@ extern "C" pthread_handler_decl(handle_connections_namedpipes,arg) if (!(thd = new THD)) { - DisconnectNamedPipe( hConnectedPipe ); - CloseHandle( hConnectedPipe ); + DisconnectNamedPipe(hConnectedPipe); + CloseHandle(hConnectedPipe); continue; } if (!(thd->net.vio = vio_new_win32pipe(hConnectedPipe)) || @@ -4007,6 +4124,8 @@ enum options_mysqld OPT_INNODB_LOG_ARCHIVE, OPT_INNODB_FLUSH_LOG_AT_TRX_COMMIT, OPT_INNODB_FLUSH_METHOD, + OPT_INNODB_DOUBLEWRITE, + OPT_INNODB_CHECKSUMS, OPT_INNODB_FAST_SHUTDOWN, OPT_INNODB_FILE_PER_TABLE, OPT_CRASH_BINLOG_INNODB, OPT_INNODB_LOCKS_UNSAFE_FOR_BINLOG, @@ -4047,7 +4166,7 @@ enum options_mysqld OPT_MAX_SEEKS_FOR_KEY, OPT_MAX_TMP_TABLES, OPT_MAX_USER_CONNECTIONS, OPT_MAX_LENGTH_FOR_SORT_DATA, OPT_MAX_WRITE_LOCK_COUNT, OPT_BULK_INSERT_BUFFER_SIZE, - OPT_MAX_ERROR_COUNT, OPT_MYISAM_DATA_POINTER_SIZE, + OPT_MAX_ERROR_COUNT, OPT_MULTI_RANGE_COUNT, OPT_MYISAM_DATA_POINTER_SIZE, OPT_MYISAM_BLOCK_SIZE, OPT_MYISAM_MAX_EXTRA_SORT_FILE_SIZE, OPT_MYISAM_MAX_SORT_FILE_SIZE, OPT_MYISAM_SORT_BUFFER_SIZE, OPT_NET_BUFFER_LENGTH, OPT_NET_RETRY_COUNT, @@ -4102,7 +4221,15 @@ enum options_mysqld OPT_TIME_FORMAT, OPT_DATETIME_FORMAT, OPT_LOG_QUERIES_NOT_USING_INDEXES, - OPT_DEFAULT_TIME_ZONE + OPT_DEFAULT_TIME_ZONE, + OPT_OPTIMIZER_SEARCH_DEPTH, + OPT_OPTIMIZER_PRUNE_LEVEL, + OPT_UPDATABLE_VIEWS_WITH_LIMIT, + OPT_SP_AUTOMATIC_PRIVILEGES, + OPT_AUTO_INCREMENT, OPT_AUTO_INCREMENT_OFFSET, + OPT_ENABLE_LARGE_PAGES, + OPT_TIMED_MUTEXES, + OPT_OLD_STYLE_USER_LIMITS }; @@ -4121,6 +4248,20 @@ struct my_option my_long_options[] = #endif /* HAVE_REPLICATION */ {"ansi", 'a', "Use ANSI SQL syntax instead of MySQL syntax.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, + {"auto-increment-increment", OPT_AUTO_INCREMENT, + "Auto-increment columns are incremented by this", + (gptr*) &global_system_variables.auto_increment_increment, + (gptr*) &max_system_variables.auto_increment_increment, 0, GET_ULONG, + OPT_ARG, 1, 1, 65535, 0, 1, 0 }, + {"auto-increment-offset", OPT_AUTO_INCREMENT_OFFSET, + "Offset added to Auto-increment columns. Used when auto-increment-increment != 1", + (gptr*) &global_system_variables.auto_increment_offset, + (gptr*) &max_system_variables.auto_increment_offset, 0, GET_ULONG, OPT_ARG, + 1, 1, 65535, 0, 1, 0 }, + {"automatic-sp-privileges", OPT_SP_AUTOMATIC_PRIVILEGES, + "Creating and dropping stored procedures alters ACLs. Disable with --skip-automatic-sp-privileges.", + (gptr*) &sp_automatic_privileges, (gptr*) &sp_automatic_privileges, + 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"basedir", 'b', "Path to installation directory. All paths are usually resolved relative to this.", (gptr*) &mysql_home_ptr, (gptr*) &mysql_home_ptr, 0, GET_STR, REQUIRED_ARG, @@ -4251,6 +4392,12 @@ Disable with --skip-bdb (will save memory).", "Set up signals usable for debugging", (gptr*) &opt_debugging, (gptr*) &opt_debugging, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, +#ifdef HAVE_LARGE_PAGES + {"large-pages", OPT_ENABLE_LARGE_PAGES, "Enable support for large pages. 
\ +Disable with --skip-large-pages.", + (gptr*) &opt_large_pages, (gptr*) &opt_large_pages, 0, GET_BOOL, NO_ARG, 0, 0, 0, + 0, 0, 0}, +#endif {"init-connect", OPT_INIT_CONNECT, "Command(s) that are executed for each new connection", (gptr*) &opt_init_connect, (gptr*) &opt_init_connect, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -4274,6 +4421,12 @@ Disable with --skip-innodb (will save memory).", "The common part for InnoDB table spaces.", (gptr*) &innobase_data_home_dir, (gptr*) &innobase_data_home_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + {"innodb_doublewrite", OPT_INNODB_DOUBLEWRITE, "Enable InnoDB doublewrite buffer (enabled by default). \ +Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite, + (gptr*) &innobase_use_doublewrite, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + {"innodb_checksums", OPT_INNODB_CHECKSUMS, "Enable InnoDB checksums validation (enabled by default). \ +Disable with --skip-innodb-checksums.", (gptr*) &innobase_use_checksums, + (gptr*) &innobase_use_checksums, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"innodb_fast_shutdown", OPT_INNODB_FAST_SHUTDOWN, "Speeds up server shutdown process.", (gptr*) &innobase_fast_shutdown, (gptr*) &innobase_fast_shutdown, 0, GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0}, @@ -4371,12 +4524,13 @@ Disable with --skip-isam.", (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"log-update", OPT_UPDATE_LOG, - "Log updates to file.# where # is a unique number if not given.", + "The update log is deprecated since version 5.0, is replaced by the binary \ +log and this option justs turns on --log-bin instead.", (gptr*) &opt_update_logname, (gptr*) &opt_update_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, - {"log-warnings", 'W', "Log some non-critical warnings to the error log file. Use this option twice or --log-warnings=2 if you also want 'Aborted connections' warnings.", + {"log-warnings", 'W', "Log some not critical warnings to the log file.", (gptr*) &global_system_variables.log_warnings, - (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, ~0L, + (gptr*) &max_system_variables.log_warnings, 0, GET_ULONG, OPT_ARG, 1, 0, 0, 0, 0, 0}, {"low-priority-updates", OPT_LOW_PRIORITY_UPDATES, "INSERT/DELETE/UPDATE has lower priority than selects.", @@ -4515,6 +4669,10 @@ Disable with --skip-ndbcluster (will save memory).", "Only use one thread (for debugging under Linux).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif + {"old-style-user-limits", OPT_OLD_STYLE_USER_LIMITS, + "Enable old-style user limits (before 5.0.3 user resources were counted per each user+host vs. 
per account)", + (gptr*) &opt_old_style_user_limits, (gptr*) &opt_old_style_user_limits, + 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"pid-file", OPT_PID_FILE, "Pid file used by safe_mysqld.", (gptr*) &pidfile_name_ptr, (gptr*) &pidfile_name_ptr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, @@ -4678,9 +4836,9 @@ replicating a LOAD DATA INFILE command.", 0}, #endif /* HAVE_REPLICATION */ {"sql-bin-update-same", OPT_SQL_BIN_UPDATE_SAME, - "If set, setting SQL_LOG_BIN to a value will automatically set SQL_LOG_UPDATE to the same value and vice versa.", - (gptr*) &opt_sql_bin_update, (gptr*) &opt_sql_bin_update, 0, GET_BOOL, - NO_ARG, 0, 0, 0, 0, 0, 0}, + "The update log is deprecated since version 5.0, is replaced by the binary \ +log and this option does nothing anymore.", + 0, 0, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"sql-mode", OPT_SQL_MODE, "Syntax: sql-mode=option[,option[,option...]] where option can be one of: REAL_AS_FLOAT, PIPES_AS_CONCAT, ANSI_QUOTES, IGNORE_SPACE, ONLY_FULL_GROUP_BY, NO_UNSIGNED_SUBTRACTION.", (gptr*) &sql_mode_str, (gptr*) &sql_mode_str, 0, GET_STR, REQUIRED_ARG, 0, @@ -4700,9 +4858,13 @@ replicating a LOAD DATA INFILE command.", "Using this option will cause most temporary files created to use a small set of names, rather than a unique name for each new file.", (gptr*) &use_temp_pool, (gptr*) &use_temp_pool, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, + {"timed_mutexes", OPT_TIMED_MUTEXES, + "Specify whether to time mutexes (only InnoDB mutexes are currently supported)", + (gptr*) &timed_mutexes, (gptr*) &timed_mutexes, 0, GET_BOOL, NO_ARG, 0, + 0, 0, 0, 0, 0}, {"tmpdir", 't', "Path for temporary files. Several paths may be specified, separated by a " -#if defined( __WIN__) || defined(OS2) || defined(__NETWARE__) +#if defined(__WIN__) || defined(OS2) || defined(__NETWARE__) "semicolon (;)" #else "colon (:)" @@ -5033,6 +5195,11 @@ The minimum value for this variable is 4096.", "After this many write locks, allow some read locks to run in between.", (gptr*) &max_write_lock_count, (gptr*) &max_write_lock_count, 0, GET_ULONG, REQUIRED_ARG, ~0L, 1, ~0L, 0, 1, 0}, + {"multi_range_count", OPT_MULTI_RANGE_COUNT, + "Number of key ranges to request at once.", + (gptr*) &global_system_variables.multi_range_count, + (gptr*) &max_system_variables.multi_range_count, 0, + GET_ULONG, REQUIRED_ARG, 256, 1, ~0L, 0, 1, 0}, {"myisam_block_size", OPT_MYISAM_BLOCK_SIZE, "Block size to be used for MyISAM index pages.", (gptr*) &opt_myisam_block_size, @@ -5090,6 +5257,16 @@ The minimum value for this variable is 4096.", "If this is not 0, then mysqld will use this value to reserve file descriptors to use with setrlimit(). If this value is 0 then mysqld will reserve max_connections*5 or max_connections + table_cache*2 (whichever is larger) number of files.", (gptr*) &open_files_limit, (gptr*) &open_files_limit, 0, GET_ULONG, REQUIRED_ARG, 0, 0, OS_FILE_LIMIT, 0, 1, 0}, + {"optimizer_prune_level", OPT_OPTIMIZER_PRUNE_LEVEL, + "Controls the heuristic(s) applied during query optimization to prune less-promising partial plans from the optimizer search space. Meaning: 0 - do not apply any heuristic, thus perform exhaustive search; 1 - prune plans based on number of retrieved rows.", + (gptr*) &global_system_variables.optimizer_prune_level, + (gptr*) &max_system_variables.optimizer_prune_level, + 0, GET_ULONG, OPT_ARG, 1, 0, 1, 0, 1, 0}, + {"optimizer_search_depth", OPT_OPTIMIZER_SEARCH_DEPTH, + "Maximum depth of search performed by the query optimizer. 
Values larger than the number of relations in a query result in better query plans, but take longer to compile a query. Smaller values than the number of tables in a relation result in faster optimization, but may produce very bad query plans. If set to 0, the system will automatically pick a reasonable value; if set to MAX_TABLES+2, the optimizer will switch to the original find_best (used for testing/comparison).", + (gptr*) &global_system_variables.optimizer_search_depth, + (gptr*) &max_system_variables.optimizer_search_depth, + 0, GET_ULONG, OPT_ARG, MAX_TABLES+1, 0, MAX_TABLES+2, 0, 1, 0}, {"preload_buffer_size", OPT_PRELOAD_BUFFER_SIZE, "The size of the buffer that is allocated when preloading indexes", (gptr*) &global_system_variables.preload_buff_size, @@ -5240,6 +5417,11 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.trans_prealloc_size, (gptr*) &max_system_variables.trans_prealloc_size, 0, GET_ULONG, REQUIRED_ARG, TRANS_ALLOC_PREALLOC_SIZE, 1024, ~0L, 0, 1024, 0}, + {"updatable_views_with_limit", OPT_UPDATABLE_VIEWS_WITH_LIMIT, + "1 = YES = Don't issue an error message (warning only) if a VIEW without presence of a key of the underlying table is used in queries with a LIMIT clause for updating. 0 = NO = Prohibit update of a VIEW, which does not contain a key of the underlying table and the query uses a LIMIT clause (usually get from GUI tools).", + (gptr*) &global_system_variables.updatable_views_with_limit, + (gptr*) &max_system_variables.updatable_views_with_limit, + 0, GET_ULONG, REQUIRED_ARG, 1, 0, 1, 0, 1, 0}, {"wait_timeout", OPT_WAIT_TIMEOUT, "The number of seconds the server waits for activity on a connection before closing it.", (gptr*) &global_system_variables.net_wait_timeout, @@ -5255,118 +5437,140 @@ struct show_var_st status_vars[]= { {"Aborted_connects", (char*) &aborted_connects, SHOW_LONG}, {"Binlog_cache_disk_use", (char*) &binlog_cache_disk_use, SHOW_LONG}, {"Binlog_cache_use", (char*) &binlog_cache_use, SHOW_LONG}, - {"Bytes_received", (char*) &bytes_received, SHOW_LONG}, - {"Bytes_sent", (char*) &bytes_sent, SHOW_LONG}, - {"Com_admin_commands", (char*) &com_other, SHOW_LONG}, - {"Com_alter_db", (char*) (com_stat+(uint) SQLCOM_ALTER_DB),SHOW_LONG}, - {"Com_alter_table", (char*) (com_stat+(uint) SQLCOM_ALTER_TABLE),SHOW_LONG}, - {"Com_analyze", (char*) (com_stat+(uint) SQLCOM_ANALYZE),SHOW_LONG}, - {"Com_backup_table", (char*) (com_stat+(uint) SQLCOM_BACKUP_TABLE),SHOW_LONG}, - {"Com_begin", (char*) (com_stat+(uint) SQLCOM_BEGIN),SHOW_LONG}, - {"Com_change_db", (char*) (com_stat+(uint) SQLCOM_CHANGE_DB),SHOW_LONG}, - {"Com_change_master", (char*) (com_stat+(uint) SQLCOM_CHANGE_MASTER),SHOW_LONG}, - {"Com_check", (char*) (com_stat+(uint) SQLCOM_CHECK),SHOW_LONG}, - {"Com_checksum", (char*) (com_stat+(uint) SQLCOM_CHECKSUM),SHOW_LONG}, - {"Com_commit", (char*) (com_stat+(uint) SQLCOM_COMMIT),SHOW_LONG}, - {"Com_create_db", (char*) (com_stat+(uint) SQLCOM_CREATE_DB),SHOW_LONG}, - {"Com_create_function", (char*) (com_stat+(uint) SQLCOM_CREATE_FUNCTION),SHOW_LONG}, - {"Com_create_index", (char*) (com_stat+(uint) SQLCOM_CREATE_INDEX),SHOW_LONG}, - {"Com_create_table", (char*) (com_stat+(uint) SQLCOM_CREATE_TABLE),SHOW_LONG}, - {"Com_dealloc_sql", (char*) (com_stat+(uint) - SQLCOM_DEALLOCATE_PREPARE), SHOW_LONG}, - {"Com_delete", (char*) (com_stat+(uint) SQLCOM_DELETE),SHOW_LONG}, - {"Com_delete_multi", (char*) (com_stat+(uint) SQLCOM_DELETE_MULTI),SHOW_LONG}, - {"Com_do", (char*) (com_stat+(uint) SQLCOM_DO),SHOW_LONG}, - 
{"Com_drop_db", (char*) (com_stat+(uint) SQLCOM_DROP_DB),SHOW_LONG}, - {"Com_drop_function", (char*) (com_stat+(uint) SQLCOM_DROP_FUNCTION),SHOW_LONG}, - {"Com_drop_index", (char*) (com_stat+(uint) SQLCOM_DROP_INDEX),SHOW_LONG}, - {"Com_drop_table", (char*) (com_stat+(uint) SQLCOM_DROP_TABLE),SHOW_LONG}, - {"Com_drop_user", (char*) (com_stat+(uint) SQLCOM_DROP_USER),SHOW_LONG}, - {"Com_execute_sql", (char*) (com_stat+(uint) SQLCOM_EXECUTE), - SHOW_LONG}, - {"Com_flush", (char*) (com_stat+(uint) SQLCOM_FLUSH),SHOW_LONG}, - {"Com_grant", (char*) (com_stat+(uint) SQLCOM_GRANT),SHOW_LONG}, - {"Com_ha_close", (char*) (com_stat+(uint) SQLCOM_HA_CLOSE),SHOW_LONG}, - {"Com_ha_open", (char*) (com_stat+(uint) SQLCOM_HA_OPEN),SHOW_LONG}, - {"Com_ha_read", (char*) (com_stat+(uint) SQLCOM_HA_READ),SHOW_LONG}, - {"Com_help", (char*) (com_stat+(uint) SQLCOM_HELP),SHOW_LONG}, - {"Com_insert", (char*) (com_stat+(uint) SQLCOM_INSERT),SHOW_LONG}, - {"Com_insert_select", (char*) (com_stat+(uint) SQLCOM_INSERT_SELECT),SHOW_LONG}, - {"Com_kill", (char*) (com_stat+(uint) SQLCOM_KILL),SHOW_LONG}, - {"Com_load", (char*) (com_stat+(uint) SQLCOM_LOAD),SHOW_LONG}, - {"Com_load_master_data", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_DATA),SHOW_LONG}, - {"Com_load_master_table", (char*) (com_stat+(uint) SQLCOM_LOAD_MASTER_TABLE),SHOW_LONG}, - {"Com_lock_tables", (char*) (com_stat+(uint) SQLCOM_LOCK_TABLES),SHOW_LONG}, - {"Com_optimize", (char*) (com_stat+(uint) SQLCOM_OPTIMIZE),SHOW_LONG}, - {"Com_preload_keys", (char*) (com_stat+(uint) SQLCOM_PRELOAD_KEYS),SHOW_LONG}, - {"Com_prepare_sql", (char*) (com_stat+(uint) SQLCOM_PREPARE), - SHOW_LONG}, - {"Com_purge", (char*) (com_stat+(uint) SQLCOM_PURGE),SHOW_LONG}, - {"Com_purge_before_date", (char*) (com_stat+(uint) SQLCOM_PURGE_BEFORE),SHOW_LONG}, - {"Com_rename_table", (char*) (com_stat+(uint) SQLCOM_RENAME_TABLE),SHOW_LONG}, - {"Com_repair", (char*) (com_stat+(uint) SQLCOM_REPAIR),SHOW_LONG}, - {"Com_replace", (char*) (com_stat+(uint) SQLCOM_REPLACE),SHOW_LONG}, - {"Com_replace_select", (char*) (com_stat+(uint) SQLCOM_REPLACE_SELECT),SHOW_LONG}, - {"Com_reset", (char*) (com_stat+(uint) SQLCOM_RESET),SHOW_LONG}, - {"Com_restore_table", (char*) (com_stat+(uint) SQLCOM_RESTORE_TABLE),SHOW_LONG}, - {"Com_revoke", (char*) (com_stat+(uint) SQLCOM_REVOKE),SHOW_LONG}, - {"Com_revoke_all", (char*) (com_stat+(uint) SQLCOM_REVOKE_ALL),SHOW_LONG}, - {"Com_rollback", (char*) (com_stat+(uint) SQLCOM_ROLLBACK),SHOW_LONG}, - {"Com_savepoint", (char*) (com_stat+(uint) SQLCOM_SAVEPOINT),SHOW_LONG}, - {"Com_select", (char*) (com_stat+(uint) SQLCOM_SELECT),SHOW_LONG}, - {"Com_set_option", (char*) (com_stat+(uint) SQLCOM_SET_OPTION),SHOW_LONG}, - {"Com_show_binlog_events", (char*) (com_stat+(uint) SQLCOM_SHOW_BINLOG_EVENTS),SHOW_LONG}, - {"Com_show_binlogs", (char*) (com_stat+(uint) SQLCOM_SHOW_BINLOGS),SHOW_LONG}, - {"Com_show_charsets", (char*) (com_stat+(uint) SQLCOM_SHOW_CHARSETS),SHOW_LONG}, - {"Com_show_collations", (char*) (com_stat+(uint) SQLCOM_SHOW_COLLATIONS),SHOW_LONG}, - {"Com_show_column_types", (char*) (com_stat+(uint) SQLCOM_SHOW_COLUMN_TYPES),SHOW_LONG}, - {"Com_show_create_db", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE_DB),SHOW_LONG}, - {"Com_show_create_table", (char*) (com_stat+(uint) SQLCOM_SHOW_CREATE),SHOW_LONG}, - {"Com_show_databases", (char*) (com_stat+(uint) SQLCOM_SHOW_DATABASES),SHOW_LONG}, - {"Com_show_errors", (char*) (com_stat+(uint) SQLCOM_SHOW_ERRORS),SHOW_LONG}, - {"Com_show_fields", (char*) (com_stat+(uint) SQLCOM_SHOW_FIELDS),SHOW_LONG}, - 
{"Com_show_grants", (char*) (com_stat+(uint) SQLCOM_SHOW_GRANTS),SHOW_LONG}, - {"Com_show_innodb_status", (char*) (com_stat+(uint) SQLCOM_SHOW_INNODB_STATUS),SHOW_LONG}, - {"Com_show_keys", (char*) (com_stat+(uint) SQLCOM_SHOW_KEYS),SHOW_LONG}, - {"Com_show_logs", (char*) (com_stat+(uint) SQLCOM_SHOW_LOGS),SHOW_LONG}, - {"Com_show_master_status", (char*) (com_stat+(uint) SQLCOM_SHOW_MASTER_STAT),SHOW_LONG}, - {"Com_show_new_master", (char*) (com_stat+(uint) SQLCOM_SHOW_NEW_MASTER),SHOW_LONG}, - {"Com_show_open_tables", (char*) (com_stat+(uint) SQLCOM_SHOW_OPEN_TABLES),SHOW_LONG}, - {"Com_show_privileges", (char*) (com_stat+(uint) SQLCOM_SHOW_PRIVILEGES),SHOW_LONG}, - {"Com_show_processlist", (char*) (com_stat+(uint) SQLCOM_SHOW_PROCESSLIST),SHOW_LONG}, - {"Com_show_slave_hosts", (char*) (com_stat+(uint) SQLCOM_SHOW_SLAVE_HOSTS),SHOW_LONG}, - {"Com_show_slave_status", (char*) (com_stat+(uint) SQLCOM_SHOW_SLAVE_STAT),SHOW_LONG}, - {"Com_show_status", (char*) (com_stat+(uint) SQLCOM_SHOW_STATUS),SHOW_LONG}, - {"Com_show_storage_engines", (char*) (com_stat+(uint) SQLCOM_SHOW_STORAGE_ENGINES),SHOW_LONG}, - {"Com_show_tables", (char*) (com_stat+(uint) SQLCOM_SHOW_TABLES),SHOW_LONG}, - {"Com_show_variables", (char*) (com_stat+(uint) SQLCOM_SHOW_VARIABLES),SHOW_LONG}, - {"Com_show_warnings", (char*) (com_stat+(uint) SQLCOM_SHOW_WARNS),SHOW_LONG}, - {"Com_slave_start", (char*) (com_stat+(uint) SQLCOM_SLAVE_START),SHOW_LONG}, - {"Com_slave_stop", (char*) (com_stat+(uint) SQLCOM_SLAVE_STOP),SHOW_LONG}, - {"Com_truncate", (char*) (com_stat+(uint) SQLCOM_TRUNCATE),SHOW_LONG}, - {"Com_unlock_tables", (char*) (com_stat+(uint) SQLCOM_UNLOCK_TABLES),SHOW_LONG}, - {"Com_update", (char*) (com_stat+(uint) SQLCOM_UPDATE),SHOW_LONG}, - {"Com_update_multi", (char*) (com_stat+(uint) SQLCOM_UPDATE_MULTI),SHOW_LONG}, + {"Bytes_received", (char*) offsetof(STATUS_VAR, bytes_received), + SHOW_LONG_STATUS}, + {"Bytes_sent", (char*) offsetof(STATUS_VAR, bytes_sent), + SHOW_LONG_STATUS}, + {"Com_admin_commands", (char*) offsetof(STATUS_VAR, com_other), + SHOW_LONG_STATUS}, + {"Com_alter_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_DB]), SHOW_LONG_STATUS}, + {"Com_alter_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ALTER_TABLE]), SHOW_LONG_STATUS}, + {"Com_analyze", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ANALYZE]), SHOW_LONG_STATUS}, + {"Com_backup_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BACKUP_TABLE]), SHOW_LONG_STATUS}, + {"Com_begin", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_BEGIN]), SHOW_LONG_STATUS}, + {"Com_change_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_DB]), SHOW_LONG_STATUS}, + {"Com_change_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHANGE_MASTER]), SHOW_LONG_STATUS}, + {"Com_check", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECK]), SHOW_LONG_STATUS}, + {"Com_checksum", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CHECKSUM]), SHOW_LONG_STATUS}, + {"Com_commit", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_COMMIT]), SHOW_LONG_STATUS}, + {"Com_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_DB]), SHOW_LONG_STATUS}, + {"Com_create_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_FUNCTION]), SHOW_LONG_STATUS}, + {"Com_create_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_INDEX]), SHOW_LONG_STATUS}, + {"Com_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_CREATE_TABLE]), SHOW_LONG_STATUS}, + 
{"Com_dealloc_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DEALLOCATE_PREPARE]), SHOW_LONG_STATUS}, + {"Com_delete", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE]), SHOW_LONG_STATUS}, + {"Com_delete_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DELETE_MULTI]), SHOW_LONG_STATUS}, + {"Com_do", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DO]), SHOW_LONG_STATUS}, + {"Com_drop_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_DB]), SHOW_LONG_STATUS}, + {"Com_drop_function", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_FUNCTION]), SHOW_LONG_STATUS}, + {"Com_drop_index", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_INDEX]), SHOW_LONG_STATUS}, + {"Com_drop_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_TABLE]), SHOW_LONG_STATUS}, + {"Com_drop_user", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_DROP_USER]), SHOW_LONG_STATUS}, + {"Com_execute_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_EXECUTE]), SHOW_LONG_STATUS}, + {"Com_flush", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_FLUSH]), SHOW_LONG_STATUS}, + {"Com_grant", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_GRANT]), SHOW_LONG_STATUS}, + {"Com_ha_close", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_CLOSE]), SHOW_LONG_STATUS}, + {"Com_ha_open", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_OPEN]), SHOW_LONG_STATUS}, + {"Com_ha_read", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HA_READ]), SHOW_LONG_STATUS}, + {"Com_help", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_HELP]), SHOW_LONG_STATUS}, + {"Com_insert", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT]), SHOW_LONG_STATUS}, + {"Com_insert_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_INSERT_SELECT]), SHOW_LONG_STATUS}, + {"Com_kill", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_KILL]), SHOW_LONG_STATUS}, + {"Com_load", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD]), SHOW_LONG_STATUS}, + {"Com_load_master_data", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_DATA]), SHOW_LONG_STATUS}, + {"Com_load_master_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOAD_MASTER_TABLE]), SHOW_LONG_STATUS}, + {"Com_lock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_LOCK_TABLES]), SHOW_LONG_STATUS}, + {"Com_optimize", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_OPTIMIZE]), SHOW_LONG_STATUS}, + {"Com_preload_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PRELOAD_KEYS]), SHOW_LONG_STATUS}, + {"Com_prepare_sql", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PREPARE]), SHOW_LONG_STATUS}, + {"Com_purge", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE]), SHOW_LONG_STATUS}, + {"Com_purge_before_date", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_PURGE_BEFORE]), SHOW_LONG_STATUS}, + {"Com_rename_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RENAME_TABLE]), SHOW_LONG_STATUS}, + {"Com_repair", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPAIR]), SHOW_LONG_STATUS}, + {"Com_replace", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE]), SHOW_LONG_STATUS}, + {"Com_replace_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REPLACE_SELECT]), SHOW_LONG_STATUS}, + {"Com_reset", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESET]), SHOW_LONG_STATUS}, + {"Com_restore_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_RESTORE_TABLE]), SHOW_LONG_STATUS}, + {"Com_revoke", 
(char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE]), SHOW_LONG_STATUS}, + {"Com_revoke_all", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_REVOKE_ALL]), SHOW_LONG_STATUS}, + {"Com_rollback", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_ROLLBACK]), SHOW_LONG_STATUS}, + {"Com_savepoint", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SAVEPOINT]), SHOW_LONG_STATUS}, + {"Com_select", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SELECT]), SHOW_LONG_STATUS}, + {"Com_set_option", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SET_OPTION]), SHOW_LONG_STATUS}, + {"Com_show_binlogs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOGS]), SHOW_LONG_STATUS}, + {"Com_show_binlog_events", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_BINLOG_EVENTS]), SHOW_LONG_STATUS}, + {"Com_show_charsets", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CHARSETS]), SHOW_LONG_STATUS}, + {"Com_show_collations", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLLATIONS]), SHOW_LONG_STATUS}, + {"Com_show_column_types", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_COLUMN_TYPES]), SHOW_LONG_STATUS}, + {"Com_show_create_table", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE]), SHOW_LONG_STATUS}, + {"Com_show_create_db", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_CREATE_DB]), SHOW_LONG_STATUS}, + {"Com_show_databases", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_DATABASES]), SHOW_LONG_STATUS}, + {"Com_show_errors", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_ERRORS]), SHOW_LONG_STATUS}, + {"Com_show_fields", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_FIELDS]), SHOW_LONG_STATUS}, + {"Com_show_grants", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_GRANTS]), SHOW_LONG_STATUS}, + {"Com_show_innodb_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_INNODB_STATUS]), SHOW_LONG_STATUS}, + {"Com_show_keys", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_KEYS]), SHOW_LONG_STATUS}, + {"Com_show_logs", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_LOGS]), SHOW_LONG_STATUS}, + {"Com_show_master_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_MASTER_STAT]), SHOW_LONG_STATUS}, + {"Com_show_new_master", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_NEW_MASTER]), SHOW_LONG_STATUS}, + {"Com_show_open_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_OPEN_TABLES]), SHOW_LONG_STATUS}, + {"Com_show_privileges", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PRIVILEGES]), SHOW_LONG_STATUS}, + {"Com_show_processlist", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_PROCESSLIST]), SHOW_LONG_STATUS}, + {"Com_show_slave_hosts", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_HOSTS]), SHOW_LONG_STATUS}, + {"Com_show_slave_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_SLAVE_STAT]), SHOW_LONG_STATUS}, + {"Com_show_status", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STATUS]), SHOW_LONG_STATUS}, + {"Com_show_storage_engines", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_STORAGE_ENGINES]), SHOW_LONG_STATUS}, + {"Com_show_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_TABLES]), SHOW_LONG_STATUS}, + {"Com_show_variables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_VARIABLES]), SHOW_LONG_STATUS}, + {"Com_show_warnings", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SHOW_WARNS]), 
SHOW_LONG_STATUS}, + {"Com_slave_start", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_START]), SHOW_LONG_STATUS}, + {"Com_slave_stop", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_SLAVE_STOP]), SHOW_LONG_STATUS}, + {"Com_truncate", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_TRUNCATE]), SHOW_LONG_STATUS}, + {"Com_unlock_tables", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UNLOCK_TABLES]), SHOW_LONG_STATUS}, + {"Com_update", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE]), SHOW_LONG_STATUS}, + {"Com_update_multi", (char*) offsetof(STATUS_VAR, com_stat[(uint) SQLCOM_UPDATE_MULTI]), SHOW_LONG_STATUS}, {"Connections", (char*) &thread_id, SHOW_LONG_CONST}, - {"Created_tmp_disk_tables", (char*) &created_tmp_disk_tables,SHOW_LONG}, + {"Created_tmp_disk_tables", (char*) offsetof(STATUS_VAR, + created_tmp_disk_tables), + SHOW_LONG_STATUS}, {"Created_tmp_files", (char*) &my_tmp_file_created, SHOW_LONG}, - {"Created_tmp_tables", (char*) &created_tmp_tables, SHOW_LONG}, + {"Created_tmp_tables", (char*) offsetof(STATUS_VAR, + created_tmp_tables), + SHOW_LONG_STATUS}, {"Delayed_errors", (char*) &delayed_insert_errors, SHOW_LONG}, {"Delayed_insert_threads", (char*) &delayed_insert_threads, SHOW_LONG_CONST}, {"Delayed_writes", (char*) &delayed_insert_writes, SHOW_LONG}, {"Flush_commands", (char*) &refresh_version, SHOW_LONG_CONST}, - {"Handler_commit", (char*) &ha_commit_count, SHOW_LONG}, - {"Handler_delete", (char*) &ha_delete_count, SHOW_LONG}, + {"Handler_commit", (char*) offsetof(STATUS_VAR, ha_commit_count), + SHOW_LONG_STATUS}, + {"Handler_delete", (char*) offsetof(STATUS_VAR, ha_delete_count), + SHOW_LONG_STATUS}, {"Handler_discover", (char*) &ha_discover_count, SHOW_LONG}, - {"Handler_read_first", (char*) &ha_read_first_count, SHOW_LONG}, - {"Handler_read_key", (char*) &ha_read_key_count, SHOW_LONG}, - {"Handler_read_next", (char*) &ha_read_next_count, SHOW_LONG}, - {"Handler_read_prev", (char*) &ha_read_prev_count, SHOW_LONG}, - {"Handler_read_rnd", (char*) &ha_read_rnd_count, SHOW_LONG}, - {"Handler_read_rnd_next", (char*) &ha_read_rnd_next_count, SHOW_LONG}, - {"Handler_rollback", (char*) &ha_rollback_count, SHOW_LONG}, - {"Handler_update", (char*) &ha_update_count, SHOW_LONG}, - {"Handler_write", (char*) &ha_write_count, SHOW_LONG}, + {"Handler_read_first", (char*) offsetof(STATUS_VAR, + ha_read_first_count), + SHOW_LONG_STATUS}, + {"Handler_read_key", (char*) offsetof(STATUS_VAR, ha_read_key_count), + SHOW_LONG_STATUS}, + {"Handler_read_next", (char*) offsetof(STATUS_VAR, + ha_read_next_count), + SHOW_LONG_STATUS}, + {"Handler_read_prev", (char*) offsetof(STATUS_VAR, + ha_read_prev_count), + SHOW_LONG_STATUS}, + {"Handler_read_rnd", (char*) offsetof(STATUS_VAR, ha_read_rnd_count), + SHOW_LONG_STATUS}, + {"Handler_read_rnd_next", (char*) offsetof(STATUS_VAR, + ha_read_rnd_next_count), + SHOW_LONG_STATUS}, + {"Handler_rollback", (char*) offsetof(STATUS_VAR, ha_rollback_count), + SHOW_LONG_STATUS}, + {"Handler_update", (char*) offsetof(STATUS_VAR, ha_update_count), + SHOW_LONG_STATUS}, + {"Handler_write", (char*) offsetof(STATUS_VAR, ha_write_count), + SHOW_LONG_STATUS}, +#ifdef HAVE_INNOBASE_DB + {"Innodb_", (char*) &innodb_status_variables, SHOW_VARS}, +#endif /*HAVE_INNOBASE_DB*/ {"Key_blocks_not_flushed", (char*) &dflt_key_cache_var.global_blocks_changed, SHOW_KEY_CACHE_LONG}, {"Key_blocks_unused", (char*) &dflt_key_cache_var.blocks_unused, @@ -5381,12 +5585,14 @@ struct show_var_st status_vars[]= { SHOW_KEY_CACHE_LONG}, {"Key_writes", 
(char*) &dflt_key_cache_var.global_cache_write, SHOW_KEY_CACHE_LONG}, + {"Last_query_cost", (char*) &last_query_cost, SHOW_DOUBLE}, {"Max_used_connections", (char*) &max_used_connections, SHOW_LONG}, {"Not_flushed_delayed_rows", (char*) &delayed_rows_in_use, SHOW_LONG_CONST}, {"Open_files", (char*) &my_file_opened, SHOW_LONG_CONST}, {"Open_streams", (char*) &my_stream_opened, SHOW_LONG_CONST}, {"Open_tables", (char*) 0, SHOW_OPENTABLES}, - {"Opened_tables", (char*) &opened_tables, SHOW_LONG}, + {"Opened_tables", (char*) offsetof(STATUS_VAR, opened_tables), + SHOW_LONG_STATUS}, #ifdef HAVE_QUERY_CACHE {"Qcache_free_blocks", (char*) &query_cache.free_memory_blocks, SHOW_LONG_CONST}, @@ -5402,19 +5608,36 @@ struct show_var_st status_vars[]= { #endif /*HAVE_QUERY_CACHE*/ {"Questions", (char*) 0, SHOW_QUESTION}, {"Rpl_status", (char*) 0, SHOW_RPL_STATUS}, - {"Select_full_join", (char*) &select_full_join_count, SHOW_LONG}, - {"Select_full_range_join", (char*) &select_full_range_join_count, SHOW_LONG}, - {"Select_range", (char*) &select_range_count, SHOW_LONG}, - {"Select_range_check", (char*) &select_range_check_count, SHOW_LONG}, - {"Select_scan", (char*) &select_scan_count, SHOW_LONG}, + {"Select_full_join", (char*) offsetof(STATUS_VAR, + select_full_join_count), + SHOW_LONG_STATUS}, + {"Select_full_range_join", (char*) offsetof(STATUS_VAR, + select_full_range_join_count), + SHOW_LONG_STATUS}, + {"Select_range", (char*) offsetof(STATUS_VAR, + select_range_count), + SHOW_LONG_STATUS}, + {"Select_range_check", (char*) offsetof(STATUS_VAR, + select_range_check_count), + SHOW_LONG_STATUS}, + {"Select_scan", (char*) offsetof(STATUS_VAR, select_scan_count), + SHOW_LONG_STATUS}, {"Slave_open_temp_tables", (char*) &slave_open_temp_tables, SHOW_LONG}, {"Slave_running", (char*) 0, SHOW_SLAVE_RUNNING}, {"Slow_launch_threads", (char*) &slow_launch_threads, SHOW_LONG}, - {"Slow_queries", (char*) &long_query_count, SHOW_LONG}, - {"Sort_merge_passes", (char*) &filesort_merge_passes, SHOW_LONG}, - {"Sort_range", (char*) &filesort_range_count, SHOW_LONG}, - {"Sort_rows", (char*) &filesort_rows, SHOW_LONG}, - {"Sort_scan", (char*) &filesort_scan_count, SHOW_LONG}, + {"Slow_queries", (char*) offsetof(STATUS_VAR, long_query_count), + SHOW_LONG_STATUS}, + {"Sort_merge_passes", (char*) offsetof(STATUS_VAR, + filesort_merge_passes), + SHOW_LONG_STATUS}, + {"Sort_range", (char*) offsetof(STATUS_VAR, + filesort_range_count), + SHOW_LONG_STATUS}, + {"Sort_rows", (char*) offsetof(STATUS_VAR, filesort_rows), + SHOW_LONG_STATUS}, + {"Sort_scan", (char*) offsetof(STATUS_VAR, + filesort_scan_count), + SHOW_LONG_STATUS}, #ifdef HAVE_OPENSSL {"Ssl_accept_renegotiates", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE}, {"Ssl_accepts", (char*) 0, SHOW_SSL_CTX_SESS_ACCEPT}, @@ -5545,28 +5768,24 @@ static void mysql_init_variables(void) test_flags= select_errors= dropping_tables= ha_open_options=0; thread_count= thread_running= kill_cached_threads= wake_thread=0; slave_open_temp_tables= 0; - com_other= 0; cached_thread_count= 0; - bytes_sent= bytes_received= 0; opt_endinfo= using_udf_functions= 0; opt_using_transactions= using_update_log= 0; abort_loop= select_thread_in_use= signal_thread_in_use= 0; ready_to_exit= shutdown_in_progress= grant_option= 0; - long_query_count= aborted_threads= aborted_connects= 0; + aborted_threads= aborted_connects= 0; delayed_insert_threads= delayed_insert_writes= delayed_rows_in_use= 0; delayed_insert_errors= thread_created= 0; - filesort_rows= filesort_range_count= filesort_scan_count= 0; - 
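The status_vars[] hunk above (together with the counter resets being removed around this point) moves per-query counters out of file-scope globals: each entry now stores offsetof(STATUS_VAR, member) with the new SHOW_LONG_STATUS type instead of the address of a global, so every connection can keep its own counters and initialization only needs one bzero() of the struct. Below is a minimal standalone sketch of that pattern; StatusVars, ShowVar and show_value() are invented stand-ins for illustration, not the server's definitions.

#include <cstddef>   // offsetof
#include <cstdio>

struct StatusVars                  // stand-in for STATUS_VAR
{
  unsigned long bytes_sent;
  unsigned long select_scan_count;
  unsigned long filesort_rows;
};

struct ShowVar                     // stand-in for a SHOW_LONG_STATUS table entry
{
  const char *name;
  size_t      offset;              // offset into StatusVars, not a pointer
};

static const ShowVar status_table[]= {
  { "Bytes_sent",  offsetof(StatusVars, bytes_sent) },
  { "Select_scan", offsetof(StatusVars, select_scan_count) },
  { "Sort_rows",   offsetof(StatusVars, filesort_rows) },
};

// Resolve one entry against whichever counters block the caller passes in
// (a single connection's block, or an aggregated global one).
static unsigned long show_value(const StatusVars *vars, const ShowVar &v)
{
  return *reinterpret_cast<const unsigned long*>(
             reinterpret_cast<const char*>(vars) + v.offset);
}

int main()
{
  StatusVars per_thread= { 1024, 3, 42 };   // e.g. one connection's counters
  for (const ShowVar &v : status_table)
    std::printf("%s\t%lu\n", v.name, show_value(&per_thread, v));
  return 0;
}

Storing offsets rather than addresses is presumably what lets the same table serve both the per-thread counters and the global_status_var block seen later in this hunk.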
filesort_merge_passes= select_range_check_count= select_range_count= 0; - select_scan_count= select_full_range_join_count= select_full_join_count= 0; - specialflag= opened_tables= created_tmp_tables= created_tmp_disk_tables= 0; + specialflag= 0; binlog_cache_use= binlog_cache_disk_use= 0; max_used_connections= slow_launch_threads = 0; mysqld_user= mysqld_chroot= opt_init_file= opt_bin_logname = 0; errmesg= 0; mysqld_unix_port= opt_mysql_tmpdir= my_bind_addr_str= NullS; bzero((gptr) &mysql_tmpdir_list, sizeof(mysql_tmpdir_list)); - bzero((gptr) &com_stat, sizeof(com_stat)); - + bzero((char *) &global_status_var, sizeof(global_status_var)); + opt_large_pages= 0; + /* Character sets */ system_charset_info= &my_charset_utf8_general_ci; files_charset_info= &my_charset_utf8_general_ci; @@ -5672,6 +5891,11 @@ static void mysql_init_variables(void) #else have_archive_db= SHOW_OPTION_NO; #endif +#ifdef HAVE_FEDERATED_DB + have_federated_db= SHOW_OPTION_YES; +#else + have_federated_db= SHOW_OPTION_NO; +#endif #ifdef HAVE_CSV_DB have_csv_db= SHOW_OPTION_YES; #else @@ -5973,6 +6197,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), delay_key_write_options= (uint) DELAY_KEY_WRITE_NONE; myisam_concurrent_insert=0; myisam_recover_options= HA_RECOVER_NONE; + sp_automatic_privileges=0; my_use_symdir=0; ha_open_options&= ~(HA_OPEN_ABORT_IF_CRASHED | HA_OPEN_DELAY_KEY_WRITE); #ifdef HAVE_QUERY_CACHE @@ -6145,7 +6370,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *end; uint length= strlen(argument); long value= my_strntol(&my_charset_latin1, argument, length, 10, &end, &err); - if (test_if_int(argument,(uint) length, end, &my_charset_latin1)) + if (end == argument+length) berkeley_lock_scan_time= value; else { diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 1e34ed90fee..3bec00a5177 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -72,7 +72,7 @@ /* The following is because alarms doesn't work on windows. 
*/ #define NO_ALARM #endif - + #ifndef NO_ALARM #include "my_pthread.h" void sql_print_error(const char *format,...); @@ -83,7 +83,6 @@ void sql_print_error(const char *format,...); #include "thr_alarm.h" #ifdef MYSQL_SERVER -#define USE_QUERY_CACHE /* The following variables/functions should really not be declared extern, but as it's hard to include mysql_priv.h here, we have to @@ -92,12 +91,16 @@ void sql_print_error(const char *format,...); extern uint test_flags; extern ulong bytes_sent, bytes_received, net_big_packet_count; extern pthread_mutex_t LOCK_bytes_sent , LOCK_bytes_received; +#ifndef MYSQL_INSTANCE_MANAGER extern void query_cache_insert(NET *net, const char *packet, ulong length); -#else -#undef statistic_add -#undef statistic_increment -#define statistic_add(A,B,C) -#define statistic_increment(A,B) +#define USE_QUERY_CACHE +#define update_statistics(A) A +#endif /* MYSQL_INSTANCE_MANGER */ +#endif /* defined(MYSQL_SERVER) && !defined(MYSQL_INSTANCE_MANAGER) */ + +#if !defined(MYSQL_SERVER) || defined(MYSQL_INSTANCE_MANAGER) +#define update_statistics(A) +#define thd_increment_bytes_sent(N) #endif #define TEST_BLOCKING 8 @@ -119,6 +122,7 @@ my_bool my_net_init(NET *net, Vio* vio) net->buff_end=net->buff+net->max_packet; net->vio = vio; net->no_send_ok = 0; + net->no_send_eof = 0; net->error=0; net->return_errno=0; net->return_status=0; net->pkt_nr=net->compress_pkt_nr=0; net->write_pos=net->read_pos = net->buff; @@ -443,7 +447,8 @@ net_real_write(NET *net,const char *packet,ulong len) my_bool net_blocking = vio_is_blocking(net->vio); DBUG_ENTER("net_real_write"); -#if defined(MYSQL_SERVER) && defined(HAVE_QUERY_CACHE) +#if defined(MYSQL_SERVER) && defined(HAVE_QUERY_CACHE) \ + && !defined(MYSQL_INSTANCE_MANAGER) if (net->query_cache_query != 0) query_cache_insert(net, packet, len); #endif @@ -552,7 +557,7 @@ net_real_write(NET *net,const char *packet,ulong len) break; } pos+=length; - statistic_add(bytes_sent,length,&LOCK_bytes_sent); + update_statistics(thd_increment_bytes_sent(length)); } #ifndef __WIN__ end: @@ -623,7 +628,7 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed, DBUG_PRINT("enter",("bytes_to_skip: %u", (uint) remain)); /* The following is good for debugging */ - statistic_increment(net_big_packet_count,&LOCK_bytes_received); + update_statistics(thd_increment_net_big_packet_count(1)); if (!thr_alarm_in_use(alarmed)) { @@ -639,7 +644,7 @@ static my_bool my_net_skip_rest(NET *net, uint32 remain, thr_alarm_t *alarmed, uint length= min(remain, net->max_packet); if (net_safe_read(net, (char*) net->buff, length, alarmed)) DBUG_RETURN(1); - statistic_add(bytes_received, length, &LOCK_bytes_received); + update_statistics(thd_increment_bytes_received(length)); remain -= (uint32) length; } if (old != MAX_PACKET_LENGTH) @@ -764,7 +769,7 @@ my_real_read(NET *net, ulong *complen) } remain -= (uint32) length; pos+= (ulong) length; - statistic_add(bytes_received,(ulong) length,&LOCK_bytes_received); + update_statistics(thd_increment_bytes_received(length)); } if (i == 0) { /* First parts is packet length */ diff --git a/sql/opt_range.cc b/sql/opt_range.cc index d25901e56b1..e7f03b51fc0 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -23,6 +23,19 @@ */ +/* + Classes in this file are used in the following way: + 1. For a selection condition a tree of SEL_IMERGE/SEL_TREE/SEL_ARG objects + is created. #of rows in table and index statistics are ignored at this + step. + 2. 
Created SEL_TREE and index stats data are used to construct a + TABLE_READ_PLAN-derived object (TRP_*). Several 'candidate' table read + plans may be created. + 3. The least expensive table read plan is used to create a tree of + QUICK_SELECT_I-derived objects which are later used for row retrieval. + QUICK_RANGEs are also created in this step. +*/ + #ifdef __GNUC__ #pragma implementation // gcc: Class implementation #endif @@ -167,8 +180,7 @@ public: min_value=arg->max_value; min_flag=arg->max_flag & NEAR_MAX ? 0 : NEAR_MIN; } - void store(uint length,char **min_key,uint min_key_flag, - char **max_key, uint max_key_flag) + void store_min(uint length,char **min_key,uint min_key_flag) { if ((min_flag & GEOM_FLAG) || (!(min_flag & NO_MIN_RANGE) && @@ -183,6 +195,11 @@ public: memcpy(*min_key,min_value,length); (*min_key)+= length; } + } + void store(uint length,char **min_key,uint min_key_flag, + char **max_key, uint max_key_flag) + { + store_min(length, min_key, min_key_flag); if (!(max_flag & NO_MAX_RANGE) && !(max_key_flag & (NO_MAX_RANGE | NEAR_MAX))) { @@ -267,31 +284,78 @@ public: SEL_ARG *clone_tree(); }; +class SEL_IMERGE; + class SEL_TREE :public Sql_alloc { public: enum Type { IMPOSSIBLE, ALWAYS, MAYBE, KEY, KEY_SMALLER } type; SEL_TREE(enum Type type_arg) :type(type_arg) {} - SEL_TREE() :type(KEY) { bzero((char*) keys,sizeof(keys));} + SEL_TREE() :type(KEY) + { + keys_map.clear_all(); + bzero((char*) keys,sizeof(keys)); + } SEL_ARG *keys[MAX_KEY]; + key_map keys_map; /* bitmask of non-NULL elements in keys */ + + /* + Possible ways to read rows using index_merge. The list is non-empty only + if type==KEY. Currently can be non empty only if keys_map.is_clear_all(). + */ + List<SEL_IMERGE> merges; + + /* The members below are filled/used only after get_mm_tree is done */ + key_map ror_scans_map; /* bitmask of ROR scan-able elements in keys */ + uint n_ror_scans; /* number of set bits in ror_scans_map */ + + struct st_ror_scan_info **ror_scans; /* list of ROR key scans */ + struct st_ror_scan_info **ror_scans_end; /* last ROR scan */ + /* Note that #records for each key scan is stored in table->quick_rows */ }; typedef struct st_qsel_param { THD *thd; TABLE *table; - KEY_PART *key_parts,*key_parts_end,*key[MAX_KEY]; + KEY_PART *key_parts,*key_parts_end; + KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */ MEM_ROOT *mem_root; table_map prev_tables,read_tables,current_table; - uint baseflag, keys, max_key_part, range_count; + uint baseflag, max_key_part, range_count; + + uint keys; /* number of keys used in the query */ + + /* used_key_no -> table_key_no translation table */ uint real_keynr[MAX_KEY]; + char min_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH], max_key[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; bool quick; // Don't calulate possible keys COND *cond; + + uint fields_bitmap_size; + MY_BITMAP needed_fields; /* bitmask of fields needed by the query */ + + key_map *needed_reg; /* ptr to SQL_SELECT::needed_reg */ + + uint *imerge_cost_buff; /* buffer for index_merge cost estimates */ + uint imerge_cost_buff_size; /* size of the buffer */ + + /* TRUE if last checked tree->key can be used for ROR-scan */ + bool is_ror_scan; } PARAM; +class TABLE_READ_PLAN; + class TRP_RANGE; + class TRP_ROR_INTERSECT; + class TRP_ROR_UNION; + class TRP_ROR_INDEX_MERGE; + class TRP_GROUP_MIN_MAX; + +struct st_ror_scan_info; + static SEL_TREE * get_mm_parts(PARAM *param,COND *cond_func,Field *field, Item_func::Functype type,Item *value, Item_result cmp_type); @@ -299,32 +363,279 @@ static SEL_ARG 
*get_mm_leaf(PARAM *param,COND *cond_func,Field *field, KEY_PART *key_part, Item_func::Functype type,Item *value); static SEL_TREE *get_mm_tree(PARAM *param,COND *cond); + +static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts); static ha_rows check_quick_select(PARAM *param,uint index,SEL_ARG *key_tree); static ha_rows check_quick_keys(PARAM *param,uint index,SEL_ARG *key_tree, char *min_key,uint min_key_flag, char *max_key, uint max_key_flag); -static QUICK_SELECT *get_quick_select(PARAM *param,uint index, - SEL_ARG *key_tree); +QUICK_RANGE_SELECT *get_quick_select(PARAM *param,uint index, + SEL_ARG *key_tree, + MEM_ROOT *alloc = NULL); +static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, + bool index_read_must_be_used, + double read_time); +static +TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree, + double read_time, + bool *are_all_covering); +static +TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param, + SEL_TREE *tree, + double read_time); +static +TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, + double read_time); +static +TRP_GROUP_MIN_MAX *get_best_group_min_max(PARAM *param, SEL_TREE *tree); +static int get_index_merge_params(PARAM *param, key_map& needed_reg, + SEL_IMERGE *imerge, double *read_time, + ha_rows* imerge_rows); +inline double get_index_only_read_time(const PARAM* param, ha_rows records, + int keynr); + #ifndef DBUG_OFF -static void print_quick(QUICK_SELECT *quick,const key_map* needed_reg); +static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, + const char *msg); +static void print_ror_scans_arr(TABLE *table, const char *msg, + struct st_ror_scan_info **start, + struct st_ror_scan_info **end); +static void print_rowid(byte* val, int len); +static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg); #endif + static SEL_TREE *tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_TREE *tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2); static SEL_ARG *sel_add(SEL_ARG *key1,SEL_ARG *key2); static SEL_ARG *key_or(SEL_ARG *key1,SEL_ARG *key2); static SEL_ARG *key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag); static bool get_range(SEL_ARG **e1,SEL_ARG **e2,SEL_ARG *root1); -static bool get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, +bool get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, char *max_key,uint max_key_flag); static bool eq_tree(SEL_ARG* a,SEL_ARG *b); static SEL_ARG null_element(SEL_ARG::IMPOSSIBLE); -static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length); +static bool null_part_in_key(KEY_PART *key_part, const char *key, + uint length); +bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param); + + +/* + SEL_IMERGE is a list of possible ways to do index merge, i.e. it is + a condition in the following form: + (t_1||t_2||...||t_N) && (next) + + where all t_i are SEL_TREEs, next is another SEL_IMERGE and no pair + (t_i,t_j) contains SEL_ARGS for the same index. + + SEL_TREE contained in SEL_IMERGE always has merges=NULL. + + This class relies on memory manager to do the cleanup. 
+*/ + +class SEL_IMERGE : public Sql_alloc +{ + enum { PREALLOCED_TREES= 10}; +public: + SEL_TREE *trees_prealloced[PREALLOCED_TREES]; + SEL_TREE **trees; /* trees used to do index_merge */ + SEL_TREE **trees_next; /* last of these trees */ + SEL_TREE **trees_end; /* end of allocated space */ + + SEL_ARG ***best_keys; /* best keys to read in SEL_TREEs */ + + SEL_IMERGE() : + trees(&trees_prealloced[0]), + trees_next(trees), + trees_end(trees + PREALLOCED_TREES) + {} + int or_sel_tree(PARAM *param, SEL_TREE *tree); + int or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree); + int or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge); +}; + + +/* + Add SEL_TREE to this index_merge without any checks, + + NOTES + This function implements the following: + (x_1||...||x_N) || t = (x_1||...||x_N||t), where x_i, t are SEL_TREEs + + RETURN + 0 - OK + -1 - Out of memory. +*/ + +int SEL_IMERGE::or_sel_tree(PARAM *param, SEL_TREE *tree) +{ + if (trees_next == trees_end) + { + const int realloc_ratio= 2; /* Double size for next round */ + uint old_elements= (trees_end - trees); + uint old_size= sizeof(SEL_TREE**) * old_elements; + uint new_size= old_size * realloc_ratio; + SEL_TREE **new_trees; + if (!(new_trees= (SEL_TREE**)alloc_root(param->mem_root, new_size))) + return -1; + memcpy(new_trees, trees, old_size); + trees= new_trees; + trees_next= trees + old_elements; + trees_end= trees + old_elements * realloc_ratio; + } + *(trees_next++)= tree; + return 0; +} + + +/* + Perform OR operation on this SEL_IMERGE and supplied SEL_TREE new_tree, + combining new_tree with one of the trees in this SEL_IMERGE if they both + have SEL_ARGs for the same key. + + SYNOPSIS + or_sel_tree_with_checks() + param PARAM from SQL_SELECT::test_quick_select + new_tree SEL_TREE with type KEY or KEY_SMALLER. + + NOTES + This does the following: + (t_1||...||t_k)||new_tree = + either + = (t_1||...||t_k||new_tree) + or + = (t_1||....||(t_j|| new_tree)||...||t_k), + + where t_i, y are SEL_TREEs. + new_tree is combined with the first t_j it has a SEL_ARG on common + key with. As a consequence of this, choice of keys to do index_merge + read may depend on the order of conditions in WHERE part of the query. + + RETURN + 0 OK + 1 One of the trees was combined with new_tree to SEL_TREE::ALWAYS, + and (*this) should be discarded. + -1 An error occurred. +*/ + +int SEL_IMERGE::or_sel_tree_with_checks(PARAM *param, SEL_TREE *new_tree) +{ + for (SEL_TREE** tree = trees; + tree != trees_next; + tree++) + { + if (sel_trees_can_be_ored(*tree, new_tree, param)) + { + *tree = tree_or(param, *tree, new_tree); + if (!*tree) + return 1; + if (((*tree)->type == SEL_TREE::MAYBE) || + ((*tree)->type == SEL_TREE::ALWAYS)) + return 1; + /* SEL_TREE::IMPOSSIBLE is impossible here */ + return 0; + } + } + + /* New tree cannot be combined with any of existing trees. */ + return or_sel_tree(param, new_tree); +} + + +/* + Perform OR operation on this index_merge and supplied index_merge list. + + RETURN + 0 - OK + 1 - One of conditions in result is always TRUE and this SEL_IMERGE + should be discarded. + -1 - An error occurred +*/ + +int SEL_IMERGE::or_sel_imerge_with_checks(PARAM *param, SEL_IMERGE* imerge) +{ + for (SEL_TREE** tree= imerge->trees; + tree != imerge->trees_next; + tree++) + { + if (or_sel_tree_with_checks(param, *tree)) + return 1; + } + return 0; +} + + +/* + Perform AND operation on two index_merge lists and store result in *im1. 
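SEL_IMERGE::or_sel_tree() above appends to a pointer array that starts in a small preallocated buffer and, when full, is reallocated at twice the size on the optimizer's MEM_ROOT (the old space is simply abandoned until the whole root is freed). The following is a self-contained sketch of that growth pattern under invented names; Arena and SmallPtrList are illustrative stand-ins, not MySQL types.

#include <cstring>
#include <cstdio>
#include <vector>

struct Arena                        // stand-in for MEM_ROOT: nothing freed early
{
  std::vector<std::vector<char> > blocks;
  void *alloc(size_t n) { blocks.emplace_back(n); return blocks.back().data(); }
};

struct SmallPtrList
{
  enum { PREALLOCED = 4 };
  void *prealloced[PREALLOCED];
  void **items, **next, **end;

  SmallPtrList() : items(prealloced), next(items), end(items + PREALLOCED) {}

  // Mirrors or_sel_tree(): double the array on the arena when it is full.
  // (The OOM check mirrors the original; this toy arena throws instead.)
  bool push(Arena *arena, void *p)
  {
    if (next == end)
    {
      size_t old_elems = end - items;
      void **bigger =
        static_cast<void**>(arena->alloc(2 * old_elems * sizeof(void*)));
      if (!bigger)
        return false;
      std::memcpy(bigger, items, old_elems * sizeof(void*));
      items = bigger;
      next  = items + old_elems;
      end   = items + 2 * old_elems;
    }
    *next++ = p;
    return true;
  }
};

int main()
{
  Arena arena;
  SmallPtrList list;
  int dummy[10];
  for (int i = 0; i < 10; i++)
    list.push(&arena, &dummy[i]);          // grows 4 -> 8 -> 16 slots
  std::printf("stored %ld pointers\n", (long)(list.next - list.items));
  return 0;
}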
+*/ + +inline void imerge_list_and_list(List<SEL_IMERGE> *im1, List<SEL_IMERGE> *im2) +{ + im1->concat(im2); +} + + +/* + Perform OR operation on 2 index_merge lists, storing result in first list. + + NOTES + The following conversion is implemented: + (a_1 &&...&& a_N)||(b_1 &&...&& b_K) = AND_i,j(a_i || b_j) => + => (a_1||b_1). + + i.e. all conjuncts except the first one are currently dropped. + This is done to avoid producing N*K ways to do index_merge. + + If (a_1||b_1) produce a condition that is always TRUE, NULL is returned + and index_merge is discarded (while it is actually possible to try + harder). + + As a consequence of this, choice of keys to do index_merge read may depend + on the order of conditions in WHERE part of the query. + + RETURN + 0 OK, result is stored in *im1 + other Error, both passed lists are unusable +*/ + +int imerge_list_or_list(PARAM *param, + List<SEL_IMERGE> *im1, + List<SEL_IMERGE> *im2) +{ + SEL_IMERGE *imerge= im1->head(); + im1->empty(); + im1->push_back(imerge); + + return imerge->or_sel_imerge_with_checks(param, im2->head()); +} + + +/* + Perform OR operation on index_merge list and key tree. + + RETURN + 0 OK, result is stored in *im1. + other Error +*/ + +int imerge_list_or_tree(PARAM *param, + List<SEL_IMERGE> *im1, + SEL_TREE *tree) +{ + SEL_IMERGE *imerge; + List_iterator<SEL_IMERGE> it(*im1); + while((imerge= it++)) + { + if (imerge->or_sel_tree_with_checks(param, tree)) + it.remove(); + } + return im1->is_empty(); +} /*************************************************************************** -** Basic functions for SQL_SELECT and QUICK_SELECT +** Basic functions for SQL_SELECT and QUICK_RANGE_SELECT ***************************************************************************/ /* make a select from mysql info @@ -334,13 +645,15 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length); */ SQL_SELECT *make_select(TABLE *head, table_map const_tables, - table_map read_tables, COND *conds, int *error) + table_map read_tables, COND *conds, int *error, + bool allow_null_cond) { SQL_SELECT *select; DBUG_ENTER("make_select"); *error=0; - if (!conds) + + if (!conds && !allow_null_cond) DBUG_RETURN(0); if (!(select= new SQL_SELECT)) { @@ -380,7 +693,7 @@ void SQL_SELECT::cleanup() free_cond=0; delete cond; cond= 0; - } + } close_cached_file(&file); } @@ -392,11 +705,29 @@ SQL_SELECT::~SQL_SELECT() #undef index // Fix for Unixware 7 -QUICK_SELECT::QUICK_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc) - :dont_free(0),sorted(0),error(0),index(key_nr),max_used_key_length(0), - used_key_parts(0), head(table), it(ranges),range(0) +QUICK_SELECT_I::QUICK_SELECT_I() + :max_used_key_length(0), + used_key_parts(0) +{} + +QUICK_RANGE_SELECT::QUICK_RANGE_SELECT(THD *thd, TABLE *table, uint key_nr, + bool no_alloc, MEM_ROOT *parent_alloc) + :dont_free(0),error(0),free_file(0),cur_range(NULL),range(0),in_range(0) { - if (!no_alloc) + sorted= 0; + index= key_nr; + head= table; + key_part_info= head->key_info[index].key_part; + my_init_dynamic_array(&ranges, sizeof(QUICK_RANGE*), 16, 16); + + /* 'thd' is not accessible in QUICK_RANGE_SELECT::get_next_init(). 
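imerge_list_or_list() above applies the shortcut its NOTES describe: instead of distributing (a_1 && ... && a_N) || (b_1 && ... && b_K) into N*K pairwise disjunctions, only the two head conjuncts are OR-ed and the rest are dropped. A toy illustration with predicates as plain strings (not the optimizer's structures):

#include <cstdio>
#include <string>
#include <vector>

int main()
{
  // Each list stands in for the conjuncts of one List<SEL_IMERGE>.
  std::vector<std::string> a = {"key1 < 10", "key2 = 3"};        // a_1 && a_2
  std::vector<std::string> b = {"key3 > 7", "key4 IN (1,2)"};    // b_1 && b_2

  // Exact distribution would need AND over all pairwise ORs: N*K terms.
  std::printf("exact form would need %zu OR-terms\n", a.size() * b.size());

  // Shortcut actually used: keep only the first conjunct of each side.
  std::string kept = "(" + a[0] + " OR " + b[0] + ")";
  std::printf("index_merge keeps just: %s\n", kept.c_str());
  return 0;
}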
*/ + multi_range_bufsiz= thd->variables.read_rnd_buff_size; + multi_range_count= thd->variables.multi_range_count; + multi_range_length= 0; + multi_range= NULL; + multi_range_buff= NULL; + + if (!no_alloc && !parent_alloc) { // Allocates everything through the internal memroot init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); @@ -404,21 +735,461 @@ QUICK_SELECT::QUICK_SELECT(THD *thd, TABLE *table, uint key_nr, bool no_alloc) } else bzero((char*) &alloc,sizeof(alloc)); - file=head->file; - record=head->record[0]; - init(); + file= head->file; + record= head->record[0]; } -QUICK_SELECT::~QUICK_SELECT() + +int QUICK_RANGE_SELECT::init() { + DBUG_ENTER("QUICK_RANGE_SELECT::init"); + + if ((error= get_next_init())) + DBUG_RETURN(error); + + if (file->inited == handler::NONE) + DBUG_RETURN(error= file->ha_index_init(index)); + error= 0; + DBUG_RETURN(0); +} + + +void QUICK_RANGE_SELECT::range_end() +{ + if (file->inited != handler::NONE) + file->ha_index_or_rnd_end(); +} + + +QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT() +{ + DBUG_ENTER("QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT"); if (!dont_free) { - if (file->inited) - file->ha_index_end(); + /* file is NULL for CPK scan on covering ROR-intersection */ + if (file) + { + range_end(); + file->extra(HA_EXTRA_NO_KEYREAD); + if (free_file) + { + DBUG_PRINT("info", ("Freeing separate handler %p (free=%d)", file, + free_file)); + file->reset(); + file->close(); + } + } + delete_dynamic(&ranges); /* ranges are allocated in alloc */ free_root(&alloc,MYF(0)); } + if (multi_range) + my_free((char*) multi_range, MYF(0)); + if (multi_range_buff) + my_free((char*) multi_range_buff, MYF(0)); + DBUG_VOID_RETURN; +} + + +QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT(THD *thd_param, + TABLE *table) + :pk_quick_select(NULL), thd(thd_param) +{ + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::QUICK_INDEX_MERGE_SELECT"); + index= MAX_KEY; + head= table; + bzero(&read_record, sizeof(read_record)); + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + DBUG_VOID_RETURN; +} + +int QUICK_INDEX_MERGE_SELECT::init() +{ + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::init"); + DBUG_RETURN(0); +} + +int QUICK_INDEX_MERGE_SELECT::reset() +{ + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::reset"); + DBUG_RETURN(read_keys_and_merge()); +} + +bool +QUICK_INDEX_MERGE_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick_sel_range) +{ + /* + Save quick_select that does scan on clustered primary key as it will be + processed separately. 
+ */ + if (head->file->primary_key_is_clustered() && + quick_sel_range->index == head->primary_key) + pk_quick_select= quick_sel_range; + else + return quick_selects.push_back(quick_sel_range); + return 0; +} + +QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT() +{ + List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); + QUICK_RANGE_SELECT* quick; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT"); + quick_it.rewind(); + while ((quick= quick_it++)) + quick->file= NULL; + quick_selects.delete_elements(); + delete pk_quick_select; + free_root(&alloc,MYF(0)); + DBUG_VOID_RETURN; +} + + +QUICK_ROR_INTERSECT_SELECT::QUICK_ROR_INTERSECT_SELECT(THD *thd_param, + TABLE *table, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) + : cpk_quick(NULL), thd(thd_param), need_to_fetch_row(retrieve_full_rows), + scans_inited(false) +{ + index= MAX_KEY; + head= table; + record= head->record[0]; + if (!parent_alloc) + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + else + bzero(&alloc, sizeof(MEM_ROOT)); + last_rowid= (byte*)alloc_root(parent_alloc? parent_alloc : &alloc, + head->file->ref_length); +} + + +/* + Do post-constructor initialization. + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::init() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_INTERSECT_SELECT::init() +{ + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::init"); + /* Check if last_rowid was successfully allocated in ctor */ + DBUG_RETURN(!last_rowid); +} + + +/* + Initialize this quick select to be a ROR-merged scan. + + SYNOPSIS + QUICK_RANGE_SELECT::init_ror_merged_scan() + reuse_handler If TRUE, use head->file, otherwise create a separate + handler object + + NOTES + This function creates and prepares for subsequent use a separate handler + object if it can't reuse head->file. The reason for this is that during + ROR-merge several key scans are performed simultaneously, and a single + handler is only capable of preserving context of a single key scan. + + In ROR-merge the quick select doing merge does full records retrieval, + merged quick selects read only keys. + + RETURN + 0 ROR child scan initialized, ok to use. + 1 error +*/ + +int QUICK_RANGE_SELECT::init_ror_merged_scan(bool reuse_handler) +{ + handler *save_file= file; + DBUG_ENTER("QUICK_RANGE_SELECT::init_ror_merged_scan"); + + if (reuse_handler) + { + DBUG_PRINT("info", ("Reusing handler %p", file)); + if (file->extra(HA_EXTRA_KEYREAD) || + file->extra(HA_EXTRA_RETRIEVE_ALL_COLS) | + init() || reset()) + { + DBUG_RETURN(1); + } + DBUG_RETURN(0); + } + + /* Create a separate handler object for this quick select */ + if (free_file) + { + /* already have own 'handler' object. */ + DBUG_RETURN(0); + } + + if (!(file= get_new_handler(head, head->db_type))) + goto failure; + DBUG_PRINT("info", ("Allocated new handler %p", file)); + if (file->ha_open(head->path, head->db_stat, HA_OPEN_IGNORE_IF_LOCKED)) + { + /* Caller will free the memory */ + goto failure; + } + + if (file->extra(HA_EXTRA_KEYREAD) || + file->extra(HA_EXTRA_RETRIEVE_ALL_COLS) || + init() || reset()) + { + file->close(); + goto failure; + } + free_file= TRUE; + last_rowid= file->ref; + DBUG_RETURN(0); + +failure: + file= save_file; + DBUG_RETURN(1); +} + + +/* + Initialize this quick select to be a part of a ROR-merged scan. + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan() + reuse_handler If TRUE, use head->file, otherwise create separate + handler object. 
+ RETURN + 0 OK + other error code +*/ +int QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan(bool reuse_handler) +{ + List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); + QUICK_RANGE_SELECT* quick; + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::init_ror_merged_scan"); + + /* Initialize all merged "children" quick selects */ + DBUG_ASSERT(!need_to_fetch_row || reuse_handler); + if (!need_to_fetch_row && reuse_handler) + { + quick= quick_it++; + /* + There is no use of this->file. Use it for the first of merged range + selects. + */ + if (quick->init_ror_merged_scan(TRUE)) + DBUG_RETURN(1); + quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS); + } + while((quick= quick_it++)) + { + if (quick->init_ror_merged_scan(FALSE)) + DBUG_RETURN(1); + quick->file->extra(HA_EXTRA_KEYREAD_PRESERVE_FIELDS); + /* All merged scans share the same record buffer in intersection. */ + quick->record= head->record[0]; + } + + if (need_to_fetch_row && head->file->ha_rnd_init(1)) + { + DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); + DBUG_RETURN(1); + } + DBUG_RETURN(0); +} + + +/* + Initialize quick select for row retrieval. + SYNOPSIS + reset() + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_INTERSECT_SELECT::reset() +{ + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::reset"); + if (!scans_inited && init_ror_merged_scan(TRUE)) + DBUG_RETURN(1); + scans_inited= true; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + QUICK_RANGE_SELECT *quick; + while ((quick= it++)) + quick->reset(); + DBUG_RETURN(0); +} + + +/* + Add a merged quick select to this ROR-intersection quick select. + + SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::push_quick_back() + quick Quick select to be added. The quick select must return + rows in rowid order. + NOTES + This call can only be made before init() is called. + + RETURN + FALSE OK + TRUE Out of memory. +*/ + +bool +QUICK_ROR_INTERSECT_SELECT::push_quick_back(QUICK_RANGE_SELECT *quick) +{ + return quick_selects.push_back(quick); +} + +QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT() +{ + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::~QUICK_ROR_INTERSECT_SELECT"); + quick_selects.delete_elements(); + delete cpk_quick; + free_root(&alloc,MYF(0)); + if (need_to_fetch_row && head->file->inited != handler::NONE) + head->file->ha_rnd_end(); + DBUG_VOID_RETURN; +} + + +QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param, + TABLE *table) + : thd(thd_param), scans_inited(false) +{ + index= MAX_KEY; + head= table; + rowid_length= table->file->ref_length; + record= head->record[0]; + init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); + thd_param->mem_root= &alloc; +} + + +/* + Do post-constructor initialization. + SYNOPSIS + QUICK_ROR_UNION_SELECT::init() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_UNION_SELECT::init() +{ + DBUG_ENTER("QUICK_ROR_UNION_SELECT::init"); + if (init_queue(&queue, quick_selects.elements, 0, + FALSE , QUICK_ROR_UNION_SELECT::queue_cmp, + (void*) this)) + { + bzero(&queue, sizeof(QUEUE)); + DBUG_RETURN(1); + } + + if (!(cur_rowid= (byte*)alloc_root(&alloc, 2*head->file->ref_length))) + DBUG_RETURN(1); + prev_rowid= cur_rowid + head->file->ref_length; + DBUG_RETURN(0); +} + + +/* + Comparison function to be used QUICK_ROR_UNION_SELECT::queue priority + queue. 
+ + SYNPOSIS + QUICK_ROR_UNION_SELECT::queue_cmp() + arg Pointer to QUICK_ROR_UNION_SELECT + val1 First merged select + val2 Second merged select +*/ +int QUICK_ROR_UNION_SELECT::queue_cmp(void *arg, byte *val1, byte *val2) +{ + QUICK_ROR_UNION_SELECT *self= (QUICK_ROR_UNION_SELECT*)arg; + return self->head->file->cmp_ref(((QUICK_SELECT_I*)val1)->last_rowid, + ((QUICK_SELECT_I*)val2)->last_rowid); } + +/* + Initialize quick select for row retrieval. + SYNOPSIS + reset() + + RETURN + 0 OK + other Error code +*/ + +int QUICK_ROR_UNION_SELECT::reset() +{ + QUICK_SELECT_I* quick; + int error; + DBUG_ENTER("QUICK_ROR_UNION_SELECT::reset"); + have_prev_rowid= FALSE; + if (!scans_inited) + { + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (quick->init_ror_merged_scan(FALSE)) + DBUG_RETURN(1); + } + scans_inited= true; + } + queue_remove_all(&queue); + /* + Initialize scans for merged quick selects and put all merged quick + selects into the queue. + */ + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (quick->reset()) + DBUG_RETURN(1); + if ((error= quick->get_next())) + { + if (error == HA_ERR_END_OF_FILE) + continue; + DBUG_RETURN(error); + } + quick->save_last_pos(); + queue_insert(&queue, (byte*)quick); + } + + if (head->file->ha_rnd_init(1)) + { + DBUG_PRINT("error", ("ROR index_merge rnd_init call failed")); + DBUG_RETURN(1); + } + + DBUG_RETURN(0); +} + + +bool +QUICK_ROR_UNION_SELECT::push_quick_back(QUICK_SELECT_I *quick_sel_range) +{ + return quick_selects.push_back(quick_sel_range); +} + +QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT() +{ + DBUG_ENTER("QUICK_ROR_UNION_SELECT::~QUICK_ROR_UNION_SELECT"); + delete_queue(&queue); + quick_selects.delete_elements(); + if (head->file->inited != handler::NONE) + head->file->ha_rnd_end(); + free_root(&alloc,MYF(0)); + DBUG_VOID_RETURN; +} + + QUICK_RANGE::QUICK_RANGE() :min_key(0),max_key(0),min_length(0),max_length(0), flag(NO_MIN_RANGE | NO_MAX_RANGE) @@ -582,28 +1353,269 @@ SEL_ARG *SEL_ARG::clone_tree() return root; } + /* - Test if a key can be used in different ranges + Table rows retrieval plan. Range optimizer creates QUICK_SELECT_I-derived + objects from table read plans. +*/ +class TABLE_READ_PLAN +{ +public: + /* + Plan read cost, with or without cost of full row retrieval, depending + on plan creation parameters. + */ + double read_cost; + ha_rows records; /* estimate of #rows to be examined */ + + /* + If TRUE, the scan returns rows in rowid order. This is used only for + scans that can be both ROR and non-ROR. + */ + bool is_ror; + /* + Create quick select for this plan. + SYNOPSIS + make_quick() + param Parameter from test_quick_select + retrieve_full_rows If TRUE, created quick select will do full record + retrieval. + parent_alloc Memory pool to use, if any. + + NOTES + retrieve_full_rows is ignored by some implementations. + + RETURN + created quick select + NULL on any error. + */ + virtual QUICK_SELECT_I *make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc=NULL) = 0; + + /* Table read plans are allocated on MEM_ROOT and are never deleted */ + static void *operator new(size_t size, MEM_ROOT *mem_root) + { return (void*) alloc_root(mem_root, (uint) size); } + static void operator delete(void *ptr,size_t size) { TRASH(ptr, size); } +}; + +class TRP_ROR_INTERSECT; +class TRP_ROR_UNION; +class TRP_INDEX_MERGE; + + +/* + Plan for a QUICK_RANGE_SELECT scan. 
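QUICK_ROR_UNION_SELECT above unions several rowid-ordered scans through a priority queue ordered by each child's last_rowid (see queue_cmp() and reset()), so equal rowids from different scans surface back to back and each row is fetched once. Below is a compact sketch of that k-way merge using std::priority_queue over plain integer "rowids"; Scan and Entry are invented stand-ins for the handler streams, and the duplicate check mirrors the prev_rowid/have_prev_rowid bookkeeping set up in reset().

#include <cstdio>
#include <queue>
#include <vector>

// One rowid-ordered child scan: a sorted list of rowids plus a cursor.
struct Scan { std::vector<long> rowids; size_t pos; };

// Heap entry: the current rowid of a scan and which scan produced it.
struct Entry { long rowid; Scan *scan; };
struct ByRowid
{
  bool operator()(const Entry &a, const Entry &b) const
  { return a.rowid > b.rowid; }             // '>' makes priority_queue a min-heap
};

int main()
{
  Scan s1 = {{1, 4, 7, 9}, 0};
  Scan s2 = {{2, 4, 9, 12}, 0};
  std::priority_queue<Entry, std::vector<Entry>, ByRowid> queue;
  for (Scan *s : {&s1, &s2})
    if (s->pos < s->rowids.size())
      queue.push({s->rowids[s->pos], s});

  bool have_prev = false;
  long prev = 0;
  while (!queue.empty())
  {
    Entry top = queue.top();
    queue.pop();
    if (!have_prev || top.rowid != prev)     // emit each rowid only once
    {
      std::printf("fetch row for rowid %ld\n", top.rowid);
      prev = top.rowid;
      have_prev = true;
    }
    Scan *s = top.scan;
    if (++s->pos < s->rowids.size())         // advance that scan, re-insert it
      queue.push({s->rowids[s->pos], s});
  }
  return 0;
}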
+ TRP_RANGE::make_quick ignores retrieve_full_rows parameter because + QUICK_RANGE_SELECT doesn't distinguish between 'index only' scans and full + record retrieval scans. +*/ + +class TRP_RANGE : public TABLE_READ_PLAN +{ +public: + SEL_ARG *key; /* set of intervals to be used in "range" method retrieval */ + uint key_idx; /* key number in PARAM::key */ + + TRP_RANGE(SEL_ARG *key_arg, uint idx_arg) + : key(key_arg), key_idx(idx_arg) + {} + + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc) + { + DBUG_ENTER("TRP_RANGE::make_quick"); + QUICK_RANGE_SELECT *quick; + if ((quick= get_quick_select(param, key_idx, key, parent_alloc))) + { + quick->records= records; + quick->read_time= read_cost; + } + DBUG_RETURN(quick); + } +}; + + +/* Plan for QUICK_ROR_INTERSECT_SELECT scan. */ + +class TRP_ROR_INTERSECT : public TABLE_READ_PLAN +{ +public: + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + + /* Array of pointers to ROR range scans used in this intersection */ + struct st_ror_scan_info **first_scan; + struct st_ror_scan_info **last_scan; /* End of the above array */ + struct st_ror_scan_info *cpk_scan; /* Clustered PK scan, if there is one */ + bool is_covering; /* TRUE if no row retrieval phase is necessary */ + double index_scan_costs; /* SUM(cost(index_scan)) */ +}; + + +/* + Plan for QUICK_ROR_UNION_SELECT scan. + QUICK_ROR_UNION_SELECT always retrieves full rows, so retrieve_full_rows + is ignored by make_quick. +*/ + +class TRP_ROR_UNION : public TABLE_READ_PLAN +{ +public: + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + TABLE_READ_PLAN **first_ror; /* array of ptrs to plans for merged scans */ + TABLE_READ_PLAN **last_ror; /* end of the above array */ +}; + + +/* + Plan for QUICK_INDEX_MERGE_SELECT scan. + QUICK_ROR_INTERSECT_SELECT always retrieves full rows, so retrieve_full_rows + is ignored by make_quick. +*/ + +class TRP_INDEX_MERGE : public TABLE_READ_PLAN +{ +public: + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); + TRP_RANGE **range_scans; /* array of ptrs to plans of merged scans */ + TRP_RANGE **range_scans_end; /* end of the array */ +}; + + +/* + Plan for a QUICK_GROUP_MIN_MAX_SELECT scan. +*/ + +class TRP_GROUP_MIN_MAX : public TABLE_READ_PLAN +{ +private: + bool have_min, have_max; + KEY_PART_INFO *min_max_arg_part; + uint group_prefix_len; + uint used_key_parts; + uint group_key_parts; + KEY *index_info; + uint index; + uint key_infix_len; + byte key_infix[MAX_KEY_LENGTH]; + SEL_TREE *range_tree; /* Represents all range predicates in the query. */ + SEL_ARG *index_tree; /* The SEL_ARG sub-tree corresponding to index_info. */ + uint param_idx; /* Index of used key in param->key. */ + /* Number of records selected by the ranges in index_tree. 
*/ +public: + ha_rows quick_prefix_records; +public: + TRP_GROUP_MIN_MAX(bool have_min_arg, bool have_max_arg, + KEY_PART_INFO *min_max_arg_part_arg, + uint group_prefix_len_arg, uint used_key_parts_arg, + uint group_key_parts_arg, KEY *index_info_arg, + uint index_arg, uint key_infix_len_arg, + byte *key_infix_arg, + SEL_TREE *tree_arg, SEL_ARG *index_tree_arg, + uint param_idx_arg, ha_rows quick_prefix_records_arg) + : have_min(have_min_arg), have_max(have_max_arg), + min_max_arg_part(min_max_arg_part_arg), + group_prefix_len(group_prefix_len_arg), used_key_parts(used_key_parts_arg), + group_key_parts(group_key_parts_arg), index_info(index_info_arg), + index(index_arg), key_infix_len(key_infix_len_arg), range_tree(tree_arg), + index_tree(index_tree_arg), param_idx(param_idx_arg), + quick_prefix_records(quick_prefix_records_arg) + { + if (key_infix_len) + memcpy(this->key_infix, key_infix_arg, key_infix_len); + } + + QUICK_SELECT_I *make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc); +}; + + +/* + Fill param->needed_fields with bitmap of fields used in the query. SYNOPSIS - SQL_SELECT::test_quick_select(thd,keys_to_use, prev_tables, - limit, force_quick_range) + fill_used_fields_bitmap() + param Parameter from test_quick_select function. - Updates the following in the select parameter: - needed_reg - Bits for keys with may be used if all prev regs are read - quick - Parameter to use when reading records. - In the table struct the following information is updated: - quick_keys - Which keys can be used - quick_rows - How many rows the key matches + NOTES + Clustered PK members are not put into the bitmap as they are implicitly + present in all keys (and it is impossible to avoid reading them). + RETURN + 0 Ok + 1 Out of memory. +*/ - RETURN VALUES - -1 if impossible select - 0 if can't use quick_select - 1 if found usable range +static int fill_used_fields_bitmap(PARAM *param) +{ + TABLE *table= param->table; + param->fields_bitmap_size= (table->fields/8 + 1); + uchar *tmp; + uint pk; + if (!(tmp= (uchar*)alloc_root(param->mem_root,param->fields_bitmap_size)) || + bitmap_init(¶m->needed_fields, tmp, param->fields_bitmap_size*8, + FALSE)) + return 1; + + bitmap_clear_all(¶m->needed_fields); + for (uint i= 0; i < table->fields; i++) + { + if (param->thd->query_id == table->field[i]->query_id) + bitmap_set_bit(¶m->needed_fields, i+1); + } - TODO - check if the function really needs to modify keys_to_use, and change the - code to pass it by reference if not + pk= param->table->primary_key; + if (param->table->file->primary_key_is_clustered() && pk != MAX_KEY) + { + /* The table uses clustered PK and it is not internally generated */ + KEY_PART_INFO *key_part= param->table->key_info[pk].key_part; + KEY_PART_INFO *key_part_end= key_part + + param->table->key_info[pk].key_parts; + for(;key_part != key_part_end; ++key_part) + { + bitmap_clear_bit(¶m->needed_fields, key_part->fieldnr); + } + } + return 0; +} + + +/* + Test if a key can be used in different ranges + + SYNOPSIS + SQL_SELECT::test_quick_select() + thd Current thread + keys_to_use Keys to use for range retrieval + prev_tables Tables assumed to be already read when the scan is + performed (but not read at the moment of this call) + limit Query limit + force_quick_range Prefer to use range (instead of full table scan) even + if it is more expensive. 
+ + NOTES + Updates the following in the select parameter: + needed_reg - Bits for keys with may be used if all prev regs are read + quick - Parameter to use when reading records. + + In the table struct the following information is updated: + quick_keys - Which keys can be used + quick_rows - How many rows the key matches + + TODO + Check if this function really needs to modify keys_to_use, and change the + code to pass it by reference if it doesn't. + + In addition to force_quick_range other means can be (an usually are) used + to make this function prefer range over full table scan. Figure out if + force_quick_range is really needed. + + RETURN + -1 if impossible select (i.e. certainly no rows will be selected) + 0 if can't use quick_select + 1 if found usable ranges and quick select has been successfully created. */ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, @@ -612,28 +1624,28 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, { uint idx; double scan_time; - DBUG_ENTER("test_quick_select"); + DBUG_ENTER("SQL_SELECT::test_quick_select"); DBUG_PRINT("enter",("keys_to_use: %lu prev_tables: %lu const_tables: %lu", keys_to_use.to_ulonglong(), (ulong) prev_tables, (ulong) const_tables)); - delete quick; quick=0; - needed_reg.clear_all(); quick_keys.clear_all(); - if (!cond || (specialflag & SPECIAL_SAFE_MODE) && ! force_quick_range || + needed_reg.clear_all(); + quick_keys.clear_all(); + if ((specialflag & SPECIAL_SAFE_MODE) && ! force_quick_range || !limit) DBUG_RETURN(0); /* purecov: inspected */ if (keys_to_use.is_clear_all()) DBUG_RETURN(0); - records=head->file->records; + records= head->file->records; if (!records) records++; /* purecov: inspected */ - scan_time=(double) records / TIME_FOR_COMPARE+1; - read_time=(double) head->file->scan_time()+ scan_time + 1.1; + scan_time= (double) records / TIME_FOR_COMPARE + 1; + read_time= (double) head->file->scan_time() + scan_time + 1.1; if (head->force_index) scan_time= read_time= DBL_MAX; if (limit < records) - read_time=(double) records+scan_time+1; // Force to use index + read_time= (double) records + scan_time + 1; // Force to use index else if (read_time <= 2.0 && !force_quick_range) DBUG_RETURN(0); /* No need for quick select */ @@ -643,7 +1655,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, if (!keys_to_use.is_clear_all()) { MEM_ROOT *old_root,alloc; - SEL_TREE *tree; + SEL_TREE *tree= NULL; KEY_PART *key_parts; KEY *key_info; PARAM param; @@ -657,11 +1669,15 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, param.table=head; param.keys=0; param.mem_root= &alloc; + param.needed_reg= &needed_reg; + param.imerge_cost_buff_size= 0; + thd->no_errors=1; // Don't warn about NULL init_sql_alloc(&alloc, thd->variables.range_alloc_block_size, 0); if (!(param.key_parts = (KEY_PART*) alloc_root(&alloc, sizeof(KEY_PART)* - head->key_parts))) + head->key_parts)) + || fill_used_fields_bitmap(¶m)) { thd->no_errors=0; free_root(&alloc,MYF(0)); // Return memory & allocator @@ -671,6 +1687,10 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, old_root= thd->mem_root; thd->mem_root= &alloc; + /* + Make an array with description of all key parts of all table keys. + This is used in get_mm_parts function. 
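fill_used_fields_bitmap() above records which columns the query references and then clears the clustered primary key's members, since, as its header notes, those are implicitly present in every key. A plain sketch of the same bookkeeping with std::vector<bool>; the field lists here are invented for illustration.

#include <cstdio>
#include <vector>

int main()
{
  const unsigned n_fields = 8;
  // Fields referenced by the query (found via per-field query_id marks
  // in the real code).
  std::vector<unsigned> used_by_query = {1, 2, 5, 6};
  // Members of the clustered primary key: available from any index entry.
  std::vector<unsigned> clustered_pk  = {1, 2};

  std::vector<bool> needed(n_fields, false);
  for (unsigned f : used_by_query)
    needed[f] = true;
  for (unsigned f : clustered_pk)
    needed[f] = false;

  std::printf("fields an index scan must still supply:");
  for (unsigned f = 0; f < n_fields; f++)
    if (needed[f])
      std::printf(" %u", f);
  std::printf("\n");
  return 0;
}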
+ */ key_info= head->key_info; for (idx=0 ; idx < head->keys ; idx++, key_info++) { @@ -698,81 +1718,141 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, } param.key_parts_end=key_parts; - if ((tree=get_mm_tree(¶m,cond))) + /* Calculate cost of full index read for the shortest covering index */ + if (!head->used_keys.is_clear_all()) + { + int key_for_use= find_shortest_key(head, &head->used_keys); + double key_read_time= (get_index_only_read_time(¶m, records, + key_for_use) + + (double) records / TIME_FOR_COMPARE); + DBUG_PRINT("info", ("'all'+'using index' scan will be using key %d, " + "read time %g", key_for_use, key_read_time)); + if (key_read_time < read_time) + read_time= key_read_time; + } + + TABLE_READ_PLAN *best_trp= NULL; + TRP_GROUP_MIN_MAX *group_trp; + double best_read_time= read_time; + + if (cond) + tree= get_mm_tree(¶m,cond); + + if (tree && tree->type == SEL_TREE::IMPOSSIBLE) + { + records=0L; /* Return -1 from this function. */ + read_time= (double) HA_POS_ERROR; + goto free_mem; + } + else if (tree && tree->type != SEL_TREE::KEY && + tree->type != SEL_TREE::KEY_SMALLER) + goto free_mem; + + + /* + Try to construct a QUICK_GROUP_MIN_MAX_SELECT. + Notice that it can be constructed no matter if there is a range tree. + */ + group_trp= get_best_group_min_max(¶m, tree); + if (group_trp && group_trp->read_cost < best_read_time) + { + best_trp= group_trp; + best_read_time= best_trp->read_cost; + } + + if (tree) { - if (tree->type == SEL_TREE::IMPOSSIBLE) + /* + It is possible to use a range-based quick select (but it might be + slower than 'all' table scan). + */ + if (tree->merges.is_empty()) { - records=0L; // Return -1 from this function - read_time= (double) HA_POS_ERROR; + TRP_RANGE *range_trp; + TRP_ROR_INTERSECT *rori_trp; + bool can_build_covering= FALSE; + + /* Get best 'range' plan and prepare data for making other plans */ + if ((range_trp= get_key_scans_params(¶m, tree, FALSE, + best_read_time))) + { + best_trp= range_trp; + best_read_time= best_trp->read_cost; + } + + /* + Simultaneous key scans and row deletes on several handler + objects are not allowed so don't use ROR-intersection for + table deletes. + */ + if ((thd->lex->sql_command != SQLCOM_DELETE)) +#ifdef NOT_USED + if ((thd->lex->sql_command != SQLCOM_UPDATE)) +#endif + { + /* + Get best non-covering ROR-intersection plan and prepare data for + building covering ROR-intersection. + */ + if ((rori_trp= get_best_ror_intersect(¶m, tree, best_read_time, + &can_build_covering))) + { + best_trp= rori_trp; + best_read_time= best_trp->read_cost; + /* + Try constructing covering ROR-intersect only if it looks possible + and worth doing. + */ + if (!rori_trp->is_covering && can_build_covering && + (rori_trp= get_best_covering_ror_intersect(¶m, tree, + best_read_time))) + best_trp= rori_trp; + } + } } - else if (tree->type == SEL_TREE::KEY || - tree->type == SEL_TREE::KEY_SMALLER) + else { - SEL_ARG **key,**end,**best_key=0; + /* Try creating index_merge/ROR-union scan. 
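The rewritten SQL_SELECT::test_quick_select() above no longer picks a key in an inline loop: it collects candidate TABLE_READ_PLAN objects (group-min-max, range, ROR-intersect, and the index_merge plans handled just below), keeps whichever has the lowest read_cost, and later builds the quick select by calling make_quick() on the winner. Below is a stripped-down sketch of that choose-the-cheapest-plan-then-build pattern; all class names and cost numbers are invented.

#include <cstdio>
#include <memory>
#include <vector>

struct Executor { const char *what; };           // stand-in for QUICK_SELECT_I

struct Plan                                      // stand-in for TABLE_READ_PLAN
{
  double read_cost;
  virtual ~Plan() {}
  virtual Executor *make_quick() const = 0;
};

struct RangePlan : Plan
{
  Executor *make_quick() const override { return new Executor{"range scan"}; }
};
struct RorIntersectPlan : Plan
{
  Executor *make_quick() const override { return new Executor{"ROR-intersect"}; }
};

int main()
{
  std::vector<std::unique_ptr<Plan> > candidates;
  candidates.emplace_back(new RangePlan());
  candidates.back()->read_cost = 120.0;
  candidates.emplace_back(new RorIntersectPlan());
  candidates.back()->read_cost = 80.0;

  const Plan *best = nullptr;
  double best_cost = 200.0;                      // cost of a full scan to beat
  for (const auto &p : candidates)
    if (p->read_cost < best_cost)
    {
      best = p.get();
      best_cost = p->read_cost;
    }

  if (best)                                      // build executor from winner
  {
    std::unique_ptr<Executor> quick(best->make_quick());
    std::printf("chosen: %s (cost %.1f)\n", quick->what, best_cost);
  }
  return 0;
}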
*/ + SEL_IMERGE *imerge; + TABLE_READ_PLAN *best_conj_trp= NULL, *new_conj_trp; + LINT_INIT(new_conj_trp); /* no empty index_merge lists possible */ + + DBUG_PRINT("info",("No range reads possible," + " trying to construct index_merge")); + List_iterator_fast<SEL_IMERGE> it(tree->merges); + while ((imerge= it++)) + { + new_conj_trp= get_best_disjunct_quick(¶m, imerge, best_read_time); + if (!best_conj_trp || (new_conj_trp && new_conj_trp->read_cost < + best_conj_trp->read_cost)) + best_conj_trp= new_conj_trp; + } + if (best_conj_trp) + best_trp= best_conj_trp; + } + } + thd->mem_root= old_root; - for (idx=0,key=tree->keys, end=key+param.keys ; - key != end ; - key++,idx++) - { - ha_rows found_records; - double found_read_time; - if (*key) - { - uint keynr= param.real_keynr[idx]; - if ((*key)->type == SEL_ARG::MAYBE_KEY || - (*key)->maybe_flag) - needed_reg.set_bit(keynr); - - found_records=check_quick_select(¶m, idx, *key); - if (found_records != HA_POS_ERROR && found_records > 2 && - head->used_keys.is_set(keynr) && - (head->file->index_flags(keynr, param.max_key_part, 1) & - HA_KEYREAD_ONLY)) - { - /* - We can resolve this by only reading through this key. - Assume that we will read trough the whole key range - and that all key blocks are half full (normally things are - much better). - */ - uint keys_per_block= (head->file->block_size/2/ - (head->key_info[keynr].key_length+ - head->file->ref_length) + 1); - found_read_time=((double) (found_records+keys_per_block-1)/ - (double) keys_per_block); - } - else - found_read_time= (head->file->read_time(keynr, - param.range_count, - found_records)+ - (double) found_records / TIME_FOR_COMPARE); - DBUG_PRINT("info",("read_time: %g found_read_time: %g", - read_time, found_read_time)); - if (read_time > found_read_time && found_records != HA_POS_ERROR) - { - read_time=found_read_time; - records=found_records; - best_key=key; - } - } - } - if (best_key && records) - { - if ((quick=get_quick_select(¶m,(uint) (best_key-tree->keys), - *best_key))) - { - quick->records=records; - quick->read_time=read_time; - } - } + /* If we got a read plan, create a quick select from it. */ + if (best_trp) + { + records= best_trp->records; + if (!(quick= best_trp->make_quick(¶m, TRUE)) || quick->init()) + { + delete quick; + quick= NULL; } } + + free_mem: free_root(&alloc,MYF(0)); // Return memory & allocator thd->mem_root= old_root; thd->no_errors=0; } - DBUG_EXECUTE("info",print_quick(quick,&needed_reg);); + + DBUG_EXECUTE("info", print_quick(quick, &needed_reg);); + /* Assume that if the user is using 'limit' we will only need to scan limit rows if we are using a key @@ -780,11 +1860,1516 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, DBUG_RETURN(records ? test(quick) : -1); } + +/* + Get cost of 'sweep' full records retrieval. 
+ SYNOPSIS + get_sweep_read_cost() + param Parameter from test_quick_select + records # of records to be retrieved + RETURN + cost of sweep +*/ + +double get_sweep_read_cost(const PARAM *param, ha_rows records) +{ + double result; + if (param->table->file->primary_key_is_clustered()) + { + result= param->table->file->read_time(param->table->primary_key, + records, records); + } + else + { + double n_blocks= + ceil((double)param->table->file->data_file_length / IO_SIZE); + double busy_blocks= + n_blocks * (1.0 - pow(1.0 - 1.0/n_blocks, rows2double(records))); + if (busy_blocks < 1.0) + busy_blocks= 1.0; + DBUG_PRINT("info",("sweep: nblocks=%g, busy_blocks=%g", n_blocks, + busy_blocks)); + /* + Disabled: Bail out if # of blocks to read is bigger than # of blocks in + table data file. + if (max_cost != DBL_MAX && (busy_blocks+index_reads_cost) >= n_blocks) + return 1; + */ + JOIN *join= param->thd->lex->select_lex.join; + if (!join || join->tables == 1) + { + /* No join, assume reading is done in one 'sweep' */ + result= busy_blocks*(DISK_SEEK_BASE_COST + + DISK_SEEK_PROP_COST*n_blocks/busy_blocks); + } + else + { + /* + Possibly this is a join with source table being non-last table, so + assume that disk seeks are random here. + */ + result= busy_blocks; + } + } + DBUG_PRINT("info",("returning cost=%g", result)); + return result; +} + + +/* + Get best plan for a SEL_IMERGE disjunctive expression. + SYNOPSIS + get_best_disjunct_quick() + param Parameter from check_quick_select function + imerge Expression to use + read_time Don't create scans with cost > read_time + + NOTES + index_merge cost is calculated as follows: + index_merge_cost = + cost(index_reads) + (see #1) + cost(rowid_to_row_scan) + (see #2) + cost(unique_use) (see #3) + + 1. cost(index_reads) =SUM_i(cost(index_read_i)) + For non-CPK scans, + cost(index_read_i) = {cost of ordinary 'index only' scan} + For CPK scan, + cost(index_read_i) = {cost of non-'index only' scan} + + 2. cost(rowid_to_row_scan) + If table PK is clustered then + cost(rowid_to_row_scan) = + {cost of ordinary clustered PK scan with n_ranges=n_rows} + + Otherwise, we use the following model to calculate costs: + We need to retrieve n_rows rows from file that occupies n_blocks blocks. + We assume that offsets of rows we need are independent variates with + uniform distribution in [0..max_file_offset] range. + + We'll denote block as "busy" if it contains row(s) we need to retrieve + and "empty" if doesn't contain rows we need. + + Probability that a block is empty is (1 - 1/n_blocks)^n_rows (this + applies to any block in file). Let x_i be a variate taking value 1 if + block #i is empty and 0 otherwise. + + Then E(x_i) = (1 - 1/n_blocks)^n_rows; + + E(n_empty_blocks) = E(sum(x_i)) = sum(E(x_i)) = + = n_blocks * ((1 - 1/n_blocks)^n_rows) = + ~= n_blocks * exp(-n_rows/n_blocks). + + E(n_busy_blocks) = n_blocks*(1 - (1 - 1/n_blocks)^n_rows) = + ~= n_blocks * (1 - exp(-n_rows/n_blocks)). + + Average size of "hole" between neighbor non-empty blocks is + E(hole_size) = n_blocks/E(n_busy_blocks). + + The total cost of reading all needed blocks in one "sweep" is: + + E(n_busy_blocks)* + (DISK_SEEK_BASE_COST + DISK_SEEK_PROP_COST*n_blocks/E(n_busy_blocks)). + + 3. Cost of Unique use is calculated in Unique::get_use_cost function. + + ROR-union cost is calculated in the same way index_merge, but instead of + Unique a priority queue is used. + + RETURN + Created read plan + NULL - Out of memory or no read scan could be built. 
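  EXAMPLE
    As an illustration only (a standalone sketch with invented block and row
    counts, not code from this patch), the E(n_busy_blocks) estimate above and
    its exponential approximation can be evaluated like this:

      #include <cmath>
      #include <cstdio>

      int main()
      {
        const double n_blocks= 1000.0;
        const double row_counts[]= {10.0, 100.0, 1000.0, 10000.0};
        for (double n_rows : row_counts)
        {
          double exact=  n_blocks * (1.0 - std::pow(1.0 - 1.0/n_blocks, n_rows));
          double approx= n_blocks * (1.0 - std::exp(-n_rows/n_blocks));
          std::printf("rows=%6.0f  E(busy blocks)=%8.2f  approx=%8.2f\n",
                      n_rows, exact, approx);
        }
        return 0;
      }

    For 100 rows over 1000 blocks both forms give about 95 busy blocks.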
+*/ + +static +TABLE_READ_PLAN *get_best_disjunct_quick(PARAM *param, SEL_IMERGE *imerge, + double read_time) +{ + SEL_TREE **ptree; + TRP_INDEX_MERGE *imerge_trp= NULL; + uint n_child_scans= imerge->trees_next - imerge->trees; + TRP_RANGE **range_scans; + TRP_RANGE **cur_child; + TRP_RANGE **cpk_scan= NULL; + bool imerge_too_expensive= FALSE; + double imerge_cost= 0.0; + ha_rows cpk_scan_records= 0; + ha_rows non_cpk_scan_records= 0; + bool pk_is_clustered= param->table->file->primary_key_is_clustered(); + bool all_scans_ror_able= TRUE; + bool all_scans_rors= TRUE; + uint unique_calc_buff_size; + TABLE_READ_PLAN **roru_read_plans; + TABLE_READ_PLAN **cur_roru_plan; + double roru_index_costs; + double blocks_in_index_read; + ha_rows roru_total_records; + double roru_intersect_part= 1.0; + DBUG_ENTER("get_best_disjunct_quick"); + DBUG_PRINT("info", ("Full table scan cost =%g", read_time)); + + if (!(range_scans= (TRP_RANGE**)alloc_root(param->mem_root, + sizeof(TRP_RANGE*)* + n_child_scans))) + DBUG_RETURN(NULL); + /* + Collect best 'range' scan for each of disjuncts, and, while doing so, + analyze possibility of ROR scans. Also calculate some values needed by + other parts of the code. + */ + for (ptree= imerge->trees, cur_child= range_scans; + ptree != imerge->trees_next; + ptree++, cur_child++) + { + DBUG_EXECUTE("info", print_sel_tree(param, *ptree, &(*ptree)->keys_map, + "tree in SEL_IMERGE");); + if (!(*cur_child= get_key_scans_params(param, *ptree, TRUE, read_time))) + { + /* + One of index scans in this index_merge is more expensive than entire + table read for another available option. The entire index_merge (and + any possible ROR-union) will be more expensive then, too. We continue + here only to update SQL_SELECT members. + */ + imerge_too_expensive= TRUE; + } + if (imerge_too_expensive) + continue; + + imerge_cost += (*cur_child)->read_cost; + all_scans_ror_able &= ((*ptree)->n_ror_scans > 0); + all_scans_rors &= (*cur_child)->is_ror; + if (pk_is_clustered && + param->real_keynr[(*cur_child)->key_idx] == param->table->primary_key) + { + cpk_scan= cur_child; + cpk_scan_records= (*cur_child)->records; + } + else + non_cpk_scan_records += (*cur_child)->records; + } + + DBUG_PRINT("info", ("index_merge scans cost=%g", imerge_cost)); + if (imerge_too_expensive || (imerge_cost > read_time) || + (non_cpk_scan_records+cpk_scan_records >= param->table->file->records) && + read_time != DBL_MAX) + { + /* + Bail out if it is obvious that both index_merge and ROR-union will be + more expensive + */ + DBUG_PRINT("info", ("Sum of index_merge scans is more expensive than " + "full table scan, bailing out")); + DBUG_RETURN(NULL); + } + if (all_scans_rors) + { + roru_read_plans= (TABLE_READ_PLAN**)range_scans; + goto skip_to_ror_scan; + } + blocks_in_index_read= imerge_cost; + if (cpk_scan) + { + /* + Add one ROWID comparison for each row retrieved on non-CPK scan. 
(it + is done in QUICK_RANGE_SELECT::row_in_ranges) + */ + imerge_cost += non_cpk_scan_records / TIME_FOR_COMPARE_ROWID; + } + + /* Calculate cost(rowid_to_row_scan) */ + imerge_cost += get_sweep_read_cost(param, non_cpk_scan_records); + DBUG_PRINT("info",("index_merge cost with rowid-to-row scan: %g", + imerge_cost)); + if (imerge_cost > read_time) + goto build_ror_index_merge; + + /* Add Unique operations cost */ + unique_calc_buff_size= + Unique::get_cost_calc_buff_size(non_cpk_scan_records, + param->table->file->ref_length, + param->thd->variables.sortbuff_size); + if (param->imerge_cost_buff_size < unique_calc_buff_size) + { + if (!(param->imerge_cost_buff= (uint*)alloc_root(param->mem_root, + unique_calc_buff_size))) + DBUG_RETURN(NULL); + param->imerge_cost_buff_size= unique_calc_buff_size; + } + + imerge_cost += + Unique::get_use_cost(param->imerge_cost_buff, non_cpk_scan_records, + param->table->file->ref_length, + param->thd->variables.sortbuff_size); + DBUG_PRINT("info",("index_merge total cost: %g (wanted: less then %g)", + imerge_cost, read_time)); + if (imerge_cost < read_time) + { + if ((imerge_trp= new (param->mem_root)TRP_INDEX_MERGE)) + { + imerge_trp->read_cost= imerge_cost; + imerge_trp->records= non_cpk_scan_records + cpk_scan_records; + imerge_trp->records= min(imerge_trp->records, + param->table->file->records); + imerge_trp->range_scans= range_scans; + imerge_trp->range_scans_end= range_scans + n_child_scans; + read_time= imerge_cost; + } + } + +build_ror_index_merge: + if (!all_scans_ror_able || param->thd->lex->sql_command == SQLCOM_DELETE) + DBUG_RETURN(imerge_trp); + + /* Ok, it is possible to build a ROR-union, try it. */ + bool dummy; + if (!(roru_read_plans= + (TABLE_READ_PLAN**)alloc_root(param->mem_root, + sizeof(TABLE_READ_PLAN*)* + n_child_scans))) + DBUG_RETURN(imerge_trp); +skip_to_ror_scan: + roru_index_costs= 0.0; + roru_total_records= 0; + cur_roru_plan= roru_read_plans; + + /* Find 'best' ROR scan for each of trees in disjunction */ + for (ptree= imerge->trees, cur_child= range_scans; + ptree != imerge->trees_next; + ptree++, cur_child++, cur_roru_plan++) + { + /* + Assume the best ROR scan is the one that has cheapest full-row-retrieval + scan cost. + Also accumulate index_only scan costs as we'll need them to calculate + overall index_intersection cost. + */ + double cost; + if ((*cur_child)->is_ror) + { + /* Ok, we have index_only cost, now get full rows scan cost */ + cost= param->table->file-> + read_time(param->real_keynr[(*cur_child)->key_idx], 1, + (*cur_child)->records) + + rows2double((*cur_child)->records) / TIME_FOR_COMPARE; + } + else + cost= read_time; + + TABLE_READ_PLAN *prev_plan= *cur_child; + if (!(*cur_roru_plan= get_best_ror_intersect(param, *ptree, cost, + &dummy))) + { + if (prev_plan->is_ror) + *cur_roru_plan= prev_plan; + else + DBUG_RETURN(imerge_trp); + roru_index_costs += (*cur_roru_plan)->read_cost; + } + else + roru_index_costs += + ((TRP_ROR_INTERSECT*)(*cur_roru_plan))->index_scan_costs; + roru_total_records += (*cur_roru_plan)->records; + roru_intersect_part *= (*cur_roru_plan)->records / + param->table->file->records; + } + + /* + rows to retrieve= + SUM(rows_in_scan_i) - table_rows * PROD(rows_in_scan_i / table_rows). + This is valid because index_merge construction guarantees that conditions + in disjunction do not share key parts. 
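    For instance, if two ROR scans match 100 and 200 rows of a 1000-row
    table, the estimate is 100 + 200 - 1000*(0.1*0.2) = 280 rows.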
+ */ + roru_total_records -= (ha_rows)(roru_intersect_part* + param->table->file->records); + /* ok, got a ROR read plan for each of the disjuncts + Calculate cost: + cost(index_union_scan(scan_1, ... scan_n)) = + SUM_i(cost_of_index_only_scan(scan_i)) + + queue_use_cost(rowid_len, n) + + cost_of_row_retrieval + See get_merge_buffers_cost function for queue_use_cost formula derivation. + */ + + double roru_total_cost; + roru_total_cost= roru_index_costs + + rows2double(roru_total_records)*log((double)n_child_scans) / + (TIME_FOR_COMPARE_ROWID * M_LN2) + + get_sweep_read_cost(param, roru_total_records); + + DBUG_PRINT("info", ("ROR-union: cost %g, %d members", roru_total_cost, + n_child_scans)); + TRP_ROR_UNION* roru; + if (roru_total_cost < read_time) + { + if ((roru= new (param->mem_root) TRP_ROR_UNION)) + { + roru->first_ror= roru_read_plans; + roru->last_ror= roru_read_plans + n_child_scans; + roru->read_cost= roru_total_cost; + roru->records= roru_total_records; + DBUG_RETURN(roru); + } + } + DBUG_RETURN(imerge_trp); +} + + +/* + Calculate cost of 'index only' scan for given index and number of records. + + SYNOPSIS + get_index_only_read_time() + param parameters structure + records #of records to read + keynr key to read + + NOTES + It is assumed that we will read trough the whole key range and that all + key blocks are half full (normally things are much better). It is also + assumed that each time we read the next key from the index, the handler + performs a random seek, thus the cost is proportional to the number of + blocks read. + + TODO: + Move this to handler->read_time() by adding a flag 'index-only-read' to + this call. The reason for doing this is that the current function doesn't + handle the case when the row is stored in the b-tree (like in innodb + clustered index) +*/ + +inline double get_index_only_read_time(const PARAM* param, ha_rows records, + int keynr) +{ + double read_time; + uint keys_per_block= (param->table->file->block_size/2/ + (param->table->key_info[keynr].key_length+ + param->table->file->ref_length) + 1); + read_time=((double) (records+keys_per_block-1)/ + (double) keys_per_block); + return read_time; +} + + +typedef struct st_ror_scan_info +{ + uint idx; /* # of used key in param->keys */ + uint keynr; /* # of used key in table */ + ha_rows records; /* estimate of # records this scan will return */ + + /* Set of intervals over key fields that will be used for row retrieval. */ + SEL_ARG *sel_arg; + + /* Fields used in the query and covered by this ROR scan. */ + MY_BITMAP covered_fields; + uint used_fields_covered; /* # of set bits in covered_fields */ + int key_rec_length; /* length of key record (including rowid) */ + + /* + Cost of reading all index records with values in sel_arg intervals set + (assuming there is no need to access full table records) + */ + double index_read_cost; + uint first_uncovered_field; /* first unused bit in covered_fields */ + uint key_components; /* # of parts in the key */ +} ROR_SCAN_INFO; + + +/* + Create ROR_SCAN_INFO* structure with a single ROR scan on index idx using + sel_arg set of intervals. 
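  EXAMPLE
    The index_read_cost that make_ror_scan() fills in below comes from
    get_index_only_read_time() above. As an illustration only (a standalone
    sketch with invented index geometry, not code from this patch):

      #include <cstdio>

      int main()
      {
        // hypothetical index: 8K blocks, 20-byte keys, 6-byte rowids
        double block_size= 8192, key_len= 20, ref_len= 6, records= 10000;
        double keys_per_block= block_size/2/(key_len + ref_len) + 1;
        double read_time= (records + keys_per_block - 1)/keys_per_block;
        std::printf("keys per block=%.1f  index-only read cost=%.1f\n",
                    keys_per_block, read_time);
        return 0;
      }

    which gives roughly 158 keys per block and a cost of about 64 block reads
    for 10000 records.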
+ + SYNOPSIS + make_ror_scan() + param Parameter from test_quick_select function + idx Index of key in param->keys + sel_arg Set of intervals for a given key + RETURN + NULL - out of memory + ROR scan structure containing a scan for {idx, sel_arg} +*/ + +static +ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg) +{ + ROR_SCAN_INFO *ror_scan; + uchar *bitmap_buf; + uint keynr; + DBUG_ENTER("make_ror_scan"); + if (!(ror_scan= (ROR_SCAN_INFO*)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO)))) + DBUG_RETURN(NULL); + + ror_scan->idx= idx; + ror_scan->keynr= keynr= param->real_keynr[idx]; + ror_scan->key_rec_length= param->table->key_info[keynr].key_length + + param->table->file->ref_length; + ror_scan->sel_arg= sel_arg; + ror_scan->records= param->table->quick_rows[keynr]; + + if (!(bitmap_buf= (uchar*)alloc_root(param->mem_root, + param->fields_bitmap_size))) + DBUG_RETURN(NULL); + + if (bitmap_init(&ror_scan->covered_fields, bitmap_buf, + param->fields_bitmap_size*8, FALSE)) + DBUG_RETURN(NULL); + bitmap_clear_all(&ror_scan->covered_fields); + + KEY_PART_INFO *key_part= param->table->key_info[keynr].key_part; + KEY_PART_INFO *key_part_end= key_part + + param->table->key_info[keynr].key_parts; + uint n_used_covered= 0; + for (;key_part != key_part_end; ++key_part) + { + if (bitmap_is_set(¶m->needed_fields, key_part->fieldnr)) + { + n_used_covered++; + bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr); + } + } + ror_scan->index_read_cost= + get_index_only_read_time(param, param->table->quick_rows[ror_scan->keynr], + ror_scan->keynr); + DBUG_RETURN(ror_scan); +} + + +/* + Compare two ROR_SCAN_INFO** by E(#records_matched) * key_record_length. + SYNOPSIS + cmp_ror_scan_info() + a ptr to first compared value + b ptr to second compared value + + RETURN + -1 a < b + 0 a = b + 1 a > b +*/ +static int cmp_ror_scan_info(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) +{ + double val1= rows2double((*a)->records) * (*a)->key_rec_length; + double val2= rows2double((*b)->records) * (*b)->key_rec_length; + return (val1 < val2)? -1: (val1 == val2)? 0 : 1; +} + +/* + Compare two ROR_SCAN_INFO** by + (#covered fields in F desc, + #components asc, + number of first not covered component asc) + + SYNOPSIS + cmp_ror_scan_info_covering() + a ptr to first compared value + b ptr to second compared value + + RETURN + -1 a < b + 0 a = b + 1 a > b +*/ +static int cmp_ror_scan_info_covering(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) +{ + if ((*a)->used_fields_covered > (*b)->used_fields_covered) + return -1; + if ((*a)->used_fields_covered < (*b)->used_fields_covered) + return 1; + if ((*a)->key_components < (*b)->key_components) + return -1; + if ((*a)->key_components > (*b)->key_components) + return 1; + if ((*a)->first_uncovered_field < (*b)->first_uncovered_field) + return -1; + if ((*a)->first_uncovered_field > (*b)->first_uncovered_field) + return 1; + return 0; +} + +/* Auxiliary structure for incremental ROR-intersection creation */ +typedef struct +{ + const PARAM *param; + MY_BITMAP covered_fields; /* union of fields covered by all scans */ + + /* TRUE if covered_fields is a superset of needed_fields */ + bool is_covering; + + double index_scan_costs; /* SUM(cost of 'index-only' scans) */ + double total_cost; + /* + Fraction of table records that satisfies conditions of all scans. + This is the number of full records that will be retrieved if a + non-index_only index intersection will be employed. 
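    The expected number of rows to fetch is therefore
    records_fract * table_records, which is exactly how
    get_best_ror_intersect() computes the 'records' estimate of the
    resulting plan.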
+ */ + double records_fract; + ha_rows index_records; /* sum(#records to look in indexes) */ +} ROR_INTERSECT_INFO; + + +/* + Re-initialize an allocated intersect info to contain zero scans. + SYNOPSIS + info Intersection info structure to re-initialize. +*/ + +static void ror_intersect_reinit(ROR_INTERSECT_INFO *info) +{ + info->is_covering= FALSE; + info->index_scan_costs= 0.0f; + info->records_fract= 1.0f; + bitmap_clear_all(&info->covered_fields); +} + +/* + Allocate a ROR_INTERSECT_INFO and initialize it to contain zero scans. + + SYNOPSIS + ror_intersect_init() + param Parameter from test_quick_select + is_index_only If TRUE, set ROR_INTERSECT_INFO to be covering + + RETURN + allocated structure + NULL on error +*/ + +static +ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param, bool is_index_only) +{ + ROR_INTERSECT_INFO *info; + uchar* buf; + if (!(info= (ROR_INTERSECT_INFO*)alloc_root(param->mem_root, + sizeof(ROR_INTERSECT_INFO)))) + return NULL; + info->param= param; + if (!(buf= (uchar*)alloc_root(param->mem_root, param->fields_bitmap_size))) + return NULL; + if (bitmap_init(&info->covered_fields, buf, param->fields_bitmap_size*8, + FALSE)) + return NULL; + ror_intersect_reinit(info); + return info; +} + + +/* + Check if adding a ROR scan to a ROR-intersection reduces its cost of + ROR-intersection and if yes, update parameters of ROR-intersection, + including its cost. + + SYNOPSIS + ror_intersect_add() + param Parameter from test_quick_select + info ROR-intersection structure to add the scan to. + ror_scan ROR scan info to add. + is_cpk_scan If TRUE, add the scan as CPK scan (this can be inferred + from other parameters and is passed separately only to + avoid duplicating the inference code) + + NOTES + Adding a ROR scan to ROR-intersect "makes sense" iff the cost of ROR- + intersection decreases. The cost of ROR-intersection is caclulated as + follows: + + cost= SUM_i(key_scan_cost_i) + cost_of_full_rows_retrieval + + if (union of indexes used covers all needed fields) + cost_of_full_rows_retrieval= 0; + else + { + cost_of_full_rows_retrieval= + cost_of_sweep_read(E(rows_to_retrieve), rows_in_table); + } + + E(rows_to_retrieve) is caclulated as follows: + Suppose we have a condition on several keys + cond=k_11=c_11 AND k_12=c_12 AND ... // parts of first key + k_21=c_21 AND k_22=c_22 AND ... // parts of second key + ... + k_n1=c_n1 AND k_n3=c_n3 AND ... (1) + + where k_ij may be the same as any k_pq (i.e. keys may have common parts). + + A full row is retrieved iff entire cond holds. + + The recursive procedure for finding P(cond) is as follows: + + First step: + Pick 1st part of 1st key and break conjunction (1) into two parts: + cond= (k_11=c_11 AND R) + + Here R may still contain condition(s) equivalent to k_11=c_11. + Nevertheless, the following holds: + + P(k_11=c_11 AND R) = P(k_11=c_11) * P(R|k_11=c_11). + + Mark k_11 as fixed field (and satisfied condition) F, save P(F), + save R to be cond and proceed to recursion step. + + Recursion step: + We have a set of fixed fields/satisfied conditions) F, probability P(F), + and remaining conjunction R + Pick next key part on current key and its condition "k_ij=c_ij". + We will add "k_ij=c_ij" into F and update P(F). + Lets denote k_ij as t, R = t AND R1, where R1 may still contain t. Then + + P((t AND R1)|F) = P(t|F) * P(R1|t|F) = P(t|F) * P(R1|(t AND F)) (2) + + (where '|' mean conditional probability, not "or") + + Consider the first multiplier in (2). 
One of the following holds: + a) F contains condition on field used in t (i.e. t AND F = F). + Then P(t|F) = 1 + + b) F doesn't contain condition on field used in t. Then F and t are + considered independent. + + P(t|F) = P(t|(fields_before_t_in_key AND other_fields)) = + = P(t|fields_before_t_in_key). + + P(t|fields_before_t_in_key) = #records(fields_before_t_in_key) / + #records(fields_before_t_in_key, t) + + The second multiplier is calculated by applying this step recursively. + + IMPLEMENTATION + This function calculates the result of application of the "recursion step" + described above for all fixed key members of a single key, accumulating set + of covered fields, selectivity, etc. + + The calculation is conducted as follows: + Lets denote #records(keypart1, ... keypartK) as n_k. We need to calculate + + n_{k1} n_{k_2} + --------- * --------- * .... (3) + n_{k1-1} n_{k2_1} + + where k1,k2,... are key parts which fields were not yet marked as fixed + ( this is result of application of option b) of the recursion step for + parts of a single key). + Since it is reasonable to expect that most of the fields are not marked + as fixed, we calcualate (3) as + + n_{i1} n_{i_2} + (3) = n_{max_key_part} / ( --------- * --------- * .... ) + n_{i1-1} n_{i2_1} + + where i1,i2, .. are key parts that were already marked as fixed. + + In order to minimize number of expensive records_in_range calls we group + and reduce adjacent fractions. + + RETURN + TRUE ROR scan added to ROR-intersection, cost updated. + FALSE It doesn't make sense to add this ROR scan to this ROR-intersection. +*/ + +bool ror_intersect_add(const PARAM *param, ROR_INTERSECT_INFO *info, + ROR_SCAN_INFO* ror_scan, bool is_cpk_scan=FALSE) +{ + int i; + SEL_ARG *sel_arg; + KEY_PART_INFO *key_part= + info->param->table->key_info[ror_scan->keynr].key_part; + double selectivity_mult= 1.0; + byte key_val[MAX_KEY_LENGTH+MAX_FIELD_WIDTH]; /* key values tuple */ + + DBUG_ENTER("ror_intersect_add"); + DBUG_PRINT("info", ("Current selectivity= %g", info->records_fract)); + DBUG_PRINT("info", ("Adding scan on %s", + info->param->table->key_info[ror_scan->keynr].name)); + SEL_ARG *tuple_arg= NULL; + char *key_ptr= (char*) key_val; + bool cur_covered, prev_covered= + bitmap_is_set(&info->covered_fields, key_part->fieldnr); + + ha_rows prev_records= param->table->file->records; + key_range min_range; + key_range max_range; + min_range.key= (byte*) key_val; + min_range.flag= HA_READ_KEY_EXACT; + max_range.key= (byte*) key_val; + max_range.flag= HA_READ_AFTER_KEY; + + for(i= 0, sel_arg= ror_scan->sel_arg; sel_arg; + i++, sel_arg= sel_arg->next_key_part) + { + cur_covered= bitmap_is_set(&info->covered_fields, (key_part + i)->fieldnr); + if (cur_covered != prev_covered) + { + /* create (part1val, ..., part{n-1}val) tuple. 
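         The prefix stored here is fed to records_in_range() just below;
         the ratio of two such estimates supplies one of the n_k/n_{k-1}
         fractions from formula (3) in the function comment.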
*/ + { + if (!tuple_arg) + { + tuple_arg= ror_scan->sel_arg; + tuple_arg->store_min(key_part->length, &key_ptr, 0); + } + while (tuple_arg->next_key_part != sel_arg) + { + tuple_arg= tuple_arg->next_key_part; + tuple_arg->store_min(key_part->length, &key_ptr, 0); + } + } + ha_rows records; + min_range.length= max_range.length= ((char*) key_ptr - (char*) key_val); + records= param->table->file-> + records_in_range(ror_scan->keynr, + &min_range, + &max_range); + if (cur_covered) + { + /* uncovered -> covered */ + double tmp= rows2double(records)/rows2double(prev_records); + DBUG_PRINT("info", ("Selectivity multiplier: %g", tmp)); + selectivity_mult *= tmp; + prev_records= HA_POS_ERROR; + } + else + { + /* covered -> uncovered */ + prev_records= records; + } + } + prev_covered= cur_covered; + } + if (!prev_covered) + { + double tmp= rows2double(param->table->quick_rows[ror_scan->keynr]) / + rows2double(prev_records); + DBUG_PRINT("info", ("Selectivity multiplier: %g", tmp)); + selectivity_mult *= tmp; + } + + if (selectivity_mult == 1.0) + { + /* Don't add this scan if it doesn't improve selectivity. */ + DBUG_PRINT("info", ("The scan doesn't improve selectivity.")); + DBUG_RETURN(FALSE); + } + + info->records_fract *= selectivity_mult; + ha_rows cur_scan_records= info->param->table->quick_rows[ror_scan->keynr]; + if (is_cpk_scan) + { + info->index_scan_costs += rows2double(cur_scan_records)* + TIME_FOR_COMPARE_ROWID; + } + else + { + info->index_records += cur_scan_records; + info->index_scan_costs += ror_scan->index_read_cost; + bitmap_union(&info->covered_fields, &ror_scan->covered_fields); + } + + if (!info->is_covering && bitmap_is_subset(&info->param->needed_fields, + &info->covered_fields)) + { + DBUG_PRINT("info", ("ROR-intersect is covering now")); + info->is_covering= TRUE; + } + + info->total_cost= info->index_scan_costs; + if (!info->is_covering) + { + ha_rows table_recs= info->param->table->file->records; + info->total_cost += + get_sweep_read_cost(info->param, + (ha_rows)(info->records_fract*table_recs)); + } + DBUG_PRINT("info", ("New selectivity= %g", info->records_fract)); + DBUG_PRINT("info", ("New cost= %g, %scovering", info->total_cost, + info->is_covering?"" : "non-")); + DBUG_RETURN(TRUE); +} + + +/* + Get best ROR-intersection plan using non-covering ROR-intersection search + algorithm. The returned plan may be covering. + + SYNOPSIS + get_best_ror_intersect() + param Parameter from test_quick_select function. + tree Transformed restriction condition to be used to look + for ROR scans. + read_time Do not return read plans with cost > read_time. + are_all_covering [out] set to TRUE if union of all scans covers all + fields needed by the query (and it is possible to build + a covering ROR-intersection) + + NOTES + get_key_scans_params must be called before for the same SEL_TREE before + this function can be called. + + The approximate best non-covering plan search algorithm is as follows: + + find_min_ror_intersection_scan() + { + R= select all ROR scans; + order R by (E(#records_matched) * key_record_length). + + S= first(R); -- set of scans that will be used for ROR-intersection + R= R-first(S); + min_cost= cost(S); + min_scan= make_scan(S); + while (R is not empty) + { + if (!selectivity(S + first(R) < selectivity(S))) + continue; + + S= S + first(R); + R= R - first(R); + if (cost(S) < min_cost) + { + min_cost= cost(S); + min_scan= make_scan(S); + } + } + return min_scan; + } + + See ror_intersect_add function for ROR intersection costs. 
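  EXAMPLE
    As an illustration only (a self-contained toy with invented numbers, using
    a crude independence assumption in place of the real ror_intersect_add()
    bookkeeping), the greedy prefix search above boils down to:

      #include <algorithm>
      #include <cstddef>
      #include <cstdio>
      #include <vector>

      struct ToyScan { double rows, rec_len, index_cost; };

      int main()
      {
        const double table_rows= 1000.0;
        std::vector<ToyScan> scans= {{500, 12, 40}, {300, 16, 35}, {200, 20, 30}};
        // order R by E(#records_matched) * key_record_length
        std::sort(scans.begin(), scans.end(),
                  [](const ToyScan &a, const ToyScan &b)
                  { return a.rows * a.rec_len < b.rows * b.rec_len; });
        double best_cost= 1e100, index_costs= 0.0, selectivity= 1.0;
        std::size_t best_prefix= 0;
        for (std::size_t i= 0; i < scans.size(); i++)
        {
          index_costs+=  scans[i].index_cost;        // SUM(index-only scan costs)
          selectivity*=  scans[i].rows / table_rows; // crude independence model
          double total= index_costs + selectivity * table_rows; // + row fetches
          if (total < best_cost) { best_cost= total; best_prefix= i + 1; }
        }
        std::printf("best prefix: %zu scan(s), cost %.1f\n", best_prefix, best_cost);
        return 0;
      }

    The real code keeps the best prefix via intersect_scans_best and prices the
    row fetches with get_sweep_read_cost() instead of the bare row count.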
+ + Special handling for Clustered PK scans + Clustered PK contains all table fields, so using it as a regular scan in + index intersection doesn't make sense: a range scan on CPK will be less + expensive in this case. + Clustered PK scan has special handling in ROR-intersection: it is not used + to retrieve rows, instead its condition is used to filter row references + we get from scans on other keys. + + RETURN + ROR-intersection table read plan + NULL if out of memory or no suitable plan found. +*/ + +static +TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree, + double read_time, + bool *are_all_covering) +{ + uint idx; + double min_cost= read_time; + DBUG_ENTER("get_best_ror_intersect"); + + if ((tree->n_ror_scans < 2) || !param->table->file->records) + DBUG_RETURN(NULL); + + /* + Collect ROR-able SEL_ARGs and create ROR_SCAN_INFO for each of them. + Also find and save clustered PK scan if there is one. + */ + ROR_SCAN_INFO **cur_ror_scan; + ROR_SCAN_INFO *cpk_scan= NULL; + bool cpk_scan_used= FALSE; + if (!(tree->ror_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)* + param->keys))) + return NULL; + uint cpk_no= (param->table->file->primary_key_is_clustered())? + param->table->primary_key : MAX_KEY; + + for (idx= 0, cur_ror_scan= tree->ror_scans; idx < param->keys; idx++) + { + ROR_SCAN_INFO *scan; + if (!tree->ror_scans_map.is_set(idx)) + continue; + if (!(scan= make_ror_scan(param, idx, tree->keys[idx]))) + return NULL; + if (param->real_keynr[idx] == cpk_no) + { + cpk_scan= scan; + tree->n_ror_scans--; + } + else + *(cur_ror_scan++)= scan; + } + + tree->ror_scans_end= cur_ror_scan; + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, "original", + tree->ror_scans, + tree->ror_scans_end);); + /* + Ok, [ror_scans, ror_scans_end) is array of ptrs to initialized + ROR_SCAN_INFOs. + Now, get a minimal key scan using an approximate algorithm. + */ + qsort(tree->ror_scans, tree->n_ror_scans, sizeof(ROR_SCAN_INFO*), + (qsort_cmp)cmp_ror_scan_info); + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, "ordered", + tree->ror_scans, + tree->ror_scans_end);); + + ROR_SCAN_INFO **intersect_scans; /* ROR scans used in index intersection */ + ROR_SCAN_INFO **intersect_scans_end; + if (!(intersect_scans= (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)* + tree->n_ror_scans))) + return NULL; + intersect_scans_end= intersect_scans; + + /* Create and incrementally update ROR intersection. 
*/ + ROR_INTERSECT_INFO *intersect; + if (!(intersect= ror_intersect_init(param, FALSE))) + return NULL; + + /* [intersect_scans, intersect_scans_best) will hold the best combination */ + ROR_SCAN_INFO **intersect_scans_best; + ha_rows best_rows; + bool is_best_covering; + double best_index_scan_costs; + LINT_INIT(best_rows); /* protected by intersect_scans_best */ + LINT_INIT(is_best_covering); + LINT_INIT(best_index_scan_costs); + + cur_ror_scan= tree->ror_scans; + /* Start with one scan */ + intersect_scans_best= intersect_scans; + while (cur_ror_scan != tree->ror_scans_end && !intersect->is_covering) + { + /* S= S + first(R); */ + if (ror_intersect_add(param, intersect, *cur_ror_scan)) + *(intersect_scans_end++)= *cur_ror_scan; + /* R= R - first(R); */ + cur_ror_scan++; + + if (intersect->total_cost < min_cost) + { + /* Local minimum found, save it */ + min_cost= intersect->total_cost; + best_rows= (ha_rows)(intersect->records_fract* + rows2double(param->table->file->records)); + /* Prevent divisons by zero */ + if (!best_rows) + best_rows= 1; + is_best_covering= intersect->is_covering; + intersect_scans_best= intersect_scans_end; + best_index_scan_costs= intersect->index_scan_costs; + } + } + + DBUG_EXECUTE("info",print_ror_scans_arr(param->table, + "best ROR-intersection", + intersect_scans, + intersect_scans_best);); + + *are_all_covering= intersect->is_covering; + uint best_num= intersect_scans_best - intersect_scans; + /* + Ok, found the best ROR-intersection of non-CPK key scans. + Check if we should add a CPK scan. + + If the obtained ROR-intersection is covering, it doesn't make sense + to add CPK scan - Clustered PK contains all fields and if we're doing + CPK scan doing other CPK scans will only add more overhead. + */ + if (cpk_scan && !intersect->is_covering) + { + /* + Handle the special case: ROR-intersect(PRIMARY, key1) is + the best, but cost(range(key1)) > cost(best_non_ror_range_scan) + */ + if (best_num == 0) + { + cur_ror_scan= tree->ror_scans; + intersect_scans_end= intersect_scans; + ror_intersect_reinit(intersect); + if (!ror_intersect_add(param, intersect, *cur_ror_scan)) + DBUG_RETURN(NULL); /* shouldn't happen actually */ + *(intersect_scans_end++)= *cur_ror_scan; + best_num++; + } + + if (ror_intersect_add(param, intersect, cpk_scan)) + { + cpk_scan_used= TRUE; + min_cost= intersect->total_cost; + best_rows= (ha_rows)(intersect->records_fract* + rows2double(param->table->file->records)); + /* Prevent divisons by zero */ + if (!best_rows) + best_rows= 1; + is_best_covering= intersect->is_covering; + best_index_scan_costs= intersect->index_scan_costs; + } + } + + /* Ok, return ROR-intersect plan if we have found one */ + TRP_ROR_INTERSECT *trp= NULL; + if (best_num > 1 || cpk_scan_used) + { + if (!(trp= new (param->mem_root) TRP_ROR_INTERSECT)) + DBUG_RETURN(trp); + if (!(trp->first_scan= + (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)*best_num))) + DBUG_RETURN(NULL); + memcpy(trp->first_scan, intersect_scans, best_num*sizeof(ROR_SCAN_INFO*)); + trp->last_scan= trp->first_scan + best_num; + trp->is_covering= is_best_covering; + trp->read_cost= min_cost; + trp->records= best_rows; + trp->index_scan_costs= best_index_scan_costs; + trp->cpk_scan= cpk_scan; + DBUG_PRINT("info", + ("Returning non-covering ROR-intersect plan: cost %g, records %lu", + trp->read_cost, (ulong) trp->records)); + } + DBUG_RETURN(trp); +} + + +/* + Get best covering ROR-intersection. 
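  Here "covering" means that the union of the chosen indexes supplies every
  field the query needs, so no table rows are fetched at all: the cost
  accumulated below is just the summed index-only scan costs plus the
  rowid-merge overhead.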
+ SYNOPSIS + get_best_covering_ror_intersect() + param Parameter from test_quick_select function. + tree SEL_TREE with sets of intervals for different keys. + read_time Don't return table read plans with cost > read_time. + + RETURN + Best covering ROR-intersection plan + NULL if no plan found. + + NOTES + get_best_ror_intersect must be called for a tree before calling this + function for it. + This function invalidates tree->ror_scans member values. + + The following approximate algorithm is used: + I=set of all covering indexes + F=set of all fields to cover + S={} + + do { + Order I by (#covered fields in F desc, + #components asc, + number of first not covered component asc); + F=F-covered by first(I); + S=S+first(I); + I=I-first(I); + } while F is not empty. +*/ + +static +TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param, + SEL_TREE *tree, + double read_time) +{ + ROR_SCAN_INFO **ror_scan_mark; + ROR_SCAN_INFO **ror_scans_end= tree->ror_scans_end; + DBUG_ENTER("get_best_covering_ror_intersect"); + uint nbits= param->fields_bitmap_size*8; + + for (ROR_SCAN_INFO **scan= tree->ror_scans; scan != ror_scans_end; ++scan) + (*scan)->key_components= + param->table->key_info[(*scan)->keynr].key_parts; + + /* + Run covering-ROR-search algorithm. + Assume set I is [ror_scan .. ror_scans_end) + */ + + /*I=set of all covering indexes */ + ror_scan_mark= tree->ror_scans; + + uchar buf[MAX_KEY/8+1]; + MY_BITMAP covered_fields; + if (bitmap_init(&covered_fields, buf, nbits, FALSE)) + DBUG_RETURN(0); + bitmap_clear_all(&covered_fields); + + double total_cost= 0.0f; + ha_rows records=0; + bool all_covered; + + DBUG_PRINT("info", ("Building covering ROR-intersection")); + DBUG_EXECUTE("info", print_ror_scans_arr(param->table, + "building covering ROR-I", + ror_scan_mark, ror_scans_end);); + do { + /* + Update changed sorting info: + #covered fields, + number of first not covered component + Calculate and save these values for each of remaining scans. + */ + for (ROR_SCAN_INFO **scan= ror_scan_mark; scan != ror_scans_end; ++scan) + { + bitmap_subtract(&(*scan)->covered_fields, &covered_fields); + (*scan)->used_fields_covered= + bitmap_bits_set(&(*scan)->covered_fields); + (*scan)->first_uncovered_field= + bitmap_get_first(&(*scan)->covered_fields); + } + + qsort(ror_scan_mark, ror_scans_end-ror_scan_mark, sizeof(ROR_SCAN_INFO*), + (qsort_cmp)cmp_ror_scan_info_covering); + + DBUG_EXECUTE("info", print_ror_scans_arr(param->table, + "remaining scans", + ror_scan_mark, ror_scans_end);); + + /* I=I-first(I) */ + total_cost += (*ror_scan_mark)->index_read_cost; + records += (*ror_scan_mark)->records; + DBUG_PRINT("info", ("Adding scan on %s", + param->table->key_info[(*ror_scan_mark)->keynr].name)); + if (total_cost > read_time) + DBUG_RETURN(NULL); + /* F=F-covered by first(I) */ + bitmap_union(&covered_fields, &(*ror_scan_mark)->covered_fields); + all_covered= bitmap_is_subset(¶m->needed_fields, &covered_fields); + } while (!all_covered && (++ror_scan_mark < ror_scans_end)); + + if (!all_covered) + DBUG_RETURN(NULL); /* should not happen actually */ + + /* + Ok, [tree->ror_scans .. ror_scan) holds covering index_intersection with + cost total_cost. + */ + DBUG_PRINT("info", ("Covering ROR-intersect scans cost: %g", total_cost)); + DBUG_EXECUTE("info", print_ror_scans_arr(param->table, + "creating covering ROR-intersect", + tree->ror_scans, ror_scan_mark);); + + /* Add priority queue use cost. 
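     Merging the rowid streams of k chosen scans through a priority queue
     costs roughly log2(k) rowid comparisons per merged row, which is what
     the rows * log(k) / (TIME_FOR_COMPARE_ROWID * M_LN2) term below
     expresses (log(k)/M_LN2 being log2(k)).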
*/ + total_cost += rows2double(records)* + log((double)(ror_scan_mark - tree->ror_scans)) / + (TIME_FOR_COMPARE_ROWID * M_LN2); + DBUG_PRINT("info", ("Covering ROR-intersect full cost: %g", total_cost)); + + if (total_cost > read_time) + DBUG_RETURN(NULL); + + TRP_ROR_INTERSECT *trp; + if (!(trp= new (param->mem_root) TRP_ROR_INTERSECT)) + DBUG_RETURN(trp); + uint best_num= (ror_scan_mark - tree->ror_scans); + if (!(trp->first_scan= (ROR_SCAN_INFO**)alloc_root(param->mem_root, + sizeof(ROR_SCAN_INFO*)* + best_num))) + DBUG_RETURN(NULL); + memcpy(trp->first_scan, ror_scan_mark, best_num*sizeof(ROR_SCAN_INFO*)); + trp->last_scan= trp->first_scan + best_num; + trp->is_covering= TRUE; + trp->read_cost= total_cost; + trp->records= records; + + DBUG_PRINT("info", + ("Returning covering ROR-intersect plan: cost %g, records %lu", + trp->read_cost, (ulong) trp->records)); + DBUG_RETURN(trp); +} + + +/* + Get best "range" table read plan for given SEL_TREE. + Also update PARAM members and store ROR scans info in the SEL_TREE. + SYNOPSIS + get_key_scans_params + param parameters from test_quick_select + tree make range select for this SEL_TREE + index_read_must_be_used if TRUE, assume 'index only' option will be set + (except for clustered PK indexes) + read_time don't create read plans with cost > read_time. + RETURN + Best range read plan + NULL if no plan found or error occurred +*/ + +static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, + bool index_read_must_be_used, + double read_time) +{ + int idx; + SEL_ARG **key,**end, **key_to_read= NULL; + ha_rows best_records; + TRP_RANGE* read_plan= NULL; + bool pk_is_clustered= param->table->file->primary_key_is_clustered(); + DBUG_ENTER("get_key_scans_params"); + LINT_INIT(best_records); /* protected by key_to_read */ + /* + Note that there may be trees that have type SEL_TREE::KEY but contain no + key reads at all, e.g. tree for expression "key1 is not null" where key1 + is defined as "not null". + */ + DBUG_EXECUTE("info", print_sel_tree(param, tree, &tree->keys_map, + "tree scans");); + tree->ror_scans_map.clear_all(); + tree->n_ror_scans= 0; + for (idx= 0,key=tree->keys, end=key+param->keys; + key != end ; + key++,idx++) + { + ha_rows found_records; + double found_read_time; + if (*key) + { + uint keynr= param->real_keynr[idx]; + if ((*key)->type == SEL_ARG::MAYBE_KEY || + (*key)->maybe_flag) + param->needed_reg->set_bit(keynr); + + bool read_index_only= index_read_must_be_used ? TRUE : + (bool) param->table->used_keys.is_set(keynr); + + found_records= check_quick_select(param, idx, *key); + if (param->is_ror_scan) + { + tree->n_ror_scans++; + tree->ror_scans_map.set_bit(idx); + } + double cpu_cost= (double) found_records / TIME_FOR_COMPARE; + if (found_records != HA_POS_ERROR && found_records > 2 && + read_index_only && + (param->table->file->index_flags(keynr, param->max_key_part,1) & + HA_KEYREAD_ONLY) && + !(pk_is_clustered && keynr == param->table->primary_key)) + /* We can resolve this by only reading through this key. */ + found_read_time= get_index_only_read_time(param,found_records,keynr) + + cpu_cost; + else + /* + cost(read_through_index) = cost(disk_io) + cost(row_in_range_checks) + The row_in_range check is in QUICK_RANGE_SELECT::cmp_next function. 
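         In both branches the cpu_cost term computed above
         (found_records / TIME_FOR_COMPARE) is added on top of the I/O
         estimate.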
+ */ + found_read_time= param->table->file->read_time(keynr, + param->range_count, + found_records) + + cpu_cost; + + DBUG_PRINT("info",("read_time: %g found_read_time: %g", + read_time, found_read_time)); + + if (read_time > found_read_time && found_records != HA_POS_ERROR + /*|| read_time == DBL_MAX*/ ) + { + read_time= found_read_time; + best_records= found_records; + key_to_read= key; + } + + } + } + + DBUG_EXECUTE("info", print_sel_tree(param, tree, &tree->ror_scans_map, + "ROR scans");); + if (key_to_read) + { + idx= key_to_read - tree->keys; + if ((read_plan= new (param->mem_root) TRP_RANGE(*key_to_read, idx))) + { + read_plan->records= best_records; + read_plan->is_ror= tree->ror_scans_map.is_set(idx); + read_plan->read_cost= read_time; + DBUG_PRINT("info", + ("Returning range plan for key %s, cost %g, records %lu", + param->table->key_info[param->real_keynr[idx]].name, + read_plan->read_cost, (ulong) read_plan->records)); + } + } + else + DBUG_PRINT("info", ("No 'range' table read plan found")); + + DBUG_RETURN(read_plan); +} + + +QUICK_SELECT_I *TRP_INDEX_MERGE::make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) +{ + QUICK_INDEX_MERGE_SELECT *quick_imerge; + QUICK_RANGE_SELECT *quick; + /* index_merge always retrieves full rows, ignore retrieve_full_rows */ + if (!(quick_imerge= new QUICK_INDEX_MERGE_SELECT(param->thd, param->table))) + return NULL; + + quick_imerge->records= records; + quick_imerge->read_time= read_cost; + for(TRP_RANGE **range_scan= range_scans; range_scan != range_scans_end; + range_scan++) + { + if (!(quick= (QUICK_RANGE_SELECT*) + ((*range_scan)->make_quick(param, FALSE, &quick_imerge->alloc)))|| + quick_imerge->push_quick_back(quick)) + { + delete quick; + delete quick_imerge; + return NULL; + } + } + return quick_imerge; +} + +QUICK_SELECT_I *TRP_ROR_INTERSECT::make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) +{ + QUICK_ROR_INTERSECT_SELECT *quick_intrsect; + QUICK_RANGE_SELECT *quick; + DBUG_ENTER("TRP_ROR_INTERSECT::make_quick"); + MEM_ROOT *alloc; + + if ((quick_intrsect= + new QUICK_ROR_INTERSECT_SELECT(param->thd, param->table, + retrieve_full_rows? (!is_covering):FALSE, + parent_alloc))) + { + DBUG_EXECUTE("info", print_ror_scans_arr(param->table, + "creating ROR-intersect", + first_scan, last_scan);); + alloc= parent_alloc? parent_alloc: &quick_intrsect->alloc; + for(; first_scan != last_scan;++first_scan) + { + if (!(quick= get_quick_select(param, (*first_scan)->idx, + (*first_scan)->sel_arg, alloc)) || + quick_intrsect->push_quick_back(quick)) + { + delete quick_intrsect; + DBUG_RETURN(NULL); + } + } + if (cpk_scan) + { + if (!(quick= get_quick_select(param, cpk_scan->idx, + cpk_scan->sel_arg, alloc))) + { + delete quick_intrsect; + DBUG_RETURN(NULL); + } + quick->file= NULL; + quick_intrsect->cpk_quick= quick; + } + quick_intrsect->records= records; + quick_intrsect->read_time= read_cost; + } + DBUG_RETURN(quick_intrsect); +} + + +QUICK_SELECT_I *TRP_ROR_UNION::make_quick(PARAM *param, + bool retrieve_full_rows, + MEM_ROOT *parent_alloc) +{ + QUICK_ROR_UNION_SELECT *quick_roru; + TABLE_READ_PLAN **scan; + QUICK_SELECT_I *quick; + DBUG_ENTER("TRP_ROR_UNION::make_quick"); + /* + It is impossible to construct a ROR-union that will not retrieve full + rows, ignore retrieve_full_rows parameter. 
+ */ + if ((quick_roru= new QUICK_ROR_UNION_SELECT(param->thd, param->table))) + { + for(scan= first_ror; scan != last_ror; scan++) + { + if (!(quick= (*scan)->make_quick(param, FALSE, &quick_roru->alloc)) || + quick_roru->push_quick_back(quick)) + DBUG_RETURN(NULL); + } + quick_roru->records= records; + quick_roru->read_time= read_cost; + } + DBUG_RETURN(quick_roru); +} + + +/* + Build a SEL_TREE for a simple predicate + + SYNOPSIS + get_func_mm_tree() + param PARAM from SQL_SELECT::test_quick_select + cond_func item for the predicate + field field in the predicate + value constant in the predicate + cmp_type compare type for the field + + RETURN + Pointer to thre built tree +*/ + +static SEL_TREE *get_func_mm_tree(PARAM *param, Item_func *cond_func, + Field *field, Item *value, + Item_result cmp_type) +{ + SEL_TREE *tree= 0; + DBUG_ENTER("get_func_mm_tree"); + + switch (cond_func->functype()) { + case Item_func::NE_FUNC: + tree= get_mm_parts(param, cond_func, field, Item_func::LT_FUNC, + value, cmp_type); + if (tree) + { + tree= tree_or(param, tree, get_mm_parts(param, cond_func, field, + Item_func::GT_FUNC, + value, cmp_type)); + } + break; + case Item_func::BETWEEN: + tree= get_mm_parts(param, cond_func, field, Item_func::GE_FUNC, + cond_func->arguments()[1],cmp_type); + if (tree) + { + tree= tree_and(param, tree, get_mm_parts(param, cond_func, field, + Item_func::LE_FUNC, + cond_func->arguments()[2], + cmp_type)); + } + break; + case Item_func::IN_FUNC: + { + Item_func_in *func=(Item_func_in*) cond_func; + tree= get_mm_parts(param, cond_func, field, Item_func::EQ_FUNC, + func->arguments()[1], cmp_type); + if (tree) + { + Item **arg, **end; + for (arg= func->arguments()+2, end= arg+func->argument_count()-2; + arg < end ; arg++) + { + tree= tree_or(param, tree, get_mm_parts(param, cond_func, field, + Item_func::EQ_FUNC, + *arg, + cmp_type)); + } + } + break; + } + default: + { + /* + Here the function for the following predicates are processed: + <, <=, =, >=, >, LIKE, IS NULL, IS NOT NULL. + If the predicate is of the form (value op field) it is handled + as the equivalent predicate (field rev_op value), e.g. + 2 <= a is handled as a >= 2. + */ + Item_func::Functype func_type= + (value != cond_func->arguments()[0]) ? 
cond_func->functype() : + ((Item_bool_func2*) cond_func)->rev_functype(); + tree= get_mm_parts(param, cond_func, field, func_type, value, cmp_type); + } + } + + DBUG_RETURN(tree); + +} + /* make a select tree of all keys in condition */ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) { SEL_TREE *tree=0; + SEL_TREE *ftree= 0; + Item_field *field_item= 0; + Item *value; DBUG_ENTER("get_mm_tree"); if (cond->type() == Item::COND_ITEM) @@ -832,9 +3417,12 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) DBUG_RETURN(new SEL_TREE(SEL_TREE::IMPOSSIBLE)); } - table_map ref_tables=cond->used_tables(); + table_map ref_tables= 0; + table_map param_comp= ~(param->prev_tables | param->read_tables | + param->current_table); if (cond->type() != Item::FUNC_ITEM) { // Should be a field + ref_tables= cond->used_tables(); if ((ref_tables & param->current_table) || (ref_tables & ~(param->prev_tables | param->read_tables))) DBUG_RETURN(0); @@ -847,97 +3435,111 @@ static SEL_TREE *get_mm_tree(PARAM *param,COND *cond) param->cond= cond; - if (cond_func->functype() == Item_func::BETWEEN) + switch (cond_func->functype()) { + case Item_func::BETWEEN: + if (cond_func->arguments()[0]->type() != Item::FIELD_ITEM) + DBUG_RETURN(0); + field_item= (Item_field*) (cond_func->arguments()[0]); + value= NULL; + break; + case Item_func::IN_FUNC: + { + Item_func_in *func=(Item_func_in*) cond_func; + if (func->key_item()->type() != Item::FIELD_ITEM) + DBUG_RETURN(0); + field_item= (Item_field*) (func->key_item()); + value= NULL; + break; + } + case Item_func::MULT_EQUAL_FUNC: { + Item_equal *item_equal= (Item_equal *) cond; + if (!(value= item_equal->get_const())) + DBUG_RETURN(0); + Item_equal_iterator it(*item_equal); + ref_tables= value->used_tables(); + while ((field_item= it++)) + { + Field *field= field_item->field; + Item_result cmp_type= field->cmp_type(); + if (!((ref_tables | field->table->map) & param_comp)) + { + tree= get_mm_parts(param, cond, field, Item_func::EQ_FUNC, + value,cmp_type); + ftree= !ftree ? tree : tree_and(param, ftree, tree); + } + } + + DBUG_RETURN(ftree); + } + default: if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) { - Field *field=((Item_field*) (cond_func->arguments()[0]))->field; - Item_result cmp_type=field->cmp_type(); - DBUG_RETURN(tree_and(param, - get_mm_parts(param, cond_func, field, - Item_func::GE_FUNC, - cond_func->arguments()[1], cmp_type), - get_mm_parts(param, cond_func, field, - Item_func::LE_FUNC, - cond_func->arguments()[2], cmp_type))); + field_item= (Item_field*) (cond_func->arguments()[0]); + value= cond_func->arg_count > 1 ? 
cond_func->arguments()[1] : 0; } - DBUG_RETURN(0); + else if (cond_func->have_rev_func() && + cond_func->arguments()[1]->type() == Item::FIELD_ITEM) + { + field_item= (Item_field*) (cond_func->arguments()[1]); + value= cond_func->arguments()[0]; + } + else + DBUG_RETURN(0); } - if (cond_func->functype() == Item_func::IN_FUNC) - { // COND OR - Item_func_in *func=(Item_func_in*) cond_func; - if (func->key_item()->type() == Item::FIELD_ITEM) - { - Field *field=((Item_field*) (func->key_item()))->field; - Item_result cmp_type=field->cmp_type(); - tree= get_mm_parts(param,cond_func,field,Item_func::EQ_FUNC, - func->arguments()[1],cmp_type); - if (!tree) - DBUG_RETURN(tree); // Not key field - for (uint i=2 ; i < func->argument_count(); i++) - { - SEL_TREE *new_tree=get_mm_parts(param,cond_func,field, - Item_func::EQ_FUNC, - func->arguments()[i],cmp_type); - tree=tree_or(param,tree,new_tree); - } - DBUG_RETURN(tree); - } - DBUG_RETURN(0); // Can't optimize this IN - } - - if (ref_tables & ~(param->prev_tables | param->read_tables | - param->current_table)) - DBUG_RETURN(0); // Can't be calculated yet - if (!(ref_tables & param->current_table)) - DBUG_RETURN(new SEL_TREE(SEL_TREE::MAYBE)); // This may be false or true - - /* check field op const */ - /* btw, ft_func's arguments()[0] isn't FIELD_ITEM. SerG*/ - if (cond_func->arguments()[0]->type() == Item::FIELD_ITEM) - { - tree= get_mm_parts(param, cond_func, - ((Item_field*) (cond_func->arguments()[0]))->field, - cond_func->functype(), - cond_func->arg_count > 1 ? cond_func->arguments()[1] : - 0, - ((Item_field*) (cond_func->arguments()[0]))->field-> - cmp_type()); - } - /* check const op field */ - if (!tree && - cond_func->have_rev_func() && - cond_func->arguments()[1]->type() == Item::FIELD_ITEM) - { - DBUG_RETURN(get_mm_parts(param, cond_func, - ((Item_field*) - (cond_func->arguments()[1]))->field, - ((Item_bool_func2*) cond_func)->rev_functype(), - cond_func->arguments()[0], - ((Item_field*) - (cond_func->arguments()[1]))->field->cmp_type() - )); + + /* + If the where condition contains a predicate (ti.field op const), + then not only SELL_TREE for this predicate is built, but + the trees for the results of substitution of ti.field for + each tj.field belonging to the same multiple equality as ti.field + are built as well. + E.g. for WHERE t1.a=t2.a AND t2.a > 10 + a SEL_TREE for t2.a > 10 will be built for quick select from t2 + and + a SEL_TREE for t1.a > 10 will be built for quick select from t1. + */ + + for (uint i= 0; i < cond_func->arg_count; i++) + { + Item *arg= cond_func->arguments()[i]; + if (arg != field_item) + ref_tables|= arg->used_tables(); } - DBUG_RETURN(tree); + Field *field= field_item->field; + Item_result cmp_type= field->cmp_type(); + if (!((ref_tables | field->table->map) & param_comp)) + ftree= get_func_mm_tree(param, cond_func, field, value, cmp_type); + Item_equal *item_equal= field_item->item_equal; + if (item_equal) + { + Item_equal_iterator it(*item_equal); + Item_field *item; + while ((item= it++)) + { + Field *f= item->field; + if (field->eq(f)) + continue; + if (!((ref_tables | f->table->map) & param_comp)) + { + tree= get_func_mm_tree(param, cond_func, f, value, cmp_type); + ftree= !ftree ? 
tree : tree_and(param, ftree, tree); + } + } + } + DBUG_RETURN(ftree); } static SEL_TREE * get_mm_parts(PARAM *param, COND *cond_func, Field *field, - Item_func::Functype type, + Item_func::Functype type, Item *value, Item_result cmp_type) { - bool ne_func= FALSE; DBUG_ENTER("get_mm_parts"); if (field->table != param->table) DBUG_RETURN(0); - if (type == Item_func::NE_FUNC) - { - ne_func= TRUE; - type= Item_func::LT_FUNC; - } - KEY_PART *key_part = param->key_parts; KEY_PART *end = param->key_parts_end; SEL_TREE *tree=0; @@ -966,22 +3568,15 @@ get_mm_parts(PARAM *param, COND *cond_func, Field *field, else { // This key may be used later - if (!(sel_arg= new SEL_ARG(SEL_ARG::MAYBE_KEY))) + if (!(sel_arg= new SEL_ARG(SEL_ARG::MAYBE_KEY))) DBUG_RETURN(0); // OOM } sel_arg->part=(uchar) key_part->part; tree->keys[key_part->key]=sel_add(tree->keys[key_part->key],sel_arg); + tree->keys_map.set_bit(key_part->key); } } - if (ne_func) - { - SEL_TREE *tree2= get_mm_parts(param, cond_func, - field, Item_func::GT_FUNC, - value, cmp_type); - if (tree2) - tree= tree_or(param,tree,tree2); - } DBUG_RETURN(tree); } @@ -992,6 +3587,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, { uint maybe_null=(uint) field->real_maybe_null(), copies; uint field_length=field->pack_length()+maybe_null; + bool optimize_range; SEL_ARG *tree; char *str, *str2; DBUG_ENTER("get_mm_leaf"); @@ -1031,6 +3627,9 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, !(conf_func->compare_collation()->state & MY_CS_BINSORT)) DBUG_RETURN(0); + optimize_range= field->optimize_range(param->real_keynr[key_part->key], + key_part->part); + if (type == Item_func::LIKE_FUNC) { bool like_error; @@ -1038,8 +3637,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, String tmp(buff1,sizeof(buff1),value->collation.collation),*res; uint length,offset,min_length,max_length; - if (!field->optimize_range(param->real_keynr[key_part->key], - key_part->part)) + if (!optimize_range) DBUG_RETURN(0); // Can't optimize this if (!(res= value->val_str(&tmp))) DBUG_RETURN(&null_element); @@ -1104,8 +3702,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, DBUG_RETURN(new SEL_ARG(field,min_str,max_str)); } - if (!field->optimize_range(param->real_keynr[key_part->key], - key_part->part) && + if (!optimize_range && type != Item_func::EQ_FUNC && type != Item_func::EQUAL_FUNC) DBUG_RETURN(0); // Can't optimize this @@ -1118,15 +3715,16 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, value->result_type() != STRING_RESULT && field->cmp_type() != value->result_type()) DBUG_RETURN(0); - - if (value->save_in_field(field, 1) < 0) + + if (value->save_in_field_no_warnings(field, 1) < 0) { /* This happens when we try to insert a NULL field in a not null column */ - DBUG_RETURN(&null_element); // cmp with NULL is never true + DBUG_RETURN(&null_element); // cmp with NULL is never TRUE } /* Get local copy of key */ copies= 1; - if (field->key_type() == HA_KEYTYPE_VARTEXT) + if (field->key_type() == HA_KEYTYPE_VARTEXT1 || + field->key_type() == HA_KEYTYPE_VARTEXT2) copies= 2; str= str2= (char*) alloc_root(param->mem_root, (key_part->store_length)*copies+1); @@ -1134,8 +3732,7 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, DBUG_RETURN(0); if (maybe_null) *str= (char) field->is_real_null(); // Set to 1 if null - field->get_key_image(str+maybe_null, key_part->length, - field->charset(), 
key_part->image_type); + field->get_key_image(str+maybe_null, key_part->length, key_part->image_type); if (copies == 2) { /* @@ -1227,8 +3824,8 @@ get_mm_leaf(PARAM *param, COND *conf_func, Field *field, KEY_PART *key_part, ** If tree is 0 it means that the condition can't be tested. It refers ** to a non existent table or to a field in current table with isn't a key. ** The different tree flags: -** IMPOSSIBLE: Condition is never true -** ALWAYS: Condition is always true +** IMPOSSIBLE: Condition is never TRUE +** ALWAYS: Condition is always TRUE ** MAYBE: Condition may exists when tables are read ** MAYBE_KEY: Condition refers to a key that may be used in join loop ** KEY_RANGE: Condition uses a key @@ -1298,6 +3895,8 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) DBUG_RETURN(tree1); } + key_map result_keys; + result_keys.clear_all(); /* Join the trees key per key */ SEL_ARG **key1,**key2,**end; for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ; @@ -1314,17 +3913,60 @@ tree_and(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) if (*key1 && (*key1)->type == SEL_ARG::IMPOSSIBLE) { tree1->type= SEL_TREE::IMPOSSIBLE; + DBUG_RETURN(tree1); + } + result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG + if (*key1) (*key1)->test_use_count(*key1); #endif - break; - } } } + tree1->keys_map= result_keys; + /* dispose index_merge if there is a "range" option */ + if (!result_keys.is_clear_all()) + { + tree1->merges.empty(); + DBUG_RETURN(tree1); + } + + /* ok, both trees are index_merge trees */ + imerge_list_and_list(&tree1->merges, &tree2->merges); DBUG_RETURN(tree1); } +/* + Check if two SEL_TREES can be combined into one (i.e. a single key range + read can be constructed for "cond_of_tree1 OR cond_of_tree2" ) without + using index_merge. 
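   For example, trees for "key1part1 < 10" and "key1part1 > 20" share a key
   and start at the same key part, so they can be ORed into a single range
   list on key1, whereas trees that constrain two different indexes have no
   common key and cannot.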
+*/ + +bool sel_trees_can_be_ored(SEL_TREE *tree1, SEL_TREE *tree2, PARAM* param) +{ + key_map common_keys= tree1->keys_map; + DBUG_ENTER("sel_trees_can_be_ored"); + common_keys.intersect(tree2->keys_map); + + if (common_keys.is_clear_all()) + DBUG_RETURN(FALSE); + + /* trees have a common key, check if they refer to same key part */ + SEL_ARG **key1,**key2; + for (uint key_no=0; key_no < param->keys; key_no++) + { + if (common_keys.is_set(key_no)) + { + key1= tree1->keys + key_no; + key2= tree2->keys + key_no; + if ((*key1)->part == (*key2)->part) + { + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); +} static SEL_TREE * tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) @@ -1341,19 +3983,62 @@ tree_or(PARAM *param,SEL_TREE *tree1,SEL_TREE *tree2) if (tree2->type == SEL_TREE::MAYBE) DBUG_RETURN(tree2); - /* Join the trees key per key */ - SEL_ARG **key1,**key2,**end; - SEL_TREE *result=0; - for (key1= tree1->keys,key2= tree2->keys,end=key1+param->keys ; - key1 != end ; key1++,key2++) + SEL_TREE *result= 0; + key_map result_keys; + result_keys.clear_all(); + if (sel_trees_can_be_ored(tree1, tree2, param)) { - *key1=key_or(*key1,*key2); - if (*key1) + /* Join the trees key per key */ + SEL_ARG **key1,**key2,**end; + for (key1= tree1->keys,key2= tree2->keys,end= key1+param->keys ; + key1 != end ; key1++,key2++) { - result=tree1; // Added to tree1 + *key1=key_or(*key1,*key2); + if (*key1) + { + result=tree1; // Added to tree1 + result_keys.set_bit(key1 - tree1->keys); #ifdef EXTRA_DEBUG - (*key1)->test_use_count(*key1); + (*key1)->test_use_count(*key1); #endif + } + } + if (result) + result->keys_map= result_keys; + } + else + { + /* ok, two trees have KEY type but cannot be used without index merge */ + if (tree1->merges.is_empty() && tree2->merges.is_empty()) + { + SEL_IMERGE *merge; + /* both trees are "range" trees, produce new index merge structure */ + if (!(result= new SEL_TREE()) || !(merge= new SEL_IMERGE()) || + (result->merges.push_back(merge)) || + (merge->or_sel_tree(param, tree1)) || + (merge->or_sel_tree(param, tree2))) + result= NULL; + else + result->type= tree1->type; + } + else if (!tree1->merges.is_empty() && !tree2->merges.is_empty()) + { + if (imerge_list_or_list(param, &tree1->merges, &tree2->merges)) + result= new SEL_TREE(SEL_TREE::ALWAYS); + else + result= tree1; + } + else + { + /* one tree is index merge tree and another is range tree */ + if (tree1->merges.is_empty()) + swap_variables(SEL_TREE*, tree1, tree2); + + /* add tree2 to tree1->merges, checking if it collapses to ALWAYS */ + if (imerge_list_or_tree(param, &tree1->merges, tree2)) + result= new SEL_TREE(SEL_TREE::ALWAYS); + else + result= tree1; } } DBUG_RETURN(result); @@ -1402,7 +4087,6 @@ and_all_keys(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) } - static SEL_ARG * key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) { @@ -1472,6 +4156,13 @@ key_and(SEL_ARG *key1,SEL_ARG *key2,uint clone_flag) return 0; // Can't optimize this } + if ((key1->min_flag | key2->min_flag) & GEOM_FLAG) + { + key1->free_tree(); + key2->free_tree(); + return 0; // Can't optimize this + } + key1->use_count--; key2->use_count--; SEL_ARG *e1=key1->first(), *e2=key2->first(), *new_tree=0; @@ -1895,7 +4586,7 @@ SEL_ARG::find_range(SEL_ARG *key) SYNOPSIS tree_delete() key Key that is to be deleted from tree (this) - + NOTE This also frees all sub trees that is used by the element @@ -2142,7 +4833,7 @@ SEL_ARG *rb_delete_fixup(SEL_ARG *root,SEL_ARG *key,SEL_ARG *par) } - /* Test that the proporties for a red-black tree holds 
*/ + /* Test that the properties for a red-black tree hold */ #ifdef EXTRA_DEBUG int test_rb_tree(SEL_ARG *element,SEL_ARG *parent) @@ -2215,7 +4906,7 @@ void SEL_ARG::test_use_count(SEL_ARG *root) ulong count=count_key_part_usage(root,pos->next_key_part); if (count > pos->next_key_part->use_count) { - sql_print_information("Use_count: Wrong count for key at %lx, %lu should be %lu", + sql_print_information("Use_count: Wrong count for key at 0x%lx, %lu should be %lu", pos,pos->next_key_part->use_count,count); return; } @@ -2223,68 +4914,167 @@ void SEL_ARG::test_use_count(SEL_ARG *root) } } if (e_count != elements) - sql_print_warning("Wrong use count: %u (should be %u) for tree at %lx", + sql_print_warning("Wrong use count: %u (should be %u) for tree at 0x%lx", e_count, elements, (gptr) this); } #endif +/* + Calculate estimate of number of records that will be retrieved by a range + scan on given index using given SEL_ARG intervals tree. + SYNOPSIS + check_quick_select + param Parameter from test_quick_select + idx Number of index to use in PARAM::key SEL_TREE::key + tree Transformed selection condition, tree->key[idx] holds intervals + tree to be used for scanning. + NOTES + param->is_ror_scan is set to reflect if the key scan is a ROR (see + is_key_scan_ror function for more info) + param->table->quick_*, param->range_count (and maybe others) are + updated with data of given key scan, see check_quick_keys for details. -/***************************************************************************** -** Check how many records we will find by using the found tree -*****************************************************************************/ + RETURN + Estimate # of records to be retrieved. + HA_POS_ERROR if estimate calculation failed due to table handler problems. + +*/ static ha_rows check_quick_select(PARAM *param,uint idx,SEL_ARG *tree) { ha_rows records; + bool cpk_scan; + uint key; DBUG_ENTER("check_quick_select"); + param->is_ror_scan= FALSE; + if (!tree) DBUG_RETURN(HA_POS_ERROR); // Can't use it param->max_key_part=0; param->range_count=0; + key= param->real_keynr[idx]; + if (tree->type == SEL_ARG::IMPOSSIBLE) DBUG_RETURN(0L); // Impossible select. return if (tree->type != SEL_ARG::KEY_RANGE || tree->part != 0) DBUG_RETURN(HA_POS_ERROR); // Don't use tree + + enum ha_key_alg key_alg= param->table->key_info[key].algorithm; + if ((key_alg != HA_KEY_ALG_BTREE) && (key_alg!= HA_KEY_ALG_UNDEF)) + { + /* Records are not ordered by rowid for other types of indexes. */ + cpk_scan= FALSE; + } + else + { + /* + Clustered PK scan is a special case, check_quick_keys doesn't recognize + CPK scans as ROR scans (while actually any CPK scan is a ROR scan). + */ + cpk_scan= (param->table->primary_key == param->real_keynr[idx]) && + param->table->file->primary_key_is_clustered(); + param->is_ror_scan= !cpk_scan; + } + records=check_quick_keys(param,idx,tree,param->min_key,0,param->max_key,0); if (records != HA_POS_ERROR) { - uint key=param->real_keynr[idx]; param->table->quick_keys.set_bit(key); param->table->quick_rows[key]=records; param->table->quick_key_parts[key]=param->max_key_part+1; + + if (cpk_scan) + param->is_ror_scan= TRUE; } DBUG_PRINT("exit", ("Records: %lu", (ulong) records)); DBUG_RETURN(records); } +/* + Recursively calculate estimate of # rows that will be retrieved by + key scan on key idx. + SYNOPSIS + check_quick_keys() + param Parameter from test_quick_select function.
+ idx Number of key to use in PARAM::keys in list of used keys + (param->real_keynr[idx] holds the key number in table) + key_tree SEL_ARG tree being examined. + min_key Buffer with partial min key value tuple + min_key_flag + max_key Buffer with partial max key value tuple + max_key_flag + + NOTES + The function does the recursive descent on the tree via SEL_ARG::left, + SEL_ARG::right, and SEL_ARG::next_key_part edges. The #rows estimates + are calculated using records_in_range calls at the leaf nodes and then + summed. + + param->min_key and param->max_key are used to hold prefixes of key value + tuples. + + The side effects are: + + param->max_key_part is updated to hold the maximum number of key parts used + in scan minus 1. + + param->range_count is incremented if the function finds a range that + wasn't counted by the caller. + + param->is_ror_scan is cleared if the function detects that the key scan is + not a Rowid-Ordered Retrieval scan ( see comments for is_key_scan_ror + function for description of which key scans are ROR scans) +*/ + static ha_rows check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, char *min_key,uint min_key_flag, char *max_key, uint max_key_flag) { - ha_rows records=0,tmp; + ha_rows records=0, tmp; + uint tmp_min_flag, tmp_max_flag, keynr, min_key_length, max_key_length; + char *tmp_min_key, *tmp_max_key; param->max_key_part=max(param->max_key_part,key_tree->part); if (key_tree->left != &null_element) { + /* + There are at least two intervals for current key part, i.e. condition + was converted to something like + (keyXpartY less/equals c1) OR (keyXpartY more/equals c2). + This is not a ROR scan if the key is not Clustered Primary Key. + */ + param->is_ror_scan= FALSE; records=check_quick_keys(param,idx,key_tree->left,min_key,min_key_flag, max_key,max_key_flag); if (records == HA_POS_ERROR) // Impossible return records; } - uint tmp_min_flag,tmp_max_flag,keynr; - char *tmp_min_key=min_key,*tmp_max_key=max_key; - + tmp_min_key= min_key; + tmp_max_key= max_key; key_tree->store(param->key[idx][key_tree->part].store_length, &tmp_min_key,min_key_flag,&tmp_max_key,max_key_flag); - uint min_key_length= (uint) (tmp_min_key- param->min_key); - uint max_key_length= (uint) (tmp_max_key- param->max_key); + min_key_length= (uint) (tmp_min_key- param->min_key); + max_key_length= (uint) (tmp_max_key- param->max_key); + + if (param->is_ror_scan) + { + /* + If the index doesn't cover entire key, mark the scan as non-ROR scan. + Actually we're cutting off some ROR scans here. + */ + uint16 fieldnr= param->table->key_info[param->real_keynr[idx]]. + key_part[key_tree->part].fieldnr - 1; + if (param->table->field[fieldnr]->key_length() != + param->key[idx][key_tree->part].length) + param->is_ror_scan= FALSE; + } if (key_tree->next_key_part && key_tree->next_key_part->part == key_tree->part+1 && @@ -2299,6 +5089,12 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, tmp_max_key, max_key_flag | key_tree->max_flag); goto end; // Ugly, but efficient } + else + { + /* The interval for current key part is not c1 <= keyXpartY <= c1 */ + param->is_ror_scan= FALSE; + } + tmp_min_flag=key_tree->min_flag; tmp_max_flag=key_tree->max_flag; if (!tmp_min_flag) @@ -2327,6 +5123,24 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, tmp=1; // Max one record else { + if (param->is_ror_scan) + { + /* + If we get here, the condition on the key was converted to form + "(keyXpart1 = c1) AND ... 
AND (keyXpart{key_tree->part - 1} = cN) AND + somecond(keyXpart{key_tree->part})" + Check if + somecond is "keyXpart{key_tree->part} = const" and + uncovered "tail" of KeyX parts is either empty or is identical to + first members of clustered primary key. + */ + if (!(min_key_length == max_key_length && + !memcmp(min_key,max_key, (uint) (tmp_max_key - max_key)) && + !key_tree->min_flag && !key_tree->max_flag && + is_key_scan_ror(param, keynr, key_tree->part + 1))) + param->is_ror_scan= FALSE; + } + if (tmp_min_flag & GEOM_FLAG) { key_range min_range; @@ -2363,6 +5177,13 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, records+=tmp; if (key_tree->right != &null_element) { + /* + There are at least two intervals for current key part, i.e. condition + was converted to something like + (keyXpartY less/equals c1) OR (keyXpartY more/equals c2). + This is not a ROR scan if the key is not Clustered Primary Key. + */ + param->is_ror_scan= FALSE; tmp=check_quick_keys(param,idx,key_tree->right,min_key,min_key_flag, max_key,max_key_flag); if (tmp == HA_POS_ERROR) @@ -2373,22 +5194,109 @@ check_quick_keys(PARAM *param,uint idx,SEL_ARG *key_tree, } -/**************************************************************************** -** change a tree to a structure to be used by quick_select -** This uses it's own malloc tree -****************************************************************************/ +/* + Check if key scan on given index with equality conditions on first n key + parts is a ROR scan. + + SYNOPSIS + is_key_scan_ror() + param Parameter from test_quick_select + keynr Number of key in the table. The key must not be a clustered + primary key. + nparts Number of first key parts for which equality conditions + are present. + + NOTES + ROR (Rowid Ordered Retrieval) key scan is a key scan that produces + ordered sequence of rowids (ha_xxx::cmp_ref is the comparison function) + + An index scan is a ROR scan if it is done using a condition in form + + "key1_1=c_1 AND ... AND key1_n=c_n" (1) + + where the index is defined on (key1_1, ..., key1_N [,a_1, ..., a_n]) + + and the table has a clustered Primary Key + + PRIMARY KEY(a_1, ..., a_n, b1, ..., b_k) with first key parts being + identical to uncovered parts of the key being scanned (2) + + Scans on HASH indexes are not ROR scans, + any range scan on clustered primary key is ROR scan (3) + + Check (1) is made in check_quick_keys() + Check (3) is made in check_quick_select() + Check (2) is made by this function. + + RETURN + TRUE If the scan is ROR-scan + FALSE otherwise +*/ + +static bool is_key_scan_ror(PARAM *param, uint keynr, uint8 nparts) +{ + KEY *table_key= param->table->key_info + keynr; + KEY_PART_INFO *key_part= table_key->key_part + nparts; + KEY_PART_INFO *key_part_end= table_key->key_part + + table_key->key_parts; + + if (key_part == key_part_end) + return TRUE; + uint pk_number= param->table->primary_key; + if (!param->table->file->primary_key_is_clustered() || pk_number == MAX_KEY) + return FALSE; + + KEY_PART_INFO *pk_part= param->table->key_info[pk_number].key_part; + KEY_PART_INFO *pk_part_end= pk_part + + param->table->key_info[pk_number].key_parts; + for(;(key_part!=key_part_end) && (pk_part != pk_part_end); + ++key_part, ++pk_part) + { + if ((key_part->field != pk_part->field) || + (key_part->length != pk_part->length)) + return FALSE; + } + return (key_part == key_part_end); } + + +/* + Create a QUICK_RANGE_SELECT from given key and SEL_ARG tree for that key.
+ + SYNOPSIS + get_quick_select() + param + idx Index of used key in param->key. + key_tree SEL_ARG tree for the used key + parent_alloc If not NULL, use it to allocate memory for + quick select data. Otherwise use quick->alloc. + NOTES + The caller must call QUICK_SELECT::init for returned quick select + + CAUTION! This function may change thd->mem_root to a MEM_ROOT which will be + deallocated when the returned quick select is deleted. + + RETURN + NULL on error + otherwise created quick select +*/ -static QUICK_SELECT * -get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) +QUICK_RANGE_SELECT * +get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree, + MEM_ROOT *parent_alloc) { - QUICK_SELECT *quick; + QUICK_RANGE_SELECT *quick; DBUG_ENTER("get_quick_select"); if (param->table->key_info[param->real_keynr[idx]].flags & HA_SPATIAL) - quick=new QUICK_SELECT_GEOM(param->thd, param->table, param->real_keynr[idx], - 0); + quick=new QUICK_RANGE_SELECT_GEOM(param->thd, param->table, + param->real_keynr[idx], + test(parent_alloc), + parent_alloc); else - quick=new QUICK_SELECT(param->thd, param->table, param->real_keynr[idx]); + quick=new QUICK_RANGE_SELECT(param->thd, param->table, + param->real_keynr[idx], + test(parent_alloc)); if (quick) { @@ -2402,9 +5310,10 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) else { quick->key_parts=(KEY_PART*) - memdup_root(&quick->alloc,(char*) param->key[idx], - sizeof(KEY_PART)* - param->table->key_info[param->real_keynr[idx]].key_parts); + memdup_root(parent_alloc? parent_alloc : &quick->alloc, + (char*) param->key[idx], + sizeof(KEY_PART)* + param->table->key_info[param->real_keynr[idx]].key_parts); } } DBUG_RETURN(quick); @@ -2414,9 +5323,8 @@ get_quick_select(PARAM *param,uint idx,SEL_ARG *key_tree) /* ** Fix this to get all possible sub_ranges */ - -static bool -get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, +bool +get_quick_keys(PARAM *param,QUICK_RANGE_SELECT *quick,KEY_PART *key, SEL_ARG *key_tree,char *min_key,uint min_key_flag, char *max_key, uint max_key_flag) { @@ -2512,7 +5420,8 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, set_if_bigger(quick->max_used_key_length,range->min_length); set_if_bigger(quick->max_used_key_length,range->max_length); set_if_bigger(quick->used_key_parts, (uint) key_tree->part+1); - quick->ranges.push_back(range); + if (insert_dynamic(&quick->ranges, (gptr)&range)) + return 1; end: if (key_tree->right != &null_element) @@ -2526,12 +5435,12 @@ get_quick_keys(PARAM *param,QUICK_SELECT *quick,KEY_PART *key, Return 1 if there is only one range and this uses the whole primary key */ -bool QUICK_SELECT::unique_key_range() +bool QUICK_RANGE_SELECT::unique_key_range() { if (ranges.elements == 1) { - QUICK_RANGE *tmp; - if (((tmp=ranges.head())->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) + QUICK_RANGE *tmp= *((QUICK_RANGE**)ranges.buffer); + if ((tmp->flag & (EQ_RANGE | NULL_RANGE)) == EQ_RANGE) { KEY *key=head->key_info+index; return ((key->flags & (HA_NOSAME | HA_END_SPACE_KEY)) == HA_NOSAME && @@ -2542,11 +5451,11 @@ bool QUICK_SELECT::unique_key_range() } -/* Returns true if any part of the key is NULL */ +/* Returns TRUE if any part of the key is NULL */ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) { - for (const char *end=key+length ; + for (const char *end=key+length ; key < end; key+= key_part++->store_length) { @@ -2557,15 +5466,60 @@ static bool null_part_in_key(KEY_PART *key_part, const char *key, uint length) } +bool 
QUICK_SELECT_I::check_if_keys_used(List<Item> *fields) +{ + return check_if_key_used(head, index, *fields); +} + +bool QUICK_INDEX_MERGE_SELECT::check_if_keys_used(List<Item> *fields) +{ + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (check_if_key_used(head, quick->index, *fields)) + return 1; + } + return 0; +} + +bool QUICK_ROR_INTERSECT_SELECT::check_if_keys_used(List<Item> *fields) +{ + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (check_if_key_used(head, quick->index, *fields)) + return 1; + } + return 0; +} + +bool QUICK_ROR_UNION_SELECT::check_if_keys_used(List<Item> *fields) +{ + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (quick->check_if_keys_used(fields)) + return 1; + } + return 0; +} + + /**************************************************************************** Create a QUICK RANGE based on a key + This allocates things in a new memory root, as this may be called many times + during a query. ****************************************************************************/ -QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) +QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + TABLE_REF *ref) { MEM_ROOT *old_root= thd->mem_root; /* The following call may change thd->mem_root */ - QUICK_SELECT *quick= new QUICK_SELECT(thd, table, ref->key); + QUICK_RANGE_SELECT *quick= new QUICK_RANGE_SELECT(thd, table, ref->key, 0); KEY *key_info = &table->key_info[ref->key]; KEY_PART *key_part; QUICK_RANGE *range; @@ -2573,15 +5527,15 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) if (!quick) return 0; /* no ranges found */ - if (cp_buffer_from_ref(ref)) + if (quick->init()) { - if (thd->is_fatal_error) - goto err; // out of memory - goto ok; // empty range + delete quick; + goto err; } - if (!(range= new QUICK_RANGE())) - goto err; // out of memory + if (cp_buffer_from_ref(ref) && thd->is_fatal_error || + !(range= new QUICK_RANGE())) + goto err; // out of memory range->min_key=range->max_key=(char*) ref->key_buff; range->min_length=range->max_length=ref->key_length; @@ -2601,10 +5555,10 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) key_part->store_length= key_info->key_part[part].store_length; key_part->null_bit= key_info->key_part[part].null_bit; } - if (quick->ranges.push_back(range)) + if (insert_dynamic(&quick->ranges,(gptr)&range)) goto err; - /* + /* Add a NULL range if REF_OR_NULL optimization is used. For example: if we have "WHERE A=2 OR A IS NULL" we created the (A=2) range above @@ -2620,11 +5574,10 @@ QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, TABLE_REF *ref) EQ_RANGE))) goto err; *ref->null_ref_key= 0; // Clear null byte - if (quick->ranges.push_back(null_range)) + if (insert_dynamic(&quick->ranges,(gptr)&null_range)) goto err; } -ok: thd->mem_root= old_root; return quick; @@ -2634,11 +5587,513 @@ err: return 0; } - /* get next possible record using quick-struct */ -int QUICK_SELECT::get_next() +/* + Perform key scans for all used indexes (except CPK), get rowids and merge + them into an ordered non-recurrent sequence of rowids. + + The merge/duplicate removal is performed using Unique class. We put all + rowids into Unique, get the sorted sequence and destroy the Unique. 
+ + If table has a clustered primary key that covers all rows (TRUE for bdb + and innodb currently) and one of the index_merge scans is a scan on PK, + then + rows that will be retrieved by PK scan are not put into Unique and + primary key scan is not performed here, it is performed later separately. + + RETURN + 0 OK + other error +*/ + +int QUICK_INDEX_MERGE_SELECT::read_keys_and_merge() { - DBUG_ENTER("get_next"); + List_iterator_fast<QUICK_RANGE_SELECT> cur_quick_it(quick_selects); + QUICK_RANGE_SELECT* cur_quick; + int result; + Unique *unique; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::prepare_unique"); + + /* We're going to just read rowids. */ + head->file->extra(HA_EXTRA_KEYREAD); + + /* + Make innodb retrieve all PK member fields, so + * ha_innobase::position (which uses them) call works. + * We can filter out rows that will be retrieved by clustered PK. + (This also creates a deficiency - it is possible that we will retrieve + parts of key that are not used by current query at all.) + */ + head->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); + + cur_quick_it.rewind(); + cur_quick= cur_quick_it++; + DBUG_ASSERT(cur_quick); + + /* + We reuse the same instance of handler so we need to call both init and + reset here. + */ + if (cur_quick->init()) + DBUG_RETURN(1); + cur_quick->reset(); + + unique= new Unique(refpos_order_cmp, (void *)head->file, + head->file->ref_length, + thd->variables.sortbuff_size); + if (!unique) + DBUG_RETURN(1); + for (;;) + { + while ((result= cur_quick->get_next()) == HA_ERR_END_OF_FILE) + { + cur_quick->range_end(); + cur_quick= cur_quick_it++; + if (!cur_quick) + break; + + if (cur_quick->file->inited != handler::NONE) + cur_quick->file->ha_index_end(); + if (cur_quick->init()) + DBUG_RETURN(1); + /* QUICK_RANGE_SELECT::reset never fails */ + cur_quick->reset(); + } + + if (result) + { + if (result != HA_ERR_END_OF_FILE) + { + cur_quick->range_end(); + DBUG_RETURN(result); + } + break; + } + + if (thd->killed) + DBUG_RETURN(1); + + /* skip row if it will be retrieved by clustered PK scan */ + if (pk_quick_select && pk_quick_select->row_in_ranges()) + continue; + + cur_quick->file->position(cur_quick->record); + result= unique->unique_add((char*)cur_quick->file->ref); + if (result) + DBUG_RETURN(1); + + } + + /* ok, all row ids are in Unique */ + result= unique->get(head); + delete unique; + doing_pk_scan= FALSE; + /* start table scan */ + init_read_record(&read_record, thd, head, (SQL_SELECT*) 0, 1, 1); + /* index_merge currently doesn't support "using index" at all */ + head->file->extra(HA_EXTRA_NO_KEYREAD); + + DBUG_RETURN(result); +} + + +/* + Get next row for index_merge. + NOTES + The rows are read from + 1. rowids stored in Unique. + 2. QUICK_RANGE_SELECT with clustered primary key (if any). + The sets of rows retrieved in 1) and 2) are guaranteed to be disjoint. +*/ + +int QUICK_INDEX_MERGE_SELECT::get_next() +{ + int result; + DBUG_ENTER("QUICK_INDEX_MERGE_SELECT::get_next"); + + if (doing_pk_scan) + DBUG_RETURN(pk_quick_select->get_next()); + + result= read_record.read_record(&read_record); + + if (result == -1) + { + result= HA_ERR_END_OF_FILE; + end_read_record(&read_record); + /* All rows from Unique have been retrieved, do a clustered PK scan */ + if (pk_quick_select) + { + doing_pk_scan= TRUE; + if ((result= pk_quick_select->init())) + DBUG_RETURN(result); + pk_quick_select->reset(); + DBUG_RETURN(pk_quick_select->get_next()); + } + } + + DBUG_RETURN(result); +} + + +/* + Retrieve next record. 
+ SYNOPSIS + QUICK_ROR_INTERSECT_SELECT::get_next() + + NOTES + Invariant on enter/exit: all intersected selects have retrieved all index + records with rowid <= some_rowid_val and no intersected select has + retrieved any index records with rowid > some_rowid_val. + We start fresh and loop until we have retrieved the same rowid in each of + the key scans or we got an error. + + If a Clustered PK scan is present, it is used only to check if row + satisfies its condition (and never used for row retrieval). + + RETURN + 0 - Ok + other - Error code if any error occurred. +*/ + +int QUICK_ROR_INTERSECT_SELECT::get_next() +{ + List_iterator_fast<QUICK_RANGE_SELECT> quick_it(quick_selects); + QUICK_RANGE_SELECT* quick; + int error, cmp; + uint last_rowid_count=0; + DBUG_ENTER("QUICK_ROR_INTERSECT_SELECT::get_next"); + + /* Get a rowid for first quick and save it as a 'candidate' */ + quick= quick_it++; + if (cpk_quick) + { + do { + error= quick->get_next(); + }while (!error && !cpk_quick->row_in_ranges()); + } + else + error= quick->get_next(); + + if (error) + DBUG_RETURN(error); + + quick->file->position(quick->record); + memcpy(last_rowid, quick->file->ref, head->file->ref_length); + last_rowid_count= 1; + + while (last_rowid_count < quick_selects.elements) + { + if (!(quick= quick_it++)) + { + quick_it.rewind(); + quick= quick_it++; + } + + do { + if ((error= quick->get_next())) + DBUG_RETURN(error); + quick->file->position(quick->record); + cmp= head->file->cmp_ref(quick->file->ref, last_rowid); + } while (cmp < 0); + + /* Ok, current select 'caught up' and returned ref >= cur_ref */ + if (cmp > 0) + { + /* Found a row with ref > cur_ref. Make it a new 'candidate' */ + if (cpk_quick) + { + while (!cpk_quick->row_in_ranges()) + { + if ((error= quick->get_next())) + DBUG_RETURN(error); + } + } + memcpy(last_rowid, quick->file->ref, head->file->ref_length); + last_rowid_count= 1; + } + else + { + /* current 'candidate' row confirmed by this select */ + last_rowid_count++; + } + } + + /* We get here iff we got the same row ref in all scans. */ + if (need_to_fetch_row) + error= head->file->rnd_pos(head->record[0], last_rowid); + DBUG_RETURN(error); +} + + +/* + Retrieve next record. + SYNOPSIS + QUICK_ROR_UNION_SELECT::get_next() + + NOTES + Enter/exit invariant: + For each quick select in the queue a {key,rowid} tuple has been + retrieved but the corresponding row hasn't been passed to output. + + RETURN + 0 - Ok + other - Error code if any error occurred. +*/ + +int QUICK_ROR_UNION_SELECT::get_next() +{ + int error, dup_row; + QUICK_SELECT_I *quick; + byte *tmp; + DBUG_ENTER("QUICK_ROR_UNION_SELECT::get_next"); + + do + { + if (!queue.elements) + DBUG_RETURN(HA_ERR_END_OF_FILE); + /* Ok, we have a queue with >= 1 scans */ + + quick= (QUICK_SELECT_I*)queue_top(&queue); + memcpy(cur_rowid, quick->last_rowid, rowid_length); + + /* put into queue rowid from the same stream as top element */ + if ((error= quick->get_next())) + { + if (error != HA_ERR_END_OF_FILE) + DBUG_RETURN(error); + queue_remove(&queue, 0); + } + else + { + quick->save_last_pos(); + queue_replaced(&queue); + } + + if (!have_prev_rowid) + { + /* No rows have been returned yet */ + dup_row= FALSE; + have_prev_rowid= TRUE; + } + else + dup_row= !head->file->cmp_ref(cur_rowid, prev_rowid); + }while (dup_row); + + tmp= cur_rowid; + cur_rowid= prev_rowid; + prev_rowid= tmp; + + error= head->file->rnd_pos(quick->record, prev_rowid); + DBUG_RETURN(error); +} + + +/* + Initialize data structures needed by get_next(). 
+ + SYNOPSIS + QUICK_RANGE_SELECT::get_next_init() + + DESCRIPTION + This is called from get_next() at its first call for an object. + It allocates memory buffers and sets size variables. + + RETURN + 0 OK. + != 0 Error. +*/ + +int QUICK_RANGE_SELECT::get_next_init(void) +{ + uint mrange_bufsiz; + byte *mrange_buff; + DBUG_ENTER("QUICK_RANGE_SELECT::get_next_init"); + + /* Do not allocate the buffers twice. */ + if (multi_range_length) + { + DBUG_ASSERT(multi_range_length == min(multi_range_count, ranges.elements)); + DBUG_RETURN(0); + } + + /* If the ranges are not yet initialized, wait for the next call. */ + if (! ranges.elements) + { + DBUG_RETURN(0); + } + + /* + Allocate the ranges array. + */ + multi_range_length= min(multi_range_count, ranges.elements); + DBUG_ASSERT(multi_range_length > 0); + while (multi_range_length && ! (multi_range= (KEY_MULTI_RANGE*) + my_malloc(multi_range_length * + sizeof(KEY_MULTI_RANGE), + MYF(MY_WME)))) + { + /* Try to shrink the buffers until it is 0. */ + multi_range_length/= 2; + } + if (! multi_range) + { + multi_range_length= 0; + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + + /* + Allocate the handler buffer if necessary. + */ + if (file->table_flags() & HA_NEED_READ_RANGE_BUFFER) + { + mrange_bufsiz= min(multi_range_bufsiz, + QUICK_SELECT_I::records * head->reclength); + + while (mrange_bufsiz && + ! my_multi_malloc(MYF(MY_WME), + &multi_range_buff, sizeof(*multi_range_buff), + &mrange_buff, mrange_bufsiz, + NullS)) + { + /* Try to shrink the buffers until both are 0. */ + mrange_bufsiz/= 2; + } + if (! multi_range_buff) + { + my_free((char*) multi_range, MYF(0)); + multi_range= NULL; + multi_range_length= 0; + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + + /* Initialize the handler buffer. */ + multi_range_buff->buffer= mrange_buff; + multi_range_buff->buffer_end= mrange_buff + mrange_bufsiz; + multi_range_buff->end_of_used_area= mrange_buff; + } + + /* Initialize the current QUICK_RANGE pointer. */ + cur_range= (QUICK_RANGE**) ranges.buffer; + DBUG_RETURN(0); +} + + +/* + Get next possible record using quick-struct. + + SYNOPSIS + QUICK_RANGE_SELECT::get_next() + + NOTES + Record is read into table->record[0] + + RETURN + 0 Found row + HA_ERR_END_OF_FILE No (more) rows in range + # Error code +*/ + +int QUICK_RANGE_SELECT::get_next() +{ + int result; + KEY_MULTI_RANGE *mrange; + key_range *start_key; + key_range *end_key; + DBUG_ENTER("QUICK_RANGE_SELECT::get_next"); + DBUG_ASSERT(multi_range_length && multi_range && + (cur_range >= (QUICK_RANGE**) ranges.buffer) && + (cur_range <= (QUICK_RANGE**) ranges.buffer + ranges.elements)); + + for (;;) + { + if (in_range) + { + /* We did already start to read this key. */ + result= file->read_multi_range_next(&mrange); + if (result != HA_ERR_END_OF_FILE) + { + in_range= ! result; + DBUG_RETURN(result); + } + } + + uint count= min(multi_range_length, ranges.elements - + (cur_range - (QUICK_RANGE**) ranges.buffer)); + if (count == 0) + { + /* Ranges have already been used up before. None is left for read. */ + in_range= FALSE; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + KEY_MULTI_RANGE *mrange_slot, *mrange_end; + for (mrange_slot= multi_range, mrange_end= mrange_slot+count; + mrange_slot < mrange_end; + mrange_slot++) + { + start_key= &mrange_slot->start_key; + end_key= &mrange_slot->end_key; + range= *(cur_range++); + + start_key->key= (const byte*) range->min_key; + start_key->length= range->min_length; + start_key->flag= ((range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : + (range->flag & EQ_RANGE) ? 
+ HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); + end_key->key= (const byte*) range->max_key; + end_key->length= range->max_length; + /* + We use HA_READ_AFTER_KEY here because if we are reading on a key + prefix. We want to find all keys with this prefix. + */ + end_key->flag= (range->flag & NEAR_MAX ? HA_READ_BEFORE_KEY : + HA_READ_AFTER_KEY); + + mrange_slot->range_flag= range->flag; + } + + result= file->read_multi_range_first(&mrange, multi_range, count, + sorted, multi_range_buff); + if (result != HA_ERR_END_OF_FILE) + { + in_range= ! result; + DBUG_RETURN(result); + } + in_range= FALSE; /* No matching rows; go to next set of ranges. */ + } +} + + +/* + Get the next record with a different prefix. + + SYNOPSIS + QUICK_RANGE_SELECT::get_next_prefix() + prefix_length length of cur_prefix + cur_prefix prefix of a key to be searched for + + DESCRIPTION + Each subsequent call to the method retrieves the first record that has a + prefix with length prefix_length different from cur_prefix, such that the + record with the new prefix is within the ranges described by + this->ranges. The record found is stored into the buffer pointed by + this->record. + The method is useful for GROUP-BY queries with range conditions to + discover the prefix of the next group that satisfies the range conditions. + + TODO + This method is a modified copy of QUICK_RANGE_SELECT::get_next(), so both + methods should be unified into a more general one to reduce code + duplication. + + RETURN + 0 on success + HA_ERR_END_OF_FILE if returned all keys + other if some error occurred +*/ + +int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length, byte *cur_prefix) +{ + DBUG_ENTER("QUICK_RANGE_SELECT::get_next_prefix"); for (;;) { @@ -2646,22 +6101,30 @@ int QUICK_SELECT::get_next() key_range start_key, end_key; if (range) { - // Already read through key - result= file->read_range_next(); - if (result != HA_ERR_END_OF_FILE) - DBUG_RETURN(result); + /* Read the next record in the same range with prefix after cur_prefix. */ + DBUG_ASSERT(cur_prefix); + result= file->index_read(record, cur_prefix, prefix_length, + HA_READ_AFTER_KEY); + if (result || (file->compare_key(file->end_range) <= 0)) + DBUG_RETURN(result); } - if (!(range= it++)) - DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used + uint count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer); + if (count == 0) + { + /* Ranges have already been used up before. None is left for read. */ + range= 0; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + range= *(cur_range++); start_key.key= (const byte*) range->min_key; - start_key.length= range->min_length; + start_key.length= min(range->min_length, prefix_length); start_key.flag= ((range->flag & NEAR_MIN) ? HA_READ_AFTER_KEY : (range->flag & EQ_RANGE) ? 
HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); end_key.key= (const byte*) range->max_key; - end_key.length= range->max_length; + end_key.length= min(range->max_length, prefix_length); /* We use READ_AFTER_KEY here because if we are reading on a key prefix we want to find all keys with this prefix @@ -2685,9 +6148,9 @@ int QUICK_SELECT::get_next() /* Get next for geometrical indexes */ -int QUICK_SELECT_GEOM::get_next() +int QUICK_RANGE_SELECT_GEOM::get_next() { - DBUG_ENTER(" QUICK_SELECT_GEOM::get_next"); + DBUG_ENTER("QUICK_RANGE_SELECT_GEOM::get_next"); for (;;) { @@ -2701,8 +6164,14 @@ int QUICK_SELECT_GEOM::get_next() DBUG_RETURN(result); } - if (!(range= it++)) - DBUG_RETURN(HA_ERR_END_OF_FILE); // All ranges used + uint count= ranges.elements - (cur_range - (QUICK_RANGE**) ranges.buffer); + if (count == 0) + { + /* Ranges have already been used up before. None is left for read. */ + range= 0; + DBUG_RETURN(HA_ERR_END_OF_FILE); + } + range= *(cur_range++); result= file->index_read(record, (byte*) range->min_key, @@ -2716,6 +6185,46 @@ int QUICK_SELECT_GEOM::get_next() /* + Check if current row will be retrieved by this QUICK_RANGE_SELECT + + NOTES + It is assumed that currently a scan is being done on another index + which reads all necessary parts of the index that is scanned by this + quick select. + The implementation does a binary search on sorted array of disjoint + ranges, without taking size of range into account. + + This function is used to filter out clustered PK scan rows in + index_merge quick select. + + RETURN + TRUE if current row will be retrieved by this quick select + FALSE if not +*/ + +bool QUICK_RANGE_SELECT::row_in_ranges() +{ + QUICK_RANGE *range; + uint min= 0; + uint max= ranges.elements - 1; + uint mid= (max + min)/2; + + while (min != max) + { + if (cmp_next(*(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid))) + { + /* current row value > mid->max */ + min= mid + 1; + } + else + max= mid; + mid= (min + max) / 2; + } + range= *(QUICK_RANGE**)dynamic_array_ptr(&ranges, mid); + return (!cmp_next(range) && !cmp_prev(range)); +} + +/* This is a hack: we inherit from QUICK_SELECT so that we can use the get_next() interface, but we have to hold a pointer to the original QUICK_SELECT because its data are used all over the place. What @@ -2725,16 +6234,17 @@ int QUICK_SELECT_GEOM::get_next() for now, this seems to work right at least. 
*/ -QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_SELECT *q, uint used_key_parts) - : QUICK_SELECT(*q), rev_it(rev_ranges) +QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q, + uint used_key_parts) + : QUICK_RANGE_SELECT(*q), rev_it(rev_ranges) { QUICK_RANGE *r; - it.rewind(); - for (r = it++; r; r = it++) - { - rev_ranges.push_front(r); - } + QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer; + QUICK_RANGE **last_range= pr + ranges.elements; + for (; pr!=last_range; pr++) + rev_ranges.push_front(*pr); + /* Remove EQ_RANGE flag for keys that are not using the full key */ for (r = rev_it++; r; r = rev_it++) { @@ -2827,10 +6337,51 @@ int QUICK_SELECT_DESC::get_next() /* + Compare if found key is over max-value + Returns 0 if key <= range->max_key +*/ + +int QUICK_RANGE_SELECT::cmp_next(QUICK_RANGE *range_arg) +{ + if (range_arg->flag & NO_MAX_RANGE) + return 0; /* key can't be to large */ + + KEY_PART *key_part=key_parts; + uint store_length; + + for (char *key=range_arg->max_key, *end=key+range_arg->max_length; + key < end; + key+= store_length, key_part++) + { + int cmp; + store_length= key_part->store_length; + if (key_part->null_bit) + { + if (*key) + { + if (!key_part->field->is_null()) + return 1; + continue; + } + else if (key_part->field->is_null()) + return 0; + key++; // Skip null byte + store_length--; + } + if ((cmp=key_part->field->key_cmp((byte*) key, key_part->length)) < 0) + return 0; + if (cmp > 0) + return 1; + } + return (range_arg->flag & NEAR_MAX) ? 1 : 0; // Exact match +} + + +/* Returns 0 if found key is inside range (found key >= range->min_key). */ -int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range_arg) +int QUICK_RANGE_SELECT::cmp_prev(QUICK_RANGE *range_arg) { int cmp; if (range_arg->flag & NO_MIN_RANGE) @@ -2845,7 +6396,7 @@ int QUICK_SELECT_DESC::cmp_prev(QUICK_RANGE *range_arg) /* - * True if this range will require using HA_READ_AFTER_KEY + * TRUE if this range will require using HA_READ_AFTER_KEY See comment in get_next() about this */ @@ -2857,7 +6408,7 @@ bool QUICK_SELECT_DESC::range_reads_after_key(QUICK_RANGE *range_arg) } -/* True if we are reading over a key that may have a NULL value */ +/* TRUE if we are reading over a key that may have a NULL value */ #ifdef NOT_USED bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, @@ -2905,6 +6456,2145 @@ bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, #endif +void QUICK_RANGE_SELECT::add_info_string(String *str) +{ + KEY *key_info= head->key_info + index; + str->append(key_info->name); +} + +void QUICK_INDEX_MERGE_SELECT::add_info_string(String *str) +{ + QUICK_RANGE_SELECT *quick; + bool first= TRUE; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + str->append("sort_union("); + while ((quick= it++)) + { + if (!first) + str->append(','); + else + first= FALSE; + quick->add_info_string(str); + } + if (pk_quick_select) + { + str->append(','); + pk_quick_select->add_info_string(str); + } + str->append(')'); +} + +void QUICK_ROR_INTERSECT_SELECT::add_info_string(String *str) +{ + bool first= TRUE; + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + str->append("intersect("); + while ((quick= it++)) + { + KEY *key_info= head->key_info + quick->index; + if (!first) + str->append(','); + else + first= FALSE; + str->append(key_info->name); + } + if (cpk_quick) + { + KEY *key_info= head->key_info + cpk_quick->index; + str->append(','); + str->append(key_info->name); + } + str->append(')'); +} + +void 
QUICK_ROR_UNION_SELECT::add_info_string(String *str) +{ + bool first= TRUE; + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + str->append("union("); + while ((quick= it++)) + { + if (!first) + str->append(','); + else + first= FALSE; + quick->add_info_string(str); + } + str->append(')'); +} + + +void QUICK_RANGE_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + KEY *key_info= head->key_info + index; + key_names->append(key_info->name); + length= longlong2str(max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); +} + +void QUICK_INDEX_MERGE_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + bool first= TRUE; + QUICK_RANGE_SELECT *quick; + + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + if (first) + first= FALSE; + else + { + key_names->append(','); + used_lengths->append(','); + } + + KEY *key_info= head->key_info + quick->index; + key_names->append(key_info->name); + length= longlong2str(quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); + } + if (pk_quick_select) + { + KEY *key_info= head->key_info + pk_quick_select->index; + key_names->append(','); + key_names->append(key_info->name); + length= longlong2str(pk_quick_select->max_used_key_length, buf, 10) - buf; + used_lengths->append(','); + used_lengths->append(buf, length); + } +} + +void QUICK_ROR_INTERSECT_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + bool first= TRUE; + QUICK_RANGE_SELECT *quick; + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + while ((quick= it++)) + { + KEY *key_info= head->key_info + quick->index; + if (first) + first= FALSE; + else + { + key_names->append(','); + used_lengths->append(','); + } + key_names->append(key_info->name); + length= longlong2str(quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); + } + + if (cpk_quick) + { + KEY *key_info= head->key_info + cpk_quick->index; + key_names->append(','); + key_names->append(key_info->name); + length= longlong2str(cpk_quick->max_used_key_length, buf, 10) - buf; + used_lengths->append(','); + used_lengths->append(buf, length); + } +} + +void QUICK_ROR_UNION_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + bool first= TRUE; + QUICK_SELECT_I *quick; + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + while ((quick= it++)) + { + if (first) + first= FALSE; + else + { + used_lengths->append(','); + key_names->append(','); + } + quick->add_keys_and_lengths(key_names, used_lengths); + } +} + + +/******************************************************************************* +* Implementation of QUICK_GROUP_MIN_MAX_SELECT +*******************************************************************************/ + +static inline uint get_field_keypart(KEY *index, Field *field); +static inline SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, + PARAM *param, uint *param_idx); +static bool +get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, + KEY_PART_INFO *first_non_group_part, + KEY_PART_INFO *min_max_arg_part, + KEY_PART_INFO *last_part, THD *thd, + byte *key_infix, uint *key_infix_len, + KEY_PART_INFO **first_non_infix_part); +static bool +check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, + Field::imagetype image_type); + +static void 
+cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, + uint group_key_parts, SEL_TREE *range_tree, + SEL_ARG *index_tree, ha_rows quick_prefix_records, + bool have_min, bool have_max, + double *read_cost, ha_rows *records); + +/* + Test if this access method is applicable to a GROUP query with MIN/MAX + functions, and if so, construct a new TRP object. + + SYNOPSIS + get_best_group_min_max() + param Parameter from test_quick_select + sel_tree Range tree generated by get_mm_tree + + DESCRIPTION + Test whether a query can be computed via a QUICK_GROUP_MIN_MAX_SELECT. + Queries computable via a QUICK_GROUP_MIN_MAX_SELECT must satisfy the + following conditions: + A) Table T has at least one compound index I of the form: + I = <A_1, ...,A_k, [B_1,..., B_m], C, [D_1,...,D_n]> + B) Query conditions: + B0. Q is over a single table T. + B1. The attributes referenced by Q are a subset of the attributes of I. + B2. All attributes QA in Q can be divided into 3 overlapping groups: + - SA = {S_1, ..., S_l, [C]} - from the SELECT clause, where C is + referenced by any number of MIN and/or MAX functions if present. + - WA = {W_1, ..., W_p} - from the WHERE clause + - GA = <G_1, ..., G_k> - from the GROUP BY clause (if any) + = SA - if Q is a DISTINCT query (based on the + equivalence of DISTINCT and GROUP queries). + - NGA = QA - (GA union C) = {NG_1, ..., NG_m} - the ones not in + GROUP BY and not referenced by MIN/MAX functions. + with the following properties specified below. + + SA1. There is at most one attribute in SA referenced by any number of + MIN and/or MAX functions which, if present, is denoted as C. + SA2. The position of the C attribute in the index is after the last A_k. + SA3. The attribute C can be referenced in the WHERE clause only in + predicates of the forms: + - (C {< | <= | > | >= | =} const) + - (const {< | <= | > | >= | =} C) + - (C between const_i and const_j) + - C IS NULL + - C IS NOT NULL + - C != const + SA4. If Q has a GROUP BY clause, there are no other aggregate functions + except MIN and MAX. For queries with DISTINCT, aggregate functions + are allowed. + SA5. The select list in DISTINCT queries should not contain expressions. + GA1. If Q has a GROUP BY clause, then GA is a prefix of I. That is, if + G_i = A_j => i = j. + GA2. If Q has a DISTINCT clause, then there is a permutation of SA that + forms a prefix of I. This permutation is used as the GROUP clause + when the DISTINCT query is converted to a GROUP query. + GA3. The attributes in GA may participate in arbitrary predicates, divided + into two groups: + - RNG(G_1,...,G_q ; where q <= k) is a range condition over the + attributes of a prefix of GA + - PA(G_i1,...G_iq) is an arbitrary predicate over an arbitrary subset + of GA. Since P is applied to only GROUP attributes it filters some + groups, and thus can be applied after the grouping. + GA4. There are no expressions among G_i, just direct column references. + NGA1.If in the index I there is a gap between the last GROUP attribute G_k, + and the MIN/MAX attribute C, then NGA must consist of exactly the index + attributes that constitute the gap. As a result there is a permutation + of NGA that coincides with the gap in the index <B_1, ..., B_m>. + NGA2.If BA <> {}, then the WHERE clause must contain a conjunction EQ of + equality conditions for all NG_i of the form (NG_i = const) or + (const = NG_i), such that each NG_i is referenced in exactly one + conjunct. Informally, the predicates provide constants to fill the + gap in the index.
+ WA1. There are no other attributes in the WHERE clause except the ones + referenced in predicates RNG, PA, PC, EQ defined above. Therefore + WA is subset of (GA union NGA union C) for GA,NGA,C that pass the above + tests. By transitivity then it also follows that each WA_i participates + in the index I (if this was already tested for GA, NGA and C). + + C) Overall query form: + SELECT EXPR([A_1,...,A_k], [B_1,...,B_m], [MIN(C)], [MAX(C)]) + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND EQ(B_1,...,B_m)] + [AND PC(C)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k + [HAVING PH(A_1, ..., B_1,..., C)] + where EXPR(...) is an arbitrary expression over some or all SELECT fields, + or: + SELECT DISTINCT A_i1,...,A_ik + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)]; + + NOTES + If the current query satisfies the conditions above, and if + (mem_root! = NULL), then the function constructs and returns a new TRP + object, that is later used to construct a new QUICK_GROUP_MIN_MAX_SELECT. + If (mem_root == NULL), then the function only tests whether the current + query satisfies the conditions above, and, if so, sets + is_applicable = TRUE. + + Queries with DISTINCT for which index access can be used are transformed + into equivalent group-by queries of the form: + + SELECT A_1,...,A_k FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k; + + The group-by list is a permutation of the select attributes, according + to their order in the index. + + TODO + - What happens if the query groups by the MIN/MAX field, and there is no + other field as in: "select min(a) from t1 group by a" ? + - We assume that the general correctness of the GROUP-BY query was checked + before this point. Is this correct, or do we have to check it completely? + + RETURN + If mem_root != NULL + - valid TRP_GROUP_MIN_MAX object if this QUICK class can be used for + the query + - NULL o/w. + If mem_root == NULL + - NULL +*/ + +static TRP_GROUP_MIN_MAX * +get_best_group_min_max(PARAM *param, SEL_TREE *tree) +{ + THD *thd= param->thd; + JOIN *join= thd->lex->select_lex.join; + TABLE *table= param->table; + bool have_min= FALSE; /* TRUE if there is a MIN function. */ + bool have_max= FALSE; /* TRUE if there is a MAX function. */ + Item_field *min_max_arg_item= NULL;/* The argument of all MIN/MAX functions.*/ + KEY_PART_INFO *min_max_arg_part= NULL; /* The corresponding keypart. */ + uint group_prefix_len= 0; /* Length (in bytes) of the key prefix. */ + KEY *index_info= NULL; /* The index chosen for data access. */ + uint index= 0; /* The id of the chosen index. */ + uint group_key_parts= 0; /* Number of index key parts in the group prefix. */ + uint used_key_parts= 0; /* Number of index key parts used for access. */ + byte key_infix[MAX_KEY_LENGTH]; /* Constants from equality predicates.*/ + uint key_infix_len= 0; /* Length of key_infix. */ + TRP_GROUP_MIN_MAX *read_plan= NULL; /* The eventually constructed TRP. */ + uint key_part_nr; + ORDER *tmp_group; + Item *item; + Item_field *item_field; + + DBUG_ENTER("get_best_group_min_max"); + + /* Perform few 'cheap' tests whether this access method is applicable. */ + if (!join || (thd->lex->sql_command != SQLCOM_SELECT)) + DBUG_RETURN(NULL); /* This is not a select statement. */ + if ((join->tables != 1) || /* The query must reference one table. */ + ((!join->group_list) && /* Neither GROUP BY nor a DISTINCT query. 
*/ + (!join->select_distinct))) + DBUG_RETURN(NULL); + if(table->keys == 0) /* There are no indexes to use. */ + DBUG_RETURN(NULL); + + /* Analyze the query in more detail. */ + List_iterator<Item> select_items_it(join->fields_list); + + /* Check (SA1,SA4) and store the only MIN/MAX argument - the C attribute.*/ + if(join->make_sum_func_list(join->all_fields, join->fields_list, 1)) + DBUG_RETURN(NULL); + if (join->sum_funcs[0]) + { + Item_sum *min_max_item; + Item_sum **func_ptr= join->sum_funcs; + while ((min_max_item= *(func_ptr++))) + { + if (min_max_item->sum_func() == Item_sum::MIN_FUNC) + have_min= TRUE; + else if (min_max_item->sum_func() == Item_sum::MAX_FUNC) + have_max= TRUE; + else + DBUG_RETURN(NULL); + + Item *expr= min_max_item->args[0]; /* The argument of MIN/MAX. */ + if (expr->type() == Item::FIELD_ITEM) /* Is it an attribute? */ + { + if (! min_max_arg_item) + min_max_arg_item= (Item_field*) expr; + else if (! min_max_arg_item->eq(expr, 1)) + DBUG_RETURN(NULL); + } + else + DBUG_RETURN(NULL); + } + } + + /* Check (SA5). */ + if (join->select_distinct) + { + while ((item= select_items_it++)) + { + if (item->type() != Item::FIELD_ITEM) + DBUG_RETURN(NULL); + } + } + + /* Check (GA4) - that there are no expressions among the group attributes. */ + for (tmp_group= join->group_list; tmp_group; tmp_group= tmp_group->next) + { + if ((*tmp_group->item)->type() != Item::FIELD_ITEM) + DBUG_RETURN(NULL); + } + + /* + Check that table has at least one compound index such that the conditions + (GA1,GA2) are all TRUE. If there is more than one such index, select the + first one. Here we set the variables: group_prefix_len and index_info. + */ + KEY *cur_index_info= table->key_info; + KEY *cur_index_info_end= cur_index_info + table->keys; + KEY_PART_INFO *cur_part= NULL; + KEY_PART_INFO *end_part; /* Last part for loops. */ + /* Last index part. */ + KEY_PART_INFO *last_part= NULL; + KEY_PART_INFO *first_non_group_part= NULL; + KEY_PART_INFO *first_non_infix_part= NULL; + uint key_infix_parts= 0; + uint cur_group_key_parts= 0; + uint cur_group_prefix_len= 0; + /* Cost-related variables for the best index so far. */ + double best_read_cost= DBL_MAX; + ha_rows best_records= 0; + SEL_ARG *best_index_tree= NULL; + ha_rows best_quick_prefix_records= 0; + uint best_param_idx= 0; + double cur_read_cost= DBL_MAX; + ha_rows cur_records; + SEL_ARG *cur_index_tree= NULL; + ha_rows cur_quick_prefix_records= 0; + uint cur_param_idx; + + for (uint cur_index= 0 ; cur_index_info != cur_index_info_end ; + cur_index_info++, cur_index++) + { + /* Check (B1) - if current index is covering. */ + if (!table->used_keys.is_set(cur_index)) + goto next_index; + + /* + Check (GA1) for GROUP BY queries. + */ + if (join->group_list) + { + cur_part= cur_index_info->key_part; + end_part= cur_part + cur_index_info->key_parts; + /* Iterate in parallel over the GROUP list and the index parts. */ + for (tmp_group= join->group_list; tmp_group && (cur_part != end_part); + tmp_group= tmp_group->next, cur_part++) + { + /* + TODO: + tmp_group::item is an array of Item, is it OK to consider only the + first Item? If so, then why? What is the array for? + */ + /* Above we already checked that all group items are fields. 
*/ + DBUG_ASSERT((*tmp_group->item)->type() == Item::FIELD_ITEM); + Item_field *group_field= (Item_field *) (*tmp_group->item); + if (group_field->field->eq(cur_part->field)) + { + cur_group_prefix_len+= cur_part->store_length; + ++cur_group_key_parts; + } + else + goto next_index; + } + } + /* + Check (GA2) if this is a DISTINCT query. + If GA2, then Store a new ORDER object in group_fields_array at the + position of the key part of item_field->field. Thus we get the ORDER + objects for each field ordered as the corresponding key parts. + Later group_fields_array of ORDER objects is used to convert the query + to a GROUP query. + */ + else if (join->select_distinct) + { + select_items_it.rewind(); + while ((item= select_items_it++)) + { + item_field= (Item_field*) item; /* (SA5) already checked above. */ + /* Find the order of the key part in the index. */ + key_part_nr= get_field_keypart(cur_index_info, item_field->field); + if (key_part_nr < 1 || key_part_nr > join->fields_list.elements) + goto next_index; + cur_part= cur_index_info->key_part + key_part_nr - 1; + cur_group_prefix_len+= cur_part->store_length; + } + cur_group_key_parts= join->fields_list.elements; + } + else + DBUG_ASSERT(FALSE); + + /* Check (SA2). */ + if (min_max_arg_item) + { + key_part_nr= get_field_keypart(cur_index_info, min_max_arg_item->field); + if (key_part_nr <= cur_group_key_parts) + goto next_index; + min_max_arg_part= cur_index_info->key_part + key_part_nr - 1; + } + + /* + Check (NGA1, NGA2) and extract a sequence of constants to be used as part + of all search keys. + */ + + /* + If there is MIN/MAX, each keypart between the last group part and the + MIN/MAX part must participate in one equality with constants, and all + keyparts after the MIN/MAX part must not be referenced in the query. + + If there is no MIN/MAX, the keyparts after the last group part can be + referenced only in equalities with constants, and the referenced keyparts + must form a sequence without any gaps that starts immediately after the + last group keypart. + */ + last_part= cur_index_info->key_part + cur_index_info->key_parts; + first_non_group_part= (cur_group_key_parts < cur_index_info->key_parts) ? + cur_index_info->key_part + cur_group_key_parts : + NULL; + first_non_infix_part= min_max_arg_part ? + (min_max_arg_part < last_part) ? + min_max_arg_part + 1 : + NULL : + NULL; + if (first_non_group_part && + (!min_max_arg_part || (min_max_arg_part - first_non_group_part > 0))) + { + if (tree) + { + uint dummy; + SEL_ARG *index_range_tree= get_index_range_tree(cur_index, tree, param, + &dummy); + if (!get_constant_key_infix(cur_index_info, index_range_tree, + first_non_group_part, min_max_arg_part, + last_part, thd, key_infix, &key_infix_len, + &first_non_infix_part)) + goto next_index; + } + else if (min_max_arg_part && + (min_max_arg_part - first_non_group_part > 0)) + /* + There is a gap but no range tree, thus no predicates at all for the + non-group keyparts. + */ + goto next_index; + } + + /* + Test (WA1) partially - that no other keypart after the last infix part is + referenced in the query. + */ + if (first_non_infix_part) + { + for (cur_part= first_non_infix_part; cur_part != last_part; cur_part++) + { + if (cur_part->field->query_id == thd->query_id) + goto next_index; + } + } + + /* If we got to this point, cur_index_info passes the test. */ + key_infix_parts= key_infix_len ? 
+ (first_non_infix_part - first_non_group_part) : 0; + used_key_parts= cur_group_key_parts + key_infix_parts; + + /* Compute the cost of using this index. */ + if (tree) + { + /* Find the SEL_ARG sub-tree that corresponds to the chosen index. */ + cur_index_tree= get_index_range_tree(cur_index, tree, param, + &cur_param_idx); + /* Check if this range tree can be used for prefix retrieval. */ + cur_quick_prefix_records= check_quick_select(param, cur_param_idx, + cur_index_tree); + } + cost_group_min_max(table, cur_index_info, used_key_parts, + cur_group_key_parts, tree, cur_index_tree, + cur_quick_prefix_records, have_min, have_max, + &cur_read_cost, &cur_records); + + if (cur_read_cost < best_read_cost) + { + index_info= cur_index_info; + index= cur_index; + best_read_cost= cur_read_cost; + best_records= cur_records; + best_index_tree= cur_index_tree; + best_quick_prefix_records= cur_quick_prefix_records; + best_param_idx= cur_param_idx; + group_key_parts= cur_group_key_parts; + group_prefix_len= cur_group_prefix_len; + } + + next_index: + cur_group_key_parts= 0; + cur_group_prefix_len= 0; + } + if (!index_info) /* No usable index found. */ + DBUG_RETURN(NULL); + + /* Check (SA3) for the where clause. */ + if (join->conds && min_max_arg_item && + !check_group_min_max_predicates(join->conds, min_max_arg_item, + (index_info->flags & HA_SPATIAL) ? + Field::itMBR : Field::itRAW)) + DBUG_RETURN(NULL); + + /* The query passes all tests, so construct a new TRP object. */ + read_plan= new (param->mem_root) + TRP_GROUP_MIN_MAX(have_min, have_max, min_max_arg_part, + group_prefix_len, used_key_parts, + group_key_parts, index_info, index, + key_infix_len, + (key_infix_len > 0) ? key_infix : NULL, + tree, best_index_tree, best_param_idx, + best_quick_prefix_records); + if (read_plan) + { + if (tree && read_plan->quick_prefix_records == 0) + DBUG_RETURN(NULL); + + read_plan->read_cost= best_read_cost; + read_plan->records= best_records; + + DBUG_PRINT("info", + ("Returning group min/max plan: cost: %g, records: %lu", + read_plan->read_cost, (ulong) read_plan->records)); + } + + DBUG_RETURN(read_plan); +} + + +/* + Check that the MIN/MAX attribute participates only in range predicates + with constants. + + SYNOPSIS + check_group_min_max_predicates() + cond tree (or subtree) describing all or part of the WHERE + clause being analyzed + min_max_arg_item the field referenced by the MIN/MAX function(s) + min_max_arg_part the keypart of the MIN/MAX argument if any + + DESCRIPTION + The function walks recursively over the cond tree representing a WHERE + clause, and checks condition (SA3) - if a field is referenced by a MIN/MAX + aggregate function, it is referenced only by one of the following + predicates: {=, !=, <, <=, >, >=, between, is null, is not null}. 
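
To make the recursion concrete, here is a minimal standalone sketch of the same check over a simplified condition tree (the Cond structure and its two leaf flags are illustrative assumptions, not MySQL types): every leaf that touches the MIN/MAX column must be a simple comparison with a constant from the list above, and a single failing leaf under any AND/OR node rejects the plan.

#include <vector>

struct Cond
{
  enum Kind { AND_OR, LEAF } kind;
  std::vector<Cond> children;        /* used when kind == AND_OR */
  bool refs_min_max_col;             /* leaf references the MIN/MAX column */
  bool simple_cmp_with_const;        /* =,!=,<,<=,>,>=,BETWEEN,IS [NOT] NULL vs a constant */
};

static bool sa3_holds(const Cond &c)
{
  if (c.kind == Cond::AND_OR)
  {
    for (const Cond &child : c.children)
      if (!sa3_holds(child))
        return false;                /* one disqualified leaf rejects the plan */
    return true;
  }
  /* A leaf is restricted only if it references the MIN/MAX argument at all. */
  return !c.refs_min_max_col || c.simple_cmp_with_const;
}
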
+ + RETURN + TRUE if cond passes the test + FALSE o/w +*/ + +static bool +check_group_min_max_predicates(COND *cond, Item_field *min_max_arg_item, + Field::imagetype image_type) +{ + DBUG_ENTER("check_group_min_max_predicates"); + DBUG_ASSERT(cond && min_max_arg_item); + + Item::Type cond_type= cond->type(); + if (cond_type == Item::COND_ITEM) /* 'AND' or 'OR' */ + { + DBUG_PRINT("info", ("Analyzing: %s", ((Item_func*) cond)->func_name())); + List_iterator_fast<Item> li(*((Item_cond*) cond)->argument_list()); + Item *and_or_arg; + while ((and_or_arg= li++)) + { + if(!check_group_min_max_predicates(and_or_arg, min_max_arg_item, + image_type)) + DBUG_RETURN(FALSE); + } + DBUG_RETURN(TRUE); + } + + /* + TODO: + This is a very crude fix to handle sub-selects in the WHERE clause + (Item_subselect objects). With the test below we rule out from the + optimization all queries with subselects in the WHERE clause. What has to + be done, is that here we should analyze whether the subselect references + the MIN/MAX argument field, and disallow the optimization only if this is + so. + */ + if (cond_type == Item::SUBSELECT_ITEM) + DBUG_RETURN(FALSE); + + /* We presume that at this point there are no other Items than functions. */ + DBUG_ASSERT(cond_type == Item::FUNC_ITEM); + + /* Test if cond references only group-by or non-group fields. */ + Item_func *pred= (Item_func*) cond; + Item **arguments= pred->arguments(); + Item *cur_arg; + DBUG_PRINT("info", ("Analyzing: %s", pred->func_name())); + for (uint arg_idx= 0; arg_idx < pred->argument_count (); arg_idx++) + { + cur_arg= arguments[arg_idx]; + DBUG_PRINT("info", ("cur_arg: %s", cur_arg->full_name())); + if (cur_arg->type() == Item::FIELD_ITEM) + { + if (min_max_arg_item->eq(cur_arg, 1)) + { + /* + If pred references the MIN/MAX argument, check whether pred is a range + condition that compares the MIN/MAX argument with a constant. + */ + Item_func::Functype pred_type= pred->functype(); + if (pred_type != Item_func::EQUAL_FUNC && + pred_type != Item_func::LT_FUNC && + pred_type != Item_func::LE_FUNC && + pred_type != Item_func::GT_FUNC && + pred_type != Item_func::GE_FUNC && + pred_type != Item_func::BETWEEN && + pred_type != Item_func::ISNULL_FUNC && + pred_type != Item_func::ISNOTNULL_FUNC && + pred_type != Item_func::EQ_FUNC && + pred_type != Item_func::NE_FUNC) + DBUG_RETURN(FALSE); + + /* Check that pred compares min_max_arg_item with a constant. */ + Item *args[3]; + bzero(args, 3 * sizeof(Item*)); + bool inv; + /* Test if this is a comparison of a field and a constant. */ + if (!simple_pred(pred, args, &inv)) + DBUG_RETURN(FALSE); + + /* Check for compatible string comparisons - similar to get_mm_leaf. */ + if (args[0] && args[1] && !args[2] && // this is a binary function + min_max_arg_item->result_type() == STRING_RESULT && + /* + Don't use an index when comparing strings of different collations. + */ + ((args[1]->result_type() == STRING_RESULT && + image_type == Field::itRAW && + ((Field_str*) min_max_arg_item->field)->charset() != + pred->compare_collation()) + || + /* + We can't always use indexes when comparing a string index to a + number. 
+ */ + (args[1]->result_type() != STRING_RESULT && + min_max_arg_item->field->cmp_type() != args[1]->result_type()))) + DBUG_RETURN(FALSE); + } + } + else if (cur_arg->type() == Item::FUNC_ITEM) + { + if(!check_group_min_max_predicates(cur_arg, min_max_arg_item, + image_type)) + DBUG_RETURN(FALSE); + } + else if (cur_arg->const_item()) + { + DBUG_RETURN(TRUE); + } + else + DBUG_RETURN(FALSE); + } + + DBUG_RETURN(TRUE); +} + + +/* + Extract a sequence of constants from a conjunction of equality predicates. + + SYNOPSIS + get_constant_key_infix() + index_info [in] Descriptor of the chosen index. + index_range_tree [in] Range tree for the chosen index + first_non_group_part [in] First index part after group attribute parts + min_max_arg_part [in] The keypart of the MIN/MAX argument if any + last_part [in] Last keypart of the index + thd [in] Current thread + key_infix [out] Infix of constants to be used for index lookup + key_infix_len [out] Lenghth of the infix + first_non_infix_part [out] The first keypart after the infix (if any) + + DESCRIPTION + Test conditions (NGA1, NGA2) from get_best_group_min_max(). Namely, + for each keypart field NGF_i not in GROUP-BY, check that there is a constant + equality predicate among conds with the form (NGF_i = const_ci) or + (const_ci = NGF_i). + Thus all the NGF_i attributes must fill the 'gap' between the last group-by + attribute and the MIN/MAX attribute in the index (if present). If these + conditions hold, copy each constant from its corresponding predicate into + key_infix, in the order its NG_i attribute appears in the index, and update + key_infix_len with the total length of the key parts in key_infix. + + RETURN + TRUE if the index passes the test + FALSE o/w +*/ + +static bool +get_constant_key_infix(KEY *index_info, SEL_ARG *index_range_tree, + KEY_PART_INFO *first_non_group_part, + KEY_PART_INFO *min_max_arg_part, + KEY_PART_INFO *last_part, THD *thd, + byte *key_infix, uint *key_infix_len, + KEY_PART_INFO **first_non_infix_part) +{ + SEL_ARG *cur_range; + KEY_PART_INFO *cur_part; + /* End part for the first loop below. */ + KEY_PART_INFO *end_part= min_max_arg_part ? min_max_arg_part : last_part; + + *key_infix_len= 0; + byte *key_ptr= key_infix; + for (cur_part= first_non_group_part; cur_part != end_part; cur_part++) + { + /* + Find the range tree for the current keypart. We assume that + index_range_tree points to the leftmost keypart in the index. + */ + for (cur_range= index_range_tree; cur_range; + cur_range= cur_range->next_key_part) + { + if (cur_range->field->eq(cur_part->field)) + break; + } + if (!cur_range) + { + if (min_max_arg_part) + return FALSE; /* The current keypart has no range predicates at all. */ + else + { + *first_non_infix_part= cur_part; + return TRUE; + } + } + + /* Check that the current range tree is a single point interval. */ + if (cur_range->prev || cur_range->next) + return FALSE; /* This is not the only range predicate for the field. */ + if ((cur_range->min_flag & NO_MIN_RANGE) || + (cur_range->max_flag & NO_MAX_RANGE) || + (cur_range->min_flag & NEAR_MIN) || (cur_range->max_flag & NEAR_MAX)) + return FALSE; + + uint field_length= cur_part->store_length; + if ((cur_range->maybe_null && + cur_range->min_value[0] && cur_range->max_value[0]) + || + (memcmp(cur_range->min_value, cur_range->max_value, field_length) == 0)) + { /* cur_range specifies 'IS NULL' or an equality condition. 
*/ + memcpy(key_ptr, cur_range->min_value, field_length); + key_ptr+= field_length; + *key_infix_len+= field_length; + } + else + return FALSE; + } + + if (!min_max_arg_part && (cur_part == last_part)) + *first_non_infix_part= last_part; + + return TRUE; +} + + +/* + Find the key part referenced by a field. + + SYNOPSIS + get_field_keypart() + index descriptor of an index + field field that possibly references some key part in index + + NOTES + The return value can be used to get a KEY_PART_INFO pointer by + part= index->key_part + get_field_keypart(...) - 1; + + RETURN + Positive number which is the consecutive number of the key part, or + 0 if field does not reference any index field. +*/ + +static inline uint +get_field_keypart(KEY *index, Field *field) +{ + KEY_PART_INFO *part, *end; + + for (part= index->key_part, end= part + index->key_parts; part < end; part++) + { + if (field->eq(part->field)) + return part - index->key_part + 1; + } + return 0; +} + + +/* + Find the SEL_ARG sub-tree that corresponds to the chosen index. + + SYNOPSIS + get_index_range_tree() + index [in] The ID of the index being looked for + range_tree[in] Tree of ranges being searched + param [in] PARAM from SQL_SELECT::test_quick_select + param_idx [out] Index in the array PARAM::key that corresponds to 'index' + + DESCRIPTION + + A SEL_TREE contains range trees for all usable indexes. This procedure + finds the SEL_ARG sub-tree for 'index'. The members of a SEL_TREE are + ordered in the same way as the members of PARAM::key, thus we first find + the corresponding index in the array PARAM::key. This index is returned + through the variable param_idx, to be used later as argument of + check_quick_select(). + + RETURN + Pointer to the SEL_ARG subtree that corresponds to index. +*/ + +SEL_ARG * get_index_range_tree(uint index, SEL_TREE* range_tree, PARAM *param, + uint *param_idx) +{ + uint idx= 0; /* Index nr in param->key_parts */ + while (idx < param->keys) + { + if (index == param->real_keynr[idx]) + break; + idx++; + } + *param_idx= idx; + return(range_tree->keys[idx]); +} + + +/* + Compute the cost of a quick_group_min_max_select for a particular index. + + SYNOPSIS + cost_group_min_max() + table [in] The table being accessed + index_info [in] The index used to access the table + used_key_parts [in] Number of key parts used to access the index + group_key_parts [in] Number of index key parts in the group prefix + range_tree [in] Tree of ranges for all indexes + index_tree [in] The range tree for the current index + quick_prefix_records [in] Number of records retrieved by the internally + used quick range select if any + have_min [in] True if there is a MIN function + have_max [in] True if there is a MAX function + read_cost [out] The cost to retrieve rows via this quick select + records [out] The number of rows retrieved + + DESCRIPTION + This method computes the access cost of a TRP_GROUP_MIN_MAX instance and + the number of rows returned. It updates this->read_cost and this->records. + + NOTES + The cost computation distinguishes several cases: + 1) No equality predicates over non-group attributes (thus no key_infix). + If groups are bigger than blocks on the average, then we assume that it + is very unlikely that block ends are aligned with group ends, thus even + if we look for both MIN and MAX keys, all pairs of neighbor MIN/MAX + keys, except for the first MIN and the last MAX keys, will be in the + same block. If groups are smaller than blocks, then we are going to + read all blocks. 
+ 2) There are equality predicates over non-group attributes. + In this case the group prefix is extended by additional constants, and + as a result the min/max values are inside sub-groups of the original + groups. The number of blocks that will be read depends on whether the + ends of these sub-groups will be contained in the same or in different + blocks. We compute the probability for the two ends of a subgroup to be + in two different blocks as the ratio of: + - the number of positions of the left-end of a subgroup inside a group, + such that the right end of the subgroup is past the end of the buffer + containing the left-end, and + - the total number of possible positions for the left-end of the + subgroup, which is the number of keys in the containing group. + We assume it is very unlikely that two ends of subsequent subgroups are + in the same block. + 3) The are range predicates over the group attributes. + Then some groups may be filtered by the range predicates. We use the + selectivity of the range predicates to decide how many groups will be + filtered. + + TODO + - Take into account the optional range predicates over the MIN/MAX + argument. + - Check if we have a PK index and we use all cols - then each key is a + group, and it will be better to use an index scan. + + RETURN + None +*/ + +void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts, + uint group_key_parts, SEL_TREE *range_tree, + SEL_ARG *index_tree, ha_rows quick_prefix_records, + bool have_min, bool have_max, + double *read_cost, ha_rows *records) +{ + uint table_records; + uint num_groups; + uint num_blocks; + uint keys_per_block; + uint keys_per_group; + uint keys_per_subgroup; /* Average number of keys in sub-groups */ + /* formed by a key infix. */ + double p_overlap; /* Probability that a sub-group overlaps two blocks. */ + double quick_prefix_selectivity; + double io_cost; + double cpu_cost= 0; /* TODO: CPU cost of index_read calls? */ + DBUG_ENTER("TRP_GROUP_MIN_MAX::cost"); + + table_records= table->file->records; + keys_per_block= (table->file->block_size / 2 / + (index_info->key_length + table->file->ref_length) + + 1); + num_blocks= (table_records / keys_per_block) + 1; + + /* Compute the number of keys in a group. */ + keys_per_group= index_info->rec_per_key[group_key_parts - 1]; + if (keys_per_group == 0) /* If there is no statistics try to guess */ + /* each group contains 10% of all records */ + keys_per_group= (table_records / 10) + 1; + num_groups= (table_records / keys_per_group) + 1; + + /* Apply the selectivity of the quick select for group prefixes. */ + if (range_tree && (quick_prefix_records != HA_POS_ERROR)) + { + quick_prefix_selectivity= (double) quick_prefix_records / + (double) table_records; + num_groups= (uint) rint(num_groups * quick_prefix_selectivity); + } + + if (used_key_parts > group_key_parts) + { /* + Compute the probability that two ends of a subgroup are inside + different blocks. + */ + keys_per_subgroup= index_info->rec_per_key[used_key_parts - 1]; + if (keys_per_subgroup >= keys_per_block) /* If a subgroup is bigger than */ + p_overlap= 1.0; /* a block, it will overlap at least two blocks. */ + else + { + double blocks_per_group= (double) num_blocks / (double) num_groups; + p_overlap= (blocks_per_group * (keys_per_subgroup - 1)) / keys_per_group; + p_overlap= min(p_overlap, 1.0); + } + io_cost= (double) min(num_groups * (1 + p_overlap), num_blocks); + } + else + io_cost= (keys_per_group > keys_per_block) ? + (have_min && have_max) ? 
(double) (num_groups + 1) : + (double) num_groups : + (double) num_blocks; + + /* + TODO: If there is no WHERE clause and no other expressions, there should be + no CPU cost. We leave it here to make this cost comparable to that of index + scan as computed in SQL_SELECT::test_quick_select(). + */ + cpu_cost= (double) num_groups / TIME_FOR_COMPARE; + + *read_cost= io_cost + cpu_cost; + *records= num_groups; + + DBUG_PRINT("info", + ("records=%u, keys/block=%u, keys/group=%u, records=%u, blocks=%u", + table_records, keys_per_block, keys_per_group, records, + num_blocks)); + DBUG_VOID_RETURN; +} + + +/* + Construct a new quick select object for queries with group by with min/max. + + SYNOPSIS + TRP_GROUP_MIN_MAX::make_quick() + param Parameter from test_quick_select + retrieve_full_rows ignored + parent_alloc Memory pool to use, if any. + + NOTES + Make_quick ignores the retrieve_full_rows parameter because + QUICK_GROUP_MIN_MAX_SELECT always performs 'index only' scans. + The other parameter are ignored as well because all necessary + data to create the QUICK object is computed at this TRP creation + time. + + RETURN + New QUICK_GROUP_MIN_MAX_SELECT object if successfully created, + NULL o/w. +*/ + +QUICK_SELECT_I * +TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows, + MEM_ROOT *parent_alloc) +{ + QUICK_GROUP_MIN_MAX_SELECT *quick; + DBUG_ENTER("TRP_GROUP_MIN_MAX::make_quick"); + + quick= new QUICK_GROUP_MIN_MAX_SELECT(param->table, + param->thd->lex->select_lex.join, + have_min, have_max, min_max_arg_part, + group_prefix_len, used_key_parts, + index_info, index, read_cost, records, + key_infix_len, key_infix, + parent_alloc); + if (!quick) + DBUG_RETURN(NULL); + + if (quick->init()) + { + delete quick; + DBUG_RETURN(NULL); + } + + if (range_tree) + { + DBUG_ASSERT(quick_prefix_records > 0); + if (quick_prefix_records == HA_POS_ERROR) + quick->quick_prefix_select= NULL; /* Can't construct a quick select. */ + else + /* Make a QUICK_RANGE_SELECT to be used for group prefix retrieval. */ + quick->quick_prefix_select= get_quick_select(param, param_idx, + index_tree, + &quick->alloc); + + /* + Extract the SEL_ARG subtree that contains only ranges for the MIN/MAX + attribute, and create an array of QUICK_RANGES to be used by the + new quick select. + */ + if (min_max_arg_part) + { + SEL_ARG *min_max_range= index_tree; + while (min_max_range) /* Find the tree for the MIN/MAX key part. */ + { + if (min_max_range->field->eq(min_max_arg_part->field)) + break; + min_max_range= min_max_range->next_key_part; + } + /* Scroll to the leftmost interval for the MIN/MAX argument. */ + while (min_max_range && min_max_range->prev) + min_max_range= min_max_range->prev; + /* Create an array of QUICK_RANGEs for the MIN/MAX argument. */ + while (min_max_range) + { + if (quick->add_range(min_max_range)) + { + delete quick; + quick= NULL; + DBUG_RETURN(NULL); + } + min_max_range= min_max_range->next; + } + } + } + else + quick->quick_prefix_select= NULL; + + quick->update_key_stat(); + + DBUG_RETURN(quick); +} + + +/* + Construct new quick select for group queries with min/max. 
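
The I/O estimate computed by cost_group_min_max() above reduces to a few lines of arithmetic. The following standalone sketch covers only case 1) of its NOTES (no key infix) and takes the statistics as plain parameters instead of reading them from a handler; the numbers in main() are made up for illustration.

#include <cstdio>

/* Rough I/O estimate for case 1) above: no key infix. */
static double group_min_max_io_cost(unsigned table_records,
                                    unsigned keys_per_block,
                                    unsigned keys_per_group,
                                    bool have_min, bool have_max)
{
  unsigned num_blocks= table_records / keys_per_block + 1;
  unsigned num_groups= table_records / keys_per_group + 1;
  if (keys_per_group > keys_per_block)       /* groups span several blocks */
    return (have_min && have_max) ? (double) (num_groups + 1)
                                  : (double) num_groups;
  return (double) num_blocks;                /* small groups: read every block */
}

int main()
{
  /* Made-up statistics: 1M rows, ~100 keys per block, ~1000 keys per group. */
  printf("io_cost=%g\n",
         group_min_max_io_cost(1000000, 100, 1000, true, true));
  return 0;
}
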
+ + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::QUICK_GROUP_MIN_MAX_SELECT() + table The table being accessed + join Descriptor of the current query + have_min TRUE if the query selects a MIN function + have_max TRUE if the query selects a MAX function + min_max_arg_part The only argument field of all MIN/MAX functions + group_prefix_len Length of all key parts in the group prefix + prefix_key_parts All key parts in the group prefix + index_info The index chosen for data access + use_index The id of index_info + read_cost Cost of this access method + records Number of records returned + key_infix_len Length of the key infix appended to the group prefix + key_infix Infix of constants from equality predicates + parent_alloc Memory pool for this and quick_prefix_select data + + RETURN + None +*/ + +QUICK_GROUP_MIN_MAX_SELECT:: +QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join_arg, bool have_min_arg, + bool have_max_arg, + KEY_PART_INFO *min_max_arg_part_arg, + uint group_prefix_len_arg, + uint used_key_parts_arg, KEY *index_info_arg, + uint use_index, double read_cost_arg, + ha_rows records_arg, uint key_infix_len_arg, + byte *key_infix_arg, MEM_ROOT *parent_alloc) + :join(join_arg), index_info(index_info_arg), + group_prefix_len(group_prefix_len_arg), have_min(have_min_arg), + have_max(have_max_arg), seen_first_key(FALSE), + min_max_arg_part(min_max_arg_part_arg), key_infix(key_infix_arg), + key_infix_len(key_infix_len_arg) +{ + head= table; + file= head->file; + index= use_index; + record= head->record[0]; + tmp_record= head->record[1]; + read_time= read_cost_arg; + records= records_arg; + used_key_parts= used_key_parts_arg; + real_prefix_len= group_prefix_len + key_infix_len; + group_prefix= NULL; + min_max_arg_len= min_max_arg_part ? min_max_arg_part->store_length : 0; + + /* + We can't have parent_alloc set as the init function can't handle this case + yet. + */ + DBUG_ASSERT(!parent_alloc); + if (!parent_alloc) + { + init_sql_alloc(&alloc, join->thd->variables.range_alloc_block_size, 0); + join->thd->mem_root= &alloc; + } + else + bzero(&alloc, sizeof(MEM_ROOT)); // ensure that it's not used +} + + +/* + Do post-constructor initialization. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::init() + + DESCRIPTION + The method performs initialization that cannot be done in the constructor + such as memory allocations that may fail. It allocates memory for the + group prefix and inifix buffers, and for the lists of MIN/MAX item to be + updated during execution. + + RETURN + 0 OK + other Error code +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::init() +{ + if (group_prefix) /* Already initialized. */ + return 0; + + if (!(last_prefix= (byte*) alloc_root(&alloc, group_prefix_len))) + return 1; + /* + We may use group_prefix to store keys with all select fields, so allocate + enough space for it. + */ + if (!(group_prefix= (byte*) alloc_root(&alloc, + real_prefix_len + min_max_arg_len))) + return 1; + + if (key_infix_len > 0) + { + /* + The memory location pointed to by key_infix will be deleted soon, so + allocate a new buffer and copy the key_infix into it. 
+ */ + byte *tmp_key_infix= (byte*) alloc_root(&alloc, key_infix_len); + if (!tmp_key_infix) + return 1; + memcpy(tmp_key_infix, this->key_infix, key_infix_len); + this->key_infix= tmp_key_infix; + } + + if (min_max_arg_part) + { + if(my_init_dynamic_array(&min_max_ranges, sizeof(QUICK_RANGE*), 16, 16)) + return 1; + + if (have_min) + { + if(!(min_functions= new List<Item_sum>)) + return 1; + } + else + min_functions= NULL; + if (have_max) + { + if(!(max_functions= new List<Item_sum>)) + return 1; + } + else + max_functions= NULL; + + Item_sum *min_max_item; + Item_sum **func_ptr= join->sum_funcs; + while ((min_max_item= *(func_ptr++))) + { + if (have_min && (min_max_item->sum_func() == Item_sum::MIN_FUNC)) + min_functions->push_back(min_max_item); + else if (have_max && (min_max_item->sum_func() == Item_sum::MAX_FUNC)) + max_functions->push_back(min_max_item); + } + + if (have_min) + { + if (!(min_functions_it= new List_iterator<Item_sum>(*min_functions))) + return 1; + } + else + min_functions_it= NULL; + + if (have_max) + { + if (!(max_functions_it= new List_iterator<Item_sum>(*max_functions))) + return 1; + } + else + max_functions_it= NULL; + } + else + min_max_ranges.elements= 0; + + return 0; +} + + +QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT() +{ + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT"); + if (file->inited != handler::NONE) + file->ha_index_end(); + if (min_max_arg_part) + delete_dynamic(&min_max_ranges); + free_root(&alloc,MYF(0)); + delete quick_prefix_select; + DBUG_VOID_RETURN; +} + + +/* + Eventually create and add a new quick range object. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::add_range() + sel_range Range object from which a + + NOTES + Construct a new QUICK_RANGE object from a SEL_ARG object, and + add it to the array min_max_ranges. If sel_arg is an infinite + range, e.g. (x < 5 or x > 4), then skip it and do not construct + a quick range. + + RETURN + FALSE on success + TRUE otherwise +*/ + +bool QUICK_GROUP_MIN_MAX_SELECT::add_range(SEL_ARG *sel_range) +{ + QUICK_RANGE *range; + uint range_flag= sel_range->min_flag | sel_range->max_flag; + + /* Skip (-inf,+inf) ranges, e.g. (x < 5 or x > 4). */ + if((range_flag & NO_MIN_RANGE) && (range_flag & NO_MAX_RANGE)) + return FALSE; + + if (!(sel_range->min_flag & NO_MIN_RANGE) && + !(sel_range->max_flag & NO_MAX_RANGE)) + { + if (sel_range->maybe_null && + sel_range->min_value[0] && sel_range->max_value[0]) + range_flag|= NULL_RANGE; /* IS NULL condition */ + else if (memcmp(sel_range->min_value, sel_range->max_value, + min_max_arg_len) == 0) + range_flag|= EQ_RANGE; /* equality condition */ + } + range= new QUICK_RANGE(sel_range->min_value, min_max_arg_len, + sel_range->max_value, min_max_arg_len, + range_flag); + if (!range) + return TRUE; + if (insert_dynamic(&min_max_ranges, (gptr)&range)) + return TRUE; + return FALSE; +} + + +/* + Determine the total number and length of the keys that will be used for + index lookup. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::update_key_stat() + + DESCRIPTION + The total length of the keys used for index lookup depends on whether + there are any predicates referencing the min/max argument, and/or if + the min/max argument field can be NULL. + This function does an optimistic analysis whether the search key might + be extended by a constant for the min/max keypart. It is 'optimistic' + because during actual execution it may happen that a particular range + is skipped, and then a shorter key will be used. 
However this is data + dependent and can't be easily estimated here. + + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::update_key_stat() +{ + max_used_key_length= real_prefix_len; + if (min_max_ranges.elements > 0) + { + QUICK_RANGE *cur_range= 0; + if (have_min) + { /* Check if the right-most range has a lower boundary. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, + min_max_ranges.elements - 1); + if (!(cur_range->flag & NO_MIN_RANGE)) + { + max_used_key_length+= min_max_arg_len; + ++used_key_parts; + return; + } + } + if (have_max) + { /* Check if the left-most range has an upper boundary. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, 0); + if (!(cur_range->flag & NO_MAX_RANGE)) + { + max_used_key_length+= min_max_arg_len; + ++used_key_parts; + return; + } + } + } + else if (have_min && min_max_arg_part && min_max_arg_part->field->is_null()) + { + max_used_key_length+= min_max_arg_len; + ++used_key_parts; + } +} + + +/* + Initialize a quick group min/max select for key retrieval. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::reset() + + DESCRIPTION + Initialize the index chosen for access and find and store the prefix + of the last group. The method is expensive since it performs disk access. + + RETURN + 0 OK + other Error code +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::reset(void) +{ + int result; + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::reset"); + + file->extra(HA_EXTRA_KEYREAD); /* We need only the key attributes */ + result= file->ha_index_init(index); + result= file->index_last(record); + if (quick_prefix_select) + quick_prefix_select->reset(); + if (result) + DBUG_RETURN(result); + /* Save the prefix of the last group. */ + key_copy(last_prefix, record, index_info, group_prefix_len); + + DBUG_RETURN(0); +} + + + +/* + Get the next key containing the MIN and/or MAX key for the next group. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::get_next() + + DESCRIPTION + The method finds the next subsequent group of records that satisfies the + query conditions and finds the keys that contain the MIN/MAX values for + the key part referenced by the MIN/MAX function(s). Once a group and its + MIN/MAX values are found, store these values in the Item_sum objects for + the MIN/MAX functions. The rest of the values in the result row are stored + in the Item_field::result_field of each select field. If the query does + not contain MIN and/or MAX functions, then the function only finds the + group prefix, which is a query answer itself. + + NOTES + If both MIN and MAX are computed, then we use the fact that if there is + no MIN key, there can't be a MAX key as well, so we can skip looking + for a MAX key in this case. + + RETURN + 0 on success + HA_ERR_END_OF_FILE if returned all keys + other if some error occurred +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::get_next() +{ + int min_res= 0; + int max_res= 0; + int result; + int is_last_prefix; + + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::get_next"); + + /* + Loop until a group is found that satisfies all query conditions or the last + group is reached. + */ + do + { + result= next_prefix(); + /* + Check if this is the last group prefix. Notice that at this point + this->record contains the current prefix in record format. 
+ */ + is_last_prefix= key_cmp(index_info->key_part, last_prefix, + group_prefix_len); + DBUG_ASSERT(is_last_prefix <= 0); + if (result == HA_ERR_KEY_NOT_FOUND) + continue; + else if (result) + break; + + if (have_min) + { + min_res= next_min(); + if (min_res == 0) + update_min_result(); + } + /* If there is no MIN in the group, there is no MAX either. */ + if ((have_max && !have_min) || + (have_max && have_min && (min_res == 0))) + { + max_res= next_max(); + if (max_res == 0) + update_max_result(); + /* If a MIN was found, a MAX must have been found as well. */ + DBUG_ASSERT((have_max && !have_min) || + (have_max && have_min && (max_res == 0))); + } + result= have_min ? min_res : have_max ? max_res : result; + } + while (result == HA_ERR_KEY_NOT_FOUND && is_last_prefix != 0); + + if (result == 0) + /* + Partially mimic the behavior of end_select_send. Copy the + field data from Item_field::field into Item_field::result_field + of each non-aggregated field (the group fields, and optionally + other fields in non-ANSI SQL mode). + */ + copy_fields(&join->tmp_table_param); + else if (result == HA_ERR_KEY_NOT_FOUND) + result= HA_ERR_END_OF_FILE; + + DBUG_RETURN(result); +} + + +/* + Retrieve the minimal key in the next group. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_min() + + DESCRIPTION + Load the prefix of the next group into group_prefix and find the minimal + key within this group such that the key satisfies the query conditions and + NULL semantics. The found key is loaded into this->record. + + IMPLEMENTATION + Depending on the values of min_max_ranges.elements, key_infix_len, and + whether there is a NULL in the MIN field, this function may directly + return without any data access. In this case we use the key loaded into + this->record by the call to this->next_prefix() just before this call. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if no MIN key was found that fulfills all conditions. + other if some error occurred +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_min() +{ + int result= 0; + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_min"); + + /* Find the MIN key using the eventually extended group prefix. */ + if (min_max_ranges.elements > 0) + { + if ((result= next_min_in_range())) + DBUG_RETURN(result); + } + else + { + /* Apply the constant equality conditions to the non-group select fields. */ + if (key_infix_len > 0) + { + if ((result= file->index_read(record, group_prefix, real_prefix_len, + HA_READ_KEY_EXACT))) + DBUG_RETURN(result); + } + + /* + If the min/max argument field is NULL, skip subsequent rows in the same + group with NULL in it. Notice that: + - if the first row in a group doesn't have a NULL in the field, no row + in the same group has (because NULL < any other value), + - min_max_arg_part->field->ptr points to some place in 'record'. + */ + if (min_max_arg_part && min_max_arg_part->field->is_null()) + { + /* Find the first subsequent record without NULL in the MIN/MAX field. */ + key_copy(tmp_record, record, index_info, 0); + result= file->index_read(record, tmp_record, + real_prefix_len + min_max_arg_len, + HA_READ_AFTER_KEY); + /* + Check if the new record belongs to the current group by comparing its + prefix with the group's prefix. If it is from the next group, then the + whole group has NULLs in the MIN/MAX field, so use the first record in + the group as a result. + TODO: + It is possible to reuse this new record as the result candidate for the + next call to next_min(), and to save one lookup in the next call. 
For + this add a new member 'this->next_group_prefix'. + */ + if (!result) + { + if(key_cmp(index_info->key_part, group_prefix, real_prefix_len)) + key_restore(record, tmp_record, index_info, 0); + } else if (result == HA_ERR_KEY_NOT_FOUND) + result= 0; /* There is a result in any case. */ + } + } + + /* + If the MIN attribute is non-nullable, this->record already contains the + MIN key in the group, so just return. + */ + DBUG_RETURN(result); +} + + +/* + Retrieve the maximal key in the next group. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_max() + + DESCRIPTION + If there was no previous next_min call to determine the next group prefix, + then load the next prefix into group_prefix, then lookup the maximal key of + the group, and store it into this->record. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if no MAX key was found that fulfills all conditions. + other if some error occurred +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_max() +{ + int result; + + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_max"); + + /* Get the last key in the (possibly extended) group. */ + if (min_max_ranges.elements > 0) + result= next_max_in_range(); + else + result= file->index_read(record, group_prefix, real_prefix_len, + HA_READ_PREFIX_LAST); + DBUG_RETURN(result); +} + + +/* + Determine the prefix of the next group. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_prefix() + + DESCRIPTION + Determine the prefix of the next group that satisfies the query conditions. + If there is a range condition referencing the group attributes, use a + QUICK_RANGE_SELECT object to retrieve the *first* key that satisfies the + condition. If there is a key infix of constants, append this infix + immediately after the group attributes. The possibly extended prefix is + stored in this->group_prefix. The first key of the found group is stored in + this->record, on which relies this->next_min(). + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if there is no key with the formed prefix + HA_ERR_END_OF_FILE if there are no more keys + other if some error occurred +*/ +int QUICK_GROUP_MIN_MAX_SELECT::next_prefix() +{ + int result; + DBUG_ENTER("QUICK_GROUP_MIN_MAX_SELECT::next_prefix"); + + if (quick_prefix_select) + { + byte *cur_prefix= seen_first_key ? group_prefix : NULL; + if ((result= quick_prefix_select->get_next_prefix(group_prefix_len, + cur_prefix))) + DBUG_RETURN(result); + seen_first_key= TRUE; + } + else + { + if (!seen_first_key) + { + result= file->index_first(record); + if (result) + DBUG_RETURN(result); + seen_first_key= TRUE; + } + else + { + /* Load the first key in this group into record. */ + result= file->index_read(record, group_prefix, group_prefix_len, + HA_READ_AFTER_KEY); + if (result) + DBUG_RETURN(result); + } + } + + /* Save the prefix of this group for subsequent calls. */ + key_copy(group_prefix, record, index_info, group_prefix_len); + /* Append key_infix to group_prefix. */ + if (key_infix_len > 0) + memcpy(group_prefix + group_prefix_len, + key_infix, key_infix_len); + + DBUG_RETURN(0); +} + + +/* + Find the minimal key in a group that satisfies some range conditions for the + min/max argument field. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() + + DESCRIPTION + Given the sequence of ranges min_max_ranges, find the minimal key that is + in the left-most possible range. If there is no such key, then the current + group does not have a MIN key that satisfies the WHERE clause. If a key is + found, its value is stored in this->record. 
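
The scan over min_max_ranges can be pictured with ordinary containers. The sketch below is standalone and assumes, as min_max_ranges does, that the ranges are disjoint and ordered ascending; the sorted key vector stands in for the index keys of one group, and the real code of course does index lookups instead of a linear scan.

#include <vector>

struct Range { int low, high; };       /* closed interval [low, high] */

/*
  Return the smallest key of one group that falls inside any of the ranges,
  visiting the ranges left to right. Assumes the ranges are disjoint and
  sorted ascending, and that the group's keys are sorted ascending too.
*/
static bool min_in_ranges(const std::vector<int> &group_keys,
                          const std::vector<Range> &ranges, int *out)
{
  for (const Range &r : ranges)        /* left-most possible range first */
  {
    for (int key : group_keys)         /* ascending, like an index scan */
    {
      if (key > r.high)
        break;                         /* past this range, try the next one */
      if (key >= r.low)
      {
        *out= key;                     /* first hit in the left-most range is the MIN */
        return true;
      }
    }
  }
  return false;                        /* group has no key in any range */
}
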
+ + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of + the ranges + other if some error +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_min_in_range() +{ + ha_rkey_function find_flag; + uint search_prefix_len; + QUICK_RANGE *cur_range; + bool found_null= FALSE; + int result= HA_ERR_KEY_NOT_FOUND; + + DBUG_ASSERT(min_max_ranges.elements > 0); + + for (uint range_idx= 0; range_idx < min_max_ranges.elements; range_idx++) + { /* Search from the left-most range to the right. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, range_idx); + + /* + If the current value for the min/max argument is bigger than the right + boundary of cur_range, there is no need to check this range. + */ + if (range_idx != 0 && !(cur_range->flag & NO_MAX_RANGE) && + (key_cmp(min_max_arg_part, (const byte*) cur_range->max_key, + min_max_arg_len) == 1)) + continue; + + if (cur_range->flag & NO_MIN_RANGE) + { + find_flag= HA_READ_KEY_EXACT; + search_prefix_len= real_prefix_len; + } + else + { + /* Extend the search key with the lower boundary for this range. */ + memcpy(group_prefix + real_prefix_len, cur_range->min_key, + cur_range->min_length); + search_prefix_len= real_prefix_len + min_max_arg_len; + find_flag= (cur_range->flag & (EQ_RANGE | NULL_RANGE)) ? + HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MIN) ? + HA_READ_AFTER_KEY : HA_READ_KEY_OR_NEXT; + } + + result= file->index_read(record, group_prefix, search_prefix_len, + find_flag); + if ((result == HA_ERR_KEY_NOT_FOUND) && + (cur_range->flag & (EQ_RANGE | NULL_RANGE))) + continue; /* Check the next range. */ + else if (result) + /* + In all other cases (HA_ERR_*, HA_READ_KEY_EXACT with NO_MIN_RANGE, + HA_READ_AFTER_KEY, HA_READ_KEY_OR_NEXT) if the lookup failed for this + range, it can't succeed for any other subsequent range. + */ + break; + + /* A key was found. */ + if (cur_range->flag & EQ_RANGE) + break; /* No need to perform the checks below for equal keys. */ + + if (cur_range->flag & NULL_RANGE) + { /* Remember this key, and continue looking for a non-NULL key that */ + /* satisfies some other condition. */ + memcpy(tmp_record, record, head->rec_buff_length); + found_null= TRUE; + continue; + } + + /* Check if record belongs to the current group. */ + if (key_cmp(index_info->key_part, group_prefix, real_prefix_len)) + { + result = HA_ERR_KEY_NOT_FOUND; + continue; + } + + /* If there is an upper limit, check if the found key is in the range. */ + if ( !(cur_range->flag & NO_MAX_RANGE) ) + { + /* Compose the MAX key for the range. */ + byte *max_key= (byte*) my_alloca(real_prefix_len + min_max_arg_len); + memcpy(max_key, group_prefix, real_prefix_len); + memcpy(max_key + real_prefix_len, cur_range->max_key, + cur_range->max_length); + /* Compare the found key with max_key. */ + int cmp_res= key_cmp(index_info->key_part, max_key, + real_prefix_len + min_max_arg_len); + if (!((cur_range->flag & NEAR_MAX) && (cmp_res == -1) || + (cmp_res <= 0))) + { + result = HA_ERR_KEY_NOT_FOUND; + continue; + } + } + /* If we got to this point, the current key qualifies as MIN. */ + DBUG_ASSERT(result == 0); + break; + } + /* + If there was a key with NULL in the MIN/MAX field, and there was no other + key without NULL from the same group that satisfies some other condition, + then use the key with the NULL. 
+ */ + if (found_null && result) + { + memcpy(record, tmp_record, head->rec_buff_length); + result= 0; + } + return result; +} + + +/* + Find the maximal key in a group that satisfies some range conditions for the + min/max argument field. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() + + DESCRIPTION + Given the sequence of ranges min_max_ranges, find the maximal key that is + in the right-most possible range. If there is no such key, then the current + group does not have a MAX key that satisfies the WHERE clause. If a key is + found, its value is stored in this->record. + + RETURN + 0 on success + HA_ERR_KEY_NOT_FOUND if there is no key with the given prefix in any of + the ranges + other if some error +*/ + +int QUICK_GROUP_MIN_MAX_SELECT::next_max_in_range() +{ + ha_rkey_function find_flag; + uint search_prefix_len; + QUICK_RANGE *cur_range; + int result; + + DBUG_ASSERT(min_max_ranges.elements > 0); + + for (uint range_idx= min_max_ranges.elements; range_idx > 0; range_idx--) + { /* Search from the right-most range to the left. */ + get_dynamic(&min_max_ranges, (gptr)&cur_range, range_idx - 1); + + /* + If the current value for the min/max argument is smaller than the left + boundary of cur_range, there is no need to check this range. + */ + if (range_idx != min_max_ranges.elements && + !(cur_range->flag & NO_MIN_RANGE) && + (key_cmp(min_max_arg_part, (const byte*) cur_range->min_key, + min_max_arg_len) == -1)) + continue; + + if (cur_range->flag & NO_MAX_RANGE) + { + find_flag= HA_READ_PREFIX_LAST; + search_prefix_len= real_prefix_len; + } + else + { + /* Extend the search key with the upper boundary for this range. */ + memcpy(group_prefix + real_prefix_len, cur_range->max_key, + cur_range->max_length); + search_prefix_len= real_prefix_len + min_max_arg_len; + find_flag= (cur_range->flag & EQ_RANGE) ? + HA_READ_KEY_EXACT : (cur_range->flag & NEAR_MAX) ? + HA_READ_BEFORE_KEY : HA_READ_PREFIX_LAST_OR_PREV; + } + + result= file->index_read(record, group_prefix, search_prefix_len, + find_flag); + + if ((result == HA_ERR_KEY_NOT_FOUND) && (cur_range->flag & EQ_RANGE)) + continue; /* Check the next range. */ + else if (result) + /* + In no key was found with this upper bound, there certainly are no keys + in the ranges to the left. + */ + return result; + + /* A key was found. */ + if (cur_range->flag & EQ_RANGE) + return result; /* No need to perform the checks below for equal keys. */ + + /* Check if record belongs to the current group. */ + if (key_cmp(index_info->key_part, group_prefix, real_prefix_len)) + { + result = HA_ERR_KEY_NOT_FOUND; + continue; + } + + /* If there is a lower limit, check if the found key is in the range. */ + if ( !(cur_range->flag & NO_MIN_RANGE) ) + { + /* Compose the MIN key for the range. */ + byte *min_key= (byte*) my_alloca(real_prefix_len + min_max_arg_len); + memcpy(min_key, group_prefix, real_prefix_len); + memcpy(min_key + real_prefix_len, cur_range->min_key, + cur_range->min_length); + /* Compare the found key with min_key. */ + int cmp_res= key_cmp(index_info->key_part, min_key, + real_prefix_len + min_max_arg_len); + if (!((cur_range->flag & NEAR_MIN) && (cmp_res == 1) || + (cmp_res >= 0))) + continue; + } + /* If we got to this point, the current key qualifies as MAX. */ + return result; + } + return HA_ERR_KEY_NOT_FOUND; +} + + +/* + Update all MIN function results with the newly found value. 
+ + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::update_min_result() + + DESCRIPTION + The method iterates through all MIN functions and updates the result value + of each function by calling Item_sum::reset(), which in turn picks the new + result value from this->head->record[0], previously updated by + next_min(). The updated value is stored in a member variable of each of the + Item_sum objects, depending on the value type. + + IMPLEMENTATION + The update must be done separately for MIN and MAX, immediately after + next_min() was called and before next_max() is called, because both MIN and + MAX take their result value from the same buffer this->head->record[0] + (i.e. this->record). + + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::update_min_result() +{ + Item_sum *min_func; + + min_functions_it->rewind(); + while ((min_func= (*min_functions_it)++)) + min_func->reset(); +} + + +/* + Update all MAX function results with the newly found value. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::update_max_result() + + DESCRIPTION + The method iterates through all MAX functions and updates the result value + of each function by calling Item_sum::reset(), which in turn picks the new + result value from this->head->record[0], previously updated by + next_max(). The updated value is stored in a member variable of each of the + Item_sum objects, depending on the value type. + + IMPLEMENTATION + The update must be done separately for MIN and MAX, immediately after + next_max() was called, because both MIN and MAX take their result value + from the same buffer this->head->record[0] (i.e. this->record). + + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::update_max_result() +{ + Item_sum *max_func; + + max_functions_it->rewind(); + while ((max_func= (*max_functions_it)++)) + max_func->reset(); +} + + +/* + Append comma-separated list of keys this quick select uses to key_names; + append comma-separated list of corresponding used lengths to used_lengths. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths() + key_names [out] Names of used indexes + used_lengths [out] Corresponding lengths of the index names + + DESCRIPTION + This method is used by select_describe to extract the names of the + indexes used by a quick select. + +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::add_keys_and_lengths(String *key_names, + String *used_lengths) +{ + char buf[64]; + uint length; + key_names->append(index_info->name); + length= longlong2str(max_used_key_length, buf, 10) - buf; + used_lengths->append(buf, length); +} + + +#ifndef DBUG_OFF + +static void print_sel_tree(PARAM *param, SEL_TREE *tree, key_map *tree_map, + const char *msg) +{ + SEL_ARG **key,**end; + int idx; + char buff[1024]; + DBUG_ENTER("print_sel_tree"); + if (! _db_on_) + DBUG_VOID_RETURN; + + String tmp(buff,sizeof(buff),&my_charset_bin); + tmp.length(0); + for (idx= 0,key=tree->keys, end=key+param->keys ; + key != end ; + key++,idx++) + { + if (tree_map->is_set(idx)) + { + uint keynr= param->real_keynr[idx]; + if (tmp.length()) + tmp.append(','); + tmp.append(param->table->key_info[keynr].name); + } + } + if (!tmp.length()) + tmp.append("(empty)"); + + DBUG_PRINT("info", ("SEL_TREE %p (%s) scans:%s", tree, msg, tmp.ptr())); + + DBUG_VOID_RETURN; +} + + +static void print_ror_scans_arr(TABLE *table, const char *msg, + struct st_ror_scan_info **start, + struct st_ror_scan_info **end) +{ + DBUG_ENTER("print_ror_scans"); + if (! 
_db_on_) + DBUG_VOID_RETURN; + + char buff[1024]; + String tmp(buff,sizeof(buff),&my_charset_bin); + tmp.length(0); + for(;start != end; start++) + { + if (tmp.length()) + tmp.append(','); + tmp.append(table->key_info[(*start)->keynr].name); + } + if (!tmp.length()) + tmp.append("(empty)"); + DBUG_PRINT("info", ("ROR key scans (%s): %s", msg, tmp.ptr())); + DBUG_VOID_RETURN; +} + + /***************************************************************************** ** Print a quick range for debugging ** TODO: @@ -2912,8 +8602,6 @@ bool QUICK_SELECT_DESC::test_if_null_range(QUICK_RANGE *range_arg, ** of locking the DEBUG stream ! *****************************************************************************/ -#ifndef DBUG_OFF - static void print_key(KEY_PART *key_part,const char *key,uint used_length) { @@ -2937,8 +8625,11 @@ print_key(KEY_PART *key_part,const char *key,uint used_length) key++; // Skip null byte store_length--; } - field->set_key_image((char*) key, key_part->length, field->charset()); - field->val_str(&tmp); + field->set_key_image((char*) key, key_part->length); + if (field->type() == MYSQL_TYPE_BIT) + (void) field->val_int_as_str(&tmp, 1); + else + field->val_str(&tmp); fwrite(tmp.ptr(),sizeof(char),tmp.length(),DBUG_FILE); if (key+store_length < key_end) fputc('/',DBUG_FILE); @@ -2946,48 +8637,166 @@ print_key(KEY_PART *key_part,const char *key,uint used_length) } -static void print_quick(QUICK_SELECT *quick,const key_map* needed_reg) +static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg) { - QUICK_RANGE *range; char buf[MAX_KEY/8+1]; - DBUG_ENTER("print_param"); + DBUG_ENTER("print_quick"); if (! _db_on_ || !quick) DBUG_VOID_RETURN; + DBUG_LOCK_FILE; + + quick->dbug_dump(0, TRUE); + fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf)); + + DBUG_UNLOCK_FILE; + DBUG_VOID_RETURN; +} + - List_iterator<QUICK_RANGE> li(quick->ranges); +static void print_rowid(byte* val, int len) +{ + byte *pb; DBUG_LOCK_FILE; - fprintf(DBUG_FILE,"Used quick_range on key: %d (other_keys: 0x%s):\n", - quick->index, needed_reg->print(buf)); - while ((range=li++)) + fputc('\"', DBUG_FILE); + for (pb= val; pb!= val + len; ++pb) + fprintf(DBUG_FILE, "%c", *pb); + fprintf(DBUG_FILE, "\", hex: "); + + for (pb= val; pb!= val + len; ++pb) + fprintf(DBUG_FILE, "%x ", *pb); + fputc('\n', DBUG_FILE); + DBUG_UNLOCK_FILE; +} + +void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose) +{ + fprintf(DBUG_FILE, "%*squick range select, key %s, length: %d\n", + indent, "", head->key_info[index].name, max_used_key_length); + + if (verbose) { - if (!(range->flag & NO_MIN_RANGE)) + QUICK_RANGE *range; + QUICK_RANGE **pr= (QUICK_RANGE**)ranges.buffer; + QUICK_RANGE **last_range= pr + ranges.elements; + for (; pr!=last_range; ++pr) { - print_key(quick->key_parts,range->min_key,range->min_length); - if (range->flag & NEAR_MIN) - fputs(" < ",DBUG_FILE); - else - fputs(" <= ",DBUG_FILE); - } - fputs("X",DBUG_FILE); + fprintf(DBUG_FILE, "%*s", indent + 2, ""); + range= *pr; + if (!(range->flag & NO_MIN_RANGE)) + { + print_key(key_parts,range->min_key,range->min_length); + if (range->flag & NEAR_MIN) + fputs(" < ",DBUG_FILE); + else + fputs(" <= ",DBUG_FILE); + } + fputs("X",DBUG_FILE); - if (!(range->flag & NO_MAX_RANGE)) - { - if (range->flag & NEAR_MAX) - fputs(" < ",DBUG_FILE); - else - fputs(" <= ",DBUG_FILE); - print_key(quick->key_parts,range->max_key,range->max_length); + if (!(range->flag & NO_MAX_RANGE)) + { + if (range->flag & NEAR_MAX) + fputs(" < ",DBUG_FILE); + else + fputs(" <= 
",DBUG_FILE); + print_key(key_parts,range->max_key,range->max_length); + } + fputs("\n",DBUG_FILE); } - fputs("\n",DBUG_FILE); } - DBUG_UNLOCK_FILE; - DBUG_VOID_RETURN; } -#endif +void QUICK_INDEX_MERGE_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + QUICK_RANGE_SELECT *quick; + fprintf(DBUG_FILE, "%*squick index_merge select\n", indent, ""); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + if (pk_quick_select) + { + fprintf(DBUG_FILE, "%*sclustered PK quick:\n", indent, ""); + pk_quick_select->dbug_dump(indent+2, verbose); + } + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + +void QUICK_ROR_INTERSECT_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); + QUICK_RANGE_SELECT *quick; + fprintf(DBUG_FILE, "%*squick ROR-intersect select, %scovering\n", + indent, "", need_to_fetch_row? "":"non-"); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + if (cpk_quick) + { + fprintf(DBUG_FILE, "%*sclustered PK quick:\n", indent, ""); + cpk_quick->dbug_dump(indent+2, verbose); + } + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + +void QUICK_ROR_UNION_SELECT::dbug_dump(int indent, bool verbose) +{ + List_iterator_fast<QUICK_SELECT_I> it(quick_selects); + QUICK_SELECT_I *quick; + fprintf(DBUG_FILE, "%*squick ROR-union select\n", indent, ""); + fprintf(DBUG_FILE, "%*smerged scans {\n", indent, ""); + while ((quick= it++)) + quick->dbug_dump(indent+2, verbose); + fprintf(DBUG_FILE, "%*s}\n", indent, ""); +} + + +/* + Print quick select information to DBUG_FILE. + + SYNOPSIS + QUICK_GROUP_MIN_MAX_SELECT::dbug_dump() + indent Indentation offset + verbose If TRUE show more detailed output. + + DESCRIPTION + Print the contents of this quick select to DBUG_FILE. The method also + calls dbug_dump() for the used quick select if any. + + IMPLEMENTATION + Caller is responsible for locking DBUG_FILE before this call and unlocking + it afterwards. 
+ + RETURN + None +*/ + +void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose) +{ + fprintf(DBUG_FILE, + "%*squick_group_min_max_select: index %s (%d), length: %d\n", + indent, "", index_info->name, index, max_used_key_length); + if (key_infix_len > 0) + { + fprintf(DBUG_FILE, "%*susing key_infix with length %d:\n", + indent, "", key_infix_len); + } + if (quick_prefix_select) + { + fprintf(DBUG_FILE, "%*susing quick_range_select:\n", indent, ""); + quick_prefix_select->dbug_dump(indent + 2, verbose); + } + if (min_max_ranges.elements > 0) + { + fprintf(DBUG_FILE, "%*susing %d quick_ranges for MIN/MAX:\n", + indent, "", min_max_ranges.elements); + } +} + + +#endif /* NOT_USED */ /***************************************************************************** -** Instansiate templates +** Instantiate templates *****************************************************************************/ #ifdef __GNUC__ diff --git a/sql/opt_range.h b/sql/opt_range.h index 5a2044a59f4..71981dfb5c7 100644 --- a/sql/opt_range.h +++ b/sql/opt_range.h @@ -24,16 +24,6 @@ #pragma interface /* gcc class implementation */ #endif -#define NO_MIN_RANGE 1 -#define NO_MAX_RANGE 2 -#define NEAR_MIN 4 -#define NEAR_MAX 8 -#define UNIQUE_RANGE 16 -#define EQ_RANGE 32 -#define NULL_RANGE 64 -#define GEOM_FLAG 128 - - typedef struct st_key_part { uint16 key,part, store_length, length; uint8 null_bit; @@ -66,61 +56,600 @@ class QUICK_RANGE :public Sql_alloc { }; -class QUICK_SELECT { +/* + Quick select interface. + This class is a parent for all QUICK_*_SELECT and FT_SELECT classes. +*/ + +class QUICK_SELECT_I +{ +public: + bool sorted; + ha_rows records; /* estimate of # of records to be retrieved */ + double read_time; /* time to perform this retrieval */ + TABLE *head; + /* + Index this quick select uses, or MAX_KEY for quick selects + that use several indexes + */ + uint index; + + /* + Total length of first used_key_parts parts of the key. + Applicable if index!= MAX_KEY. + */ + uint max_used_key_length; + + /* + Max. number of (first) key parts this quick select uses for retrieval. + eg. for "(key1p1=c1 AND key1p2=c2) OR key1p1=c2" used_key_parts == 2. + Applicable if index!= MAX_KEY. + */ + uint used_key_parts; + + QUICK_SELECT_I(); + virtual ~QUICK_SELECT_I(){}; + + /* + Do post-constructor initialization. + SYNOPSIS + init() + + init() performs initializations that should have been in constructor if + it was possible to return errors from constructors. The join optimizer may + create and then delete quick selects without retrieving any rows so init() + must not contain any IO or CPU intensive code. + + If init() call fails the only valid action is to delete this quick select, + reset() and get_next() must not be called. + + RETURN + 0 OK + other Error code + */ + virtual int init() = 0; + + /* + Initialize quick select for row retrieval. + SYNOPSIS + reset() + + reset() should be called when it is certain that row retrieval will be + necessary. This call may do heavyweight initialization like buffering first + N records etc. If reset() call fails get_next() must not be called. + Note that reset() may be called several times if this quick select + executes in a subselect. + RETURN + 0 OK + other Error code + */ + virtual int reset(void) = 0; + + /* + Initialize get_next() for row retrieval. + SYNOPSIS + get_next_init() + + get_next_init() must be called before the first get_next(). + If get_next_init() call fails get_next() must not be called. 
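
Taken together, the lifecycle comments above suggest the following calling order for any quick select. This fragment is only a sketch of the documented contract (it assumes the surrounding MySQL headers, an already constructed object, and uses HA_ERR_END_OF_FILE as the end-of-scan code in the way the get_next() implementations above do); it is not a copy of an actual call site.

static int scan_all_rows(QUICK_SELECT_I *quick)
{
  int error;
  if ((error= quick->init()))          /* cheap, no row retrieval yet       */
    return error;                      /* caller should delete the object   */
  if ((error= quick->reset()))         /* may buffer rows, may do I/O       */
    return error;
  if ((error= quick->get_next_init())) /* must precede the first get_next() */
    return error;
  while (!(error= quick->get_next()))
  {
    /* one row is now available in the table's record buffer */
  }
  quick->range_end();                  /* we have looped over the whole index */
  return (error == HA_ERR_END_OF_FILE) ? 0 : error;
}
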
+ + RETURN + 0 OK + other Error code + */ + virtual int get_next_init() { return false; } + virtual int get_next() = 0; /* get next record to retrieve */ + + /* Range end should be called when we have looped over the whole index */ + virtual void range_end() {} + + virtual bool reverse_sorted() = 0; + virtual bool unique_key_range() { return false; } + + enum { + QS_TYPE_RANGE = 0, + QS_TYPE_INDEX_MERGE = 1, + QS_TYPE_RANGE_DESC = 2, + QS_TYPE_FULLTEXT = 3, + QS_TYPE_ROR_INTERSECT = 4, + QS_TYPE_ROR_UNION = 5, + QS_TYPE_GROUP_MIN_MAX = 6 + }; + + /* Get type of this quick select - one of the QS_TYPE_* values */ + virtual int get_type() = 0; + + /* + Initialize this quick select as a merged scan inside a ROR-union or a ROR- + intersection scan. The caller must not additionally call init() if this + function is called. + SYNOPSIS + init_ror_merged_scan() + reuse_handler If true, the quick select may use table->handler, otherwise + it must create and use a separate handler object. + RETURN + 0 Ok + other Error + */ + virtual int init_ror_merged_scan(bool reuse_handler) + { DBUG_ASSERT(0); return 1; } + + /* + Save ROWID of last retrieved row in file->ref. This used in ROR-merging. + */ + virtual void save_last_pos(){}; + + /* + Append comma-separated list of keys this quick select uses to key_names; + append comma-separated list of corresponding used lengths to used_lengths. + This is used by select_describe. + */ + virtual void add_keys_and_lengths(String *key_names, + String *used_lengths)=0; + + /* + Append text representation of quick select structure (what and how is + merged) to str. The result is added to "Extra" field in EXPLAIN output. + This function is implemented only by quick selects that merge other quick + selects output and/or can produce output suitable for merging. + */ + virtual void add_info_string(String *str) {}; + /* + Return 1 if any index used by this quick select + a) uses field that is listed in passed field list or + b) is automatically updated (like a timestamp) + */ + virtual bool check_if_keys_used(List<Item> *fields); + + /* + rowid of last row retrieved by this quick select. This is used only when + doing ROR-index_merge selects + */ + byte *last_rowid; + + /* + Table record buffer used by this quick select. + */ + byte *record; +#ifndef DBUG_OFF + /* + Print quick select information to DBUG_FILE. Caller is responsible + for locking DBUG_FILE before this call and unlocking it afterwards. + */ + virtual void dbug_dump(int indent, bool verbose)= 0; +#endif +}; + + +struct st_qsel_param; +class SEL_ARG; + +/* + Quick select that does a range scan on a single key. The records are + returned in key order. +*/ +class QUICK_RANGE_SELECT : public QUICK_SELECT_I +{ +protected: + bool next,dont_free; public: - bool next,dont_free,sorted; int error; - uint index, max_used_key_length, used_key_parts; - TABLE *head; +protected: handler *file; - byte *record; - List<QUICK_RANGE> ranges; - List_iterator<QUICK_RANGE> it; - QUICK_RANGE *range; - MEM_ROOT alloc; + /* + If true, this quick select has its "own" handler object which should be + closed no later then this quick select is deleted. 
+ */ + bool free_file; + bool in_range; + uint multi_range_count; /* copy from thd->variables.multi_range_count */ + uint multi_range_length; /* the allocated length for the array */ + uint multi_range_bufsiz; /* copy from thd->variables.read_rnd_buff_size */ + KEY_MULTI_RANGE *multi_range; /* the multi-range array (allocated and + freed by QUICK_RANGE_SELECT) */ + HANDLER_BUFFER *multi_range_buff; /* the handler buffer (allocated and + freed by QUICK_RANGE_SELECT) */ +protected: + friend class TRP_ROR_INTERSECT; + friend + QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + struct st_table_ref *ref); + friend bool get_quick_keys(struct st_qsel_param *param, + QUICK_RANGE_SELECT *quick,KEY_PART *key, + SEL_ARG *key_tree, + char *min_key, uint min_key_flag, + char *max_key, uint max_key_flag); + friend QUICK_RANGE_SELECT *get_quick_select(struct st_qsel_param*,uint idx, + SEL_ARG *key_tree, + MEM_ROOT *alloc); + friend class QUICK_SELECT_DESC; + friend class QUICK_INDEX_MERGE_SELECT; + friend class QUICK_ROR_INTERSECT_SELECT; + + DYNAMIC_ARRAY ranges; /* ordered array of range ptrs */ + QUICK_RANGE **cur_range; /* current element in ranges */ + + QUICK_RANGE *range; KEY_PART *key_parts; KEY_PART_INFO *key_part_info; - ha_rows records; - double read_time; + int cmp_next(QUICK_RANGE *range); + int cmp_prev(QUICK_RANGE *range); + bool row_in_ranges(); +public: + MEM_ROOT alloc; - QUICK_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc=0); - virtual ~QUICK_SELECT(); - void reset(void) { next=0; it.rewind(); } - int init() + QUICK_RANGE_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc=0, + MEM_ROOT *parent_alloc=NULL); + ~QUICK_RANGE_SELECT(); + + int init(); + int reset(void) { - key_part_info= head->key_info[index].key_part; - return error=file->ha_index_init(index); + next=0; + range= NULL; + cur_range= (QUICK_RANGE**) ranges.buffer; + /* + Note: in opt_range.cc there are places where it is assumed that this + function always succeeds + */ + return 0; } - virtual int get_next(); - virtual bool reverse_sorted() { return 0; } + int get_next_init(void); + int get_next(); + void range_end(); + int get_next_prefix(uint prefix_length, byte *cur_prefix); + bool reverse_sorted() { return 0; } bool unique_key_range(); + int init_ror_merged_scan(bool reuse_handler); + void save_last_pos() + { file->position(record); } + int get_type() { return QS_TYPE_RANGE; } + void add_keys_and_lengths(String *key_names, String *used_lengths); + void add_info_string(String *str); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif + QUICK_RANGE_SELECT(const QUICK_RANGE_SELECT& org) : QUICK_SELECT_I() + { + bcopy(&org, this, sizeof(*this)); + multi_range_length= 0; + multi_range= NULL; + multi_range_buff= NULL; + } }; -class QUICK_SELECT_GEOM: public QUICK_SELECT +class QUICK_RANGE_SELECT_GEOM: public QUICK_RANGE_SELECT { public: - QUICK_SELECT_GEOM(THD *thd, TABLE *table, uint index_arg, bool no_alloc) - :QUICK_SELECT(thd, table, index_arg, no_alloc) + QUICK_RANGE_SELECT_GEOM(THD *thd, TABLE *table, uint index_arg, + bool no_alloc, MEM_ROOT *parent_alloc) + :QUICK_RANGE_SELECT(thd, table, index_arg, no_alloc, parent_alloc) {}; virtual int get_next(); }; -class QUICK_SELECT_DESC: public QUICK_SELECT +/* + QUICK_INDEX_MERGE_SELECT - index_merge access method quick select. 
+
+  QUICK_INDEX_MERGE_SELECT uses
+   * QUICK_RANGE_SELECTs to get rows
+   * Unique class to remove duplicate rows
+
+  INDEX MERGE OPTIMIZER
+    Current implementation doesn't detect all cases where index_merge could
+    be used, in particular:
+     * index_merge will never be used if range scan is possible (even if
+       range scan is more expensive)
+
+     * index_merge+'using index' is not supported (this is a consequence of
+       the above restriction)
+
+     * If WHERE part contains complex nested AND and OR conditions, some ways
+       to retrieve rows using index_merge will not be considered. The choice
+       of read plan may depend on the order of conjuncts/disjuncts in WHERE
+       part of the query, see comments near imerge_list_or_list and
+       SEL_IMERGE::or_sel_tree_with_checks functions for details.
+
+     * There is no "index_merge_ref" method (but index_merge on non-first
+       table in join is possible with 'range checked for each record').
+
+    See comments around SEL_IMERGE class and test_quick_select for more
+    details.
+
+  ROW RETRIEVAL ALGORITHM
+
+    index_merge uses the Unique class for duplicate removal. index_merge takes
+    advantage of the Clustered Primary Key (CPK) if the table has one.
+    The index_merge algorithm consists of two phases:
+
+    Phase 1 (implemented in QUICK_INDEX_MERGE_SELECT::prepare_unique):
+      prepare()
+      {
+        activate 'index only';
+        while (retrieve next row for non-CPK scan)
+        {
+          if (there is a CPK scan and row will be retrieved by it)
+            skip this row;
+          else
+            put its rowid into Unique;
+        }
+        deactivate 'index only';
+      }
+
+    Phase 2 (implemented as a sequence of QUICK_INDEX_MERGE_SELECT::get_next
+    calls):
+
+      fetch()
+      {
+        retrieve all rows from row pointers stored in Unique;
+        free Unique;
+        retrieve all rows for CPK scan;
+      }
+*/
+
+class QUICK_INDEX_MERGE_SELECT : public QUICK_SELECT_I
+{
+public:
+  QUICK_INDEX_MERGE_SELECT(THD *thd, TABLE *table);
+  ~QUICK_INDEX_MERGE_SELECT();
+
+  int init();
+  int reset(void);
+  int get_next();
+  bool reverse_sorted() { return false; }
+  bool unique_key_range() { return false; }
+  int get_type() { return QS_TYPE_INDEX_MERGE; }
+  void add_keys_and_lengths(String *key_names, String *used_lengths);
+  void add_info_string(String *str);
+  bool check_if_keys_used(List<Item> *fields);
+#ifndef DBUG_OFF
+  void dbug_dump(int indent, bool verbose);
+#endif
+
+  bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range);
+
+  /* range quick selects this index_merge read consists of */
+  List<QUICK_RANGE_SELECT> quick_selects;
+
+  /* quick select that uses clustered primary key (NULL if none) */
+  QUICK_RANGE_SELECT* pk_quick_select;
+
+  /* true if this select is currently doing a clustered PK scan */
+  bool doing_pk_scan;
+
+  MEM_ROOT alloc;
+  THD *thd;
+  int read_keys_and_merge();
+
+  /* used to get rows collected in Unique */
+  READ_RECORD read_record;
+};
+
+
+/*
+  Rowid-Ordered Retrieval (ROR) index intersection quick select.
+  This quick select produces the intersection of the row sequences returned
+  by the several QUICK_RANGE_SELECTs it "merges".
+
+  All merged QUICK_RANGE_SELECTs must return rowids in rowid order.
+  QUICK_ROR_INTERSECT_SELECT will return rows in rowid order, too.
+
+  All merged quick selects retrieve {rowid, covered_fields} tuples (not full
+  table records).
+  QUICK_ROR_INTERSECT_SELECT retrieves full records if it is not being used
+  by QUICK_ROR_UNION_SELECT and the merged quick selects together don't
+  cover all needed fields.
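The two retrieval phases sketched above can be condensed into a self-contained example; std::set stands in for the Unique class and hard-coded rowid vectors stand in for the merged QUICK_RANGE_SELECT scans, so this illustrates the algorithm only, not the server code:

  #include <cstdio>
  #include <set>
  #include <vector>

  typedef unsigned long rowid_t;
  typedef std::vector<rowid_t> rowid_stream;   /* rowids produced by one range scan */

  int main()
  {
    rowid_stream scan1, scan2;
    scan1.push_back(4); scan1.push_back(7); scan1.push_back(9);
    scan2.push_back(7); scan2.push_back(12);

    /* Phase 1: push every rowid into a Unique-like container; duplicates
       (here rowid 7) collapse into a single entry. */
    std::set<rowid_t> unique;
    for (size_t i= 0; i < scan1.size(); i++) unique.insert(scan1[i]);
    for (size_t i= 0; i < scan2.size(); i++) unique.insert(scan2[i]);

    /* Phase 2: walk the de-duplicated, sorted rowids and fetch each row once. */
    for (std::set<rowid_t>::iterator it= unique.begin(); it != unique.end(); ++it)
      printf("fetch row with rowid %lu\n", (unsigned long) *it);
    return 0;
  }

A clustered-PK scan, when the table has one, would be consulted in phase 1 only to skip rowids it is going to produce anyway, and then be executed directly at the end of phase 2.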
+
+  If one of the merged quick selects is a Clustered PK range scan, it is
+  used only to filter the rowid sequence produced by the other merged
+  quick selects.
+*/
+
+class QUICK_ROR_INTERSECT_SELECT : public QUICK_SELECT_I
+{
+public:
+  QUICK_ROR_INTERSECT_SELECT(THD *thd, TABLE *table,
+                             bool retrieve_full_rows,
+                             MEM_ROOT *parent_alloc);
+  ~QUICK_ROR_INTERSECT_SELECT();
+
+  int init();
+  int reset(void);
+  int get_next();
+  bool reverse_sorted() { return false; }
+  bool unique_key_range() { return false; }
+  int get_type() { return QS_TYPE_ROR_INTERSECT; }
+  void add_keys_and_lengths(String *key_names, String *used_lengths);
+  void add_info_string(String *str);
+  bool check_if_keys_used(List<Item> *fields);
+#ifndef DBUG_OFF
+  void dbug_dump(int indent, bool verbose);
+#endif
+  int init_ror_merged_scan(bool reuse_handler);
+  bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range);
+
+  /*
+    Range quick selects this intersection consists of, not including
+    cpk_quick.
+  */
+  List<QUICK_RANGE_SELECT> quick_selects;
+
+  /*
+    Merged quick select that uses Clustered PK, if there is one. This quick
+    select is not used for row retrieval; it is used only to filter the
+    rowid sequence produced by the other merged quick selects.
+  */
+  QUICK_RANGE_SELECT *cpk_quick;
+
+  MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. */
+  THD *thd;       /* current thread */
+  bool need_to_fetch_row; /* if true, do retrieve full table records. */
+  /* in top-level quick select, true if merged scans were initialized */
+  bool scans_inited;
+};
+
+
+/*
+  Rowid-Ordered Retrieval index union select.
+  This quick select produces the union of the row sequences returned by the
+  several quick selects it "merges".
+
+  All merged quick selects must return rowids in rowid order.
+  QUICK_ROR_UNION_SELECT will return rows in rowid order, too.
+
+  All merged quick selects are set not to retrieve full table records.
+  ROR-union quick select always retrieves full records.
+
+*/
+
+class QUICK_ROR_UNION_SELECT : public QUICK_SELECT_I
+{
+public:
+  QUICK_ROR_UNION_SELECT(THD *thd, TABLE *table);
+  ~QUICK_ROR_UNION_SELECT();
+
+  int init();
+  int reset(void);
+  int get_next();
+  bool reverse_sorted() { return false; }
+  bool unique_key_range() { return false; }
+  int get_type() { return QS_TYPE_ROR_UNION; }
+  void add_keys_and_lengths(String *key_names, String *used_lengths);
+  void add_info_string(String *str);
+  bool check_if_keys_used(List<Item> *fields);
+#ifndef DBUG_OFF
+  void dbug_dump(int indent, bool verbose);
+#endif
+
+  bool push_quick_back(QUICK_SELECT_I *quick_sel_range);
+
+  List<QUICK_SELECT_I> quick_selects; /* Merged quick selects */
+
+  QUEUE queue;    /* Priority queue for merge operation */
+  MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. */
+
+  THD *thd;             /* current thread */
+  byte *cur_rowid;      /* buffer used in get_next() */
+  byte *prev_rowid;     /* rowid of last row returned by get_next() */
+  bool have_prev_rowid; /* true if prev_rowid has valid data */
+  uint rowid_length;    /* table rowid length */
+private:
+  static int queue_cmp(void *arg, byte *val1, byte *val2);
+  bool scans_inited;
+};
+
+
+/*
+  Index scan for GROUP-BY queries with MIN/MAX aggregate functions.
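Before turning to the GROUP-BY scan described next, the rowid-ordered merging performed by the two ROR classes above can be illustrated with a small self-contained example. Plain unsigned rowids and std::vector streams are stand-ins for handler rowid buffers, and std::set_intersection/std::set_union play the role of the handwritten merge loops (the real union uses the QUEUE priority queue declared above):

  #include <cstdio>
  #include <vector>
  #include <algorithm>
  #include <iterator>

  typedef unsigned long rowid_t;
  typedef std::vector<rowid_t> rowid_stream;     /* already sorted by rowid */

  /* ROR intersection: a row qualifies only if every merged scan produced it. */
  rowid_stream ror_intersect(const rowid_stream &a, const rowid_stream &b)
  {
    rowid_stream out;
    std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                          std::back_inserter(out));
    return out;
  }

  /* ROR union: merge the sorted streams and drop duplicates; the output stays
     in rowid order, so it can itself be merged further. */
  rowid_stream ror_union(const rowid_stream &a, const rowid_stream &b)
  {
    rowid_stream out;
    std::set_union(a.begin(), a.end(), b.begin(), b.end(),
                   std::back_inserter(out));
    return out;
  }

  int main()
  {
    rowid_stream s1, s2;
    s1.push_back(2); s1.push_back(5); s1.push_back(9);
    s2.push_back(5); s2.push_back(9); s2.push_back(11);

    rowid_stream i= ror_intersect(s1, s2), u= ror_union(s1, s2);
    for (size_t k= 0; k < i.size(); k++) printf("intersection rowid %lu\n", i[k]);
    for (size_t k= 0; k < u.size(); k++) printf("union rowid %lu\n", u[k]);
    return 0;
  }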
+ + This class provides a specialized index access method for GROUP-BY queries + of the forms: + + SELECT A_1,...,A_k, [B_1,...,B_m], [MIN(C)], [MAX(C)] + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND EQ(B_1,...,B_m)] + [AND PC(C)] + [AND PA(A_i1,...,A_iq)] + GROUP BY A_1,...,A_k; + + or + + SELECT DISTINCT A_i1,...,A_ik + FROM T + WHERE [RNG(A_1,...,A_p ; where p <= k)] + [AND PA(A_i1,...,A_iq)]; + + where all selected fields are parts of the same index. + The class of queries that can be processed by this quick select is fully + specified in the description of get_best_trp_group_min_max() in opt_range.cc. + + The get_next() method directly produces result tuples, thus obviating the + need to call end_send_group() because all grouping is already done inside + get_next(). + + Since one of the requirements is that all select fields are part of the same + index, this class produces only index keys, and not complete records. +*/ + +class QUICK_GROUP_MIN_MAX_SELECT : public QUICK_SELECT_I +{ +private: + handler *file; /* The handler used to get data. */ + JOIN *join; /* Descriptor of the current query */ + KEY *index_info; /* The index chosen for data access */ + byte *record; /* Buffer where the next record is returned. */ + byte *tmp_record; /* Temporary storage for next_min(), next_max(). */ + byte *group_prefix; /* Key prefix consisting of the GROUP fields. */ + uint group_prefix_len; /* Length of the group prefix. */ + byte *last_prefix; /* Prefix of the last group for detecting EOF. */ + bool have_min; /* Specify whether we are computing */ + bool have_max; /* a MIN, a MAX, or both. */ + bool seen_first_key; /* Denotes whether the first key was retrieved.*/ + KEY_PART_INFO *min_max_arg_part; /* The keypart of the only argument field */ + /* of all MIN/MAX functions. */ + uint min_max_arg_len; /* The length of the MIN/MAX argument field */ + byte *key_infix; /* Infix of constants from equality predicates. */ + uint key_infix_len; + DYNAMIC_ARRAY min_max_ranges; /* Array of range ptrs for the MIN/MAX field. */ + uint real_prefix_len; /* Length of key prefix extended with key_infix. */ + List<Item_sum> *min_functions; + List<Item_sum> *max_functions; + List_iterator<Item_sum> *min_functions_it; + List_iterator<Item_sum> *max_functions_it; +public: + /* + The following two members are public to allow easy access from + TRP_GROUP_MIN_MAX::make_quick() + */ + MEM_ROOT alloc; /* Memory pool for this and quick_prefix_select data. */ + QUICK_RANGE_SELECT *quick_prefix_select;/* For retrieval of group prefixes. 
*/ +private: + int next_prefix(); + int next_min_in_range(); + int next_max_in_range(); + int next_min(); + int next_max(); + void update_min_result(); + void update_max_result(); +public: + QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join, bool have_min, + bool have_max, KEY_PART_INFO *min_max_arg_part, + uint group_prefix_len, uint used_key_parts, + KEY *index_info, uint use_index, double read_cost, + ha_rows records, uint key_infix_len, + byte *key_infix, MEM_ROOT *parent_alloc); + ~QUICK_GROUP_MIN_MAX_SELECT(); + bool add_range(SEL_ARG *sel_range); + void update_key_stat(); + bool alloc_buffers(); + int init(); + int reset(); + int get_next(); + bool reverse_sorted() { return false; } + bool unique_key_range() { return false; } + int get_type() { return QS_TYPE_GROUP_MIN_MAX; } + void add_keys_and_lengths(String *key_names, String *used_lengths); +#ifndef DBUG_OFF + void dbug_dump(int indent, bool verbose); +#endif +}; + + +class QUICK_SELECT_DESC: public QUICK_RANGE_SELECT +{ +public: + QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q, uint used_key_parts); int get_next(); bool reverse_sorted() { return 1; } + int get_type() { return QS_TYPE_RANGE_DESC; } private: - int cmp_prev(QUICK_RANGE *range); bool range_reads_after_key(QUICK_RANGE *range); #ifdef NOT_USED bool test_if_null_range(QUICK_RANGE *range, uint used_key_parts); #endif - void reset(void) { next=0; rev_it.rewind(); } + int reset(void) { next=0; rev_it.rewind(); return 0; } List<QUICK_RANGE> rev_ranges; List_iterator<QUICK_RANGE> rev_it; }; @@ -128,7 +657,7 @@ private: class SQL_SELECT :public Sql_alloc { public: - QUICK_SELECT *quick; // If quick-select used + QUICK_SELECT_I *quick; // If quick-select used COND *cond; // where condition TABLE *head; IO_CACHE file; // Positions to used records @@ -150,17 +679,16 @@ class SQL_SELECT :public Sql_alloc { }; -class FT_SELECT: public QUICK_SELECT { +class FT_SELECT: public QUICK_RANGE_SELECT { public: - FT_SELECT(THD *thd, TABLE *table, uint key): - QUICK_SELECT (thd, table, key, 1) { init(); } + FT_SELECT(THD *thd, TABLE *table, uint key) : + QUICK_RANGE_SELECT (thd, table, key, 1) { init(); } ~FT_SELECT() { file->ft_end(); } - int init() { return error= file->ft_init(); } - int get_next() { return error= file->ft_read(record); } + int init() { return error=file->ft_init(); } + int get_next() { return error=file->ft_read(record); } + int get_type() { return QS_TYPE_FULLTEXT; } }; - -QUICK_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, - struct st_table_ref *ref); - +QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, + struct st_table_ref *ref); #endif diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 613b2a4aec1..80226dcfa2c 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -59,9 +59,9 @@ static int maxmin_in_range(bool max_fl, Field* field, COND *cond); SYNOPSIS opt_sum_query() - tables Tables in query - all_fields All fields to be returned - conds WHERE clause + tables list of leaves of join table tree + all_fields All fields to be returned + conds WHERE clause NOTE: This function is only called for queries with sum functions and no @@ -89,7 +89,7 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) where_tables= conds->used_tables(); /* Don't replace expression on a table that is part of an outer join */ - for (TABLE_LIST *tl=tables; tl ; tl= tl->next) + for (TABLE_LIST *tl= tables; tl; tl= tl->next_leaf) { if (tl->on_expr) { @@ -128,10 +128,10 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) { 
longlong count= 1; TABLE_LIST *table; - for (table=tables ; table ; table=table->next) + for (table= tables; table; table= table->next_leaf) { if (outer_tables || (table->table->file->table_flags() & - HA_NOT_EXACT_COUNT)) + HA_NOT_EXACT_COUNT) || table->schema_table) { const_result= 0; // Can't optimize left join break; @@ -332,11 +332,23 @@ int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds) 1 Otherwise */ -static bool simple_pred(Item_func *func_item, Item **args, bool *inv_order) +bool simple_pred(Item_func *func_item, Item **args, bool *inv_order) { Item *item; *inv_order= 0; switch (func_item->argument_count()) { + case 0: + /* MULT_EQUAL_FUNC */ + { + Item_equal *item_equal= (Item_equal *) func_item; + Item_equal_iterator it(*item_equal); + args[0]= it++; + if (it++) + return 0; + if (!(args[1]= item_equal->get_const())) + return 0; + } + break; case 1: /* field IS NULL */ item= func_item->arguments()[0]; @@ -477,6 +489,9 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, case Item_func::BETWEEN: between= 1; break; + case Item_func::MULT_EQUAL_FUNC: + eq_type= 1; + break; default: return 0; // Can't optimize function } @@ -547,8 +562,7 @@ static bool matching_cond(bool max_fl, TABLE_REF *ref, KEY *keyinfo, store_val_in_field(part->field, args[between && max_fl ? 2 : 1]); if (part->null_bit) *key_ptr++= (byte) test(part->field->is_null()); - part->field->get_key_image((char*) key_ptr, part->length, - part->field->charset(), Field::itRAW); + part->field->get_key_image((char*) key_ptr, part->length, Field::itRAW); } if (is_field_part) { diff --git a/sql/parse_file.cc b/sql/parse_file.cc new file mode 100644 index 00000000000..6c3a81384a6 --- /dev/null +++ b/sql/parse_file.cc @@ -0,0 +1,772 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +// Text .frm files management routines + +#include "mysql_priv.h" +#include <errno.h> +#include <m_ctype.h> +#include <my_sys.h> +#include <my_dir.h> + + +/* + write string with escaping + + SYNOPSIS + write_escaped_string() + file - IO_CACHE for record + val_s - string for writing + + RETURN + FALSE - OK + TRUE - error +*/ + +static my_bool +write_escaped_string(IO_CACHE *file, LEX_STRING *val_s) +{ + char *eos= val_s->str + val_s->length; + char *ptr= val_s->str; + + for (; ptr < eos; ptr++) + { + /* + Should be in sync with read_escaped_string() and + parse_quoted_escaped_string() + */ + switch(*ptr) { + case '\\': // escape character + if (my_b_append(file, (const byte *)"\\\\", 2)) + return TRUE; + break; + case '\n': // parameter value delimiter + if (my_b_append(file, (const byte *)"\\n", 2)) + return TRUE; + break; + case '\0': // problem for some string processing utilities + if (my_b_append(file, (const byte *)"\\0", 2)) + return TRUE; + break; + case 26: // problem for windows utilities (Ctrl-Z) + if (my_b_append(file, (const byte *)"\\z", 2)) + return TRUE; + break; + case '\'': // list of string delimiter + if (my_b_append(file, (const byte *)"\\\'", 2)) + return TRUE; + break; + default: + if (my_b_append(file, (const byte *)ptr, 1)) + return TRUE; + } + } + return FALSE; +} + + +/* + write parameter value to IO_CACHE + + SYNOPSIS + write_parameter() + file pointer to IO_CACHE structure for writing + base pointer to data structure + parameter pointer to parameter descriptor + old_version for returning back old version number value + + RETURN + FALSE - OK + TRUE - error +*/ + +static my_bool +write_parameter(IO_CACHE *file, gptr base, File_option *parameter, + ulonglong *old_version) +{ + char num_buf[20]; // buffer for numeric operations + // string for numeric operations + String num(num_buf, sizeof(num_buf), &my_charset_bin); + DBUG_ENTER("write_parameter"); + + switch (parameter->type) { + case FILE_OPTIONS_STRING: + { + LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset); + if (my_b_append(file, (const byte *)val_s->str, val_s->length)) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_ESTRING: + { + if (write_escaped_string(file, (LEX_STRING *)(base + parameter->offset))) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_ULONGLONG: + { + num.set(*((ulonglong *)(base + parameter->offset)), &my_charset_bin); + if (my_b_append(file, (const byte *)num.ptr(), num.length())) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_REV: + { + ulonglong *val_i= (ulonglong *)(base + parameter->offset); + *old_version= (*val_i)++; + num.set(*val_i, &my_charset_bin); + if (my_b_append(file, (const byte *)num.ptr(), num.length())) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_TIMESTAMP: + { + /* string have to be allocated already */ + LEX_STRING *val_s= (LEX_STRING *)(base + parameter->offset); + time_t tm= time(NULL); + + get_date(val_s->str, GETDATE_DATE_TIME|GETDATE_GMT|GETDATE_FIXEDLENGTH, + tm); + val_s->length= PARSE_FILE_TIMESTAMPLENGTH; + if (my_b_append(file, (const byte *)val_s->str, + PARSE_FILE_TIMESTAMPLENGTH)) + DBUG_RETURN(TRUE); + break; + } + case FILE_OPTIONS_STRLIST: + { + List_iterator_fast<LEX_STRING> it(*((List<LEX_STRING>*) + (base + parameter->offset))); + bool first= 1; + LEX_STRING *str; + while ((str= it++)) + { + // We need ' 
' after string to detect list continuation + if ((!first && my_b_append(file, (const byte *)" ", 1)) || + my_b_append(file, (const byte *)"\'", 1) || + write_escaped_string(file, str) || + my_b_append(file, (const byte *)"\'", 1)) + { + DBUG_RETURN(TRUE); + } + first= 0; + } + break; + } + default: + DBUG_ASSERT(0); // never should happened + } + DBUG_RETURN(FALSE); +} + + +/* + write new .frm + + SYNOPSIS + sql_create_definition_file() + dir directory where put .frm + file .frm file name + type .frm type string (VIEW, TABLE) + base base address for parameter reading (structure like + TABLE) + parameters parameters description + max_versions number of versions to save + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name, + const LEX_STRING *type, + gptr base, File_option *parameters, + uint max_versions) +{ + File handler; + IO_CACHE file; + char path[FN_REFLEN+1]; // +1 to put temporary file name for sure + ulonglong old_version= ULONGLONG_MAX; + int path_end; + File_option *param; + DBUG_ENTER("sql_create_definition_file"); + DBUG_PRINT("enter", ("Dir: %s, file: %s, base 0x%lx", + dir->str, file_name->str, (ulong) base)); + + fn_format(path, file_name->str, dir->str, 0, MY_UNPACK_FILENAME); + path_end= strlen(path); + + // temporary file name + path[path_end]='~'; + path[path_end+1]= '\0'; + if ((handler= my_create(path, CREATE_MODE, O_RDWR | O_TRUNC, + MYF(MY_WME))) <= 0) + { + DBUG_RETURN(TRUE); + } + + if (init_io_cache(&file, handler, 0, SEQ_READ_APPEND, 0L, 0, MYF(MY_WME))) + goto err_w_file; + + // write header (file signature) + if (my_b_append(&file, (const byte *)"TYPE=", 5) || + my_b_append(&file, (const byte *)type->str, type->length) || + my_b_append(&file, (const byte *)"\n", 1)) + goto err_w_file; + + // write parameters to temporary file + for (param= parameters; param->name.str; param++) + { + if (my_b_append(&file, (const byte *)param->name.str, + param->name.length) || + my_b_append(&file, (const byte *)"=", 1) || + write_parameter(&file, base, param, &old_version) || + my_b_append(&file, (const byte *)"\n", 1)) + goto err_w_cache; + } + + if (end_io_cache(&file)) + goto err_w_file; + + if (my_close(handler, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + + // archive copies management + path[path_end]='\0'; + if (!access(path, F_OK)) + { + if (old_version != ULONGLONG_MAX && max_versions != 0) + { + // save backup + char path_arc[FN_REFLEN]; + // backup old version + char path_to[FN_REFLEN]; + + // check archive directory existence + fn_format(path_arc, "arc", dir->str, "", MY_UNPACK_FILENAME); + if (access(path_arc, F_OK)) + { + if (my_mkdir(path_arc, 0777, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + + my_snprintf(path_to, FN_REFLEN, "%s/%s-%04lu", + path_arc, file_name->str, (ulong) old_version); + if (my_rename(path, path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + + // remove very old version + if (old_version > max_versions) + { + my_snprintf(path_to, FN_REFLEN, "%s/%s-%04lu", + path_arc, file_name->str, + (ulong)(old_version - max_versions)); + if (!access(path_arc, F_OK) && my_delete(path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + } + else + { + if (my_delete(path, MYF(MY_WME))) // no backups + { + DBUG_RETURN(TRUE); + } + } + } + + { + // rename temporary file + char path_to[FN_REFLEN]; + memcpy(path_to, path, path_end+1); + path[path_end]='~'; + if (my_rename(path, path_to, MYF(MY_WME))) + { + DBUG_RETURN(TRUE); + } + } + DBUG_RETURN(FALSE); +err_w_cache: + 
end_io_cache(&file); +err_w_file: + my_close(handler, MYF(MY_WME)); + DBUG_RETURN(TRUE); +} + + +/* + Prepare frm to parse (read to memory) + + SYNOPSIS + sql_parse_prepare() + file_name - path & filename to .frm file + mem_root - MEM_ROOT for buffer allocation + bad_format_errors - send errors on bad content + + RETURN + 0 - error + parser object + + NOTE + returned pointer + 1 will be type of .frm +*/ + +File_parser * +sql_parse_prepare(const LEX_STRING *file_name, MEM_ROOT *mem_root, + bool bad_format_errors) +{ + MY_STAT stat_info; + uint len; + char *end, *sign; + File_parser *parser; + File file; + DBUG_ENTER("sql__parse_prepare"); + + if (!my_stat(file_name->str, &stat_info, MYF(MY_WME))) + { + DBUG_RETURN(0); + } + + if (stat_info.st_size > INT_MAX-1) + { + my_error(ER_FPARSER_TOO_BIG_FILE, MYF(0), file_name->str); + DBUG_RETURN(0); + } + + if (!(parser= new(mem_root) File_parser)) + { + DBUG_RETURN(0); + } + + if (!(parser->buff= alloc_root(mem_root, stat_info.st_size+1))) + { + DBUG_RETURN(0); + } + + if ((file= my_open(file_name->str, O_RDONLY | O_SHARE, MYF(MY_WME))) < 0) + { + DBUG_RETURN(0); + } + + if ((len= my_read(file, (byte *)parser->buff, + stat_info.st_size, MYF(MY_WME))) == + MY_FILE_ERROR) + { + my_close(file, MYF(MY_WME)); + DBUG_RETURN(0); + } + + if (my_close(file, MYF(MY_WME))) + { + DBUG_RETURN(0); + } + + end= parser->end= parser->buff + len; + *end= '\0'; // barrier for more simple parsing + + // 7 = 5 (TYPE=) + 1 (letter at least of type name) + 1 ('\n') + if (len < 7 || + parser->buff[0] != 'T' || + parser->buff[1] != 'Y' || + parser->buff[2] != 'P' || + parser->buff[3] != 'E' || + parser->buff[4] != '=') + goto frm_error; + + // skip signature; + parser->file_type.str= sign= parser->buff + 5; + while (*sign >= 'A' && *sign <= 'Z' && sign < end) + sign++; + if (*sign != '\n') + goto frm_error; + parser->file_type.length= sign - parser->file_type.str; + // EOS for file signature just for safety + *sign= '\0'; + + parser->start= sign + 1; + parser->content_ok= 1; + + DBUG_RETURN(parser); + +frm_error: + if (bad_format_errors) + { + my_error(ER_FPARSER_BAD_HEADER, MYF(0), file_name->str); + DBUG_RETURN(0); + } + else + DBUG_RETURN(parser); // upper level have to check parser->ok() +} + + +/* + parse LEX_STRING + + SYNOPSIS + parse_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +static char * +parse_string(char *ptr, char *end, MEM_ROOT *mem_root, LEX_STRING *str) +{ + // get string length + char *eol= strchr(ptr, '\n'); + + if (eol >= end) + return 0; + + str->length= eol - ptr; + + if (!(str->str= alloc_root(mem_root, str->length+1))) + return 0; + + memcpy(str->str, ptr, str->length); + str->str[str->length]= '\0'; // just for safety + return eol+1; +} + + +/* + read escaped string from ptr to eol in already allocated str + + SYNOPSIS + parse_escaped_string() + ptr - pointer on string beginning + eol - pointer on character after end of string + str - target string + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +read_escaped_string(char *ptr, char *eol, LEX_STRING *str) +{ + char *write_pos= str->str; + + for(; ptr < eol; ptr++, write_pos++) + { + char c= *ptr; + if (c == '\\') + { + ptr++; + if (ptr >= eol) + return TRUE; + /* + Should be in sync with write_escaped_string() and + 
parse_quoted_escaped_string() + */ + switch(*ptr) { + case '\\': + *write_pos= '\\'; + break; + case 'n': + *write_pos= '\n'; + break; + case '0': + *write_pos= '\0'; + break; + case 'z': + *write_pos= 26; + break; + case '\'': + *write_pos= '\''; + break; + default: + return TRUE; + } + } + else + *write_pos= c; + } + str->str[str->length= write_pos-str->str]= '\0'; // just for safety + return FALSE; +} + + +/* + parse \n delimited escaped string + + SYNOPSIS + parse_escaped_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +static char * +parse_escaped_string(char *ptr, char *end, MEM_ROOT *mem_root, LEX_STRING *str) +{ + char *eol= strchr(ptr, '\n'); + + if (eol == 0 || eol >= end || + !(str->str= alloc_root(mem_root, (eol - ptr) + 1)) || + read_escaped_string(ptr, eol, str)) + return 0; + + return eol+1; +} + + +/* + parse '' delimited escaped string + + SYNOPSIS + parse_escaped_string() + ptr - pointer on string beginning + end - pointer on symbol after parsed string end (still owned + by buffer and can be accessed + mem_root - MEM_ROOT for parameter allocation + str - pointer on string, where results should be stored + + RETURN + 0 - error + # - pointer on symbol after string +*/ + +static char * +parse_quoted_escaped_string(char *ptr, char *end, + MEM_ROOT *mem_root, LEX_STRING *str) +{ + char *eol; + uint result_len= 0; + bool escaped= 0; + + // starting ' + if (*(ptr++) != '\'') + return 0; + + // find ending ' + for (eol= ptr; (*eol != '\'' || escaped) && eol < end; eol++) + { + if (!(escaped= (*eol == '\\' && !escaped))) + result_len++; + } + + // process string + if (eol >= end || + !(str->str= alloc_root(mem_root, result_len + 1)) || + read_escaped_string(ptr, eol, str)) + return 0; + + return eol+1; +} + + +/* + parse parameters + + SYNOPSIS + File_parser::parse() + base base address for parameter writing (structure like + TABLE) + mem_root MEM_ROOT for parameters allocation + parameters parameters description + required number of required parameters in above list + + RETURN + FALSE - OK + TRUE - error +*/ + +my_bool +File_parser::parse(gptr base, MEM_ROOT *mem_root, + struct File_option *parameters, uint required) +{ + uint first_param= 0, found= 0; + register char *ptr= start; + char *eol; + LEX_STRING *str; + List<LEX_STRING> *list; + DBUG_ENTER("File_parser::parse"); + + while (ptr < end && found < required) + { + char *line= ptr; + if (*ptr == '#') + { + // it is comment + if (!(ptr= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_EOF_IN_COMMENT, MYF(0), line); + DBUG_RETURN(TRUE); + } + ptr++; + } + else + { + File_option *parameter= parameters+first_param, + *parameters_end= parameters+required; + int len= 0; + for(; parameter < parameters_end; parameter++) + { + len= parameter->name.length; + // check length + if (len < (end-ptr) && ptr[len] != '=') + continue; + // check keyword + if (memcmp(parameter->name.str, ptr, len) == 0) + break; + } + + if (parameter < parameters_end) + { + found++; + /* + if we found first parameter, start search from next parameter + next time. 
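The escaping convention that write_escaped_string(), read_escaped_string() and parse_quoted_escaped_string() must keep in sync can be demonstrated with a self-contained round trip; std::string replaces IO_CACHE and LEX_STRING here, so this is an illustration of the on-disk format, not the server code. In the definition file itself each parameter is stored as name=escaped-value terminated by a real newline, after the TYPE= header written by sql_create_definition_file().

  #include <cstdio>
  #include <string>

  /* Escape the five special bytes used by the text .frm format: backslash,
     newline (parameter value delimiter), NUL, Ctrl-Z and single quote
     (string-list delimiter). */
  std::string escape(const std::string &in)
  {
    std::string out;
    for (size_t i= 0; i < in.size(); i++)
    {
      switch (in[i]) {
      case '\\': out+= "\\\\"; break;
      case '\n': out+= "\\n";  break;
      case '\0': out+= "\\0";  break;
      case 26:   out+= "\\z";  break;
      case '\'': out+= "\\'";  break;
      default:   out+= in[i];
      }
    }
    return out;
  }

  /* Inverse transformation; returns false on a dangling or unknown escape. */
  bool unescape(const std::string &in, std::string *out)
  {
    for (size_t i= 0; i < in.size(); i++)
    {
      if (in[i] != '\\') { *out+= in[i]; continue; }
      if (++i >= in.size()) return false;
      switch (in[i]) {
      case '\\': *out+= '\\'; break;
      case 'n':  *out+= '\n'; break;
      case '0':  *out+= '\0'; break;
      case 'z':  *out+= (char) 26; break;
      case '\'': *out+= '\''; break;
      default:   return false;
      }
    }
    return true;
  }

  int main()
  {
    std::string source("line1\nline2 with ' and \\"), back;
    std::string stored= escape(source);
    printf("stored form: %s\n", stored.c_str());
    if (unescape(stored, &back) && back == source)
      printf("round trip ok (%u bytes)\n", (unsigned) back.size());
    return 0;
  }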
+ (this small optimisation should work, because they should be + written in same order) + */ + if (parameter == parameters+first_param) + first_param++; + + // get value + ptr+= (len+1); + switch (parameter->type) { + case FILE_OPTIONS_STRING: + { + if (!(ptr= parse_string(ptr, end, mem_root, + (LEX_STRING *)(base + + parameter->offset)))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + break; + } + case FILE_OPTIONS_ESTRING: + { + if (!(ptr= parse_escaped_string(ptr, end, mem_root, + (LEX_STRING *) + (base + parameter->offset)))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + break; + } + case FILE_OPTIONS_ULONGLONG: + case FILE_OPTIONS_REV: + if (!(eol= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + { + int not_used; + *((ulonglong*)(base + parameter->offset))= + my_strtoll10(ptr, 0, ¬_used); + } + ptr= eol+1; + break; + case FILE_OPTIONS_TIMESTAMP: + { + /* string have to be allocated already */ + LEX_STRING *val= (LEX_STRING *)(base + parameter->offset); + /* yyyy-mm-dd HH:MM:SS = 19(PARSE_FILE_TIMESTAMPLENGTH) characters */ + if (ptr[PARSE_FILE_TIMESTAMPLENGTH] != '\n') + { + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + DBUG_RETURN(TRUE); + } + memcpy(val->str, ptr, PARSE_FILE_TIMESTAMPLENGTH); + val->str[val->length= PARSE_FILE_TIMESTAMPLENGTH]= '\0'; + ptr+= (PARSE_FILE_TIMESTAMPLENGTH+1); + break; + } + case FILE_OPTIONS_STRLIST: + { + list= (List<LEX_STRING>*)(base + parameter->offset); + + list->empty(); + // list parsing + while (ptr < end) + { + if (!(str= (LEX_STRING*)alloc_root(mem_root, + sizeof(LEX_STRING))) || + list->push_back(str, mem_root)) + goto list_err; + if(!(ptr= parse_quoted_escaped_string(ptr, end, mem_root, str))) + goto list_err_w_message; + switch (*ptr) { + case '\n': + goto end_of_list; + case ' ': + // we cant go over buffer bounds, because we have \0 at the end + ptr++; + break; + default: + goto list_err_w_message; + } + } + end_of_list: + if (*(ptr++) != '\n') + goto list_err; + break; + + list_err_w_message: + my_error(ER_FPARSER_ERROR_IN_PARAMETER, MYF(0), + parameter->name.str, line); + list_err: + DBUG_RETURN(TRUE); + } + default: + DBUG_ASSERT(0); // never should happened + } + } + else + { + // skip unknown parameter + if (!(ptr= strchr(ptr, '\n'))) + { + my_error(ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER, MYF(0), line); + DBUG_RETURN(TRUE); + } + ptr++; + } + } + } + DBUG_RETURN(FALSE); +} diff --git a/sql/parse_file.h b/sql/parse_file.h new file mode 100644 index 00000000000..82a89dffd18 --- /dev/null +++ b/sql/parse_file.h @@ -0,0 +1,69 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _PARSE_FILE_H_ +#define _PARSE_FILE_H_ + +#define PARSE_FILE_TIMESTAMPLENGTH 19 + +enum file_opt_type { + FILE_OPTIONS_STRING, /* String (LEX_STRING) */ + FILE_OPTIONS_ESTRING, /* Escaped string (LEX_STRING) */ + FILE_OPTIONS_ULONGLONG, /* ulonglong parameter (ulonglong) */ + FILE_OPTIONS_REV, /* Revision version number (ulonglong) */ + FILE_OPTIONS_TIMESTAMP, /* timestamp (LEX_STRING have to be + allocated with length 20 (19+1) */ + FILE_OPTIONS_STRLIST /* list of escaped strings + (List<LEX_STRING>) */ +}; + +struct File_option +{ + LEX_STRING name; /* Name of the option */ + int offset; /* offset to base address of value */ + file_opt_type type; /* Option type */ +}; + +class File_parser; +File_parser *sql_parse_prepare(const LEX_STRING *file_name, + MEM_ROOT *mem_root, bool bad_format_errors); + +my_bool +sql_create_definition_file(const LEX_STRING *dir, const LEX_STRING *file_name, + const LEX_STRING *type, + gptr base, File_option *parameters, uint versions); + +class File_parser: public Sql_alloc +{ + char *buff, *start, *end; + LEX_STRING file_type; + my_bool content_ok; +public: + File_parser() :buff(0), start(0), end(0), content_ok(0) + { file_type.str= 0; file_type.length= 0; } + + my_bool ok() { return content_ok; } + LEX_STRING *type() { return &file_type; } + my_bool parse(gptr base, MEM_ROOT *mem_root, + struct File_option *parameters, uint required); + + friend File_parser *sql_parse_prepare(const LEX_STRING *file_name, + MEM_ROOT *mem_root, + bool bad_format_errors); +}; + +#endif /* _PARSE_FILE_H_ */ diff --git a/sql/password.c b/sql/password.c index 04b3a46bd48..79675ade30b 100644 --- a/sql/password.c +++ b/sql/password.c @@ -65,7 +65,7 @@ #include <sha1.h> #include "mysql.h" -/************ MySQL 3.23-4.0 authentification routines: untouched ***********/ +/************ MySQL 3.23-4.0 authentication routines: untouched ***********/ /* New (MySQL 3.21+) random generation structure initialization @@ -281,7 +281,7 @@ void make_password_from_salt_323(char *to, const ulong *salt) /* - **************** MySQL 4.1.1 authentification routines ************* + **************** MySQL 4.1.1 authentication routines ************* */ /* diff --git a/sql/procedure.cc b/sql/procedure.cc index 7779f5ce085..420a4f6262b 100644 --- a/sql/procedure.cc +++ b/sql/procedure.cc @@ -65,8 +65,7 @@ setup_procedure(THD *thd,ORDER *param,select_result *result, DBUG_RETURN(proc); } } - my_printf_error(ER_UNKNOWN_PROCEDURE,ER(ER_UNKNOWN_PROCEDURE),MYF(0), - (*param->item)->name); + my_error(ER_UNKNOWN_PROCEDURE, MYF(0), (*param->item)->name); *error=1; DBUG_RETURN(0); } diff --git a/sql/procedure.h b/sql/procedure.h index 5365b2e1102..4212a9246a5 100644 --- a/sql/procedure.h +++ b/sql/procedure.h @@ -60,7 +60,7 @@ public: void set(longlong nr) { value=(double) nr; } void set(const char *str,uint length,CHARSET_INFO *cs) { int err; value=my_strntod(cs,(char*) str,length,(char**)0,&err); } - double val() { return value; } + double val_real() { return value; } longlong val_int() { return (longlong) value; } String *val_str(String *s) { s->set(value,decimals,default_charset()); return s; } unsigned int size_of() { return sizeof(*this);} @@ -78,7 +78,7 @@ public: void set(longlong nr) { value=nr; } void set(const char *str,uint length, CHARSET_INFO *cs) { int err; 
value=my_strntoll(cs,str,length,10,NULL,&err); } - double val() { return (double) value; } + double val_real() { return (double) value; } longlong val_int() { return value; } String *val_str(String *s) { s->set(value, default_charset()); return s; } unsigned int size_of() { return sizeof(*this);} @@ -91,12 +91,12 @@ public: Item_proc_string(const char *name_par,uint length) :Item_proc(name_par) { this->max_length=length; } enum Item_result result_type () const { return STRING_RESULT; } - enum_field_types field_type() const { return MYSQL_TYPE_STRING; } + enum_field_types field_type() const { return MYSQL_TYPE_VARCHAR; } void set(double nr) { str_value.set(nr, 2, default_charset()); } void set(longlong nr) { str_value.set(nr, default_charset()); } void set(const char *str, uint length, CHARSET_INFO *cs) { str_value.copy(str,length,cs); } - double val() + double val_real() { int err; CHARSET_INFO *cs=str_value.charset(); diff --git a/sql/protocol.cc b/sql/protocol.cc index 95cd8415c85..d537f9cf829 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -24,6 +24,7 @@ #endif #include "mysql_priv.h" +#include "sp_rcontext.h" #include <stdarg.h> static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024; @@ -52,21 +53,23 @@ bool Protocol_prep::net_store_data(const char *from, uint length) /* Send a error string to client */ -void send_error(THD *thd, uint sql_errno, const char *err) +void net_send_error(THD *thd, uint sql_errno, const char *err) { #ifndef EMBEDDED_LIBRARY uint length; char buff[MYSQL_ERRMSG_SIZE+2], *pos; #endif NET *net= &thd->net; - DBUG_ENTER("send_error"); + DBUG_ENTER("net_send_error"); DBUG_PRINT("enter",("sql_errno: %d err: %s", sql_errno, err ? err : net->last_error[0] ? net->last_error : "NULL")); -#ifndef EMBEDDED_LIBRARY /* TODO query cache in embedded library*/ - query_cache_abort(net); -#endif + if (thd->spcont && thd->spcont->find_handler(sql_errno, + MYSQL_ERROR::WARN_LEVEL_ERROR)) + { + DBUG_VOID_RETURN; + } thd->query_error= 1; // needed to catch query errors during replication if (!err) { @@ -124,36 +127,10 @@ void send_error(THD *thd, uint sql_errno, const char *err) thd->net.report_error= 0; /* Abort multi-result sets */ - thd->lex->found_colon= 0; thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS; DBUG_VOID_RETURN; } - -/* - Send a warning to the end user - - SYNOPSIS - send_warning() - thd Thread handler - sql_errno Warning number (error message) - err Error string. If not set, use ER(sql_errno) - - DESCRIPTION - Register the warning so that the user can get it with mysql_warnings() - Send an ok (+ warning count) to the end user. -*/ - -void send_warning(THD *thd, uint sql_errno, const char *err) -{ - DBUG_ENTER("send_warning"); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, sql_errno, - err ? err : ER(sql_errno)); - send_ok(thd); - DBUG_VOID_RETURN; -} - - /* Write error package and flush to client It's a little too low level, but I don't want to use another buffer for @@ -161,7 +138,7 @@ void send_warning(THD *thd, uint sql_errno, const char *err) */ void -net_printf(THD *thd, uint errcode, ...) +net_printf_error(THD *thd, uint errcode, ...) { va_list args; uint length,offset; @@ -174,17 +151,22 @@ net_printf(THD *thd, uint errcode, ...) 
#endif NET *net= &thd->net; - DBUG_ENTER("net_printf"); + DBUG_ENTER("net_printf_error"); DBUG_PRINT("enter",("message: %u",errcode)); + if (thd->spcont && thd->spcont->find_handler(errcode, + MYSQL_ERROR::WARN_LEVEL_ERROR)) + { + DBUG_VOID_RETURN; + } thd->query_error= 1; // needed to catch query errors during replication #ifndef EMBEDDED_LIBRARY query_cache_abort(net); // Safety #endif va_start(args,errcode); /* - The following is needed to make net_printf() work with 0 argument for - errorcode and use the argument after that as the format string. This + The following is needed to make net_printf_error() work with 0 argument + for errorcode and use the argument after that as the format string. This is useful for rare errors that are not worth the hassle to put in errmsg.sys, but at the same time, the message is not fixed text */ @@ -350,7 +332,7 @@ send_eof(THD *thd, bool no_flush) { NET *net= &thd->net; DBUG_ENTER("send_eof"); - if (net->vio != 0) + if (net->vio != 0 && !net->no_send_eof) { if (thd->client_capabilities & CLIENT_PROTOCOL_41) { @@ -489,6 +471,7 @@ bool Protocol::flush() flag Bit mask with the following functions: 1 send number of rows 2 send default values + 4 don't write eof packet DESCRIPTION Sum fields has table name empty and field_name. @@ -499,7 +482,7 @@ bool Protocol::flush() */ #ifndef EMBEDDED_LIBRARY -bool Protocol::send_fields(List<Item> *list, uint flag) +bool Protocol::send_fields(List<Item> *list, uint flags) { List_iterator_fast<Item> it(*list); Item *item; @@ -510,7 +493,7 @@ bool Protocol::send_fields(List<Item> *list, uint flag) CHARSET_INFO *thd_charset= thd->variables.character_set_results; DBUG_ENTER("send_fields"); - if (flag & 1) + if (flags & SEND_NUM_ROWS) { // Packet with number of elements char *pos=net_store_length(buff, (uint) list->elements); (void) my_net_write(&thd->net, buff,(uint) (pos-buff)); @@ -528,6 +511,11 @@ bool Protocol::send_fields(List<Item> *list, uint flag) CHARSET_INFO *cs= system_charset_info; Send_field field; item->make_field(&field); + + /* Keep things compatible for old clients */ + if (field.type == MYSQL_TYPE_VARCHAR) + field.type= MYSQL_TYPE_VAR_STRING; + prot.prepare_for_resend(); if (thd->client_capabilities & CLIENT_PROTOCOL_41) @@ -604,7 +592,7 @@ bool Protocol::send_fields(List<Item> *list, uint flag) } } local_packet->length((uint) (pos - local_packet->ptr())); - if (flag & 2) + if (flags & SEND_DEFAULTS) item->send(&prot, &tmp); // Send default value if (prot.write()) break; /* purecov: inspected */ @@ -613,11 +601,13 @@ bool Protocol::send_fields(List<Item> *list, uint flag) #endif } - my_net_write(&thd->net, eof_buff, 1); + if (flags & SEND_EOF) + my_net_write(&thd->net, eof_buff, 1); DBUG_RETURN(prepare_for_send(list)); err: - send_error(thd,ER_OUT_OF_RESOURCES); /* purecov: inspected */ + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), + MYF(0)); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } @@ -744,6 +734,7 @@ bool Protocol_simple::store(const char *from, uint length, #ifndef DEBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DECIMAL || + field_types[field_pos] == MYSQL_TYPE_BIT || (field_types[field_pos] >= MYSQL_TYPE_ENUM && field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); field_pos++; @@ -759,6 +750,7 @@ bool Protocol_simple::store(const char *from, uint length, #ifndef DEBUG_OFF DBUG_ASSERT(field_types == 0 || field_types[field_pos] == MYSQL_TYPE_DECIMAL || + field_types[field_pos] == MYSQL_TYPE_BIT || (field_types[field_pos] >= 
MYSQL_TYPE_ENUM && field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); field_pos++; @@ -972,12 +964,6 @@ void Protocol_prep::prepare_for_resend() bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) { CHARSET_INFO *tocs= thd->variables.character_set_results; -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DECIMAL || - (field_types[field_pos] >= MYSQL_TYPE_ENUM && - field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); -#endif field_pos++; return store_string_aux(from, length, fromcs, tocs); } @@ -985,12 +971,6 @@ bool Protocol_prep::store(const char *from, uint length, CHARSET_INFO *fromcs) bool Protocol_prep::store(const char *from,uint length, CHARSET_INFO *fromcs, CHARSET_INFO *tocs) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DECIMAL || - (field_types[field_pos] >= MYSQL_TYPE_ENUM && - field_types[field_pos] <= MYSQL_TYPE_GEOMETRY)); -#endif field_pos++; return store_string_aux(from, length, fromcs, tocs); } @@ -1008,10 +988,6 @@ bool Protocol_prep::store_null() bool Protocol_prep::store_tiny(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_TINY); -#endif char buff[1]; field_pos++; buff[0]= (uchar) from; @@ -1021,11 +997,6 @@ bool Protocol_prep::store_tiny(longlong from) bool Protocol_prep::store_short(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_SHORT || - field_types[field_pos] == MYSQL_TYPE_YEAR); -#endif field_pos++; char *to= packet->prep_append(2, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1037,11 +1008,6 @@ bool Protocol_prep::store_short(longlong from) bool Protocol_prep::store_long(longlong from) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_INT24 || - field_types[field_pos] == MYSQL_TYPE_LONG); -#endif field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1053,10 +1019,6 @@ bool Protocol_prep::store_long(longlong from) bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_LONGLONG); -#endif field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1068,10 +1030,6 @@ bool Protocol_prep::store_longlong(longlong from, bool unsigned_flag) bool Protocol_prep::store(float from, uint32 decimals, String *buffer) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_FLOAT); -#endif field_pos++; char *to= packet->prep_append(4, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1083,10 +1041,6 @@ bool Protocol_prep::store(float from, uint32 decimals, String *buffer) bool Protocol_prep::store(double from, uint32 decimals, String *buffer) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DOUBLE); -#endif field_pos++; char *to= packet->prep_append(8, PACKET_BUFFER_EXTRA_ALLOC); if (!to) @@ -1110,12 +1064,6 @@ bool Protocol_prep::store(Field *field) bool Protocol_prep::store(TIME *tm) { -#ifndef DEBUG_OFF - DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_DATETIME || - field_types[field_pos] == MYSQL_TYPE_DATE || - field_types[field_pos] == MYSQL_TYPE_TIMESTAMP); -#endif char buff[12],*pos; uint length; field_pos++; @@ -1150,10 +1098,6 @@ bool Protocol_prep::store_date(TIME *tm) bool Protocol_prep::store_time(TIME *tm) { -#ifndef DEBUG_OFF - 
DBUG_ASSERT(field_types == 0 || - field_types[field_pos] == MYSQL_TYPE_TIME); -#endif char buff[13], *pos; uint length; field_pos++; @@ -1180,12 +1124,3 @@ bool Protocol_prep::store_time(TIME *tm) buff[0]=(char) length; // Length is stored first return packet->append(buff, length+1, PACKET_BUFFER_EXTRA_ALLOC); } - -#ifdef EMBEDDED_LIBRARY -/* Should be removed when we define the Protocol_cursor's future */ -bool Protocol_cursor::write() -{ - return Protocol_simple::write(); -} -#endif - diff --git a/sql/protocol.h b/sql/protocol.h index a3b6da55da3..fddd3ceba94 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -50,17 +50,16 @@ public: Protocol(THD *thd_arg) { init(thd_arg); } virtual ~Protocol() {} void init(THD* thd_arg); - bool send_fields(List<Item> *list, uint flag); + + enum { SEND_NUM_ROWS= 1, SEND_DEFAULTS= 2, SEND_EOF= 4 }; + virtual bool send_fields(List<Item> *list, uint flags); + bool send_records_num(List<Item> *list, ulonglong records); bool store(I_List<i_string> *str_list); bool store(const char *from, CHARSET_INFO *cs); String *storage_packet() { return packet; } inline void free() { packet->free(); } -#ifndef EMBEDDED_LIBRARY - bool write(); -#else virtual bool write(); -#endif inline bool store(uint32 from) { return store_long((longlong) from); } inline bool store(longlong from) @@ -162,17 +161,20 @@ public: Protocol_cursor(THD *thd_arg, MEM_ROOT *ini_alloc) :Protocol_simple(thd_arg), alloc(ini_alloc) {} bool prepare_for_send(List<Item> *item_list) { + row_count= 0; fields= NULL; data= NULL; prev_record= &data; return Protocol_simple::prepare_for_send(item_list); } - bool send_fields(List<Item> *list, uint flag); + bool send_fields(List<Item> *list, uint flags); bool write(); + uint get_field_count() { return field_count; } }; void send_warning(THD *thd, uint sql_errno, const char *err=0); -void net_printf(THD *thd,uint sql_errno, ...); +void net_printf_error(THD *thd, uint sql_errno, ...); +void net_send_error(THD *thd, uint sql_errno=0, const char *err=0); void send_ok(THD *thd, ha_rows affected_rows=0L, ulonglong id=0L, const char *info=0); void send_eof(THD *thd, bool no_flush=0); diff --git a/sql/protocol_cursor.cc b/sql/protocol_cursor.cc index 5f35552c562..a5bf94469e7 100644 --- a/sql/protocol_cursor.cc +++ b/sql/protocol_cursor.cc @@ -26,22 +26,21 @@ #include "mysql_priv.h" #include <mysql.h> -bool Protocol_cursor::send_fields(List<Item> *list, uint flag) +bool Protocol_cursor::send_fields(List<Item> *list, uint flags) { List_iterator_fast<Item> it(*list); Item *item; MYSQL_FIELD *client_field; - - DBUG_ENTER("send_fields"); + DBUG_ENTER("Protocol_cursor::send_fields"); + if (prepare_for_send(list)) - return FALSE; + return FALSE; fields= (MYSQL_FIELD *)alloc_root(alloc, sizeof(MYSQL_FIELD) * field_count); if (!fields) goto err; - client_field= fields; - while ((item= it++)) + for (client_field= fields; (item= it++) ; client_field++) { Send_field server_field; item->make_field(&server_field); @@ -51,6 +50,7 @@ bool Protocol_cursor::send_fields(List<Item> *list, uint flag) client_field->name= strdup_root(alloc, server_field.col_name); client_field->org_table= strdup_root(alloc, server_field.org_table_name); client_field->org_name= strdup_root(alloc, server_field.org_col_name); + client_field->catalog= strdup_root(alloc, ""); client_field->length= server_field.length; client_field->type= server_field.type; client_field->flags= server_field.flags; @@ -60,12 +60,13 @@ bool Protocol_cursor::send_fields(List<Item> *list, uint flag) client_field->name_length= 
strlen(client_field->name); client_field->org_name_length= strlen(client_field->org_name); client_field->org_table_length= strlen(client_field->org_table); + client_field->catalog_length= 0; client_field->charsetnr= server_field.charsetnr; if (INTERNAL_NUM_FIELD(client_field)) client_field->flags|= NUM_FLAG; - if (flag & 2) + if (flags & (uint) Protocol::SEND_DEFAULTS) { char buff[80]; String tmp(buff, sizeof(buff), default_charset_info), *res; @@ -78,16 +79,19 @@ bool Protocol_cursor::send_fields(List<Item> *list, uint flag) else client_field->def=0; client_field->max_length= 0; - ++client_field; } DBUG_RETURN(FALSE); - err: - send_error(thd, ER_OUT_OF_RESOURCES); /* purecov: inspected */ + +err: + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), + MYF(0)); /* purecov: inspected */ DBUG_RETURN(TRUE); /* purecov: inspected */ } + /* Get the length of next field. Change parameter to point at fieldstart */ + bool Protocol_cursor::write() { byte *cp= (byte *)packet->ptr(); @@ -100,17 +104,18 @@ bool Protocol_cursor::write() byte *to; new_record= (MYSQL_ROWS *)alloc_root(alloc, - sizeof(MYSQL_ROWS) + (field_count + 1)*sizeof(char *) + packet->length()); + sizeof(MYSQL_ROWS) + (field_count + 1)*sizeof(char *) + packet->length()); if (!new_record) goto err; data_tmp= (byte **)(new_record + 1); new_record->data= (char **)data_tmp; - to= (byte *)(fields + field_count + 1); + to= (byte *)data_tmp + (field_count + 1)*sizeof(char *); for (; cur_field < fields_end; ++cur_field, ++data_tmp) { - if ((len=net_field_length((uchar **)&cp))) + if ((len= net_field_length((uchar **)&cp)) == 0 || + len == NULL_LENGTH) { *data_tmp= 0; } @@ -118,9 +123,10 @@ bool Protocol_cursor::write() { if ((long)len > (end_pos - cp)) { -// TODO error signal send_error(thd, CR_MALFORMED_PACKET); + // TODO error signal send_error(thd, CR_MALFORMED_PACKET); return TRUE; } + *data_tmp= to; memcpy(to,(char*) cp,len); to[len]=0; to+=len+1; @@ -129,6 +135,7 @@ bool Protocol_cursor::write() cur_field->max_length=len; } } + *data_tmp= 0; *prev_record= new_record; prev_record= &new_record->next; @@ -136,8 +143,6 @@ bool Protocol_cursor::write() row_count++; return FALSE; err: -// TODO error signal send_error(thd, ER_OUT_OF_RESOURCES); + // TODO error signal send_error(thd, ER_OUT_OF_RESOURCES); return TRUE; } - - diff --git a/sql/records.cc b/sql/records.cc index e5a0d102b10..3c0143d2307 100644 --- a/sql/records.cc +++ b/sql/records.cc @@ -100,11 +100,19 @@ void init_read_record(READ_RECORD *info,THD *thd, TABLE *table, } else if (select && select->quick) { + int error; DBUG_PRINT("info",("using rr_quick")); if (!table->file->inited) table->file->ha_index_init(select->quick->index); info->read_record=rr_quick; + + if ((error= select->quick->get_next_init())) + { + /* Cannot return error code here. Instead print to error log. 
*/ + table->file->print_error(error,MYF(ME_NOREFRESH)); + thd->fatal_error(); + } } else if (table->sort.record_pointers) { @@ -189,7 +197,7 @@ static int rr_sequential(READ_RECORD *info) { if (info->thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + info->thd->send_kill_message(); return 1; } if (tmp != HA_ERR_RECORD_DELETED) diff --git a/sql/repl_failsafe.cc b/sql/repl_failsafe.cc index 85a51ba9b51..7852993b95b 100644 --- a/sql/repl_failsafe.cc +++ b/sql/repl_failsafe.cc @@ -193,7 +193,6 @@ err: my_message(ER_UNKNOWN_ERROR, "Wrong parameters to function register_slave", MYF(0)); err2: - send_error(thd); return 1; } @@ -437,7 +436,7 @@ static Slave_log_event* find_slave_event(IO_CACHE* log, This function is broken now. See comment for translate_master(). */ -int show_new_master(THD* thd) +bool show_new_master(THD* thd) { Protocol *protocol= thd->protocol; DBUG_ENTER("show_new_master"); @@ -450,23 +449,24 @@ int show_new_master(THD* thd) { if (errmsg[0]) my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0), - "SHOW NEW MASTER", errmsg); - DBUG_RETURN(-1); + "SHOW NEW MASTER", errmsg); + DBUG_RETURN(TRUE); } else { field_list.push_back(new Item_empty_string("Log_name", 20)); field_list.push_back(new Item_return_int("Log_pos", 10, MYSQL_TYPE_LONGLONG)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); protocol->prepare_for_resend(); protocol->store(lex_mi->log_file_name, &my_charset_bin); protocol->store((ulonglong) lex_mi->pos); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } } @@ -629,7 +629,7 @@ err: } -int show_slave_hosts(THD* thd) +bool show_slave_hosts(THD* thd) { List<Item> field_list; Protocol *protocol= thd->protocol; @@ -649,8 +649,9 @@ int show_slave_hosts(THD* thd) field_list.push_back(new Item_return_int("Master_id", 10, MYSQL_TYPE_LONG)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); pthread_mutex_lock(&LOCK_slave_list); @@ -671,12 +672,12 @@ int show_slave_hosts(THD* thd) if (protocol->write()) { pthread_mutex_unlock(&LOCK_slave_list); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } pthread_mutex_unlock(&LOCK_slave_list); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -707,6 +708,7 @@ int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi) if (!mysql_real_connect(mysql, mi->host, mi->user, mi->password, 0, mi->port, 0, 0)) DBUG_RETURN(1); + mysql->reconnect= 1; DBUG_RETURN(0); } @@ -757,7 +759,7 @@ static int fetch_db_tables(THD *thd, MYSQL *mysql, const char *db, - No active transaction (flush_relay_log_info would not work in this case) */ -int load_master_data(THD* thd) +bool load_master_data(THD* thd) { MYSQL mysql; MYSQL_RES* master_status_res = 0; @@ -779,16 +781,15 @@ int load_master_data(THD* thd) (error=terminate_slave_threads(active_mi,restart_thread_mask, 1 /*skip lock*/))) { - send_error(thd,error); + my_message(error, ER(error), MYF(0)); unlock_slave_threads(active_mi); pthread_mutex_unlock(&LOCK_active_mi); - return 1; + return TRUE; } if (connect_to_master(thd, &mysql, active_mi)) { - net_printf(thd, error= ER_CONNECT_TO_MASTER, - mysql_error(&mysql)); + my_error(error= ER_CONNECT_TO_MASTER, MYF(0), mysql_error(&mysql)); goto err; } @@ -800,8 +801,7 @@ int load_master_data(THD* thd) if (mysql_real_query(&mysql, "SHOW 
DATABASES", 14) || !(db_res = mysql_store_result(&mysql))) { - net_printf(thd, error = ER_QUERY_ON_MASTER, - mysql_error(&mysql)); + my_error(error= ER_QUERY_ON_MASTER, MYF(0), mysql_error(&mysql)); goto err; } @@ -814,7 +814,7 @@ int load_master_data(THD* thd) if (!(table_res = (MYSQL_RES**)thd->alloc(num_dbs * sizeof(MYSQL_RES*)))) { - net_printf(thd, error = ER_OUTOFMEMORY); + my_message(error = ER_OUTOFMEMORY, ER(ER_OUTOFMEMORY), MYF(0)); goto err; } @@ -828,8 +828,7 @@ int load_master_data(THD* thd) mysql_real_query(&mysql, "SHOW MASTER STATUS",18) || !(master_status_res = mysql_store_result(&mysql))) { - net_printf(thd, error = ER_QUERY_ON_MASTER, - mysql_error(&mysql)); + my_error(error= ER_QUERY_ON_MASTER, MYF(0), mysql_error(&mysql)); goto err; } @@ -874,7 +873,6 @@ int load_master_data(THD* thd) if (mysql_create_db(thd, db, &create_info, 1)) { - send_error(thd, 0, 0); cleanup_mysql_results(db_res, cur_table_res - 1, table_res); goto err; } @@ -883,8 +881,7 @@ int load_master_data(THD* thd) mysql_real_query(&mysql, "SHOW TABLES", 11) || !(*cur_table_res = mysql_store_result(&mysql))) { - net_printf(thd, error = ER_QUERY_ON_MASTER, - mysql_error(&mysql)); + my_error(error= ER_QUERY_ON_MASTER, MYF(0), mysql_error(&mysql)); cleanup_mysql_results(db_res, cur_table_res - 1, table_res); goto err; } @@ -922,7 +919,7 @@ int load_master_data(THD* thd) if (init_master_info(active_mi, master_info_file, relay_log_info_file, 0, (SLAVE_IO | SLAVE_SQL))) - send_error(thd, ER_MASTER_INFO); + my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0)); strmake(active_mi->master_log_name, row[0], sizeof(active_mi->master_log_name)); active_mi->master_log_pos= my_strtoll10(row[1], (char**) 0, &error); @@ -941,8 +938,7 @@ int load_master_data(THD* thd) if (mysql_real_query(&mysql, "UNLOCK TABLES", 13)) { - net_printf(thd, error = ER_QUERY_ON_MASTER, - mysql_error(&mysql)); + my_error(error= ER_QUERY_ON_MASTER, MYF(0), mysql_error(&mysql)); goto err; } } @@ -951,10 +947,10 @@ int load_master_data(THD* thd) 0 /* not only reset, but also reinit */, &errmsg)) { - send_error(thd, 0, "Failed purging old relay logs"); + my_error(ER_RELAY_LOG_FAIL, MYF(0), errmsg); unlock_slave_threads(active_mi); pthread_mutex_unlock(&LOCK_active_mi); - return 1; + return TRUE; } pthread_mutex_lock(&active_mi->rli.data_lock); active_mi->rli.group_master_log_pos = active_mi->master_log_pos; diff --git a/sql/repl_failsafe.h b/sql/repl_failsafe.h index ad0219bb735..dfaacf557e8 100644 --- a/sql/repl_failsafe.h +++ b/sql/repl_failsafe.h @@ -38,11 +38,11 @@ int update_slave_list(MYSQL* mysql, MASTER_INFO* mi); extern HASH slave_list; -int load_master_data(THD* thd); +bool load_master_data(THD* thd); int connect_to_master(THD *thd, MYSQL* mysql, MASTER_INFO* mi); -int show_new_master(THD* thd); -int show_slave_hosts(THD* thd); +bool show_new_master(THD* thd); +bool show_slave_hosts(THD* thd); int translate_master(THD* thd, LEX_MASTER_INFO* mi, char* errmsg); void init_slave_list(); void end_slave_list(); diff --git a/sql/set_var.cc b/sql/set_var.cc index 082c55db188..d710be2bb2e 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -82,7 +82,7 @@ TYPELIB delay_key_write_typelib= delay_key_write_type_names, NULL }; -static int sys_check_charset(THD *thd, set_var *var); +static int sys_check_charset(THD *thd, set_var *var); static bool sys_update_charset(THD *thd, set_var *var); static void sys_set_default_charset(THD *thd, enum_var_type type); static int sys_check_ftb_syntax(THD *thd, set_var *var); @@ -97,6 +97,7 @@ static bool 
set_option_autocommit(THD *thd, set_var *var); static int check_log_update(THD *thd, set_var *var); static bool set_log_update(THD *thd, set_var *var); static int check_pseudo_thread_id(THD *thd, set_var *var); +static bool set_log_bin(THD *thd, set_var *var); static void fix_low_priority_updates(THD *thd, enum_var_type type); static void fix_tx_isolation(THD *thd, enum_var_type type); static void fix_net_read_timeout(THD *thd, enum_var_type type); @@ -126,6 +127,14 @@ static byte *get_warning_count(THD *thd); alphabetic order */ +sys_var_thd_ulong sys_auto_increment_increment("auto_increment_increment", + &SV::auto_increment_increment); +sys_var_thd_ulong sys_auto_increment_offset("auto_increment_offset", + &SV::auto_increment_offset); + +sys_var_bool_ptr sys_automatic_sp_privileges("automatic_sp_privileges", + &sp_automatic_privileges); + sys_var_long_ptr sys_binlog_cache_size("binlog_cache_size", &binlog_cache_size); sys_var_thd_ulong sys_bulk_insert_buff_size("bulk_insert_buffer_size", @@ -243,12 +252,13 @@ sys_var_long_ptr sys_max_relay_log_size("max_relay_log_size", fix_max_relay_log_size); sys_var_thd_ulong sys_max_sort_length("max_sort_length", &SV::max_sort_length); -sys_var_long_ptr sys_max_user_connections("max_user_connections", - &max_user_connections); +sys_var_max_user_conn sys_max_user_connections("max_user_connections"); sys_var_thd_ulong sys_max_tmp_tables("max_tmp_tables", &SV::max_tmp_tables); sys_var_long_ptr sys_max_write_lock_count("max_write_lock_count", &max_write_lock_count); +sys_var_thd_ulong sys_multi_range_count("multi_range_count", + &SV::multi_range_count); sys_var_long_ptr sys_myisam_data_pointer_size("myisam_data_pointer_size", &myisam_data_pointer_size); sys_var_thd_ulonglong sys_myisam_max_extra_sort_file_size("myisam_max_extra_sort_file_size", &SV::myisam_max_extra_sort_file_size, fix_myisam_max_extra_sort_file_size, 1); @@ -268,6 +278,10 @@ sys_var_thd_ulong sys_net_retry_count("net_retry_count", 0, fix_net_retry_count); sys_var_thd_bool sys_new_mode("new", &SV::new_mode); sys_var_thd_bool sys_old_passwords("old_passwords", &SV::old_passwords); +sys_var_thd_ulong sys_optimizer_prune_level("optimizer_prune_level", + &SV::optimizer_prune_level); +sys_var_thd_ulong sys_optimizer_search_depth("optimizer_search_depth", + &SV::optimizer_search_depth); sys_var_thd_ulong sys_preload_buff_size("preload_buffer_size", &SV::preload_buff_size); sys_var_thd_ulong sys_read_buff_size("read_buffer_size", @@ -327,6 +341,11 @@ sys_var_thd_ulong sys_sort_buffer("sort_buffer_size", &SV::sortbuff_size); sys_var_thd_sql_mode sys_sql_mode("sql_mode", &SV::sql_mode); +sys_var_thd_enum +sys_updatable_views_with_limit("updatable_views_with_limit", + &SV::updatable_views_with_limit, + &updatable_views_with_limit_typelib); + sys_var_thd_table_type sys_table_type("table_type", &SV::table_type); sys_var_thd_storage_engine sys_storage_engine("storage_engine", @@ -345,6 +364,8 @@ sys_var_thd_enum sys_tx_isolation("tx_isolation", fix_tx_isolation); sys_var_thd_ulong sys_tmp_table_size("tmp_table_size", &SV::tmp_table_size); +sys_var_bool_ptr sys_timed_mutexes("timed_mutexes", + &timed_mutexes); sys_var_thd_ulong sys_net_wait_timeout("wait_timeout", &SV::net_wait_timeout); @@ -410,8 +431,8 @@ static sys_var_thd_bit sys_log_update("sql_log_update", OPTION_UPDATE_LOG); static sys_var_thd_bit sys_log_binlog("sql_log_bin", check_log_update, - set_log_update, - OPTION_BIN_LOG); + set_log_bin, + OPTION_BIN_LOG); static sys_var_thd_bit sys_sql_warnings("sql_warnings", 0, set_option_bit, 
OPTION_WARNINGS); @@ -486,7 +507,10 @@ sys_var_const_str sys_license("license", STRINGIFY_ARG(LICENSE)); sys_var *sys_variables[]= { &sys_auto_is_null, + &sys_auto_increment_increment, + &sys_auto_increment_offset, &sys_autocommit, + &sys_automatic_sp_privileges, &sys_big_tables, &sys_big_selects, &sys_binlog_cache_size, @@ -564,6 +588,8 @@ sys_var *sys_variables[]= &sys_net_write_timeout, &sys_new_mode, &sys_old_passwords, + &sys_optimizer_prune_level, + &sys_optimizer_search_depth, &sys_preload_buff_size, &sys_pseudo_thread_id, &sys_query_alloc_block_size, @@ -611,6 +637,7 @@ sys_var *sys_variables[]= &sys_table_type, &sys_thread_cache_size, &sys_time_format, + &sys_timed_mutexes, &sys_timestamp, &sys_time_zone, &sys_tmp_table_size, @@ -632,6 +659,7 @@ sys_var *sys_variables[]= &sys_ndb_use_transactions, #endif &sys_unique_checks, + &sys_updatable_views_with_limit, &sys_warning_count }; @@ -641,6 +669,9 @@ sys_var *sys_variables[]= */ struct show_var_st init_vars[]= { + {"auto_increment_increment", (char*) &sys_auto_increment_increment, SHOW_SYS}, + {"auto_increment_offset", (char*) &sys_auto_increment_offset, SHOW_SYS}, + {sys_automatic_sp_privileges.name,(char*) &sys_automatic_sp_privileges, SHOW_SYS}, {"back_log", (char*) &back_log, SHOW_LONG}, {"basedir", mysql_home, SHOW_CHAR}, #ifdef HAVE_BERKELEY_DB @@ -688,7 +719,8 @@ struct show_var_st init_vars[]= { {"have_compress", (char*) &have_compress, SHOW_HAVE}, {"have_crypt", (char*) &have_crypt, SHOW_HAVE}, {"have_csv", (char*) &have_csv_db, SHOW_HAVE}, - {"have_example_engine", (char*) &have_example_db, SHOW_HAVE}, + {"have_example_engine", (char*) &have_example_db, SHOW_HAVE}, + {"have_federated_db", (char*) &have_federated_db, SHOW_HAVE}, {"have_geometry", (char*) &have_geometry, SHOW_HAVE}, {"have_innodb", (char*) &have_innodb, SHOW_HAVE}, {"have_isam", (char*) &have_isam, SHOW_HAVE}, @@ -708,6 +740,8 @@ struct show_var_st init_vars[]= { {"innodb_buffer_pool_size", (char*) &innobase_buffer_pool_size, SHOW_LONG }, {"innodb_data_file_path", (char*) &innobase_data_file_path, SHOW_CHAR_PTR}, {"innodb_data_home_dir", (char*) &innobase_data_home_dir, SHOW_CHAR_PTR}, + {"innodb_doublewrite", (char*) &innobase_use_doublewrite, SHOW_MY_BOOL}, + {"innodb_checksums", (char*) &innobase_use_checksums, SHOW_MY_BOOL}, {"innodb_fast_shutdown", (char*) &innobase_fast_shutdown, SHOW_MY_BOOL}, {"innodb_file_io_threads", (char*) &innobase_file_io_threads, SHOW_LONG }, {"innodb_file_per_table", (char*) &innobase_file_per_table, SHOW_MY_BOOL}, @@ -741,6 +775,8 @@ struct show_var_st init_vars[]= { SHOW_SYS}, {"language", language, SHOW_CHAR}, {"large_files_support", (char*) &opt_large_files, SHOW_BOOL}, + {"large_pages", (char*) &opt_large_pages, SHOW_MY_BOOL}, + {"large_page_size", (char*) &opt_large_page_size, SHOW_INT}, {sys_license.name, (char*) &sys_license, SHOW_SYS}, {sys_local_infile.name, (char*) &sys_local_infile, SHOW_SYS}, #ifdef HAVE_MLOCKALL @@ -805,6 +841,10 @@ struct show_var_st init_vars[]= { {sys_new_mode.name, (char*) &sys_new_mode, SHOW_SYS}, {sys_old_passwords.name, (char*) &sys_old_passwords, SHOW_SYS}, {"open_files_limit", (char*) &open_files_limit, SHOW_LONG}, + {sys_optimizer_prune_level.name, (char*) &sys_optimizer_prune_level, + SHOW_SYS}, + {sys_optimizer_search_depth.name,(char*) &sys_optimizer_search_depth, + SHOW_SYS}, {"pid_file", (char*) pidfile_name, SHOW_CHAR}, {"port", (char*) &mysqld_port, SHOW_INT}, {sys_preload_buff_size.name, (char*) &sys_preload_buff_size, SHOW_SYS}, @@ -865,12 +905,15 @@ struct show_var_st 
init_vars[]= { {"thread_stack", (char*) &thread_stack, SHOW_LONG}, {sys_time_format.name, (char*) &sys_time_format, SHOW_SYS}, {"time_zone", (char*) &sys_time_zone, SHOW_SYS}, + {sys_timed_mutexes.name, (char*) &sys_timed_mutexes, SHOW_SYS}, {sys_tmp_table_size.name, (char*) &sys_tmp_table_size, SHOW_SYS}, {"tmpdir", (char*) &opt_mysql_tmpdir, SHOW_CHAR_PTR}, {sys_trans_alloc_block_size.name, (char*) &sys_trans_alloc_block_size, SHOW_SYS}, {sys_trans_prealloc_size.name, (char*) &sys_trans_prealloc_size, SHOW_SYS}, {sys_tx_isolation.name, (char*) &sys_tx_isolation, SHOW_SYS}, + {sys_updatable_views_with_limit.name, + (char*) &sys_updatable_views_with_limit,SHOW_SYS}, {"version", server_version, SHOW_CHAR}, #ifdef HAVE_BERKELEY_DB {"version_bdb", (char*) DB_VERSION_STRING, SHOW_CHAR}, @@ -896,8 +939,8 @@ bool sys_var_str::check(THD *thd, set_var *var) return 0; if ((res=(*check_func)(thd, var)) < 0) - my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), name, - var->value->str_value.ptr()); + my_error(ER_WRONG_VALUE_FOR_VAR, MYF(0), + name, var->value->str_value.ptr()); return res; } @@ -1534,8 +1577,8 @@ Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base) { if (var_type != OPT_DEFAULT) { - net_printf(thd, ER_INCORRECT_GLOBAL_LOCAL_VAR, - name, var_type == OPT_GLOBAL ? "SESSION" : "GLOBAL"); + my_error(ER_INCORRECT_GLOBAL_LOCAL_VAR, MYF(0), + name, var_type == OPT_GLOBAL ? "SESSION" : "GLOBAL"); return 0; } /* As there was no local variable, return the global value */ @@ -1578,7 +1621,7 @@ Item *sys_var::item(THD *thd, enum_var_type var_type, LEX_STRING *base) return tmp; } default: - net_printf(thd, ER_VAR_CANT_BE_READ, name); + my_error(ER_VAR_CANT_BE_READ, MYF(0), name); } return 0; } @@ -1957,15 +2000,15 @@ void sys_var_character_set_server::set_default(THD *thd, enum_var_type type) } } -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) bool sys_var_character_set_server::check(THD *thd, set_var *var) { if ((var->type == OPT_GLOBAL) && (mysql_bin_log.is_open() || active_mi->slave_running || active_mi->rli.slave_running)) { - my_printf_error(0, "Binary logging and replication forbid changing \ -the global server character set or collation", MYF(0)); + my_error(ER_LOGING_PROHIBIT_CHANGING_OF, MYF(0), + "character set, collation"); return 1; } return sys_var_character_set::check(thd,var); @@ -2064,15 +2107,15 @@ void sys_var_collation_database::set_default(THD *thd, enum_var_type type) } } -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) bool sys_var_collation_server::check(THD *thd, set_var *var) { if ((var->type == OPT_GLOBAL) && (mysql_bin_log.is_open() || active_mi->slave_running || active_mi->rli.slave_running)) { - my_printf_error(0, "Binary logging and replication forbid changing \ -the global server character set or collation", MYF(0)); + my_error(ER_LOGING_PROHIBIT_CHANGING_OF, MYF(0), + "character set, collation"); return 1; } return sys_var_collation::check(thd,var); @@ -2350,7 +2393,7 @@ bool sys_var_slave_skip_counter::check(THD *thd, set_var *var) pthread_mutex_lock(&active_mi->rli.run_lock); if (active_mi->rli.slave_running) { - my_error(ER_SLAVE_MUST_STOP, MYF(0)); + my_message(ER_SLAVE_MUST_STOP, ER(ER_SLAVE_MUST_STOP), MYF(0)); result=1; } pthread_mutex_unlock(&active_mi->rli.run_lock); @@ -2416,19 +2459,18 @@ bool sys_var_thd_time_zone::check(THD *thd, set_var *var) String str(buff, sizeof(buff), &my_charset_latin1); String *res= var->value->val_str(&str); -#if 
defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) if ((var->type == OPT_GLOBAL) && (mysql_bin_log.is_open() || active_mi->slave_running || active_mi->rli.slave_running)) { - my_printf_error(0, "Binary logging and replication forbid changing " - "of the global server time zone", MYF(0)); + my_error(ER_LOGING_PROHIBIT_CHANGING_OF, MYF(0), "time zone"); return 1; } #endif if (!(var->save_result.time_zone= - my_tz_find(res, thd->lex->time_zone_tables_used))) + my_tz_find(res, thd->lex->time_zone_tables_used))) { my_error(ER_UNKNOWN_TIME_ZONE, MYF(0), res ? res->c_ptr() : "NULL"); return 1; @@ -2439,7 +2481,7 @@ bool sys_var_thd_time_zone::check(THD *thd, set_var *var) bool sys_var_thd_time_zone::update(THD *thd, set_var *var) { - /* We are using Time_zone object found during check() phase */ + /* We are using Time_zone object found during check() phase */ *get_tz_ptr(thd,var->type)= var->save_result.time_zone; return 0; } @@ -2483,6 +2525,51 @@ void sys_var_thd_time_zone::set_default(THD *thd, enum_var_type type) thd->variables.time_zone= global_system_variables.time_zone; } + +bool sys_var_max_user_conn::check(THD *thd, set_var *var) +{ + if (var->type == OPT_GLOBAL) + return sys_var_thd::check(thd, var); + else + { + /* + Per-session values of max_user_connections can't be set directly. + QQ: May be we should have a separate error message for this? + */ + my_error(ER_GLOBAL_VARIABLE, MYF(0), name); + return TRUE; + } +} + +bool sys_var_max_user_conn::update(THD *thd, set_var *var) +{ + DBUG_ASSERT(var->type == OPT_GLOBAL); + pthread_mutex_lock(&LOCK_global_system_variables); + max_user_connections= var->save_result.ulonglong_value; + pthread_mutex_unlock(&LOCK_global_system_variables); + return 0; +} + + +void sys_var_max_user_conn::set_default(THD *thd, enum_var_type type) +{ + DBUG_ASSERT(type == OPT_GLOBAL); + pthread_mutex_lock(&LOCK_global_system_variables); + max_user_connections= (ulong) option_limits->def_value; + pthread_mutex_unlock(&LOCK_global_system_variables); +} + + +byte *sys_var_max_user_conn::value_ptr(THD *thd, enum_var_type type, + LEX_STRING *base) +{ + if (type != OPT_GLOBAL && + thd->user_connect && thd->user_connect->user_resources.user_conn) + return (byte*) &(thd->user_connect->user_resources.user_conn); + return (byte*) &(max_user_connections); +} + + /* Functions to update thd->options bits */ @@ -2542,6 +2629,30 @@ static int check_log_update(THD *thd, set_var *var) static bool set_log_update(THD *thd, set_var *var) { + /* + The update log is not supported anymore since 5.0. + See sql/mysqld.cc/, comments in function init_server_components() for an + explaination of the different warnings we send below + */ + + if (opt_sql_bin_update) + { + ((sys_var_thd_bit*) var->var)->bit_flag|= (OPTION_BIN_LOG | + OPTION_UPDATE_LOG); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_UPDATE_LOG_DEPRECATED_TRANSLATED, + ER(ER_UPDATE_LOG_DEPRECATED_TRANSLATED)); + } + else + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_UPDATE_LOG_DEPRECATED_IGNORED, + ER(ER_UPDATE_LOG_DEPRECATED_IGNORED)); + set_option_bit(thd, var); + return 0; +} + +static bool set_log_bin(THD *thd, set_var *var) +{ if (opt_sql_bin_update) ((sys_var_thd_bit*) var->var)->bit_flag|= (OPTION_BIN_LOG | OPTION_UPDATE_LOG); @@ -2676,9 +2787,6 @@ void set_var_free() length Length of variable. 
zero means that we should use strlen() on the variable - NOTE - We have to use net_printf() as this is called during the parsing stage - RETURN VALUES pointer pointer to variable definitions 0 Unknown variable (error message is given) @@ -2691,7 +2799,7 @@ sys_var *find_sys_var(const char *str, uint length) length ? length : strlen(str)); if (!var) - net_printf(current_thd, ER_UNKNOWN_SYSTEM_VARIABLE, (char*) str); + my_error(ER_UNKNOWN_SYSTEM_VARIABLE, MYF(0), (char*) str); return var; } @@ -2781,9 +2889,8 @@ int set_var::check(THD *thd) { if (var->check_type(type)) { - my_error(type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE, - MYF(0), - var->name); + int err= type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE; + my_error(err, MYF(0), var->name); return -1; } if ((type == OPT_GLOBAL && check_global_access(thd, SUPER_ACL))) @@ -2827,9 +2934,8 @@ int set_var::light_check(THD *thd) { if (var->check_type(type)) { - my_error(type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE, - MYF(0), - var->name); + int err= type == OPT_GLOBAL ? ER_LOCAL_VARIABLE : ER_GLOBAL_VARIABLE; + my_error(err, MYF(0), var->name); return -1; } if (type == OPT_GLOBAL && check_global_access(thd, SUPER_ACL)) @@ -2896,7 +3002,7 @@ int set_var_user::update(THD *thd) if (user_var_item->update()) { /* Give an error if it's not given already */ - my_error(ER_SET_CONSTANTS_ONLY, MYF(0)); + my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY), MYF(0)); return -1; } return 0; @@ -2960,7 +3066,7 @@ bool sys_var_thd_storage_engine::check(THD *thd, set_var *var) err: my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), value); - return 1; + return 1; } @@ -3076,7 +3182,7 @@ ulong fix_sql_mode(ulong sql_mode) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | - MODE_NO_FIELD_OPTIONS); + MODE_NO_FIELD_OPTIONS | MODE_NO_AUTO_CREATE_USER); if (sql_mode & MODE_MSSQL) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | @@ -3096,11 +3202,15 @@ ulong fix_sql_mode(ulong sql_mode) sql_mode|= (MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | MODE_IGNORE_SPACE | MODE_NO_KEY_OPTIONS | MODE_NO_TABLE_OPTIONS | - MODE_NO_FIELD_OPTIONS); + MODE_NO_FIELD_OPTIONS | MODE_NO_AUTO_CREATE_USER); if (sql_mode & MODE_MYSQL40) - sql_mode|= MODE_NO_FIELD_OPTIONS; + sql_mode|= MODE_NO_FIELD_OPTIONS | MODE_HIGH_NOT_PRECEDENCE; if (sql_mode & MODE_MYSQL323) - sql_mode|= MODE_NO_FIELD_OPTIONS; + sql_mode|= MODE_NO_FIELD_OPTIONS | MODE_HIGH_NOT_PRECEDENCE; + if (sql_mode & MODE_TRADITIONAL) + sql_mode|= (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES | + MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE | + MODE_ERROR_FOR_DIVISION_BY_ZERO | MODE_NO_AUTO_CREATE_USER); return sql_mode; } diff --git a/sql/set_var.h b/sql/set_var.h index 4a4e631d88c..7c9f11041c8 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -49,20 +49,14 @@ public: const char *name; sys_after_update_func after_update; -#if MYSQL_VERSION_ID < 50000 bool no_support_one_shot; -#endif sys_var(const char *name_arg) :name(name_arg), after_update(0) -#if MYSQL_VERSION_ID < 50000 , no_support_one_shot(1) -#endif {} sys_var(const char *name_arg,sys_after_update_func func) :name(name_arg), after_update(func) -#if MYSQL_VERSION_ID < 50000 , no_support_one_shot(1) -#endif {} virtual ~sys_var() {} virtual bool check(THD *thd, set_var *var); @@ -507,9 +501,7 @@ class sys_var_collation :public sys_var_thd public: sys_var_collation(const char *name_arg) :sys_var_thd(name_arg) { -#if MYSQL_VERSION_ID < 50000 
no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); SHOW_TYPE type() { return SHOW_CHAR; } @@ -529,13 +521,11 @@ public: sys_var_thd(name_arg) { nullable= 0; -#if MYSQL_VERSION_ID < 50000 /* In fact only almost all variables derived from sys_var_character_set support ONE_SHOT; character_set_results doesn't. But that's good enough. */ no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); SHOW_TYPE type() { return SHOW_CHAR; } @@ -574,7 +564,7 @@ class sys_var_character_set_server :public sys_var_character_set public: sys_var_character_set_server(const char *name_arg) : sys_var_character_set(name_arg) {} -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) bool check(THD *thd, set_var *var); #endif void set_default(THD *thd, enum_var_type type); @@ -612,7 +602,7 @@ class sys_var_collation_server :public sys_var_collation { public: sys_var_collation_server(const char *name_arg) :sys_var_collation(name_arg) {} -#if defined(HAVE_REPLICATION) && (MYSQL_VERSION_ID < 50000) +#if defined(HAVE_REPLICATION) bool check(THD *thd, set_var *var); #endif bool update(THD *thd, set_var *var); @@ -722,9 +712,7 @@ public: sys_var_thd_time_zone(const char *name_arg): sys_var_thd(name_arg) { -#if MYSQL_VERSION_ID < 50000 no_support_one_shot= 0; -#endif } bool check(THD *thd, set_var *var); SHOW_TYPE type() { return SHOW_CHAR; } @@ -739,6 +727,23 @@ public: Time_zone **get_tz_ptr(THD *thd, enum_var_type type); }; + +class sys_var_max_user_conn : public sys_var_thd +{ +public: + sys_var_max_user_conn(const char *name_arg): + sys_var_thd(name_arg) {} + bool check(THD *thd, set_var *var); + bool update(THD *thd, set_var *var); + bool check_default(enum_var_type type) + { + return type != OPT_GLOBAL || !option_limits; + } + void set_default(THD *thd, enum_var_type type); + SHOW_TYPE type() { return SHOW_LONG; } + byte *value_ptr(THD *thd, enum_var_type type, LEX_STRING *base); +}; + /**************************************************************************** Classes for parsing of the SET command ****************************************************************************/ @@ -752,9 +757,7 @@ public: virtual int update(THD *thd)=0; /* To set the value */ /* light check for PS */ virtual int light_check(THD *thd) { return check(thd); } -#if MYSQL_VERSION_ID < 50000 virtual bool no_support_one_shot() { return 1; } -#endif }; @@ -797,9 +800,7 @@ public: int check(THD *thd); int update(THD *thd); int light_check(THD *thd); -#if MYSQL_VERSION_ID < 50000 bool no_support_one_shot() { return var->no_support_one_shot; } -#endif }; diff --git a/sql/share/Makefile.am b/sql/share/Makefile.am index 662159a9c63..cfbbb36c489 100644 --- a/sql/share/Makefile.am +++ b/sql/share/Makefile.am @@ -1,5 +1,7 @@ ## Process this file with automake to create Makefile.in +EXTRA_DIST= errmsg.txt + dist-hook: for dir in charsets @AVAILABLE_LANGUAGES@; do \ test -d $(distdir)/$$dir || mkdir $(distdir)/$$dir; \ @@ -9,10 +11,13 @@ dist-hook: $(INSTALL_DATA) $(srcdir)/charsets/README $(distdir)/charsets $(INSTALL_DATA) $(srcdir)/charsets/Index.xml $(distdir)/charsets -all: @AVAILABLE_LANGUAGES_ERRORS@ +all: english/errmsg.sys + +# Use the english errmsg.sys as a flag that all errmsg.sys needs to be +# created. 
Normally these are created by extra/Makefile.am -# this is ugly, but portable -@AVAILABLE_LANGUAGES_ERRORS_RULES@ +english/errmsg.sys: errmsg.txt + $(top_builddir)/extra/comp_err --charset=$(srcdir)/charsets --out-dir=$(top_builddir)/sql/share/ --header_file=$(top_builddir)/extra/mysqld_error.h --state_file=$(top_builddir)/extra/sql_state.h --in_file=errmsg.txt install-data-local: for lang in @AVAILABLE_LANGUAGES@; \ @@ -20,18 +25,12 @@ install-data-local: $(mkinstalldirs) $(DESTDIR)$(pkgdatadir)/$$lang; \ $(INSTALL_DATA) $(srcdir)/$$lang/errmsg.sys \ $(DESTDIR)$(pkgdatadir)/$$lang/errmsg.sys; \ - $(INSTALL_DATA) $(srcdir)/$$lang/errmsg.txt \ - $(DESTDIR)$(pkgdatadir)/$$lang/errmsg.txt; \ done $(mkinstalldirs) $(DESTDIR)$(pkgdatadir)/charsets + $(INSTALL_DATA) $(srcdir)/errmsg.txt \ + $(DESTDIR)$(pkgdatadir)/errmsg.txt; \ $(INSTALL_DATA) $(srcdir)/charsets/README $(DESTDIR)$(pkgdatadir)/charsets/README $(INSTALL_DATA) $(srcdir)/charsets/*.xml $(DESTDIR)$(pkgdatadir)/charsets -fix_errors: - for lang in @AVAILABLE_LANGUAGES@; \ - do \ - ../../extra/comp_err -C$(srcdir)/charsets/ $(srcdir)/$$lang/errmsg.txt $(srcdir)/$$lang/errmsg.sys; \ - done - # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/sql/share/czech/errmsg.txt b/sql/share/czech/errmsg.txt deleted file mode 100644 index 022a624c921..00000000000 --- a/sql/share/czech/errmsg.txt +++ /dev/null @@ -1,333 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Modifikoval Petr -B©najdr, snajdr@pvt.net, snajdr@cpress.cz v.0.01 - ISO LATIN-8852-2 - Dal-B¹í verze Jan Pazdziora, adelton@fi.muni.cz - Tue Nov 18 17:53:55 MET 1997 - Tue Dec 2 19:08:54 MET 1997 podle 3.21.15c - Thu May 7 17:40:49 MET DST 1998 podle 3.21.29 - Thu Apr 1 20:49:57 CEST 1999 podle 3.22.20 - Mon Aug 9 13:30:09 MET DST 1999 podle 3.23.2 - Thu Nov 30 14:02:52 MET 2000 podle 3.23.28 -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NE", -"ANO", -"Nemohu vytvo-Bøit soubor '%-.64s' (chybový kód: %d)", -"Nemohu vytvo-Bøit tabulku '%-.64s' (chybový kód: %d)", -"Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)", -"Nemohu vytvo-Bøit databázi '%-.64s'; databáze ji¾ existuje", -"Nemohu zru-B¹it databázi '%-.64s', databáze neexistuje", -"Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.64s', chyba %d)", -"Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.64s', chyba %d)", -"Chyba p-Bøi výmazu '%-.64s' (chybový kód: %d)", -"Nemohu -Bèíst záznam v systémové tabulce", -"Nemohu z-Bískat stav '%-.64s' (chybový kód: %d)", -"Chyba p-Bøi zji¹»ování pracovní adresáø (chybový kód: %d)", -"Nemohu uzamknout soubor (chybov-Bý kód: %d)", -"Nemohu otev-Bøít soubor '%-.64s' (chybový kód: %d)", -"Nemohu naj-Bít soubor '%-.64s' (chybový kód: %d)", -"Nemohu -Bèíst adresáø '%-.64s' (chybový kód: %d)", -"Nemohu zm-Bìnit adresáø na '%-.64s' (chybový kód: %d)", -"Z-Báznam byl zmìnìn od posledního ètení v tabulce '%-.64s'", -"Disk je pln-Bý (%s), èekám na uvolnìní nìjakého místa ...", -"Nemohu zapsat, zdvojen-Bý klíè v tabulce '%-.64s'", -"Chyba p-Bøi zavírání '%-.64s' (chybový kód: %d)", -"Chyba p-Bøi ètení souboru '%-.64s' (chybový kód: %d)", -"Chyba p-Bøi pøejmenování '%-.64s' na '%-.64s' (chybový kód: %d)", -"Chyba p-Bøi zápisu do souboru '%-.64s' (chybový kód: %d)", -"'%-.64s' je zam-Bèen proti zmìnám", -"T-Bøídìní pøeru¹eno", -"Pohled '%-.64s' pro '%-.64s' neexistuje", -"Obsluha tabulky vr-Bátila chybu %d", -"Obsluha tabulky '%-.64s' nem-Bá tento parametr", -"Nemohu naj-Bít záznam v '%-.64s'", -"Nespr-Bávná informace v souboru '%-.64s'", -"Nespr-Bávný klíè pro tabulku '%-.64s'; pokuste se ho opravit", -"Star-Bý klíèový soubor pro '%-.64s'; opravte ho.", -"'%-.64s' je jen pro -Bètení", -"M-Bálo pamìti. Pøestartujte daemona a zkuste znovu (je potøeba %d bytù)", -"M-Bálo pamìti pro tøídìní. 
Zvy¹te velikost tøídícího bufferu", -"Neo-Bèekávaný konec souboru pøi ètení '%-.64s' (chybový kód: %d)", -"P-Bøíli¹ mnoho spojení", -"M-Bálo prostoru/pamìti pro thread", -"Nemohu zjistit jm-Béno stroje pro Va¹i adresu", -"Chyba p-Bøi ustavování spojení", -"P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' k databázi '%-.64s' není povolen", -"P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' (s heslem %s)", -"Nebyla vybr-Bána ¾ádná databáze", -"Nezn-Bámý pøíkaz", -"Sloupec '%-.64s' nem-Bù¾e být null", -"Nezn-Bámá databáze '%-.64s'", -"Tabulka '%-.64s' ji-B¾ existuje", -"Nezn-Bámá tabulka '%-.64s'", -"Sloupec '%-.64s' v %s nen-Bí zcela jasný", -"Prob-Bíhá ukonèování práce serveru", -"Nezn-Bámý sloupec '%-.64s' v %s", -"Pou-B¾ité '%-.64s' nebylo v group by", -"Nemohu pou-B¾ít group na '%-.64s'", -"P-Bøíkaz obsahuje zároveò funkci sum a sloupce", -"Po-Bèet sloupcù neodpovídá zadané hodnotì", -"Jm-Béno identifikátoru '%-.64s' je pøíli¹ dlouhé", -"Zdvojen-Bé jméno sloupce '%-.64s'", -"Zdvojen-Bé jméno klíèe '%-.64s'", -"Zvojen-Bý klíè '%-.64s' (èíslo klíèe %d)", -"Chybn-Bá specifikace sloupce '%-.64s'", -"%s bl-Bízko '%-.64s' na øádku %d", -"V-Býsledek dotazu je prázdný", -"Nejednozna-Bèná tabulka/alias: '%-.64s'", -"Chybn-Bá defaultní hodnota pro '%-.64s'", -"Definov-Báno více primárních klíèù", -"Zad-Báno pøíli¹ mnoho klíèù, je povoleno nejvíce %d klíèù", -"Zad-Báno pøíli¹ mnoho èást klíèù, je povoleno nejvíce %d èástí", -"Zadan-Bý klíè byl pøíli¹ dlouhý, nejvìt¹í délka klíèe je %d", -"Kl-Bíèový sloupec '%-.64s' v tabulce neexistuje", -"Blob sloupec '%-.64s' nem-Bù¾e být pou¾it jako klíè", -"P-Bøíli¹ velká délka sloupce '%-.64s' (nejvíce %d). Pou¾ijte BLOB", -"M-Bù¾ete mít pouze jedno AUTO pole a to musí být definováno jako klíè", -"%s: p-Bøipraven na spojení", -"%s: norm-Bální ukonèení\n", -"%s: p-Bøijat signal %d, konèím\n", -"%s: ukon-Bèení práce hotovo\n", -"%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.64s'\n", -"Nemohu vytvo-Bøit IP socket", -"Tabulka '%-.64s' nem-Bá index odpovídající CREATE INDEX. Vytvoøte tabulku znovu", -"Argument separ-Bátoru polo¾ek nebyl oèekáván. Pøeètìte si manuál", -"Nen-Bí mo¾né pou¾ít pevný rowlength s BLOBem. Pou¾ijte 'fields terminated by'.", -"Soubor '%-.64s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny", -"Soubor '%-.64s' ji-B¾ existuje", -"Z-Báznamù: %ld Vymazáno: %ld Pøeskoèeno: %ld Varování: %ld", -"Z-Báznamù: %ld Zdvojených: %ld", -"Chybn-Bá podèást klíèe -- není to øetìzec nebo je del¹í ne¾ délka èásti klíèe", -"Nen-Bí mo¾né vymazat v¹echny polo¾ky s ALTER TABLE. Pou¾ijte DROP TABLE", -"Nemohu zru-B¹it '%-.64s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe", -"Z-Báznamù: %ld Zdvojených: %ld Varování: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Nezn-Bámá identifikace threadu: %lu", -"Nejste vlastn-Bíkem threadu %lu", -"Nejsou pou-B¾ity ¾ádné tabulky", -"P-Bøíli¹ mnoho øetìzcù pro sloupec %s a SET", -"Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %s.(1-999)\n", -"Tabulka '%-.64s' byla zam-Bèena s READ a nemù¾e být zmìnìna", -"Tabulka '%-.64s' nebyla zam-Bèena s LOCK TABLES", -"Blob polo-B¾ka '%-.64s' nemù¾e mít defaultní hodnotu", -"Nep-Bøípustné jméno databáze '%-.64s'", -"Nep-Bøípustné jméno tabulky '%-.64s'", -"Zadan-Bý SELECT by procházel pøíli¹ mnoho záznamù a trval velmi dlouho. 
Zkontrolujte tvar WHERE a je-li SELECT v poøádku, pou¾ijte SET SQL_BIG_SELECTS=1", -"Nezn-Bámá chyba", -"Nezn-Bámá procedura %s", -"Chybn-Bý poèet parametrù procedury %s", -"Chybn-Bé parametry procedury %s", -"Nezn-Bámá tabulka '%-.64s' v %s", -"Polo-B¾ka '%-.64s' je zadána dvakrát", -"Nespr-Bávné pou¾ití funkce group", -"Tabulka '%-.64s' pou-B¾ívá roz¹íøení, které v této verzi MySQL není", -"Tabulka mus-Bí mít alespoò jeden sloupec", -"Tabulka '%-.64s' je pln-Bá", -"Nezn-Bámá znaková sada: '%-.64s'", -"P-Bøíli¹ mnoho tabulek, MySQL jich mù¾e mít v joinu jen %d", -"P-Bøíli¹ mnoho polo¾ek", -"-BØádek je pøíli¹ velký. Maximální velikost øádku, nepoèítaje polo¾ky blob, je %d. Musíte zmìnit nìkteré polo¾ky na blob", -"P-Bøeteèení zásobníku threadu: pou¾ito %ld z %ld. Pou¾ijte 'mysqld -O thread_stack=#' k zadání vìt¹ího zásobníku", -"V OUTER JOIN byl nalezen k-Bøí¾ový odkaz. Provìøte ON podmínky", -"Sloupec '%-.32s' je pou-B¾it s UNIQUE nebo INDEX, ale není definován jako NOT NULL", -"Nemohu na-Bèíst funkci '%-.64s'", -"Nemohu inicializovat funkci '%-.64s'; %-.80s", -"Pro sd-Bílenou knihovnu nejsou povoleny cesty", -"Funkce '%-.64s' ji-B¾ existuje", -"Nemohu otev-Bøít sdílenou knihovnu '%-.64s' (errno: %d %s)", -"Nemohu naj-Bít funkci '%-.64s' v knihovnì'", -"Funkce '%-.64s' nen-Bí definována", -"Stroj '%-.64s' je zablokov-Bán kvùli mnoha chybám pøi pøipojování. Odblokujete pou¾itím 'mysqladmin flush-hosts'", -"Stroj '%-.64s' nem-Bá povoleno se k tomuto MySQL serveru pøipojit", -"Pou-B¾íváte MySQL jako anonymní u¾ivatel a anonymní u¾ivatelé nemají povoleno mìnit hesla", -"Na zm-Bìnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql", -"V tabulce user nen-Bí ¾ádný odpovídající øádek", -"Nalezen-Bých øádkù: %ld Zmìnìno: %ld Varování: %ld", -"Nemohu vytvo-Bøit nový thread (errno %d). Pokud je je¹tì nìjaká volná pamì», podívejte se do manuálu na èást o chybách specifických pro jednotlivé operaèní systémy", -"Po-Bèet sloupcù neodpovídá poètu hodnot na øádku %ld", -"Nemohu znovuotev-Bøít tabulku: '%-.64s", -"Neplatn-Bé u¾ití hodnoty NULL", -"Regul-Bární výraz vrátil chybu '%-.64s'", -"Pokud nen-Bí ¾ádná GROUP BY klauzule, není dovoleno souèasné pou¾ití GROUP polo¾ek (MIN(),MAX(),COUNT()...) s ne GROUP polo¾kami", -"Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s'", -"%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'", -"%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'", -"Neplatn-Bý pøíkaz GRANT/REVOKE. 
Prosím, pøeètìte si v manuálu, jaká privilegia je mo¾né pou¾ít.", -"Argument p-Bøíkazu GRANT u¾ivatel nebo stroj je pøíli¹ dlouhý", -"Tabulka '%-.64s.%s' neexistuje", -"Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'", -"Pou-B¾itý pøíkaz není v této verzi MySQL povolen", -"Va-B¹e syntaxe je nìjaká divná", -"Zpo-B¾dìný insert threadu nebyl schopen získat po¾adovaný zámek pro tabulku %-.64s", -"P-Bøíli¹ mnoho zpo¾dìných threadù", -"Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.64s' (%s)", -"Zji-B¹tìn pøíchozí packet del¹í ne¾ 'max_allowed_packet'", -"Zji-B¹tìna chyba pøi ètení z roury spojení", -"Zji-B¹tìna chyba fcntl()", -"P-Bøíchozí packety v chybném poøadí", -"Nemohu rozkomprimovat komunika-Bèní packet", -"Zji-B¹tìna chyba pøi ètení komunikaèního packetu", -"Zji-B¹tìn timeout pøi ètení komunikaèního packetu", -"Zji-B¹tìna chyba pøi zápisu komunikaèního packetu", -"Zji-B¹tìn timeout pøi zápisu komunikaèního packetu", -"V-Býsledný øetìzec je del¹í ne¾ 'max_allowed_packet'", -"Typ pou-B¾ité tabulky nepodporuje BLOB/TEXT sloupce", -"Typ pou-B¾ité tabulky nepodporuje AUTO_INCREMENT sloupce", -"INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.64s' pou¾ít, proto¾e je zamèená pomocí LOCK TABLES", -"Nespr-Bávné jméno sloupce '%-.100s'", -"Handler pou-B¾ité tabulky neumí indexovat sloupce '%-.64s'", -"V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì", -"Kv-Bùli unique constraintu nemozu zapsat do tabulky '%-.64s'", -"BLOB sloupec '%-.64s' je pou-B¾it ve specifikaci klíèe bez délky", -"V-B¹echny èásti primárního klíèe musejí být NOT NULL; pokud potøebujete NULL, pou¾ijte UNIQUE", -"V-Býsledek obsahuje více ne¾ jeden øádek", -"Tento typ tabulky vy-B¾aduje primární klíè", -"Tato verze MySQL nen-Bí zkompilována s podporou RAID", -"Update tabulky bez WHERE s kl-Bíèem není v módu bezpeèných update dovoleno", -"Kl-Bíè '%-.64s' v tabulce '%-.64s' neexistuje", -"Nemohu otev-Bøít tabulku", -"Handler tabulky nepodporuje %s", -"Proveden-Bí tohoto pøíkazu není v transakci dovoleno", -"Chyba %d p-Bøi COMMIT", -"Chyba %d p-Bøi ROLLBACK", -"Chyba %d p-Bøi FLUSH_LOGS", -"Chyba %d p-Bøi CHECKPOINT", -"Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: `%-.64s' (%-.64s) bylo pøeru¹eno", -"Handler tabulky nepodporuje bin-Bární dump", -"Binlog uzav-Bøen pøi pokusu o FLUSH MASTER", -"P-Bøebudování indexu dumpnuté tabulky '%-.64s' nebylo úspì¹né", -"Chyba masteru: '%-.64s'", -"S-Bí»ová chyba pøi ètení z masteru", -"S-Bí»ová chyba pøi zápisu na master", -"-B®ádný sloupec nemá vytvoøen fulltextový index", -"Nemohu prov-Bést zadaný pøíkaz, proto¾e existují aktivní zamèené tabulky nebo aktivní transakce", -"Nezn-Bámá systémová promìnná '%-.64s'", -"Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a mìla by být opravena", -"Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a poslední (automatická?) 
oprava se nezdaøila", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", 
-"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updatable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/danish/errmsg.txt b/sql/share/danish/errmsg.txt deleted file mode 100644 index 18ebe5712f8..00000000000 --- a/sql/share/danish/errmsg.txt +++ /dev/null @@ -1,324 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* Knud Riishøjgård knudriis@post.tele.dk 99 && - Carsten H. Pedersen, carsten.pedersen@bitbybit.dk oct. 1999 / aug. 2001. */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NEJ", -"JA", -"Kan ikke oprette filen '%-.64s' (Fejlkode: %d)", -"Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)", -"Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)", -"Kan ikke oprette databasen '%-.64s'; databasen eksisterer", -"Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke", -"Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)", -"Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)", -"Fejl ved sletning af '%-.64s' (Fejlkode: %d)", -"Kan ikke læse posten i systemfolderen", -"Kan ikke læse status af '%-.64s' (Fejlkode: %d)", -"Kan ikke læse aktive folder (Fejlkode: %d)", -"Kan ikke låse fil (Fejlkode: %d)", -"Kan ikke åbne fil: '%-.64s' (Fejlkode: %d)", -"Kan ikke finde fila: '%-.64s' (Fejlkode: %d)", -"Kan ikke læse folder '%-.64s' (Fejlkode: %d)", -"Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)", -"Posten er ændret siden sidste læsning '%-.64s'", -"Ikke mere diskplads (%s). 
Venter på at få frigjort plads...", -"Kan ikke skrive, flere ens nøgler i tabellen '%-.64s'", -"Fejl ved lukning af '%-.64s' (Fejlkode: %d)", -"Fejl ved læsning af '%-.64s' (Fejlkode: %d)", -"Fejl ved omdøbning af '%-.64s' til '%-.64s' (Fejlkode: %d)", -"Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)", -"'%-.64s' er låst mod opdateringer", -"Sortering afbrudt", -"View '%-.64s' eksisterer ikke for '%-.64s'", -"Modtog fejl %d fra tabel håndteringen", -"Denne mulighed eksisterer ikke for tabeltypen '%-.64s'", -"Kan ikke finde posten i '%-.64s'", -"Forkert indhold i: '%-.64s'", -"Fejl i indeksfilen til tabellen '%-.64s'; prøv at reparere den", -"Gammel indeksfil for tabellen '%-.64s'; reparer den", -"'%-.64s' er skrivebeskyttet", -"Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)", -"Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren", -"Uventet afslutning på fil (eof) ved læsning af filen '%-.64s' (Fejlkode: %d)", -"For mange forbindelser (connections)", -"Udgået for tråde/hukommelse", -"Kan ikke få værtsnavn for din adresse", -"Forkert håndtryk (handshake)", -"Adgang nægtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'", -"Adgang nægtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)", -"Ingen database valgt", -"Ukendt kommando", -"Kolonne '%-.64s' kan ikke være NULL", -"Ukendt database '%-.64s'", -"Tabellen '%-.64s' findes allerede", -"Ukendt tabel '%-.64s'", -"Felt: '%-.64s' i tabel %s er ikke entydigt", -"Database nedlukning er i gang", -"Ukendt kolonne '%-.64s' i tabel %s", -"Brugte '%-.64s' som ikke var i group by", -"Kan ikke gruppere på '%-.64s'", -"Udtrykket har summer (sum) funktioner og kolonner i samme udtryk", -"Kolonne tæller stemmer ikke med antallet af værdier", -"Navnet '%-.64s' er for langt", -"Feltnavnet '%-.64s' findes allerede", -"Indeksnavnet '%-.64s' findes allerede", -"Ens værdier '%-.64s' for indeks %d", -"Forkert kolonnespecifikaton for felt '%-.64s'", -"%s nær '%-.64s' på linje %d", -"Forespørgsel var tom", -"Tabellen/aliaset: '%-.64s' er ikke unikt", -"Ugyldig standardværdi for '%-.64s'", -"Flere primærnøgler specificeret", -"For mange nøgler specificeret. Kun %d nøgler må bruges", -"For mange nøgledele specificeret. Kun %d dele må bruges", -"Specificeret nøgle var for lang. Maksimal nøglelængde er %d", -"Nøglefeltet '%-.64s' eksisterer ikke i tabellen", -"BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks", -"For stor feltlængde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet", -"Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret", -"%s: klar til tilslutninger", -"%s: Normal nedlukning\n", -"%s: Fangede signal %d. Afslutter!!\n", -"%s: Server lukket\n", -"%s: Forceret nedlukning af tråd: %ld bruger: '%-.64s'\n", -"Kan ikke oprette IP socket", -"Tabellen '%-.64s' har ikke den nøgle, som blev brugt i CREATE INDEX. Genopret tabellen", -"Felt adskiller er ikke som forventet, se dokumentationen", -"Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'.", -"Filen '%-.64s' skal være i database-folderen og kunne læses af alle", -"Filen '%-.64s' eksisterer allerede", -"Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld", -"Poster: %ld Ens: %ld", -"Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden", -"Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet.", -"Kan ikke udføre DROP '%-.64s'. 
Undersøg om feltet/nøglen eksisterer.", -"Poster: %ld Ens: %ld Advarsler: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Ukendt tråd id: %lu", -"Du er ikke ejer af tråden %lu", -"Ingen tabeller i brug", -"For mange tekststrenge til specifikationen af SET i kolonne %-.64s", -"Kan ikke lave unikt log-filnavn %s.(1-999)\n", -"Tabellen '%-.64s' var låst med READ lås og kan ikke opdateres", -"Tabellen '%-.64s' var ikke låst med LOCK TABLES", -"BLOB feltet '%-.64s' kan ikke have en standard værdi", -"Ugyldigt database navn '%-.64s'", -"Ugyldigt tabel navn '%-.64s'", -"SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt", -"Ukendt fejl", -"Ukendt procedure %s", -"Forkert antal parametre til proceduren %s", -"Forkert(e) parametre til proceduren %s", -"Ukendt tabel '%-.64s' i %s", -"Feltet '%-.64s' er anvendt to gange", -"Forkert brug af grupperings-funktion", -"Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version", -"En tabel skal have mindst een kolonne", -"Tabellen '%-.64s' er fuld", -"Ukendt tegnsæt: '%-.64s'", -"For mange tabeller. MySQL kan kun bruge %d tabeller i et join", -"For mange felter", -"For store poster. Max post størrelse, uden BLOB's, er %d. Du må lave nogle felter til BLOB's", -"Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt", -"Krydsreferencer fundet i OUTER JOIN; check dine ON conditions", -"Kolonne '%-.32s' bruges som UNIQUE eller INDEX men er ikke defineret som NOT NULL", -"Kan ikke læse funktionen '%-.64s'", -"Kan ikke starte funktionen '%-.64s'; %-.80s", -"Angivelse af sti ikke tilladt for delt bibliotek", -"Funktionen '%-.64s' findes allerede", -"Kan ikke åbne delt bibliotek '%-.64s' (errno: %d %s)", -"Kan ikke finde funktionen '%-.64s' i bibliotek'", -"Funktionen '%-.64s' er ikke defineret", -"Værten er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'", -"Værten '%-.64s' kan ikke tilkoble denne MySQL-server", -"Du bruger MySQL som anonym bruger. Anonyme brugere må ikke ændre adgangskoder", -"Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ændre andres adgangskoder", -"Kan ikke finde nogen tilsvarende poster i bruger tabellen", -"Poster fundet: %ld Ændret: %ld Advarsler: %ld", -"Kan ikke danne en ny tråd (fejl nr. %d). Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl", -"Kolonne antallet stemmer ikke overens med antallet af værdier i post %ld", -"Kan ikke genåbne tabel '%-.64s", -"Forkert brug af nulværdi (NULL)", -"Fik fejl '%-.64s' fra regexp", -"Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat", -"Denne tilladelse findes ikke for brugeren '%-.32s' på vært '%-.64s'", -"%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'", -"%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'", -"Forkert GRANT/REVOKE kommando. 
Se i brugervejledningen hvilke privilegier der kan specificeres.", -"Værts- eller brugernavn for langt til GRANT", -"Tabellen '%-.64s.%-.64s' eksisterer ikke", -"Denne tilladelse eksisterer ikke for brugeren '%-.32s' på vært '%-.64s' for tabellen '%-.64s'", -"Den brugte kommando er ikke tilladt med denne udgave af MySQL", -"Der er en fejl i SQL syntaksen", -"Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.64s", -"For mange slettede tråde (threads) i brug", -"Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.64s' (%-.64s)", -"Modtog en datapakke som var større end 'max_allowed_packet'", -"Fik læsefejl fra forbindelse (connection pipe)", -"Fik fejlmeddelelse fra fcntl()", -"Modtog ikke datapakker i korrekt rækkefølge", -"Kunne ikke dekomprimere kommunikations-pakke (communication packet)", -"Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)", -"Timeout-fejl ved læsning af kommunukations-pakker (communication packets)", -"Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)", -"Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)", -"Strengen med resultater er større end 'max_allowed_packet'", -"Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner", -"Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner", -"INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er låst med LOCK TABLES", -"Forkert kolonnenavn '%-.100s'", -"Den brugte tabeltype kan ikke indeksere kolonnen '%-.64s'", -"Tabellerne i MERGE er ikke defineret ens", -"Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler", -"BLOB kolonnen '%-.64s' brugt i nøglespecifikation uden nøglelængde", -"Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet", -"Resultatet bestod af mere end een række", -"Denne tabeltype kræver en primærnøgle", -"Denne udgave af MySQL er ikke oversat med understøttelse af RAID", -"Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt", -"Nøglen '%-.64s' eksisterer ikke i tabellen '%-.64s'", -"Kan ikke åbne tabellen", -"Denne tabeltype understøtter ikke %s", -"Du må ikke bruge denne kommando i en transaktion", -"Modtog fejl %d mens kommandoen COMMIT blev udført", -"Modtog fejl %d mens kommandoen ROLLBACK blev udført", -"Modtog fejl %d mens kommandoen FLUSH_LOGS blev udført", -"Modtog fejl %d mens kommandoen CHECKPOINT blev udført", -"Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: `%-.64s' (%-.64s)", -"Denne tabeltype unserstøtter ikke binært tabeldump", -"Binlog blev lukket mens kommandoen FLUSH MASTER blev udført", -"Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'", -"Fejl fra master: '%-.64s'", -"Netværksfejl ved læsning fra master", -"Netværksfejl ved skrivning til master", -"Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen", -"Kan ikke udføre den givne kommando fordi der findes aktive, låste tabeller eller fordi der udføres en transaktion", -"Ukendt systemvariabel '%-.64s'", -"Tabellen '%-.64s' er markeret med fejl og bør repareres", -"Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede", -"Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles", -"Fler-udtryks transaktion krævede mere plads en 'max_binlog_cache_size' bytes. 
Forhøj værdien af denne variabel og prøv igen", -"Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE", -"Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE", -"Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Kunne ikke danne en slave-tråd; check systemressourcerne", -"Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser", -"Du må kun bruge konstantudtryk med SET", -"Lock wait timeout overskredet", -"Det totale antal låse overstiger størrelsen på låse-tabellen", -"Update lås kan ikke opnås under en READ UNCOMMITTED transaktion", -"DROP DATABASE er ikke tilladt mens en tråd holder på globalt read lock", -"CREATE DATABASE er ikke tilladt mens en tråd holder på globalt read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%ld) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", 
-"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Modtog fejl %d '%-.100s' fra %s", -"Modtog temporary fejl %d '%-.100s' fra %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/dutch/errmsg.txt b/sql/share/dutch/errmsg.txt deleted file mode 100644 index 54377b5949a..00000000000 --- a/sql/share/dutch/errmsg.txt +++ /dev/null @@ -1,333 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Dutch error messages (share/dutch/errmsg.txt) - 2001-08-02 - Arjen Lentz (agl@bitbike.com) - Completed earlier partial translation; worked on consistency and spelling. - 2002-01-29 - Arjen Lentz (arjen@mysql.com) - 2002-04-11 - Arjen Lentz (arjen@mysql.com) - 2002-06-13 - Arjen Lentz (arjen@mysql.com) - 2002-08-08 - Arjen Lentz (arjen@mysql.com) - 2002-08-22 - Arjen Lentz (arjen@mysql.com) - Translated new error messages. -*/ - -character-set=latin1 - -"hashchk", -"isamchk", -"NEE", -"JA", -"Kan file '%-.64s' niet aanmaken (Errcode: %d)", -"Kan tabel '%-.64s' niet aanmaken (Errcode: %d)", -"Kan database '%-.64s' niet aanmaken (Errcode: %d)", -"Kan database '%-.64s' niet aanmaken; database bestaat reeds", -"Kan database '%-.64s' niet verwijderen; database bestaat niet", -"Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)", -"Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)", -"Fout bij het verwijderen van '%-.64s' (Errcode: %d)", -"Kan record niet lezen in de systeem tabel", -"Kan de status niet krijgen van '%-.64s' (Errcode: %d)", -"Kan de werkdirectory niet krijgen (Errcode: %d)", -"Kan de file niet blokeren (Errcode: %d)", -"Kan de file '%-.64s' niet openen (Errcode: %d)", -"Kan de file: '%-.64s' niet vinden (Errcode: %d)", -"Kan de directory niet lezen van '%-.64s' (Errcode: %d)", -"Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)", -"Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'", -"Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt...", -"Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'", -"Fout bij het sluiten van '%-.64s' (Errcode: %d)", -"Fout bij het lezen van file '%-.64s' (Errcode: %d)", -"Fout bij het hernoemen van '%-.64s' naar '%-.64s' (Errcode: %d)", -"Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)", -"'%-.64s' is geblokeerd tegen veranderingen", -"Sorteren afgebroken", -"View '%-.64s' bestaat niet voor '%-.64s'", -"Fout %d van tabel handler", -"Tabel handler voor '%-.64s' heeft deze optie niet", -"Kan record niet vinden in '%-.64s'", -"Verkeerde info in file: '%-.64s'", -"Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren", -"Oude zoeksleutel file voor tabel '%-.64s'; repareer het!", -"'%-.64s' is alleen leesbaar", -"Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)", -"Geen geheugen om te sorteren. Verhoog de server sort buffer size", -"Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)", -"Te veel verbindingen", -"Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. 
Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen", -"Kan de hostname niet krijgen van uw adres", -"Verkeerde handshake", -"Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'", -"Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)", -"Geen database geselecteerd", -"Onbekend commando", -"Kolom '%-.64s' kan niet null zijn", -"Onbekende database '%-.64s'", -"Tabel '%-.64s' bestaat al", -"Onbekende tabel '%-.64s'", -"Kolom: '%-.64s' in %s is niet eenduidig", -"Bezig met het stoppen van de server", -"Onbekende kolom '%-.64s' in %s", -"Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt", -"Kan '%-.64s' niet groeperen", -"Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht", -"Het aantal kolommen komt niet overeen met het aantal opgegeven waardes", -"Naam voor herkenning '%-.64s' is te lang", -"Dubbele kolom naam '%-.64s'", -"Dubbele zoeksleutel naam '%-.64s'", -"Dubbele ingang '%-.64s' voor zoeksleutel %d", -"Verkeerde kolom specificatie voor kolom '%-.64s'", -"%s bij '%-.64s' in regel %d", -"Query was leeg", -"Niet unieke waarde tabel/alias: '%-.64s'", -"Foutieve standaard waarde voor '%-.64s'", -"Meerdere primaire zoeksleutels gedefinieerd", -"Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan", -"Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan", -"Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d", -"Zoeksleutel kolom '%-.64s' bestaat niet in tabel", -"BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie", -"Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB", -"Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd.", -"%s: klaar voor verbindingen", -"%s: Normaal afgesloten \n", -"%s: Signaal %d. Systeem breekt af!\n", -"%s: Afsluiten afgerond\n", -"%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.64s'\n", -"Kan IP-socket niet openen", -"Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw", -"De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding", -"Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'.", -"Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn.", -"Het bestand '%-.64s' bestaat reeds", -"Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld", -"Records: %ld Dubbel: %ld", -"Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel", -"Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!", -"Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat.", -"Records: %ld Dubbel: %ld Waarschuwing: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Onbekend thread id: %lu", -"U bent geen bezitter van thread %lu", -"Geen tabellen gebruikt.", -"Teveel strings voor kolom %s en SET", -"Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n", -"Tabel '%-.64s' was gelocked met een lock om te lezen. 
Derhalve kunnen geen wijzigingen worden opgeslagen.", -"Tabel '%-.64s' was niet gelocked met LOCK TABLES", -"Blob veld '%-.64s' can geen standaardwaarde bevatten", -"Databasenaam '%-.64s' is niet getoegestaan", -"Niet toegestane tabelnaam '%-.64s'", -"Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is.", -"Onbekende Fout", -"Onbekende procedure %s", -"Foutief aantal parameters doorgegeven aan procedure %s", -"Foutieve parameters voor procedure %s", -"Onbekende tabel '%-.64s' in %s", -"Veld '%-.64s' is dubbel gespecificeerd", -"Ongeldig gebruik van GROUP-functie", -"Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt.", -"Een tabel moet minstens 1 kolom bevatten", -"De tabel '%-.64s' is vol", -"Onbekende character set: '%-.64s'", -"Teveel tabellen. MySQL kan slechts %d tabellen in een join bevatten", -"Te veel velden", -"Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %d. U dient sommige velden in blobs te veranderen.", -"Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk).", -"Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions", -"Kolom '%-.64s' wordt gebruikt met UNIQUE of INDEX maar is niet gedefinieerd als NOT NULL", -"Kan functie '%-.64s' niet laden", -"Kan functie '%-.64s' niet initialiseren; %-.80s", -"Geen pad toegestaan voor shared library", -"Functie '%-.64s' bestaat reeds", -"Kan shared library '%-.64s' niet openen (Errcode: %d %s)", -"Kan functie '%-.64s' niet in library vinden", -"Functie '%-.64s' is niet gedefinieerd", -"Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'", -"Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server", -"U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen", -"U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen", -"Kan geen enkele passende rij vinden in de gebruikers tabel", -"Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld", -"Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout", -"Kolom aantal komt niet overeen met waarde aantal in rij %ld", -"Kan tabel niet opnieuw openen: '%-.64s", -"Foutief gebruik van de NULL waarde", -"Fout '%-.64s' ontvangen van regexp", -"Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is", -"Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'", -"%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'", -"%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'", -"Foutief GRANT/REVOKE commando. 
Raadpleeg de handleiding welke priveleges gebruikt kunnen worden.", -"De host of gebruiker parameter voor GRANT is te lang", -"Tabel '%-.64s.%s' bestaat niet", -"Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'", -"Het used commando is niet toegestaan in deze MySQL versie", -"Er is iets fout in de gebruikte syntax", -"'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s", -"Te veel 'delayed' threads in gebruik", -"Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.64s' (%s)", -"Groter pakket ontvangen dan 'max_allowed_packet'", -"Kreeg leesfout van de verbindings pipe", -"Kreeg fout van fcntl()", -"Pakketten in verkeerde volgorde ontvangen", -"Communicatiepakket kon niet worden gedecomprimeerd", -"Fout bij het lezen van communicatiepakketten", -"Timeout bij het lezen van communicatiepakketten", -"Fout bij het schrijven van communicatiepakketten", -"Timeout bij het schrijven van communicatiepakketten", -"Resultaat string is langer dan 'max_allowed_packet'", -"Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen", -"Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen", -"INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES", -"Incorrecte kolom naam '%-.100s'", -"De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren", -"Niet alle tabellen in de MERGE tabel hebben identieke gedefinities", -"Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking", -"BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte", -"Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken", -"Resultaat bevatte meer dan een rij", -"Dit tabel type heeft een primaire zoeksleutel nodig", -"Deze versie van MySQL is niet gecompileerd met RAID ondersteuning", -"U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom", -"Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'", -"Kan tabel niet openen", -"De 'handler' voor de tabel ondersteund geen %s", -"Het is u niet toegestaan dit commando uit te voeren binnen een transactie", -"Kreeg fout %d tijdens COMMIT", -"Kreeg fout %d tijdens ROLLBACK", -"Kreeg fout %d tijdens FLUSH_LOGS", -"Kreeg fout %d tijdens CHECKPOINT", -"Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: `%-.64s' (%-.64s)", -"De 'handler' voor de tabel ondersteund geen binaire tabel dump", -"Binlog gesloten tijdens FLUSH MASTER poging", -"Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'", -"Fout van master: '%-.64s'", -"Net fout tijdens lezen van master", -"Net fout tijdens schrijven naar master", -"Kan geen FULLTEXT index vinden passend bij de kolom lijst", -"Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie", -"Onbekende systeem variabele '%-.64s'", -"Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd", -"Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte", -"Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen", -"Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. 
Verhoog deze mysqld variabele en probeer opnieuw", -"Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE", -"Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE", -"De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Kon slave thread niet aanmaken, controleer systeem resources", -"Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen", -"U mag alleen constante expressies gebruiken bij SET", -"Lock wacht tijd overschreden", -"Het totale aantal locks overschrijdt de lock tabel grootte", -"Update locks kunnen niet worden verkregen tijdens een READ UNCOMMITTED transactie", -"DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit", -"CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit", -"Foutieve parameters voor %s", -"'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren", -"Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren", -"Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie", -"Het gebruikte tabel type ondersteund geen FULLTEXT indexen", -"Kan foreign key beperking niet toevoegen", -"Kan onderliggende rij niet toevoegen: foreign key beperking gefaald", -"Kan bovenliggende rij nite verwijderen: foreign key beperking gefaald", -"Fout bij opbouwen verbinding naar master: %-.128s", -"Fout bij uitvoeren query op master: %-.128s", -"Fout tijdens uitvoeren van commando %s: %-.128s", -"Foutief gebruik van %s en %s", -"De gebruikte SELECT commando's hebben een verschillend aantal kolommen", -"Kan de query niet uitvoeren vanwege een conflicterende read lock", -"Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld.", -"Optie '%s' tweemaal gebruikt in opdracht", -"Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)", -"Toegang geweigerd. 
U moet het %-.128s privilege hebben voor deze operatie", -"Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL", -"Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL", -"Variabele '%-.64s' heeft geen standaard waarde", -"Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'", -"Foutief argumenttype voor variabele '%-.64s'", -"Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen", -"Foutieve toepassing/plaatsing van '%s'", -"Deze versie van MySQL ondersteunt nog geen '%s'", -"Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column 
'%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/english/errmsg.txt b/sql/share/english/errmsg.txt deleted file mode 100644 index 8ede3f61a0b..00000000000 --- a/sql/share/english/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NO", -"YES", -"Can't create file '%-.64s' (errno: %d)", -"Can't create table '%-.64s' (errno: %d)", -"Can't create database '%-.64s' (errno: %d)", -"Can't create database '%-.64s'; database exists", -"Can't drop database '%-.64s'; database doesn't exist", -"Error dropping database (can't delete '%-.64s', errno: %d)", -"Error dropping database (can't rmdir '%-.64s', errno: %d)", -"Error on delete of '%-.64s' (errno: %d)", -"Can't read record in system table", -"Can't get status of '%-.64s' (errno: %d)", -"Can't get working directory (errno: %d)", -"Can't lock file (errno: %d)", -"Can't open file: '%-.64s' (errno: %d)", -"Can't find file: '%-.64s' (errno: %d)", -"Can't read dir of '%-.64s' (errno: %d)", -"Can't change dir to '%-.64s' (errno: %d)", -"Record has changed since last read in table '%-.64s'", -"Disk full (%s); waiting for someone to free some space...", -"Can't write; duplicate key in table '%-.64s'", -"Error on close of '%-.64s' (errno: %d)", -"Error reading file '%-.64s' (errno: %d)", -"Error on rename of '%-.64s' to '%-.64s' (errno: %d)", -"Error writing file '%-.64s' (errno: %d)", -"'%-.64s' is locked against change", -"Sort aborted", -"View '%-.64s' doesn't exist for '%-.64s'", -"Got error %d from storage engine", -"Table storage engine for '%-.64s' doesn't have this option", -"Can't find record in '%-.64s'", -"Incorrect information in file: '%-.64s'", -"Incorrect key file for table '%-.64s'; try to repair it", -"Old key file for table '%-.64s'; repair it!", -"Table '%-.64s' is read only", -"Out of memory; restart server and try again (needed %d bytes)", -"Out of sort memory; increase server sort buffer size", -"Unexpected EOF found when reading file '%-.64s' (errno: %d)", -"Too many connections", -"Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space", -"Can't get hostname for your address", -"Bad handshake", -"Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'", -"Access denied for user '%-.32s'@'%-.64s' (using password: %s)", -"No database selected", -"Unknown command", -"Column '%-.64s' cannot be null", -"Unknown database '%-.64s'", -"Table '%-.64s' already exists", -"Unknown table '%-.64s'", -"Column '%-.64s' in %-.64s is ambiguous", -"Server shutdown in progress", -"Unknown column '%-.64s' in '%-.64s'", -"'%-.64s' isn't in GROUP BY", -"Can't group on '%-.64s'", -"Statement has sum functions and columns in same statement", -"Column count doesn't match value count", -"Identifier name '%-.100s' is too long", -"Duplicate column name '%-.64s'", -"Duplicate key name '%-.64s'", -"Duplicate entry '%-.64s' for key %d", -"Incorrect column specifier for column '%-.64s'", -"%s near '%-.80s' at line %d", -"Query was empty", -"Not unique table/alias: '%-.64s'", -"Invalid default value for '%-.64s'", -"Multiple primary key defined", -"Too many keys specified; max %d keys allowed", -"Too many key parts specified; max %d parts allowed", -"Specified key was too long; max key length is %d bytes", -"Key column '%-.64s' doesn't exist in table", -"BLOB column '%-.64s' can't be used in key specification with the used table type", -"Column length too big for column '%-.64s' (max = %d); use BLOB 
instead", -"Incorrect table definition; there can be only one auto column and it must be defined as a key", -"%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d", -"%s: Normal shutdown\n", -"%s: Got signal %d. Aborting!\n", -"%s: Shutdown complete\n", -"%s: Forcing close of thread %ld user: '%-.32s'\n", -"Can't create IP socket", -"Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table", -"Field separator argument is not what is expected; check the manual", -"You can't use fixed rowlength with BLOBs; please use 'fields terminated by'", -"The file '%-.64s' must be in the database directory or be readable by all", -"File '%-.80s' already exists", -"Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld", -"Records: %ld Duplicates: %ld", -"Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys", -"You can't delete all columns with ALTER TABLE; use DROP TABLE instead", -"Can't DROP '%-.64s'; check that column/key exists", -"Records: %ld Duplicates: %ld Warnings: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Unknown thread id: %lu", -"You are not owner of thread %lu", -"No tables used", -"Too many strings for column %-.64s and SET", -"Can't generate a unique log-filename %-.64s.(1-999)\n", -"Table '%-.64s' was locked with a READ lock and can't be updated", -"Table '%-.64s' was not locked with LOCK TABLES", -"BLOB/TEXT column '%-.64s' can't have a default value", -"Incorrect database name '%-.100s'", -"Incorrect table name '%-.100s'", -"The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay", -"Unknown error", -"Unknown procedure '%-.64s'", -"Incorrect parameter count to procedure '%-.64s'", -"Incorrect parameters to procedure '%-.64s'", -"Unknown table '%-.64s' in %-.32s", -"Column '%-.64s' specified twice", -"Invalid use of group function", -"Table '%-.64s' uses an extension that doesn't exist in this MySQL version", -"A table must have at least 1 column", -"The table '%-.64s' is full", -"Unknown character set: '%-.64s'", -"Too many tables; MySQL can only use %d tables in a join", -"Too many columns", -"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", -"Thread stack overrun: Used: %ld of a %ld stack. 
Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL", -"Can't load function '%-.64s'", -"Can't initialize function '%-.64s'; %-.80s", -"No paths allowed for shared library", -"Function '%-.64s' already exists", -"Can't open shared library '%-.64s' (errno: %d %-.64s)", -"Can't find function '%-.64s' in library'", -"Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s'", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%-.64s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used storage engine can't index column '%-.64s'", -"All tables in the MERGE table are not identically defined", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB/TEXT column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a 
WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The storage engine for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The storage engine for the table does not support binary table dump", -"Binlog closed, cannot RESET MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add or update a child row: a foreign key constraint fails", -"Cannot delete or update a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of 
MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu; new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated; use '%s' instead", -"The target table %-.100s of the %s is not updatable", -"The '%s' feature is disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has 
duplicated value '%-.64s' in %s" -"Truncated incorrect %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt new file mode 100644 index 00000000000..5b48f27d2e3 --- /dev/null +++ b/sql/share/errmsg.txt @@ -0,0 +1,5240 @@ +languages czech=cze latin2, danish=dan latin1, dutch=nla latin1, english=eng latin1, estonian=est latin7, french=fre latin1, german=ger latin1, greek=greek greek, hungarian=hun latin2, italian=ita latin1, japanese=jpn ujis, korean=kor euckr, norwegian-ny=norwegian-ny latin1, norwegian=nor latin1, polish=pol latin2, portuguese=por latin1, romanian=rum latin2, russian=rus koi8r, serbian=serbian cp1250, slovak=slo latin2, spanish=spa latin1, swedish=swe latin1, ukrainian=ukr koi8u; + +default-language eng + +start-error-number 1000 + +ER_HASHCHK + eng "hashchk" +ER_NISAMCHK + eng "isamchk" +ER_NO + cze "NE" + dan "NEJ" + nla "NEE" + eng "NO" + est "EI" + fre "NON" + ger "Nein" + greek "Ï×É" + hun "NEM" + kor "¾Æ´Ï¿À" + nor "NEI" + norwegian-ny "NEI" + pol "NIE" + por "NÃO" + rum "NU" + rus "îåô" + serbian "NE" + slo "NIE" + ukr "î¶" +ER_YES + cze "ANO" + dan "JA" + nla "JA" + eng "YES" + est "JAH" + fre "OUI" + ger "Ja" + greek "ÍÁÉ" + hun "IGEN" + ita "SI" + kor "¿¹" + nor "JA" + norwegian-ny "JA" + pol "TAK" + por "SIM" + rum "DA" + rus "äá" + serbian "DA" + slo "Áno" + spa "SI" + ukr "ôáë" +ER_CANT_CREATE_FILE + cze "Nemohu vytvo-Bøit soubor '%-.64s' (chybový kód: %d)" + dan "Kan ikke oprette filen '%-.64s' (Fejlkode: %d)" + nla "Kan file '%-.64s' niet aanmaken (Errcode: %d)" + eng "Can't create file '%-.64s' (errno: %d)" + est "Ei suuda luua faili '%-.64s' (veakood: %d)" + fre "Ne peut créer le fichier '%-.64s' (Errcode: %d)" + ger "Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.64s' file nem hozhato letre (hibakod: %d)" + ita "Impossibile creare il file '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)" + kor "ÈÀÏ '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette fila '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette fila '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ pliku '%-.64s' (Kod b³êdu: %d)" + por "Não pode criar o arquivo '%-.64s' (erro no. 
%d)" + rum "Nu pot sa creez fisierul '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram file '%-.64s' (errno: %d)" + slo "Nemô¾em vytvori» súbor '%-.64s' (chybový kód: %d)" + spa "No puedo crear archivo '%-.64s' (Error: %d)" + swe "Kan inte skapa filen '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_CREATE_TABLE + cze "Nemohu vytvo-Bøit tabulku '%-.64s' (chybový kód: %d)" + dan "Kan ikke oprette tabellen '%-.64s' (Fejlkode: %d)" + nla "Kan tabel '%-.64s' niet aanmaken (Errcode: %d)" + eng "Can't create table '%-.64s' (errno: %d)" + est "Ei suuda luua tabelit '%-.64s' (veakood: %d)" + fre "Ne peut créer la table '%-.64s' (Errcode: %d)" + ger "Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.64s' tabla nem hozhato letre (hibakod: %d)" + ita "Impossibile creare la tabella '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤¬ºî¤ì¤Þ¤»¤ó.(errno: %d)" + kor "Å×À̺í '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ tabeli '%-.64s' (Kod b³êdu: %d)" + por "Não pode criar a tabela '%-.64s' (erro no. %d)" + rum "Nu pot sa creez tabla '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram tabelu '%-.64s' (errno: %d)" + slo "Nemô¾em vytvori» tabuµku '%-.64s' (chybový kód: %d)" + spa "No puedo crear tabla '%-.64s' (Error: %d)" + swe "Kan inte skapa tabellen '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_CREATE_DB + cze "Nemohu vytvo-Bøit databázi '%-.64s' (chybový kód: %d)" + dan "Kan ikke oprette databasen '%-.64s' (Fejlkode: %d)" + nla "Kan database '%-.64s' niet aanmaken (Errcode: %d)" + eng "Can't create database '%-.64s' (errno: %d)" + est "Ei suuda luua andmebaasi '%-.64s' (veakood: %d)" + fre "Ne peut créer la base '%-.64s' (Erreur %d)" + ger "Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)" + greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)" + ita "Impossibile creare il database '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)" + kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke opprette databasen '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na stworzyæ bazy danych '%-.64s' (Kod b³êdu: %d)" + por "Não pode criar o banco de dados '%-.64s' (erro no. 
%d)" + rum "Nu pot sa creez baza de date '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da kreiram bazu '%-.64s' (errno: %d)" + slo "Nemô¾em vytvori» databázu '%-.64s' (chybový kód: %d)" + spa "No puedo crear base de datos '%-.64s' (Error: %d)" + swe "Kan inte skapa databasen '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_DB_CREATE_EXISTS + cze "Nemohu vytvo-Bøit databázi '%-.64s'; databáze ji¾ existuje" + dan "Kan ikke oprette databasen '%-.64s'; databasen eksisterer" + nla "Kan database '%-.64s' niet aanmaken; database bestaat reeds" + eng "Can't create database '%-.64s'; database exists" + est "Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib" + fre "Ne peut créer la base '%-.64s'; elle existe déjà" + ger "Kann Datenbank '%-.64s' nicht erzeugen. Datenbank '%-.64s' existiert bereits" + greek "Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç" + hun "Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik" + ita "Impossibile creare il database '%-.64s'; il database esiste" + jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó.´û¤Ë¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬Â¸ºß¤·¤Þ¤¹" + kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÔ" + nor "Kan ikke opprette databasen '%-.64s'; databasen eksisterer" + norwegian-ny "Kan ikkje opprette databasen '%-.64s'; databasen eksisterer" + pol "Nie mo¿na stworzyæ bazy danych '%-.64s'; baza danych ju¿ istnieje" + por "Não pode criar o banco de dados '%-.64s'; este banco de dados já existe" + rum "Nu pot sa creez baza de date '%-.64s'; baza de date exista deja" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Ne mogu da kreiram bazu '%-.64s'; baza veæ postoji." + slo "Nemô¾em vytvori» databázu '%-.64s'; databáza existuje" + spa "No puedo crear base de datos '%-.64s'; la base de datos ya existe" + swe "Databasen '%-.64s' existerar redan" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤" +ER_DB_DROP_EXISTS + cze "Nemohu zru-B¹it databázi '%-.64s', databáze neexistuje" + dan "Kan ikke slette (droppe) '%-.64s'; databasen eksisterer ikke" + nla "Kan database '%-.64s' niet verwijderen; database bestaat niet" + eng "Can't drop database '%-.64s'; database doesn't exist" + est "Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri" + fre "Ne peut effacer la base '%-.64s'; elle n'existe pas" + ger "Kann Datenbank '%-.64s' nicht löschen. Keine Datenbank '%-.64s' vorhanden" + greek "Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé" + hun "A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik" + ita "Impossibile cancellare '%-.64s'; il database non esiste" + jpn "'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤òÇË´þ¤Ç¤¤Þ¤»¤ó. ¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬¤Ê¤¤¤Î¤Ç¤¹." + kor "µ¥ÀÌŸº£À̽º '%-.64s'¸¦ Á¦°ÅÇÏÁö ¸øÇß½À´Ï´Ù. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÏÁö ¾ÊÀ½ " + nor "Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke" + norwegian-ny "Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje" + pol "Nie mo¿na usun?æ bazy danych '%-.64s'; baza danych nie istnieje" + por "Não pode eliminar o banco de dados '%-.64s'; este banco de dados não existe" + rum "Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ" + serbian "Ne mogu da izbrišem bazu '%-.64s'; baza ne postoji." 
+ slo "Nemô¾em zmaza» databázu '%-.64s'; databáza neexistuje" + spa "No puedo eliminar base de datos '%-.64s'; la base de datos no existe" + swe "Kan inte radera databasen '%-.64s'; databasen finns inte" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤" +ER_DB_DROP_DELETE + cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat '%-.64s', chyba %d)" + dan "Fejl ved sletning (drop) af databasen (kan ikke slette '%-.64s', Fejlkode %d)" + nla "Fout bij verwijderen database (kan '%-.64s' niet verwijderen, Errcode: %d)" + eng "Error dropping database (can't delete '%-.64s', errno: %d)" + est "Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)" + fre "Ne peut effacer la base '%-.64s' (erreur %d)" + ger "Fehler beim Löschen der Datenbank ('%-.64s' kann nicht gelöscht werden, Fehlernuumer: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.64s', êùäéêüò ëÜèïõò: %d)" + hun "Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)" + ita "Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)" + jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤òºï½ü¤Ç¤¤Þ¤»¤ó, errno: %d)" + kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯('%-.64s'¸¦ »èÁ¦ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)" + nor "Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)" + norwegian-ny "Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)" + pol "B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.64s', b³?d %d)" + por "Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)" + rum "Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.64s', ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.64s', errno: %d)" + slo "Chyba pri mazaní databázy (nemô¾em zmaza» '%-.64s', chybový kód: %d)" + spa "Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)" + swe "Fel vid radering av databasen (Kan inte radera '%-.64s'. 
Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s', ÐÏÍÉÌËÁ: %d)" +ER_DB_DROP_RMDIR + cze "Chyba p-Bøi ru¹ení databáze (nemohu vymazat adresáø '%-.64s', chyba %d)" + dan "Fejl ved sletting af database (kan ikke slette folderen '%-.64s', Fejlkode %d)" + nla "Fout bij verwijderen database (kan rmdir '%-.64s' niet uitvoeren, Errcode: %d)" + eng "Error dropping database (can't rmdir '%-.64s', errno: %d)" + est "Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)" + fre "Erreur en effaçant la base (rmdir '%-.64s', erreur %d)" + ger "Fehler beim Löschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehlernummer: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.64s', êùäéêüò ëÜèïõò: %d)" + hun "Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)" + ita "Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)" + jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤ò rmdir ¤Ç¤¤Þ¤»¤ó, errno: %d)" + kor "µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯(rmdir '%-.64s'¸¦ ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)" + nor "Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)" + norwegian-ny "Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)" + pol "B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.64s', b³?d %d)" + por "Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)" + rum "Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ËÁÔÁÌÏÇ '%-.64s', ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.64s', errno: %d)" + slo "Chyba pri mazaní databázy (nemô¾em vymaza» adresár '%-.64s', chybový kód: %d)" + spa "Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)" + swe "Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÔÅËÕ '%-.64s', ÐÏÍÉÌËÁ: %d)" +ER_CANT_DELETE_FILE + cze "Chyba p-Bøi výmazu '%-.64s' (chybový kód: %d)" + dan "Fejl ved sletning af '%-.64s' (Fejlkode: %d)" + nla "Fout bij het verwijderen van '%-.64s' (Errcode: %d)" + eng "Error on delete of '%-.64s' (errno: %d)" + est "Viga '%-.64s' kustutamisel (veakood: %d)" + fre "Erreur en effaçant '%-.64s' (Errcode: %d)" + ger "Fehler beim Löschen von '%-.64s' (Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Torlesi hiba: '%-.64s' (hibakod: %d)" + ita "Errore durante la cancellazione di '%-.64s' (errno: %d)" + jpn "'%-.64s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)" + kor "'%-.64s' »èÁ¦ Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved sletting av '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved sletting av '%-.64s' (Feilkode: %d)" + pol "B³?d podczas usuwania '%-.64s' (Kod b³êdu: %d)" + por "Erro na remoção de '%-.64s' (erro no. 
%d)" + rum "Eroare incercind sa delete '%-.64s' (Eroare: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri brisanju '%-.64s' (errno: %d)" + slo "Chyba pri mazaní '%-.64s' (chybový kód: %d)" + spa "Error en el borrado de '%-.64s' (Error: %d)" + swe "Kan inte radera filen '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_FIND_SYSTEM_REC + cze "Nemohu -Bèíst záznam v systémové tabulce" + dan "Kan ikke læse posten i systemfolderen" + nla "Kan record niet lezen in de systeem tabel" + eng "Can't read record in system table" + est "Ei suuda lugeda kirjet süsteemsest tabelist" + fre "Ne peut lire un enregistrement de la table 'system'" + ger "Datensatz in der Systemtabelle nicht lesbar" + greek "Áäýíáôç ç áíÜãíùóç åããñáöÞò áðü ðßíáêá ôïõ óõóôÞìáôïò" + hun "Nem olvashato rekord a rendszertablaban" + ita "Impossibile leggere il record dalla tabella di sistema" + jpn "system table ¤Î¥ì¥³¡¼¥É¤òÆÉ¤à»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿" + kor "system Å×ÀÌºí¿¡¼ ·¹Äڵ带 ÀÐÀ» ¼ö ¾ø½À´Ï´Ù." + nor "Kan ikke lese posten i systemkatalogen" + norwegian-ny "Kan ikkje lese posten i systemkatalogen" + pol "Nie mo¿na odczytaæ rekordu z tabeli systemowej" + por "Não pode ler um registro numa tabela do sistema" + rum "Nu pot sa citesc cimpurile in tabla de system (system table)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ÚÁÐÉÓØ × ÓÉÓÔÅÍÎÏÊ ÔÁÂÌÉÃÅ" + serbian "Ne mogu da proèitam slog iz sistemske tabele" + slo "Nemô¾em èíta» záznam v systémovej tabuµke" + spa "No puedo leer el registro en la tabla del sistema" + swe "Hittar inte posten i systemregistret" + ukr "îÅ ÍÏÖÕ ÚÞÉÔÁÔÉ ÚÁÐÉÓ Ú ÓÉÓÔÅÍÎϧ ÔÁÂÌÉæ" +ER_CANT_GET_STAT + cze "Nemohu z-Bískat stav '%-.64s' (chybový kód: %d)" + dan "Kan ikke læse status af '%-.64s' (Fejlkode: %d)" + nla "Kan de status niet krijgen van '%-.64s' (Errcode: %d)" + eng "Can't get status of '%-.64s' (errno: %d)" + est "Ei suuda lugeda '%-.64s' olekut (veakood: %d)" + fre "Ne peut obtenir le status de '%-.64s' (Errcode: %d)" + ger "Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)" + greek "Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)" + ita "Impossibile leggere lo stato di '%-.64s' (errno: %d)" + jpn "'%-.64s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)" + kor "'%-.64s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke lese statusen til '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na otrzymaæ statusu '%-.64s' (Kod b³êdu: %d)" + por "Não pode obter o status de '%-.64s' (erro no. 
%d)" + rum "Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)" + slo "Nemô¾em zisti» stav '%-.64s' (chybový kód: %d)" + spa "No puedo obtener el estado de '%-.64s' (Error: %d)" + swe "Kan inte läsa filinformationen (stat) från '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÏÔÒÉÍÁÔÉ ÓÔÁÔÕÓ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_GET_WD + cze "Chyba p-Bøi zji¹»ování pracovní adresáø (chybový kód: %d)" + dan "Kan ikke læse aktive folder (Fejlkode: %d)" + nla "Kan de werkdirectory niet krijgen (Errcode: %d)" + eng "Can't get working directory (errno: %d)" + est "Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)" + fre "Ne peut obtenir le répertoire de travail (Errcode: %d)" + ger "Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)" + greek "Ï öÜêåëëïò åñãáóßáò äåí âñÝèçêå (êùäéêüò ëÜèïõò: %d)" + hun "A munkakonyvtar nem allapithato meg (hibakod: %d)" + ita "Impossibile leggere la directory di lavoro (errno: %d)" + jpn "working directory ¤òÆÀ¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿ (errno: %d)" + kor "¼öÇà µð·ºÅ丮¸¦ ãÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke lese aktiv katalog(Feilkode: %d)" + norwegian-ny "Kan ikkje lese aktiv katalog(Feilkode: %d)" + pol "Nie mo¿na rozpoznaæ aktualnego katalogu (Kod b³êdu: %d)" + por "Não pode obter o diretório corrente (erro no. %d)" + rum "Nu pot sa obtin directorul current (working directory) (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÐÒÅÄÅÌÉÔØ ÒÁÂÏÞÉÊ ËÁÔÁÌÏÇ (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da dobijem trenutni direktorijum (errno: %d)" + slo "Nemô¾em zisti» pracovný adresár (chybový kód: %d)" + spa "No puedo acceder al directorio (Error: %d)" + swe "Kan inte inte läsa aktivt bibliotek. (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ÒÏÂÏÞÕ ÔÅËÕ (ÐÏÍÉÌËÁ: %d)" +ER_CANT_LOCK + cze "Nemohu uzamknout soubor (chybov-Bý kód: %d)" + dan "Kan ikke låse fil (Fejlkode: %d)" + nla "Kan de file niet blokeren (Errcode: %d)" + eng "Can't lock file (errno: %d)" + est "Ei suuda lukustada faili (veakood: %d)" + fre "Ne peut verrouiller le fichier (Errcode: %d)" + ger "Datei kann nicht gesperrt werden (Fehler: %d)" + greek "Ôï áñ÷åßï äåí ìðïñåß íá êëåéäùèåß (êùäéêüò ëÜèïõò: %d)" + hun "A file nem zarolhato. (hibakod: %d)" + ita "Impossibile il locking il file (errno: %d)" + jpn "¥Õ¥¡¥¤¥ë¤ò¥í¥Ã¥¯¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "ÈÀÏÀ» Àá±×Áö(lock) ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke låse fila (Feilkode: %d)" + norwegian-ny "Kan ikkje låse fila (Feilkode: %d)" + pol "Nie mo¿na zablokowaæ pliku (Kod b³êdu: %d)" + por "Não pode travar o arquivo (erro no. %d)" + rum "Nu pot sa lock fisierul (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÓÔÁ×ÉÔØ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÆÁÊÌÅ (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da zakljuèam file (errno: %d)" + slo "Nemô¾em zamknú» súbor (chybový kód: %d)" + spa "No puedo bloquear archivo: (Error: %d)" + swe "Kan inte låsa filen. 
(Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÚÁÂÌÏËÕ×ÁÔÉ ÆÁÊÌ (ÐÏÍÉÌËÁ: %d)" +ER_CANT_OPEN_FILE + cze "Nemohu otev-Bøít soubor '%-.64s' (chybový kód: %d)" + dan "Kan ikke åbne fil: '%-.64s' (Fejlkode: %d)" + nla "Kan de file '%-.64s' niet openen (Errcode: %d)" + eng "Can't open file: '%-.64s' (errno: %d)" + est "Ei suuda avada faili '%-.64s' (veakood: %d)" + fre "Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)" + ger "Datei '%-.64s' nicht öffnen (Fehler: %d)" + greek "Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "A '%-.64s' file nem nyithato meg (hibakod: %d)" + ita "Impossibile aprire il file: '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "ÈÀÏÀ» ¿Áö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke åpne fila: '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje åpne fila: '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na otworzyæ pliku: '%-.64s' (Kod b³êdu: %d)" + por "Não pode abrir o arquivo '%-.64s' (erro no. %d)" + rum "Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da otvorim file: '%-.64s' (errno: %d)" + slo "Nemô¾em otvori» súbor: '%-.64s' (chybový kód: %d)" + spa "No puedo abrir archivo: '%-.64s' (Error: %d)" + swe "Kan inte använda '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_FILE_NOT_FOUND + cze "Nemohu naj-Bít soubor '%-.64s' (chybový kód: %d)" + dan "Kan ikke finde fila: '%-.64s' (Fejlkode: %d)" + nla "Kan de file: '%-.64s' niet vinden (Errcode: %d)" + eng "Can't find file: '%-.64s' (errno: %d)" + est "Ei suuda leida faili '%-.64s' (veakood: %d)" + fre "Ne peut trouver le fichier: '%-.64s' (Errcode: %d)" + ger "Kann Datei '%-.64s' nicht finden (Fehler: %d)" + greek "Äåí âñÝèçêå ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.64s' file nem talalhato (hibakod: %d)" + ita "Impossibile trovare il file: '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó.(errno: %d)" + kor "ÈÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke finne fila: '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje finne fila: '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na znale¥æ pliku: '%-.64s' (Kod b³êdu: %d)" + por "Não pode encontrar o arquivo '%-.64s' (erro no. %d)" + rum "Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da pronaðem file: '%-.64s' (errno: %d)" + slo "Nemô¾em nájs» súbor: '%-.64s' (chybový kód: %d)" + spa "No puedo encontrar archivo: '%-.64s' (Error: %d)" + swe "Hittar inte filen '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_READ_DIR + cze "Nemohu -Bèíst adresáø '%-.64s' (chybový kód: %d)" + dan "Kan ikke læse folder '%-.64s' (Fejlkode: %d)" + nla "Kan de directory niet lezen van '%-.64s' (Errcode: %d)" + eng "Can't read dir of '%-.64s' (errno: %d)" + est "Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)" + fre "Ne peut lire le répertoire de '%-.64s' (Errcode: %d)" + ger "Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)" + greek "Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)" + ita "Impossibile leggere la directory di '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬ÆÉ¤á¤Þ¤»¤ó.(errno: %d)" + kor "'%-.64s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. 
(¿¡·¯¹øÈ£: %d)" + nor "Kan ikke lese katalogen '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na odczytaæ katalogu '%-.64s' (Kod b³êdu: %d)" + por "Não pode ler o diretório de '%-.64s' (erro no. %d)" + rum "Nu pot sa citesc directorul '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da proèitam direktorijum '%-.64s' (errno: %d)" + slo "Nemô¾em èíta» adresár '%-.64s' (chybový kód: %d)" + spa "No puedo leer el directorio de '%-.64s' (Error: %d)" + swe "Kan inte läsa från bibliotek '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CANT_SET_WD + cze "Nemohu zm-Bìnit adresáø na '%-.64s' (chybový kód: %d)" + dan "Kan ikke skifte folder til '%-.64s' (Fejlkode: %d)" + nla "Kan de directory niet veranderen naar '%-.64s' (Errcode: %d)" + eng "Can't change dir to '%-.64s' (errno: %d)" + est "Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)" + fre "Ne peut changer le répertoire pour '%-.64s' (Errcode: %d)" + ger "Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)" + greek "Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)" + ita "Impossibile cambiare la directory in '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤¤Þ¤»¤ó.(errno: %d)" + kor "'%-.64s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)" + nor "Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)" + norwegian-ny "Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)" + pol "Nie mo¿na zmieniæ katalogu na '%-.64s' (Kod b³êdu: %d)" + por "Não pode mudar para o diretório '%-.64s' (erro no. %d)" + rum "Nu pot sa schimb directorul '%-.64s' (Eroare: %d)" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)" + slo "Nemô¾em vojs» do adresára '%-.64s' (chybový kód: %d)" + spa "No puedo cambiar al directorio de '%-.64s' (Error: %d)" + swe "Kan inte byta till '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÔÉ Õ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CHECKREAD + cze "Z-Báznam byl zmìnìn od posledního ètení v tabulce '%-.64s'" + dan "Posten er ændret siden sidste læsning '%-.64s'" + nla "Record is veranderd sinds de laatste lees activiteit in de tabel '%-.64s'" + eng "Record has changed since last read in table '%-.64s'" + est "Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik" + fre "Enregistrement modifié depuis sa dernière lecture dans la table '%-.64s'" + ger "Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert" + greek "Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.64s'" + hun "A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota" + ita "Il record e` cambiato dall'ultima lettura della tabella '%-.64s'" + kor "Å×À̺í '%-.64s'¿¡¼ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù." 
+ nor "Posten har blitt endret siden den ble lest '%-.64s'" + norwegian-ny "Posten har vorte endra sidan den sist vart lesen '%-.64s'" + pol "Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.64s'" + por "Registro alterado desde a última leitura da tabela '%-.64s'" + rum "Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'" + rus "úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.64s'" + serbian "Slog je promenjen od zadnjeg èitanja tabele '%-.64s'" + slo "Záznam bol zmenený od posledného èítania v tabuµke '%-.64s'" + spa "El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'" + swe "Posten har förändrats sedan den lästes i register '%-.64s'" + ukr "úÁÐÉÓ ÂÕÌÏ ÚͦÎÅÎÏ Ú ÞÁÓÕ ÏÓÔÁÎÎØÏÇÏ ÞÉÔÁÎÎÑ Ú ÔÁÂÌÉæ '%-.64s'" +ER_DISK_FULL + cze "Disk je pln-Bý (%s), èekám na uvolnìní nìjakého místa ..." + dan "Ikke mere diskplads (%s). Venter på at få frigjort plads..." + nla "Schijf vol (%s). Aan het wachten totdat er ruimte vrij wordt gemaakt..." + eng "Disk full (%s); waiting for someone to free some space..." + est "Ketas täis (%s). Ootame kuni tekib vaba ruumi..." + fre "Disque plein (%s). J'attend que quelqu'un libère de l'espace..." + ger "Festplatte voll (%-.64s). Warte, bis jemand Platz schafft ..." + greek "Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò..." + hun "A lemez megtelt (%s)." + ita "Disco pieno (%s). In attesa che qualcuno liberi un po' di spazio..." + jpn "Disk full (%s). 狼¤¬²¿¤«¤ò¸º¤é¤¹¤Þ¤Ç¤Þ¤Ã¤Æ¤¯¤À¤µ¤¤..." + kor "Disk full (%s). ´Ù¸¥ »ç¶÷ÀÌ Áö¿ï¶§±îÁö ±â´Ù¸³´Ï´Ù..." + nor "Ikke mer diskplass (%s). Venter på å få frigjort plass..." + norwegian-ny "Ikkje meir diskplass (%s). Ventar på å få frigjort plass..." + pol "Dysk pe³ny (%s). Oczekiwanie na zwolnienie miejsca..." + por "Disco cheio (%s). Aguardando alguém liberar algum espaço..." + rum "Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu..." + rus "äÉÓË ÚÁÐÏÌÎÅÎ. (%s). ïÖÉÄÁÅÍ, ÐÏËÁ ËÔÏ-ÔÏ ÎÅ ÕÂÅÒÅÔ ÐÏÓÌÅ ÓÅÂÑ ÍÕÓÏÒ..." + serbian "Disk je pun (%s). Èekam nekoga da doðe i oslobodi nešto mesta..." + slo "Disk je plný (%s), èakám na uvoµnenie miesta..." + spa "Disco lleno (%s). Esperando para que se libere algo de espacio..." + swe "Disken är full (%s). Väntar tills det finns ledigt utrymme..." + ukr "äÉÓË ÚÁÐÏ×ÎÅÎÉÊ (%s). ÷ÉÞÉËÕÀ, ÄÏËÉ Ú×¦ÌØÎÉÔØÓÑ ÔÒÏÈÉ Í¦ÓÃÑ..." +ER_DUP_KEY 23000 + cze "Nemohu zapsat, zdvojen-Bý klíè v tabulce '%-.64s'" + dan "Kan ikke skrive, flere ens nøgler i tabellen '%-.64s'" + nla "Kan niet schrijven, dubbele zoeksleutel in tabel '%-.64s'" + eng "Can't write; duplicate key in table '%-.64s'" + est "Ei saa kirjutada, korduv võti tabelis '%-.64s'" + fre "Ecriture impossible, doublon dans une clé de la table '%-.64s'" + ger "Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.64s'" + greek "Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.64s'" + hun "Irasi hiba, duplikalt kulcs a '%-.64s' tablaban." + ita "Scrittura impossibile: chiave duplicata nella tabella '%-.64s'" + jpn "table '%-.64s' ¤Ë key ¤¬½ÅÊ£¤·¤Æ¤¤¤Æ½ñ¤¤³¤á¤Þ¤»¤ó" + kor "±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.64s'¿¡¼ Áߺ¹ Ű" + nor "Kan ikke skrive, flere like nøkler i tabellen '%-.64s'" + norwegian-ny "Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'" + pol "Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.64s'" + por "Não pode gravar. 
Chave duplicada na tabela '%-.64s'" + rum "Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.64s'" + serbian "Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.64s'" + slo "Nemô¾em zapísa», duplikát kµúèa v tabuµke '%-.64s'" + spa "No puedo escribir, clave duplicada en la tabla '%-.64s'" + swe "Kan inte skriva, dubbel söknyckel i register '%-.64s'" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ, ÄÕÂÌÀÀÞÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉæ '%-.64s'" +ER_ERROR_ON_CLOSE + cze "Chyba p-Bøi zavírání '%-.64s' (chybový kód: %d)" + dan "Fejl ved lukning af '%-.64s' (Fejlkode: %d)" + nla "Fout bij het sluiten van '%-.64s' (Errcode: %d)" + eng "Error on close of '%-.64s' (errno: %d)" + est "Viga faili '%-.64s' sulgemisel (veakood: %d)" + fre "Erreur a la fermeture de '%-.64s' (Errcode: %d)" + ger "Fehler beim Schließen von '%-.64s' (Fehler: %d)" + greek "ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)" + ita "Errore durante la chiusura di '%-.64s' (errno: %d)" + kor "'%-.64s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved lukking av '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved lukking av '%-.64s' (Feilkode: %d)" + pol "B³?d podczas zamykania '%-.64s' (Kod b³êdu: %d)" + por "Erro ao fechar '%-.64s' (erro no. %d)" + rum "Eroare inchizind '%-.64s' (errno: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri zatvaranju '%-.64s' (errno: %d)" + slo "Chyba pri zatváraní '%-.64s' (chybový kód: %d)" + spa "Error en el cierre de '%-.64s' (Error: %d)" + swe "Fick fel vid stängning av '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_ERROR_ON_READ + cze "Chyba p-Bøi ètení souboru '%-.64s' (chybový kód: %d)" + dan "Fejl ved læsning af '%-.64s' (Fejlkode: %d)" + nla "Fout bij het lezen van file '%-.64s' (Errcode: %d)" + eng "Error reading file '%-.64s' (errno: %d)" + est "Viga faili '%-.64s' lugemisel (veakood: %d)" + fre "Erreur en lecture du fichier '%-.64s' (Errcode: %d)" + ger "Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.64s'file olvasasakor. (hibakod: %d)" + ita "Errore durante la lettura del file '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ÎÆÉ¤ß¹þ¤ß¥¨¥é¡¼ (errno: %d)" + kor "'%-.64s'ÈÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved lesing av '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved lesing av '%-.64s' (Feilkode: %d)" + pol "B³?d podczas odczytu pliku '%-.64s' (Kod b³êdu: %d)" + por "Erro ao ler arquivo '%-.64s' (erro no. 
%d)" + rum "Eroare citind fisierul '%-.64s' (errno: %d)" + rus "ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri èitanju file-a '%-.64s' (errno: %d)" + slo "Chyba pri èítaní súboru '%-.64s' (chybový kód: %d)" + spa "Error leyendo el fichero '%-.64s' (Error: %d)" + swe "Fick fel vid läsning av '%-.64s' (Felkod %d)" + ukr "îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_ERROR_ON_RENAME + cze "Chyba p-Bøi pøejmenování '%-.64s' na '%-.64s' (chybový kód: %d)" + dan "Fejl ved omdøbning af '%-.64s' til '%-.64s' (Fejlkode: %d)" + nla "Fout bij het hernoemen van '%-.64s' naar '%-.64s' (Errcode: %d)" + eng "Error on rename of '%-.64s' to '%-.64s' (errno: %d)" + est "Viga faili '%-.64s' ümbernimetamisel '%-.64s'-ks (veakood: %d)" + fre "Erreur en renommant '%-.64s' en '%-.64s' (Errcode: %d)" + ger "Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí ìåôïíïìáóßá ôïõ áñ÷åßïõ '%-.64s' to '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.64s' file atnevezesekor. (hibakod: %d)" + ita "Errore durante la rinominazione da '%-.64s' a '%-.64s' (errno: %d)" + jpn "'%-.64s' ¤ò '%-.64s' ¤Ë rename ¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "'%-.64s'¸¦ '%-.64s'·Î À̸§ º¯°æÁß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved omdøping av '%-.64s' til '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved omdøyping av '%-.64s' til '%-.64s' (Feilkode: %d)" + pol "B³?d podczas zmieniania nazwy '%-.64s' na '%-.64s' (Kod b³êdu: %d)" + por "Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)" + rum "Eroare incercind sa renumesc '%-.64s' in '%-.64s' (errno: %d)" + rus "ïÛÉÂËÁ ÐÒÉ ÐÅÒÅÉÍÅÎÏ×ÁÎÉÉ '%-.64s' × '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri promeni imena '%-.64s' na '%-.64s' (errno: %d)" + slo "Chyba pri premenovávaní '%-.64s' na '%-.64s' (chybový kód: %d)" + spa "Error en el renombrado de '%-.64s' a '%-.64s' (Error: %d)" + swe "Kan inte byta namn från '%-.64s' till '%-.64s' (Felkod: %d)" + ukr "îÅ ÍÏÖÕ ÐÅÒÅÊÍÅÎÕ×ÁÔÉ '%-.64s' Õ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_ERROR_ON_WRITE + cze "Chyba p-Bøi zápisu do souboru '%-.64s' (chybový kód: %d)" + dan "Fejl ved skriving av filen '%-.64s' (Fejlkode: %d)" + nla "Fout bij het wegschrijven van file '%-.64s' (Errcode: %d)" + eng "Error writing file '%-.64s' (errno: %d)" + est "Viga faili '%-.64s' kirjutamisel (veakood: %d)" + fre "Erreur d'écriture du fichier '%-.64s' (Errcode: %d)" + ger "Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)" + greek "Ðñüâëçìá êáôÜ ôçí áðïèÞêåõóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Hiba a '%-.64s' file irasakor. (hibakod: %d)" + ita "Errore durante la scrittura del file '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤ò½ñ¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)" + kor "'%-.64s'ÈÀÏ ±â·Ï Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)" + nor "Feil ved skriving av fila '%-.64s' (Feilkode: %d)" + norwegian-ny "Feil ved skriving av fila '%-.64s' (Feilkode: %d)" + pol "B³?d podczas zapisywania pliku '%-.64s' (Kod b³êdu: %d)" + por "Erro ao gravar arquivo '%-.64s' (erro no. 
%d)" + rum "Eroare scriind fisierul '%-.64s' (errno: %d)" + rus "ïÛÉÂËÁ ÚÁÐÉÓÉ × ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Greška pri upisu '%-.64s' (errno: %d)" + slo "Chyba pri zápise do súboru '%-.64s' (chybový kód: %d)" + spa "Error escribiendo el archivo '%-.64s' (Error: %d)" + swe "Fick fel vid skrivning till '%-.64s' (Felkod %d)" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_FILE_USED + cze "'%-.64s' je zam-Bèen proti zmìnám" + dan "'%-.64s' er låst mod opdateringer" + nla "'%-.64s' is geblokeerd tegen veranderingen" + eng "'%-.64s' is locked against change" + est "'%-.64s' on lukustatud muudatuste vastu" + fre "'%-.64s' est verrouillé contre les modifications" + ger "'%-.64s' ist für Änderungen gesperrt" + greek "'%-.64s' äåí åðéôñÝðïíôáé áëëáãÝò" + hun "'%-.64s' a valtoztatas ellen zarolva" + ita "'%-.64s' e` soggetto a lock contro i cambiamenti" + jpn "'%-.64s' ¤Ï¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤¹" + kor "'%-.64s'°¡ º¯°æÇÒ ¼ö ¾øµµ·Ï Àá°ÜÀÖÀ¾´Ï´Ù." + nor "'%-.64s' er låst mot oppdateringer" + norwegian-ny "'%-.64s' er låst mot oppdateringar" + pol "'%-.64s' jest zablokowany na wypadek zmian" + por "'%-.64s' está com travamento contra alterações" + rum "'%-.64s' este blocat pentry schimbari (loccked against change)" + rus "'%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÄÌÑ ÉÚÍÅÎÅÎÉÊ" + serbian "'%-.64s' je zakljuèan za upis" + slo "'%-.64s' je zamknutý proti zmenám" + spa "'%-.64s' esta bloqueado contra cambios" + swe "'%-.64s' är låst mot användning" + ukr "'%-.64s' ÚÁÂÌÏËÏ×ÁÎÉÊ ÎÁ ×ÎÅÓÅÎÎÑ ÚͦÎ" +ER_FILSORT_ABORT + cze "T-Bøídìní pøeru¹eno" + dan "Sortering afbrudt" + nla "Sorteren afgebroken" + eng "Sort aborted" + est "Sorteerimine katkestatud" + fre "Tri alphabétique abandonné" + ger "Sortiervorgang abgebrochen" + greek "Ç äéáäéêáóßá ôáîéíüìéóçò áêõñþèçêå" + hun "Sikertelen rendezes" + ita "Operazione di ordinamento abbandonata" + jpn "Sort ÃæÃÇ" + kor "¼ÒÆ®°¡ ÁߴܵǾú½À´Ï´Ù." + nor "Sortering avbrutt" + norwegian-ny "Sortering avbrote" + pol "Sortowanie przerwane" + por "Ordenação abortada" + rum "Sortare intrerupta" + rus "óÏÒÔÉÒÏ×ËÁ ÐÒÅÒ×ÁÎÁ" + serbian "Sortiranje je prekinuto" + slo "Triedenie preru¹ené" + spa "Ordeancion cancelada" + swe "Sorteringen avbruten" + ukr "óÏÒÔÕ×ÁÎÎÑ ÐÅÒÅÒ×ÁÎÏ" +ER_FORM_NOT_FOUND + cze "Pohled '%-.64s' pro '%-.64s' neexistuje" + dan "View '%-.64s' eksisterer ikke for '%-.64s'" + nla "View '%-.64s' bestaat niet voor '%-.64s'" + eng "View '%-.64s' doesn't exist for '%-.64s'" + est "Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks" + fre "La vue (View) '%-.64s' n'existe pas pour '%-.64s'" + ger "View '%-.64s' existiert für '%-.64s' nicht" + greek "Ôï View '%-.64s' äåí õðÜñ÷åé ãéá '%-.64s'" + hun "A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz" + ita "La view '%-.64s' non esiste per '%-.64s'" + jpn "View '%-.64s' ¤¬ '%-.64s' ¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "ºä '%-.64s'°¡ '%-.64s'¿¡¼´Â Á¸ÀçÇÏÁö ¾ÊÀ¾´Ï´Ù." 
+ nor "View '%-.64s' eksisterer ikke for '%-.64s'" + norwegian-ny "View '%-.64s' eksisterar ikkje for '%-.64s'" + pol "Widok '%-.64s' nie istnieje dla '%-.64s'" + por "Visão '%-.64s' não existe para '%-.64s'" + rum "View '%-.64s' nu exista pentru '%-.64s'" + rus "ðÒÅÄÓÔÁ×ÌÅÎÉÅ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ ÄÌÑ '%-.64s'" + serbian "View '%-.64s' ne postoji za '%-.64s'" + slo "Pohµad '%-.64s' neexistuje pre '%-.64s'" + spa "La vista '%-.64s' no existe para '%-.64s'" + swe "Formulär '%-.64s' finns inte i '%-.64s'" + ukr "÷ÉÇÌÑÄ '%-.64s' ÎÅ ¦ÓÎÕ¤ ÄÌÑ '%-.64s'" +ER_GET_ERRNO + cze "Obsluha tabulky vr-Bátila chybu %d" + dan "Modtog fejl %d fra tabel håndteringen" + nla "Fout %d van tabel handler" + eng "Got error %d from storage engine" + est "Tabeli handler tagastas vea %d" + fre "Reçu l'erreur %d du handler de la table" + ger "Fehler %d (Tabellenhandler)" + greek "ÅëÞöèç ìÞíõìá ëÜèïõò %d áðü ôïí ÷åéñéóôÞ ðßíáêá (table handler)" + hun "%d hibajelzes a tablakezelotol" + ita "Rilevato l'errore %d dal gestore delle tabelle" + jpn "Got error %d from table handler" + kor "Å×À̺í handler¿¡¼ %d ¿¡·¯°¡ ¹ß»ý ÇÏ¿´½À´Ï´Ù." + nor "Mottok feil %d fra tabell håndterer" + norwegian-ny "Mottok feil %d fra tabell handterar" + pol "Otrzymano b³?d %d z obs³ugi tabeli" + por "Obteve erro %d no manipulador de tabelas" + rum "Eroarea %d obtinuta din handlerul tabelei" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d ÏÔ ÏÂÒÁÂÏÔÞÉËÁ ÔÁÂÌÉÃ" + serbian "Handler tabela je vratio grešku %d" + slo "Obsluha tabuµky vrátila chybu %d" + spa "Error %d desde el manejador de la tabla" + swe "Fick felkod %d från databashanteraren" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d ×¦Ä ÄÅÓËÒÉÐÔÏÒÁ ÔÁÂÌÉæ" +ER_ILLEGAL_HA + cze "Obsluha tabulky '%-.64s' nem-Bá tento parametr" + dan "Denne mulighed eksisterer ikke for tabeltypen '%-.64s'" + nla "Tabel handler voor '%-.64s' heeft deze optie niet" + eng "Table storage engine for '%-.64s' doesn't have this option" + est "Tabeli '%-.64s' handler ei toeta antud operatsiooni" + fre "Le handler de la table '%-.64s' n'a pas cette option" + ger "Diese Option gibt es nicht (Tabellenhandler)" + greek "Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.64s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ" + hun "A(z) '%-.64s' tablakezelonek nincs ilyen opcioja" + ita "Il gestore delle tabelle per '%-.64s' non ha questa opzione" + jpn "Table handler for '%-.64s' doesn't have this option" + kor "'%-.64s'ÀÇ Å×À̺í handler´Â ÀÌ·¯ÇÑ ¿É¼ÇÀ» Á¦°øÇÏÁö ¾ÊÀ¾´Ï´Ù." 
+ nor "Tabell håndtereren for '%-.64s' har ikke denne muligheten" + norwegian-ny "Tabell håndteraren for '%-.64s' har ikkje denne moglegheita" + pol "Obs³uga tabeli '%-.64s' nie posiada tej opcji" + por "Manipulador de tabela para '%-.64s' não tem esta opção" + rum "Handlerul tabelei pentru '%-.64s' nu are aceasta optiune" + rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÕ ×ÏÚÍÏÖÎÏÓÔØ" + serbian "Handler tabela za '%-.64s' nema ovu opciju" + slo "Obsluha tabuµky '%-.64s' nemá tento parameter" + spa "El manejador de la tabla de '%-.64s' no tiene esta opcion" + swe "Registrets databas har inte denna facilitet" + ukr "äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦" +ER_KEY_NOT_FOUND + cze "Nemohu naj-Bít záznam v '%-.64s'" + dan "Kan ikke finde posten i '%-.64s'" + nla "Kan record niet vinden in '%-.64s'" + eng "Can't find record in '%-.64s'" + est "Ei suuda leida kirjet '%-.64s'-s" + fre "Ne peut trouver l'enregistrement dans '%-.64s'" + ger "Kann Datensatz nicht finden" + greek "Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.64s'" + hun "Nem talalhato a rekord '%-.64s'-ben" + ita "Impossibile trovare il record in '%-.64s'" + jpn "'%-.64s'¤Î¤Ê¤«¤Ë¥ì¥³¡¼¥É¤¬¸«ÉÕ¤«¤ê¤Þ¤»¤ó" + kor "'%-.64s'¿¡¼ ·¹Äڵ带 ãÀ» ¼ö ¾øÀ¾´Ï´Ù." + nor "Kan ikke finne posten i '%-.64s'" + norwegian-ny "Kan ikkje finne posten i '%-.64s'" + pol "Nie mo¿na znale¥æ rekordu w '%-.64s'" + por "Não pode encontrar registro em '%-.64s'" + rum "Nu pot sa gasesc recordul in '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÚÁÐÉÓØ × '%-.64s'" + serbian "Ne mogu da pronaðem slog u '%-.64s'" + slo "Nemô¾em nájs» záznam v '%-.64s'" + spa "No puedo encontrar el registro en '%-.64s'" + swe "Hittar inte posten" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'" +ER_NOT_FORM_FILE + cze "Nespr-Bávná informace v souboru '%-.64s'" + dan "Forkert indhold i: '%-.64s'" + nla "Verkeerde info in file: '%-.64s'" + eng "Incorrect information in file: '%-.64s'" + est "Vigane informatsioon failis '%-.64s'" + fre "Information erronnée dans le fichier: '%-.64s'" + ger "Falsche Information in Datei '%-.64s'" + greek "ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.64s'" + hun "Ervenytelen info a file-ban: '%-.64s'" + ita "Informazione errata nel file: '%-.64s'" + jpn "¥Õ¥¡¥¤¥ë '%-.64s' ¤Î info ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹" + kor "ÈÀÏÀÇ ºÎÁ¤È®ÇÑ Á¤º¸: '%-.64s'" + nor "Feil informasjon i filen: '%-.64s'" + norwegian-ny "Feil informasjon i fila: '%-.64s'" + pol "Niew³a?ciwa informacja w pliku: '%-.64s'" + por "Informação incorreta no arquivo '%-.64s'" + rum "Informatie incorecta in fisierul: '%-.64s'" + rus "îÅËÏÒÒÅËÔÎÁÑ ÉÎÆÏÒÍÁÃÉÑ × ÆÁÊÌÅ '%-.64s'" + serbian "Pogrešna informacija u file-u: '%-.64s'" + slo "Nesprávna informácia v súbore: '%-.64s'" + spa "Informacion erronea en el archivo: '%-.64s'" + swe "Felaktig fil: '%-.64s'" + ukr "èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.64s'" +ER_NOT_KEYFILE + cze "Nespr-Bávný klíè pro tabulku '%-.64s'; pokuste se ho opravit" + dan "Fejl i indeksfilen til tabellen '%-.64s'; prøv at reparere den" + nla "Verkeerde zoeksleutel file voor tabel: '%-.64s'; probeer het te repareren" + eng "Incorrect key file for table '%-.64s'; try to repair it" + est "Tabeli '%-.64s' võtmefail on vigane; proovi seda parandada" + fre "Index corrompu dans la table: '%-.64s'; essayez de le réparer" + ger "Falsche Schlüssel-Datei für Tabelle '%-.64s'. versuche zu reparieren" + greek "ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!" + hun "Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!" 
+ ita "File chiave errato per la tabella : '%-.64s'; prova a riparalo" + jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Î key file ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹. ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤" + kor "'%-.64s' Å×À̺íÀÇ ºÎÁ¤È®ÇÑ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!" + nor "Tabellen '%-.64s' har feil i nøkkelfilen; forsøk å reparer den" + norwegian-ny "Tabellen '%-.64s' har feil i nykkelfila; prøv å reparere den" + pol "Niew³a?ciwy plik kluczy dla tabeli: '%-.64s'; spróbuj go naprawiæ" + por "Arquivo de índice incorreto para tabela '%-.64s'; tente repará-lo" + rum "Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari" + rus "îÅËÏÒÒÅËÔÎÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ: '%-.64s'. ðÏÐÒÏÂÕÊÔÅ ×ÏÓÓÔÁÎÏ×ÉÔØ ÅÇÏ" + serbian "Pogrešan key file za tabelu: '%-.64s'; probajte da ga ispravite" + slo "Nesprávny kµúè pre tabuµku '%-.64s'; pokúste sa ho opravi»" + spa "Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo" + swe "Fatalt fel vid hantering av register '%-.64s'; kör en reparation" + ukr "èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.64s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ" +ER_OLD_KEYFILE + cze "Star-Bý klíèový soubor pro '%-.64s'; opravte ho." + dan "Gammel indeksfil for tabellen '%-.64s'; reparer den" + nla "Oude zoeksleutel file voor tabel '%-.64s'; repareer het!" + eng "Old key file for table '%-.64s'; repair it!" + est "Tabeli '%-.64s' võtmefail on aegunud; paranda see!" + fre "Vieux fichier d'index pour la table '%-.64s'; réparez le!" + ger "Alte Schlüssel-Datei für Tabelle '%-.64s'. Bitte reparieren" + greek "Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!" + hun "Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!" + ita "File chiave vecchio per la tabella '%-.64s'; riparalo!" + jpn "'%-.64s' ¥Æ¡¼¥Ö¥ë¤Ï¸Å¤¤·Á¼°¤Î key file ¤Î¤è¤¦¤Ç¤¹; ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤" + kor "'%-.64s' Å×À̺íÀÇ ÀÌÀü¹öÁ¯ÀÇ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!" + nor "Gammel nøkkelfil for tabellen '%-.64s'; reparer den!" + norwegian-ny "Gammel nykkelfil for tabellen '%-.64s'; reparer den!" + pol "Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!" + por "Arquivo de índice desatualizado para tabela '%-.64s'; repare-o!" + rum "Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!" + rus "óÔÁÒÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'; ÏÔÒÅÍÏÎÔÉÒÕÊÔÅ ÅÇÏ!" + serbian "Zastareo key file za tabelu '%-.64s'; ispravite ga" + slo "Starý kµúèový súbor pre '%-.64s'; opravte ho!" + spa "Clave de archivo antigua para la tabla '%-.64s'; reparelo!" + swe "Gammal nyckelfil '%-.64s'; reparera registret" + ukr "óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.64s'; ÷¦ÄÎÏ×¦ÔØ ÊÏÇÏ!" +ER_OPEN_AS_READONLY + cze "'%-.64s' je jen pro -Bètení" + dan "'%-.64s' er skrivebeskyttet" + nla "'%-.64s' is alleen leesbaar" + eng "Table '%-.64s' is read only" + est "Tabel '%-.64s' on ainult lugemiseks" + fre "'%-.64s' est en lecture seulement" + ger "'%-.64s' ist nur lesbar" + greek "'%-.64s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç" + hun "'%-.64s' irasvedett" + ita "'%-.64s' e` di sola lettura" + jpn "'%-.64s' ¤ÏÆÉ¤ß¹þ¤ßÀìÍѤǤ¹" + kor "Å×À̺í '%-.64s'´Â ÀбâÀü¿ë ÀÔ´Ï´Ù." + nor "'%-.64s' er skrivebeskyttet" + norwegian-ny "'%-.64s' er skrivetryggja" + pol "'%-.64s' jest tylko do odczytu" + por "Tabela '%-.64s' é somente para leitura" + rum "Tabela '%-.64s' e read-only" + rus "ôÁÂÌÉÃÁ '%-.64s' ÐÒÅÄÎÁÚÎÁÞÅÎÁ ÔÏÌØËÏ ÄÌÑ ÞÔÅÎÉÑ" + serbian "Tabelu '%-.64s' je dozvoljeno samo èitati" + slo "'%-.64s' is èíta» only" + spa "'%-.64s' es de solo lectura" + swe "'%-.64s' är skyddad mot förändring" + ukr "ôÁÂÌÉÃÑ '%-.64s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ" +ER_OUTOFMEMORY HY001 S1001 + cze "M-Bálo pamìti. 
Pøestartujte daemona a zkuste znovu (je potøeba %d bytù)" + dan "Ikke mere hukommelse. Genstart serveren og prøv igen (mangler %d bytes)" + nla "Geen geheugen meer. Herstart server en probeer opnieuw (%d bytes nodig)" + eng "Out of memory; restart server and try again (needed %d bytes)" + est "Mälu sai otsa. Proovi MySQL uuesti käivitada (puudu jäi %d baiti)" + fre "Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)" + ger "Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten" + greek "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç. ÐñïóðáèÞóôå ðÜëé, åðáíåêéíþíôáò ôç äéáäéêáóßá (demon) (÷ñåéÜæïíôáé %d bytes)" + hun "Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)" + ita "Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)" + jpn "Out of memory. ¥Ç¡¼¥â¥ó¤ò¥ê¥¹¥¿¡¼¥È¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤ (%d bytes ɬÍ×)" + kor "Out of memory. µ¥¸óÀ» Àç ½ÇÇà ÈÄ ´Ù½Ã ½ÃÀÛÇϽÿÀ (needed %d bytes)" + nor "Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)" + norwegian-ny "Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)" + pol "Zbyt ma³o pamiêci. Uruchom ponownie demona i spróbuj ponownie (potrzeba %d bajtów)" + por "Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)" + rum "Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)" + rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ. ðÅÒÅÚÁÐÕÓÔÉÔÅ ÓÅÒ×ÅÒ É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ (ÎÕÖÎÏ %d ÂÁÊÔ)" + serbian "Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)" + slo "Málo pamäti. Re¹tartujte daemona a skúste znova (je potrebných %d bytov)" + spa "Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)" + swe "Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)" + ukr "âÒÁË ÐÁÍ'ÑÔ¦. òÅÓÔÁÒÔÕÊÔÅ ÓÅÒ×ÅÒ ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ (ÐÏÔÒ¦ÂÎÏ %d ÂÁÊÔ¦×)" +ER_OUT_OF_SORTMEMORY HY001 S1001 + cze "M-Bálo pamìti pro tøídìní. Zvy¹te velikost tøídícího bufferu" + dan "Ikke mere sorteringshukommelse. Øg sorteringshukommelse (sort buffer size) for serveren" + nla "Geen geheugen om te sorteren. Verhoog de server sort buffer size" + eng "Out of sort memory; increase server sort buffer size" + est "Mälu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit" + fre "Manque de mémoire pour le tri. Augmentez-la." + ger "Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte erhöht werden" + greek "Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç ãéá ôáîéíüìéóç. ÁõîÞóôå ôï sort buffer size ãéá ôç äéáäéêáóßá (demon)" + hun "Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet" + ita "Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone" + jpn "Out of sort memory. sort buffer size ¤¬Â¤ê¤Ê¤¤¤è¤¦¤Ç¤¹." + kor "Out of sort memory. daemon sort bufferÀÇ Å©±â¸¦ Áõ°¡½ÃŰ¼¼¿ä" + nor "Ikke mer sorteringsminne. Øk sorteringsminnet (sort buffer size) for tjenesten" + norwegian-ny "Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten" + pol "Zbyt ma³o pamiêci dla sortowania. Zwiêksz wielko?æ bufora demona dla sortowania" + por "Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação" + rum "Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)" + rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ ÄÌÑ ÓÏÒÔÉÒÏ×ËÉ. õ×ÅÌÉÞØÔÅ ÒÁÚÍÅÒ ÂÕÆÅÒÁ ÓÏÒÔÉÒÏ×ËÉ ÎÁ ÓÅÒ×ÅÒÅ" + serbian "Nema memorije za sortiranje. 
Poveæajte velièinu sort buffer-a MySQL server-u" + slo "Málo pamäti pre triedenie, zvý¹te veµkos» triediaceho bufferu" + spa "Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion" + swe "Sorteringsbufferten räcker inte till. Kontrollera startparametrarna" + ukr "âÒÁË ÐÁÍ'ÑÔ¦ ÄÌÑ ÓÏÒÔÕ×ÁÎÎÑ. ôÒÅÂÁ ÚÂ¦ÌØÛÉÔÉ ÒÏÚÍ¦Ò ÂÕÆÅÒÁ ÓÏÒÔÕ×ÁÎÎÑ Õ ÓÅÒ×ÅÒÁ" +ER_UNEXPECTED_EOF + cze "Neo-Bèekávaný konec souboru pøi ètení '%-.64s' (chybový kód: %d)" + dan "Uventet afslutning på fil (eof) ved læsning af filen '%-.64s' (Fejlkode: %d)" + nla "Onverwachte eof gevonden tijdens het lezen van file '%-.64s' (Errcode: %d)" + eng "Unexpected EOF found when reading file '%-.64s' (errno: %d)" + est "Ootamatu faililõpumärgend faili '%-.64s' lugemisel (veakood: %d)" + fre "Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)" + ger "Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)" + greek "ÊáôÜ ôç äéÜñêåéá ôçò áíÜãíùóçò, âñÝèçêå áðñïóäüêçôá ôï ôÝëïò ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)" + hun "Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)" + ita "Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)" + jpn "'%-.64s' ¥Õ¥¡¥¤¥ë¤òÆÉ¤ß¹þ¤ßÃæ¤Ë EOF ¤¬Í½´ü¤»¤Ì½ê¤Ç¸½¤ì¤Þ¤·¤¿. (errno: %d)" + kor "'%-.64s' ÈÀÏÀ» Àд µµÁß À߸øµÈ eofÀ» ¹ß°ß (¿¡·¯¹øÈ£: %d)" + nor "Uventet slutt på fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)" + norwegian-ny "Uventa slutt på fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)" + pol "Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod b³êdu: %d)" + por "Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)" + rum "Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)" + serbian "Neoèekivani kraj pri èitanju file-a '%-.64s' (errno: %d)" + slo "Neoèakávaný koniec súboru pri èítaní '%-.64s' (chybový kód: %d)" + spa "Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)" + swe "Oväntat filslut vid läsning från '%-.64s' (Felkod: %d)" + ukr "èÉÂÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)" +ER_CON_COUNT_ERROR 08004 + cze "P-Bøíli¹ mnoho spojení" + dan "For mange forbindelser (connections)" + nla "Te veel verbindingen" + eng "Too many connections" + est "Liiga palju samaaegseid ühendusi" + fre "Trop de connections" + ger "Zu viele Verbindungen" + greek "ÕðÜñ÷ïõí ðïëëÝò óõíäÝóåéò..." + hun "Tul sok kapcsolat" + ita "Troppe connessioni" + jpn "Àܳ¤¬Â¿¤¹¤®¤Þ¤¹" + kor "³Ê¹« ¸¹Àº ¿¬°á... max_connectionÀ» Áõ°¡ ½ÃŰ½Ã¿À..." + nor "For mange tilkoblinger (connections)" + norwegian-ny "For mange tilkoplingar (connections)" + pol "Zbyt wiele po³?czeñ" + por "Excesso de conexões" + rum "Prea multe conectiuni" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÓÏÅÄÉÎÅÎÉÊ" + serbian "Previše konekcija" + slo "Príli¹ mnoho spojení" + spa "Demasiadas conexiones" + swe "För många anslutningar" + ukr "úÁÂÁÇÁÔÏ Ú'¤ÄÎÁÎØ" +ER_OUT_OF_RESOURCES + cze "M-Bálo prostoru/pamìti pro thread" + dan "Udgået for tråde/hukommelse" + nla "Geen thread geheugen meer; controleer of mysqld of andere processen al het beschikbare geheugen gebruikt. Zo niet, dan moet u wellicht 'ulimit' gebruiken om mysqld toe te laten meer geheugen te benutten, of u kunt extra swap ruimte toevoegen" + eng "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space" + est "Mälu sai otsa. 
Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MySQL-le rohkema mälu kasutamise lubamine" + fre "Manque de 'threads'/mémoire" + ger "Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess allen Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten" + greek "Ðñüâëçìá ìå ôç äéáèÝóéìç ìíÞìç (Out of thread space/memory)" + hun "Elfogyott a thread-memoria" + ita "Fine dello spazio/memoria per i thread" + jpn "Out of memory; mysqld ¤«¤½¤Î¾¤Î¥×¥í¥»¥¹¤¬¥á¥â¥ê¡¼¤òÁ´¤Æ»È¤Ã¤Æ¤¤¤ë¤«³Îǧ¤·¤Æ¤¯¤À¤µ¤¤. ¥á¥â¥ê¡¼¤ò»È¤¤ÀڤäƤ¤¤Ê¤¤¾ì¹ç¡¢'ulimit' ¤òÀßÄꤷ¤Æ mysqld ¤Î¥á¥â¥ê¡¼»ÈÍѸ³¦Î̤ò¿¤¯¤¹¤ë¤«¡¢swap space ¤òÁý¤ä¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤" + kor "Out of memory; mysqld³ª ¶Ç´Ù¸¥ ÇÁ·Î¼¼¼¿¡¼ »ç¿ë°¡´ÉÇÑ ¸Þ¸ð¸®¸¦ »ç¿ëÇÑÁö äũÇϽÿÀ. ¸¸¾à ±×·¸Áö ¾Ê´Ù¸é ulimit ¸í·ÉÀ» ÀÌ¿¿ëÇÏ¿© ´õ¸¹Àº ¸Þ¸ð¸®¸¦ »ç¿ëÇÒ ¼ö ÀÖµµ·Ï Çϰųª ½º¿Ò ½ºÆÐÀ̽º¸¦ Áõ°¡½ÃŰ½Ã¿À" + nor "Tomt for tråd plass/minne" + norwegian-ny "Tomt for tråd plass/minne" + pol "Zbyt ma³o miejsca/pamiêci dla w?tku" + por "Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou você pode adicionar mais área de 'swap'" + rum "Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)" + rus "îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ; ÕÄÏÓÔÏ×ÅÒØÔÅÓØ, ÞÔÏ mysqld ÉÌÉ ËÁËÏÊ-ÌÉÂÏ ÄÒÕÇÏÊ ÐÒÏÃÅÓÓ ÎÅ ÚÁÎÉÍÁÅÔ ×ÓÀ ÄÏÓÔÕÐÎÕÀ ÐÁÍÑÔØ. åÓÌÉ ÎÅÔ, ÔÏ ×Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ ulimit, ÞÔÏÂÙ ×ÙÄÅÌÉÔØ ÄÌÑ mysqld ÂÏÌØÛÅ ÐÁÍÑÔÉ, ÉÌÉ Õ×ÅÌÉÞÉÔØ ÏÂßÅÍ ÆÁÊÌÁ ÐÏÄËÁÞËÉ" + serbian "Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi više memorije ili probajte da dodate više swap memorije)" + slo "Málo miesta-pamäti pre vlákno" + spa "Memoria/espacio de tranpaso insuficiente" + swe "Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap" + ukr "âÒÁË ÐÁÍ'ÑÔ¦; ðÅÒÅצÒÔÅ ÞÉ mysqld ÁÂÏ ÑË¦ÓØ ¦ÎÛ¦ ÐÒÏÃÅÓÉ ×ÉËÏÒÉÓÔÏ×ÕÀÔØ ÕÓÀ ÄÏÓÔÕÐÎÕ ÐÁÍ'ÑÔØ. ñË Î¦, ÔÏ ×É ÍÏÖÅÔÅ ÓËÏÒÉÓÔÁÔÉÓÑ 'ulimit', ÁÂÉ ÄÏÚ×ÏÌÉÔÉ mysqld ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ Â¦ÌØÛÅ ÐÁÍ'ÑÔ¦ ÁÂÏ ×É ÍÏÖÅÔÅ ÄÏÄÁÔÉ Â¦ÌØÛŠͦÓÃÑ Ð¦Ä Ó×ÁÐ" +ER_BAD_HOST_ERROR 08S01 + cze "Nemohu zjistit jm-Béno stroje pro Va¹i adresu" + dan "Kan ikke få værtsnavn for din adresse" + nla "Kan de hostname niet krijgen van uw adres" + eng "Can't get hostname for your address" + est "Ei suuda lahendada IP aadressi masina nimeks" + fre "Ne peut obtenir de hostname pour votre adresse" + ger "Kann Hostnamen für diese Adresse nicht erhalten" + greek "Äåí Ýãéíå ãíùóôü ôï hostname ãéá ôçí address óáò" + hun "A gepnev nem allapithato meg a cimbol" + ita "Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)" + jpn "¤½¤Î address ¤Î hostname ¤¬°ú¤±¤Þ¤»¤ó." + kor "´ç½ÅÀÇ ÄÄÇ»ÅÍÀÇ È£½ºÆ®À̸§À» ¾òÀ» ¼ö ¾øÀ¾´Ï´Ù." 
+ nor "Kan ikke få tak i vertsnavn for din adresse" + norwegian-ny "Kan ikkje få tak i vertsnavn for di adresse" + pol "Nie mo¿na otrzymaæ nazwy hosta dla twojego adresu" + por "Não pode obter nome do 'host' para seu endereço" + rum "Nu pot sa obtin hostname-ul adresei tale" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÉÍÑ ÈÏÓÔÁ ÄÌÑ ×ÁÛÅÇÏ ÁÄÒÅÓÁ" + serbian "Ne mogu da dobijem ime host-a za vašu IP adresu" + slo "Nemô¾em zisti» meno hostiteµa pre va¹u adresu" + spa "No puedo obtener el nombre de maquina de tu direccion" + swe "Kan inte hitta 'hostname' för din adress" + ukr "îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ¦Í'Ñ ÈÏÓÔÕ ÄÌÑ ×ÁÛϧ ÁÄÒÅÓÉ" +ER_HANDSHAKE_ERROR 08S01 + cze "Chyba p-Bøi ustavování spojení" + dan "Forkert håndtryk (handshake)" + nla "Verkeerde handshake" + eng "Bad handshake" + est "Väär handshake" + fre "Mauvais 'handshake'" + ger "Schlechter Handshake" + greek "Ç áíáãíþñéóç (handshake) äåí Ýãéíå óùóôÜ" + hun "A kapcsolatfelvetel nem sikerult (Bad handshake)" + ita "Negoziazione impossibile" + nor "Feil håndtrykk (handshake)" + norwegian-ny "Feil handtrykk (handshake)" + pol "Z³y uchwyt(handshake)" + por "Negociação de acesso falhou" + rum "Prost inceput de conectie (bad handshake)" + rus "îÅËÏÒÒÅËÔÎÏÅ ÐÒÉ×ÅÔÓÔ×ÉÅ" + serbian "Loš poèetak komunikacije (handshake)" + slo "Chyba pri nadväzovaní spojenia" + spa "Protocolo erroneo" + swe "Fel vid initiering av kommunikationen med klienten" + ukr "îÅצÒÎÁ ÕÓÔÁÎÏ×ËÁ Ú×'ÑÚËÕ" +ER_DBACCESS_DENIED_ERROR 42000 + cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' k databázi '%-.64s' není povolen" + dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' til databasen '%-.64s'" + nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' naar database '%-.64s'" + eng "Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'" + est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'" + fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s'. Base '%-.64s'" + ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.64s'" + greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'" + hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz." + ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'" + jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹" + kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù." 
+ nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet" + norwegian-ny "Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta" + por "Acesso negado para o usuário '%-.32s'@'%-.64s' ao banco de dados '%-.64s'" + rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'" + rus "äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ" + serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'" + slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' k databázi '%-.64s'" + spa "Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'" + swe "Användare '%-.32s'@'%-.64s' är ej berättigad att använda databasen %-.64s" + ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'" +ER_ACCESS_DENIED_ERROR 28000 + cze "P-Bøístup pro u¾ivatele '%-.32s'@'%-.64s' (s heslem %s)" + dan "Adgang nægtet bruger: '%-.32s'@'%-.64s' (Bruger adgangskode: %s)" + nla "Toegang geweigerd voor gebruiker: '%-.32s'@'%-.64s' (Wachtwoord gebruikt: %s)" + eng "Access denied for user '%-.32s'@'%-.64s' (using password: %s)" + est "Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)" + fre "Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)" + ger "Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)" + greek "Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)" + hun "A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)" + ita "Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)" + jpn "¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)" + kor "'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (using password: %s)" + nor "Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)" + norwegian-ny "Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)" + por "Acesso negado para o usuário '%-.32s'@'%-.64s' (senha usada: %s)" + rum "Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)" + rus "äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)" + serbian "Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')" + slo "Zakázaný prístup pre u¾ívateµa: '%-.32s'@'%-.64s' (pou¾itie hesla: %s)" + spa "Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)" + swe "Användare '%-.32s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)" + ukr "äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)" +ER_NO_DB_ERROR 3D000 + cze "Nebyla vybr-Bána ¾ádná databáze" + dan "Ingen database valgt" + nla "Geen database geselecteerd" + eng "No database selected" + est "Andmebaasi ei ole valitud" + fre "Aucune base n'a été sélectionnée" + ger "Keine Datenbank ausgewählt" + greek "Äåí åðéëÝ÷èçêå âÜóç äåäïìÝíùí" + hun "Nincs kivalasztott adatbazis" + ita "Nessun database selezionato" + jpn "¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ÁªÂò¤µ¤ì¤Æ¤¤¤Þ¤»¤ó." + kor "¼±ÅÃµÈ µ¥ÀÌŸº£À̽º°¡ ¾ø½À´Ï´Ù." 
+ nor "Ingen database valgt" + norwegian-ny "Ingen database vald" + pol "Nie wybrano ¿adnej bazy danych" + por "Nenhum banco de dados foi selecionado" + rum "Nici o baza de data nu a fost selectata inca" + rus "âÁÚÁ ÄÁÎÎÙÈ ÎÅ ×ÙÂÒÁÎÁ" + serbian "Ni jedna baza nije selektovana" + slo "Nebola vybraná databáza" + spa "Base de datos no seleccionada" + swe "Ingen databas i användning" + ukr "âÁÚÕ ÄÁÎÎÉÈ ÎÅ ×ÉÂÒÁÎÏ" +ER_UNKNOWN_COM_ERROR 08S01 + cze "Nezn-Bámý pøíkaz" + dan "Ukendt kommando" + nla "Onbekend commando" + eng "Unknown command" + est "Tundmatu käsk" + fre "Commande inconnue" + ger "Unbekannter Befehl" + greek "Áãíùóôç åíôïëÞ" + hun "Ervenytelen parancs" + ita "Comando sconosciuto" + jpn "¤½¤Î¥³¥Þ¥ó¥É¤Ï²¿¡©" + kor "¸í·É¾î°¡ ¹ºÁö ¸ð¸£°Ú¾î¿ä..." + nor "Ukjent kommando" + norwegian-ny "Ukjent kommando" + pol "Nieznana komenda" + por "Comando desconhecido" + rum "Comanda invalida" + rus "îÅÉÚ×ÅÓÔÎÁÑ ËÏÍÁÎÄÁ ËÏÍÍÕÎÉËÁÃÉÏÎÎÏÇÏ ÐÒÏÔÏËÏÌÁ" + serbian "Nepoznata komanda" + slo "Neznámy príkaz" + spa "Comando desconocido" + swe "Okänt commando" + ukr "îÅצÄÏÍÁ ËÏÍÁÎÄÁ" +ER_BAD_NULL_ERROR 23000 + cze "Sloupec '%-.64s' nem-Bù¾e být null" + dan "Kolonne '%-.64s' kan ikke være NULL" + nla "Kolom '%-.64s' kan niet null zijn" + eng "Column '%-.64s' cannot be null" + est "Tulp '%-.64s' ei saa omada nullväärtust" + fre "Le champ '%-.64s' ne peut être vide (null)" + ger "Feld '%-.64s' darf nicht NULL sein" + greek "Ôï ðåäßï '%-.64s' äåí ìðïñåß íá åßíáé êåíü (null)" + hun "A(z) '%-.64s' oszlop erteke nem lehet nulla" + ita "La colonna '%-.64s' non puo` essere nulla" + jpn "Column '%-.64s' ¤Ï null ¤Ë¤Ï¤Ç¤¤Ê¤¤¤Î¤Ç¤¹" + kor "Ä®·³ '%-.64s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. " + nor "Kolonne '%-.64s' kan ikke vere null" + norwegian-ny "Kolonne '%-.64s' kan ikkje vere null" + pol "Kolumna '%-.64s' nie mo¿e byæ null" + por "Coluna '%-.64s' não pode ser vazia" + rum "Coloana '%-.64s' nu poate sa fie null" + rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL" + serbian "Kolona '%-.64s' ne može biti NULL" + slo "Pole '%-.64s' nemô¾e by» null" + spa "La columna '%-.64s' no puede ser nula" + swe "Kolumn '%-.64s' får inte vara NULL" + ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ" +ER_BAD_DB_ERROR 42000 + cze "Nezn-Bámá databáze '%-.64s'" + dan "Ukendt database '%-.64s'" + nla "Onbekende database '%-.64s'" + eng "Unknown database '%-.64s'" + est "Tundmatu andmebaas '%-.64s'" + fre "Base '%-.64s' inconnue" + ger "Unbekannte Datenbank '%-.64s'" + greek "Áãíùóôç âÜóç äåäïìÝíùí '%-.64s'" + hun "Ervenytelen adatbazis: '%-.64s'" + ita "Database '%-.64s' sconosciuto" + jpn "'%-.64s' ¤Ê¤ó¤Æ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ÏÃΤê¤Þ¤»¤ó." 
+ kor "µ¥ÀÌŸº£À̽º '%-.64s'´Â ¾Ë¼ö ¾øÀ½" + nor "Ukjent database '%-.64s'" + norwegian-ny "Ukjent database '%-.64s'" + pol "Nieznana baza danych '%-.64s'" + por "Banco de dados '%-.64s' desconhecido" + rum "Baza de data invalida '%-.64s'" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÂÁÚÁ ÄÁÎÎÙÈ '%-.64s'" + serbian "Nepoznata baza '%-.64s'" + slo "Neznáma databáza '%-.64s'" + spa "Base de datos desconocida '%-.64s'" + swe "Okänd databas: '%-.64s'" + ukr "îÅצÄÏÍÁ ÂÁÚÁ ÄÁÎÎÉÈ '%-.64s'" +ER_TABLE_EXISTS_ERROR 42S01 + cze "Tabulka '%-.64s' ji-B¾ existuje" + dan "Tabellen '%-.64s' findes allerede" + nla "Tabel '%-.64s' bestaat al" + eng "Table '%-.64s' already exists" + est "Tabel '%-.64s' juba eksisteerib" + fre "La table '%-.64s' existe déjà" + ger "Tabelle '%-.64s' bereits vorhanden" + greek "Ï ðßíáêáò '%-.64s' õðÜñ÷åé Þäç" + hun "A(z) '%-.64s' tabla mar letezik" + ita "La tabella '%-.64s' esiste gia`" + jpn "Table '%-.64s' ¤Ï´û¤Ë¤¢¤ê¤Þ¤¹" + kor "Å×À̺í '%-.64s'´Â ÀÌ¹Ì Á¸ÀçÇÔ" + nor "Tabellen '%-.64s' eksisterer allerede" + norwegian-ny "Tabellen '%-.64s' eksisterar allereide" + pol "Tabela '%-.64s' ju¿ istnieje" + por "Tabela '%-.64s' já existe" + rum "Tabela '%-.64s' exista deja" + rus "ôÁÂÌÉÃÁ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Tabela '%-.64s' veæ postoji" + slo "Tabuµka '%-.64s' u¾ existuje" + spa "La tabla '%-.64s' ya existe" + swe "Tabellen '%-.64s' finns redan" + ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÖÅ ¦ÓÎÕ¤" +ER_BAD_TABLE_ERROR 42S02 + cze "Nezn-Bámá tabulka '%-.64s'" + dan "Ukendt tabel '%-.64s'" + nla "Onbekende tabel '%-.64s'" + eng "Unknown table '%-.64s'" + est "Tundmatu tabel '%-.64s'" + fre "Table '%-.64s' inconnue" + ger "Unbekannte Tabelle '%-.64s'" + greek "Áãíùóôïò ðßíáêáò '%-.64s'" + hun "Ervenytelen tabla: '%-.64s'" + ita "Tabella '%-.64s' sconosciuta" + jpn "table '%-.64s' ¤Ï¤¢¤ê¤Þ¤»¤ó." 
+ kor "Å×À̺í '%-.64s'´Â ¾Ë¼ö ¾øÀ½" + nor "Ukjent tabell '%-.64s'" + norwegian-ny "Ukjent tabell '%-.64s'" + pol "Nieznana tabela '%-.64s'" + por "Tabela '%-.64s' desconhecida" + rum "Tabela '%-.64s' este invalida" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s'" + serbian "Nepoznata tabela '%-.64s'" + slo "Neznáma tabuµka '%-.64s'" + spa "Tabla '%-.64s' desconocida" + swe "Okänd tabell '%-.64s'" + ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s'" +ER_NON_UNIQ_ERROR 23000 + cze "Sloupec '%-.64s' v %s nen-Bí zcela jasný" + dan "Felt: '%-.64s' i tabel %s er ikke entydigt" + nla "Kolom: '%-.64s' in %s is niet eenduidig" + eng "Column '%-.64s' in %-.64s is ambiguous" + est "Väli '%-.64s' %-.64s-s ei ole ühene" + fre "Champ: '%-.64s' dans %s est ambigu" + ger "Spalte '%-.64s' in %-.64s ist nicht eindeutig" + greek "Ôï ðåäßï: '%-.64s' óå %-.64s äåí Ý÷åé êáèïñéóôåß" + hun "A(z) '%-.64s' oszlop %-.64s-ben ketertelmu" + ita "Colonna: '%-.64s' di %-.64s e` ambigua" + jpn "Column: '%-.64s' in %-.64s is ambiguous" + kor "Ä®·³: '%-.64s' in '%-.64s' ÀÌ ¸ðÈ£ÇÔ" + nor "Felt: '%-.64s' i tabell %s er ikke entydig" + norwegian-ny "Kolonne: '%-.64s' i tabell %s er ikkje eintydig" + pol "Kolumna: '%-.64s' w %s jest dwuznaczna" + por "Coluna '%-.64s' em '%-.64s' é ambígua" + rum "Coloana: '%-.64s' in %-.64s este ambigua" + rus "óÔÏÌÂÅà '%-.64s' × %-.64s ÚÁÄÁÎ ÎÅÏÄÎÏÚÎÁÞÎÏ" + serbian "Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu" + slo "Pole: '%-.64s' v %-.64s je nejasné" + spa "La columna: '%-.64s' en %s es ambigua" + swe "Kolumn '%-.64s' i %s är inte unik" + ukr "óÔÏ×ÂÅÃØ '%-.64s' Õ %-.64s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ" +ER_SERVER_SHUTDOWN 08S01 + cze "Prob-Bíhá ukonèování práce serveru" + dan "Database nedlukning er i gang" + nla "Bezig met het stoppen van de server" + eng "Server shutdown in progress" + est "Serveri seiskamine käib" + fre "Arrêt du serveur en cours" + ger "Der Server wird heruntergefahren" + greek "Åíáñîç äéáäéêáóßáò áðïóýíäåóçò ôïõ åîõðçñåôçôÞ (server shutdown)" + hun "A szerver leallitasa folyamatban" + ita "Shutdown del server in corso" + jpn "Server ¤ò shutdown Ãæ..." + kor "Server°¡ ¼Ë´Ù¿î ÁßÀÔ´Ï´Ù." + nor "Database nedkobling er i gang" + norwegian-ny "Tenar nedkopling er i gang" + pol "Trwa koñczenie dzia³ania serwera" + por "'Shutdown' do servidor em andamento" + rum "Terminarea serverului este in desfasurare" + rus "óÅÒ×ÅÒ ÎÁÈÏÄÉÔÓÑ × ÐÒÏÃÅÓÓÅ ÏÓÔÁÎÏ×ËÉ" + serbian "Gašenje servera je u toku" + slo "Prebieha ukonèovanie práce servera" + spa "Desconexion de servidor en proceso" + swe "Servern går nu ned" + ukr "úÁ×ÅÒÛÕ¤ÔØÓÑ ÒÁÂÏÔÁ ÓÅÒ×ÅÒÁ" +ER_BAD_FIELD_ERROR 42S22 S0022 + cze "Nezn-Bámý sloupec '%-.64s' v %s" + dan "Ukendt kolonne '%-.64s' i tabel %s" + nla "Onbekende kolom '%-.64s' in %s" + eng "Unknown column '%-.64s' in '%-.64s'" + est "Tundmatu tulp '%-.64s' '%-.64s'-s" + fre "Champ '%-.64s' inconnu dans %s" + ger "Unbekanntes Tabellenfeld '%-.64s' in %-.64s" + greek "Áãíùóôï ðåäßï '%-.64s' óå '%-.64s'" + hun "A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben" + ita "Colonna sconosciuta '%-.64s' in '%-.64s'" + jpn "'%-.64s' column ¤Ï '%-.64s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó." 
+ kor "Unknown Ä®·³ '%-.64s' in '%-.64s'" + nor "Ukjent kolonne '%-.64s' i tabell %s" + norwegian-ny "Ukjent felt '%-.64s' i tabell %s" + pol "Nieznana kolumna '%-.64s' w %s" + por "Coluna '%-.64s' desconhecida em '%-.64s'" + rum "Coloana invalida '%-.64s' in '%-.64s'" + rus "îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.64s' × '%-.64s'" + serbian "Nepoznata kolona '%-.64s' u '%-.64s'" + slo "Neznáme pole '%-.64s' v '%-.64s'" + spa "La columna '%-.64s' en %s es desconocida" + swe "Okänd kolumn '%-.64s' i %s" + ukr "îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' Õ '%-.64s'" +ER_WRONG_FIELD_WITH_GROUP 42000 S1009 + cze "Pou-B¾ité '%-.64s' nebylo v group by" + dan "Brugte '%-.64s' som ikke var i group by" + nla "Opdracht gebruikt '%-.64s' dat niet in de GROUP BY voorkomt" + eng "'%-.64s' isn't in GROUP BY" + est "'%-.64s' puudub GROUP BY klauslis" + fre "'%-.64s' n'est pas dans 'group by'" + ger "'%-.64s' ist nicht in GROUP BY vorhanden" + greek "×ñçóéìïðïéÞèçêå '%-.64s' ðïõ äåí õðÞñ÷å óôï group by" + hun "Used '%-.64s' with wasn't in group by" + ita "Usato '%-.64s' che non e` nel GROUP BY" + kor "'%-.64s'Àº GROUP BY¼Ó¿¡ ¾øÀ½" + nor "Brukte '%-.64s' som ikke var i group by" + norwegian-ny "Brukte '%-.64s' som ikkje var i group by" + pol "U¿yto '%-.64s' bez umieszczenia w group by" + por "'%-.64s' não está em 'GROUP BY'" + rum "'%-.64s' nu exista in clauza GROUP BY" + rus "'%-.64s' ÎÅ ÐÒÉÓÕÔÓÔ×ÕÅÔ × GROUP BY" + serbian "Entitet '%-.64s' nije naveden u komandi 'GROUP BY'" + slo "Pou¾ité '%-.64s' nebolo v 'group by'" + spa "Usado '%-.64s' el cual no esta group by" + swe "'%-.64s' finns inte i GROUP BY" + ukr "'%-.64s' ÎÅ ¤ Õ GROUP BY" +ER_WRONG_GROUP_FIELD 42000 S1009 + cze "Nemohu pou-B¾ít group na '%-.64s'" + dan "Kan ikke gruppere på '%-.64s'" + nla "Kan '%-.64s' niet groeperen" + eng "Can't group on '%-.64s'" + est "Ei saa grupeerida '%-.64s' järgi" + fre "Ne peut regrouper '%-.64s'" + ger "Gruppierung über '%-.64s' nicht möglich" + greek "Áäýíáôç ç ïìáäïðïßçóç (group on) '%-.64s'" + hun "A group nem hasznalhato: '%-.64s'" + ita "Impossibile raggruppare per '%-.64s'" + kor "'%-.64s'¸¦ ±×·ìÇÒ ¼ö ¾øÀ½" + nor "Kan ikke gruppere på '%-.64s'" + norwegian-ny "Kan ikkje gruppere på '%-.64s'" + pol "Nie mo¿na grupowaæ po '%-.64s'" + por "Não pode agrupar em '%-.64s'" + rum "Nu pot sa grupez pe (group on) '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÇÒÕÐÐÉÒÏ×ËÕ ÐÏ '%-.64s'" + serbian "Ne mogu da grupišem po '%-.64s'" + slo "Nemô¾em pou¾i» 'group' na '%-.64s'" + spa "No puedo agrupar por '%-.64s'" + swe "Kan inte använda GROUP BY med '%-.64s'" + ukr "îÅ ÍÏÖÕ ÇÒÕÐÕ×ÁÔÉ ÐÏ '%-.64s'" +ER_WRONG_SUM_SELECT 42000 S1009 + cze "P-Bøíkaz obsahuje zároveò funkci sum a sloupce" + dan "Udtrykket har summer (sum) funktioner og kolonner i samme udtryk" + nla "Opdracht heeft totaliseer functies en kolommen in dezelfde opdracht" + eng "Statement has sum functions and columns in same statement" + est "Lauses on korraga nii tulbad kui summeerimisfunktsioonid" + fre "Vous demandez la fonction sum() et des champs dans la même commande" + ger "Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt" + greek "Ç äéáôýðùóç ðåñéÝ÷åé sum functions êáé columns óôçí ßäéá äéáôýðùóç" + ita "Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY" + kor "Statement °¡ sum±â´ÉÀ» µ¿ÀÛÁßÀ̰í Ä®·³µµ µ¿ÀÏÇÑ statementÀÔ´Ï´Ù." 
+ nor "Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk" + norwegian-ny "Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk" + pol "Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu" + por "Cláusula contém funções de soma e colunas juntas" + rum "Comanda are functii suma si coloane in aceeasi comanda" + rus "÷ÙÒÁÖÅÎÉÅ ÓÏÄÅÒÖÉÔ ÇÒÕÐÐÏ×ÙÅ ÆÕÎËÃÉÉ É ÓÔÏÌÂÃÙ, ÎÏ ÎÅ ×ËÌÀÞÁÅÔ GROUP BY. á ËÁË ×Ù ÕÍÕÄÒÉÌÉÓØ ÐÏÌÕÞÉÔØ ÜÔÏ ÓÏÏÂÝÅÎÉÅ Ï ÏÛÉÂËÅ?" + serbian "Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme" + slo "Príkaz obsahuje zároveò funkciu 'sum' a poµa" + spa "El estamento tiene funciones de suma y columnas en el mismo estamento" + swe "Kommandot har både sum functions och enkla funktioner" + ukr "õ ×ÉÒÁÚ¦ ×ÉËÏÒÉÓÔÁÎÏ Ð¦ÄÓÕÍÏ×ÕÀÞ¦ ÆÕÎËæ§ ÐÏÒÑÄ Ú ¦ÍÅÎÁÍÉ ÓÔÏ×Âæ×" +ER_WRONG_VALUE_COUNT 21S01 + cze "Po-Bèet sloupcù neodpovídá zadané hodnotì" + dan "Kolonne tæller stemmer ikke med antallet af værdier" + nla "Het aantal kolommen komt niet overeen met het aantal opgegeven waardes" + eng "Column count doesn't match value count" + est "Tulpade arv erineb väärtuste arvust" + ger "Die Anzahl der Spalten entspricht nicht der Anzahl der Werte" + greek "Ôï Column count äåí ôáéñéÜæåé ìå ôï value count" + hun "Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel" + ita "Il numero delle colonne non e` uguale al numero dei valori" + kor "Ä®·³ÀÇ Ä«¿îÆ®°¡ °ªÀÇ Ä«¿îÆ®¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù." + nor "Felt telling stemmer verdi telling" + norwegian-ny "Kolonne telling stemmer verdi telling" + pol "Liczba kolumn nie odpowiada liczbie warto?ci" + por "Contagem de colunas não confere com a contagem de valores" + rum "Numarul de coloane nu este acelasi cu numarul valoarei" + rus "ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ" + serbian "Broj kolona ne odgovara broju vrednosti" + slo "Poèet polí nezodpovedá zadanej hodnote" + spa "La columna con count no tiene valores para contar" + swe "Antalet kolumner motsvarar inte antalet värden" + ukr "ë¦ÌØË¦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØË¦ÓÔÀ ÚÎÁÞÅÎØ" +ER_TOO_LONG_IDENT 42000 S1009 + cze "Jm-Béno identifikátoru '%-.64s' je pøíli¹ dlouhé" + dan "Navnet '%-.64s' er for langt" + nla "Naam voor herkenning '%-.64s' is te lang" + eng "Identifier name '%-.100s' is too long" + est "Identifikaatori '%-.100s' nimi on liiga pikk" + fre "Le nom de l'identificateur '%-.64s' est trop long" + ger "Name des Bezeichners '%-.64s' ist zu lang" + greek "Ôï identifier name '%-.100s' åßíáé ðïëý ìåãÜëï" + hun "A(z) '%-.100s' azonositonev tul hosszu." + ita "Il nome dell'identificatore '%-.100s' e` troppo lungo" + jpn "Identifier name '%-.100s' ¤ÏŤ¹¤®¤Þ¤¹" + kor "Identifier '%-.100s'´Â ³Ê¹« ±æ±º¿ä." 
+ nor "Identifikator '%-.64s' er for lang" + norwegian-ny "Identifikator '%-.64s' er for lang" + pol "Nazwa identyfikatora '%-.64s' jest zbyt d³uga" + por "Nome identificador '%-.100s' é longo demais" + rum "Numele indentificatorului '%-.100s' este prea lung" + rus "óÌÉÛËÏÍ ÄÌÉÎÎÙÊ ÉÄÅÎÔÉÆÉËÁÔÏÒ '%-.100s'" + serbian "Ime '%-.100s' je predugaèko" + slo "Meno identifikátora '%-.100s' je príli¹ dlhé" + spa "El nombre del identificador '%-.64s' es demasiado grande" + swe "Kolumnnamn '%-.64s' är för långt" + ukr "¶Í'Ñ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒÁ '%-.100s' ÚÁÄÏ×ÇÅ" +ER_DUP_FIELDNAME 42S21 S1009 + cze "Zdvojen-Bé jméno sloupce '%-.64s'" + dan "Feltnavnet '%-.64s' findes allerede" + nla "Dubbele kolom naam '%-.64s'" + eng "Duplicate column name '%-.64s'" + est "Kattuv tulba nimi '%-.64s'" + fre "Nom du champ '%-.64s' déjà utilisé" + ger "Doppelter Spaltenname vorhanden: '%-.64s'" + greek "ÅðáíÜëçøç column name '%-.64s'" + hun "Duplikalt oszlopazonosito: '%-.64s'" + ita "Nome colonna duplicato '%-.64s'" + jpn "'%-.64s' ¤È¤¤¤¦ column ̾¤Ï½ÅÊ£¤·¤Æ¤Þ¤¹" + kor "Áߺ¹µÈ Ä®·³ À̸§: '%-.64s'" + nor "Feltnavnet '%-.64s' eksisterte fra før" + norwegian-ny "Feltnamnet '%-.64s' eksisterte frå før" + pol "Powtórzona nazwa kolumny '%-.64s'" + por "Nome da coluna '%-.64s' duplicado" + rum "Numele coloanei '%-.64s' e duplicat" + rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ÓÔÏÌÂÃÁ '%-.64s'" + serbian "Duplirano ime kolone '%-.64s'" + slo "Opakované meno poµa '%-.64s'" + spa "Nombre de columna duplicado '%-.64s'" + swe "Kolumnnamn '%-.64s finns flera gånger" + ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.64s'" +ER_DUP_KEYNAME 42000 S1009 + cze "Zdvojen-Bé jméno klíèe '%-.64s'" + dan "Indeksnavnet '%-.64s' findes allerede" + nla "Dubbele zoeksleutel naam '%-.64s'" + eng "Duplicate key name '%-.64s'" + est "Kattuv võtme nimi '%-.64s'" + fre "Nom de clef '%-.64s' déjà utilisé" + ger "Doppelter Name für Schlüssel (Key) vorhanden: '%-.64s'" + greek "ÅðáíÜëçøç key name '%-.64s'" + hun "Duplikalt kulcsazonosito: '%-.64s'" + ita "Nome chiave duplicato '%-.64s'" + jpn "'%-.64s' ¤È¤¤¤¦ key ¤Î̾Á°¤Ï½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" + kor "Áߺ¹µÈ Ű À̸§ : '%-.64s'" + nor "Nøkkelnavnet '%-.64s' eksisterte fra før" + norwegian-ny "Nøkkelnamnet '%-.64s' eksisterte frå før" + pol "Powtórzony nazwa klucza '%-.64s'" + por "Nome da chave '%-.64s' duplicado" + rum "Numele cheiei '%-.64s' e duplicat" + rus "äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ËÌÀÞÁ '%-.64s'" + serbian "Duplirano ime kljuèa '%-.64s'" + slo "Opakované meno kµúèa '%-.64s'" + spa "Nombre de clave duplicado '%-.64s'" + swe "Nyckelnamn '%-.64s' finns flera gånger" + ukr "äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.64s'" +ER_DUP_ENTRY 23000 S1009 + cze "Zvojen-Bý klíè '%-.64s' (èíslo klíèe %d)" + dan "Ens værdier '%-.64s' for indeks %d" + nla "Dubbele ingang '%-.64s' voor zoeksleutel %d" + eng "Duplicate entry '%-.64s' for key %d" + est "Kattuv väärtus '%-.64s' võtmele %d" + fre "Duplicata du champ '%-.64s' pour la clef %d" + ger "Doppelter Eintrag '%-.64s' für Schlüssel %d" + greek "ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß %d" + hun "Duplikalt bejegyzes '%-.64s' a %d kulcs szerint." 
+ ita "Valore duplicato '%-.64s' per la chiave %d" + jpn "'%-.64s' ¤Ï key %d ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹" + kor "Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key %d" + nor "Like verdier '%-.64s' for nøkkel %d" + norwegian-ny "Like verdiar '%-.64s' for nykkel %d" + pol "Powtórzone wyst?pienie '%-.64s' dla klucza %d" + por "Entrada '%-.64s' duplicada para a chave %d" + rum "Cimpul '%-.64s' e duplicat pentru cheia %d" + rus "äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ %d" + serbian "Dupliran unos '%-.64s' za kljuè '%d'" + slo "Opakovaný kµúè '%-.64s' (èíslo kµúèa %d)" + spa "Entrada duplicada '%-.64s' para la clave %d" + swe "Dubbel nyckel '%-.64s' för nyckel %d" + ukr "äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ %d" +ER_WRONG_FIELD_SPEC 42000 S1009 + cze "Chybn-Bá specifikace sloupce '%-.64s'" + dan "Forkert kolonnespecifikaton for felt '%-.64s'" + nla "Verkeerde kolom specificatie voor kolom '%-.64s'" + eng "Incorrect column specifier for column '%-.64s'" + est "Vigane tulba kirjeldus tulbale '%-.64s'" + fre "Mauvais paramètre de champ pour le champ '%-.64s'" + ger "Falsche Spaltenangaben für Spalte '%-.64s'" + greek "ÅóöáëìÝíï column specifier ãéá ôï ðåäßï '%-.64s'" + hun "Rossz oszlopazonosito: '%-.64s'" + ita "Specifica errata per la colonna '%-.64s'" + kor "Ä®·³ '%-.64s'ÀÇ ºÎÁ¤È®ÇÑ Ä®·³ Á¤ÀÇÀÚ" + nor "Feil kolonne spesifikator for felt '%-.64s'" + norwegian-ny "Feil kolonne spesifikator for kolonne '%-.64s'" + pol "B³êdna specyfikacja kolumny dla kolumny '%-.64s'" + por "Especificador de coluna incorreto para a coluna '%-.64s'" + rum "Specificandul coloanei '%-.64s' este incorect" + rus "îÅËÏÒÒÅËÔÎÙÊ ÏÐÒÅÄÅÌÉÔÅÌØ ÓÔÏÌÂÃÁ ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s'" + serbian "Pogrešan naziv kolone za kolonu '%-.64s'" + slo "Chyba v ¹pecifikácii poµa '%-.64s'" + spa "Especificador de columna erroneo para la columna '%-.64s'" + swe "Felaktigt kolumntyp för kolumn '%-.64s'" + ukr "îÅצÒÎÉÊ ÓÐÅÃÉÆ¦ËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.64s'" +ER_PARSE_ERROR 42000 + cze "%s bl-Bízko '%-.64s' na øádku %d" + dan "%s nær '%-.64s' på linje %d" + nla "%s bij '%-.64s' in regel %d" + eng "%s near '%-.80s' at line %d" + est "%s '%-.80s' ligidal real %d" + fre "%s près de '%-.64s' à la ligne %d" + ger "%s bei '%-.80s' in Zeile %d" + greek "%s ðëçóßïí '%-.80s' óôç ãñáììÞ %d" + hun "A %s a '%-.80s'-hez kozeli a %d sorban" + ita "%s vicino a '%-.80s' linea %d" + jpn "%s : '%-.80s' ÉÕ¶á : %d ¹ÔÌÜ" + kor "'%-.64s' ¿¡·¯ °°À¾´Ï´Ù. ('%-.80s' ¸í·É¾î ¶óÀÎ %d)" + nor "%s nær '%-.64s' på linje %d" + norwegian-ny "%s attmed '%-.64s' på line %d" + pol "%s obok '%-.64s' w linii %d" + por "%s próximo a '%-.80s' na linha %d" + rum "%s linga '%-.80s' pe linia %d" + rus "%s ÏËÏÌÏ '%-.80s' ÎÁ ÓÔÒÏËÅ %d" + serbian "'%s' u iskazu '%-.80s' na liniji %d" + slo "%s blízko '%-.80s' na riadku %d" + spa "%s cerca '%-.64s' en la linea %d" + swe "%s nära '%-.64s' på rad %d" + ukr "%s ¦ÌÑ '%-.80s' × ÓÔÒÏæ %d" +ER_EMPTY_QUERY 42000 + cze "V-Býsledek dotazu je prázdný" + dan "Forespørgsel var tom" + nla "Query was leeg" + eng "Query was empty" + est "Tühi päring" + fre "Query est vide" + ger "Leere Abfrage" + greek "Ôï åñþôçìá (query) ðïõ èÝóáôå Þôáí êåíü" + hun "Ures lekerdezes." + ita "La query e` vuota" + jpn "Query ¤¬¶õ¤Ç¤¹." + kor "Äõ¸®°á°ú°¡ ¾ø½À´Ï´Ù." 
+ nor "Forespørsel var tom" + norwegian-ny "Førespurnad var tom" + pol "Zapytanie by³o puste" + por "Consulta (query) estava vazia" + rum "Query-ul a fost gol" + rus "úÁÐÒÏÓ ÏËÁÚÁÌÓÑ ÐÕÓÔÙÍ" + serbian "Upit je bio prazan" + slo "Výsledok po¾iadavky bol prázdny" + spa "La query estaba vacia" + swe "Frågan var tom" + ukr "ðÕÓÔÉÊ ÚÁÐÉÔ" +ER_NONUNIQ_TABLE 42000 S1009 + cze "Nejednozna-Bèná tabulka/alias: '%-.64s'" + dan "Tabellen/aliaset: '%-.64s' er ikke unikt" + nla "Niet unieke waarde tabel/alias: '%-.64s'" + eng "Not unique table/alias: '%-.64s'" + est "Ei ole unikaalne tabel/alias '%-.64s'" + fre "Table/alias: '%-.64s' non unique" + ger "Tabellenname/Alias '%-.64s' nicht eindeutig" + greek "Áäýíáôç ç áíåýñåóç unique table/alias: '%-.64s'" + hun "Nem egyedi tabla/alias: '%-.64s'" + ita "Tabella/alias non unico: '%-.64s'" + jpn "'%-.64s' ¤Ï°ì°Õ¤Î table/alias ̾¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó" + kor "Unique ÇÏÁö ¾ÊÀº Å×À̺í/alias: '%-.64s'" + nor "Ikke unikt tabell/alias: '%-.64s'" + norwegian-ny "Ikkje unikt tabell/alias: '%-.64s'" + pol "Tabela/alias nie s? unikalne: '%-.64s'" + por "Tabela/alias '%-.64s' não única" + rum "Tabela/alias: '%-.64s' nu este unic" + rus "ðÏ×ÔÏÒÑÀÝÁÑÓÑ ÔÁÂÌÉÃÁ/ÐÓÅ×ÄÏÎÉÍ '%-.64s'" + serbian "Tabela ili alias nisu bili jedinstveni: '%-.64s'" + slo "Nie jednoznaèná tabuµka/alias: '%-.64s'" + spa "Tabla/alias: '%-.64s' es no unica" + swe "Icke unikt tabell/alias: '%-.64s'" + ukr "îÅÕΦËÁÌØÎÁ ÔÁÂÌÉÃÑ/ÐÓÅ×ÄÏΦÍ: '%-.64s'" +ER_INVALID_DEFAULT 42000 S1009 + cze "Chybn-Bá defaultní hodnota pro '%-.64s'" + dan "Ugyldig standardværdi for '%-.64s'" + nla "Foutieve standaard waarde voor '%-.64s'" + eng "Invalid default value for '%-.64s'" + est "Vigane vaikeväärtus '%-.64s' jaoks" + fre "Valeur par défaut invalide pour '%-.64s'" + ger "Fehlerhafter Vorgabewert (DEFAULT): '%-.64s'" + greek "ÅóöáëìÝíç ðñïêáèïñéóìÝíç ôéìÞ (default value) ãéá '%-.64s'" + hun "Ervenytelen ertek: '%-.64s'" + ita "Valore di default non valido per '%-.64s'" + kor "'%-.64s'ÀÇ À¯È¿ÇÏÁö ¸øÇÑ µðÆúÆ® °ªÀ» »ç¿ëÇϼ̽À´Ï´Ù." + nor "Ugyldig standardverdi for '%-.64s'" + norwegian-ny "Ugyldig standardverdi for '%-.64s'" + pol "Niew³a?ciwa warto?æ domy?lna dla '%-.64s'" + por "Valor padrão (default) inválido para '%-.64s'" + rum "Valoarea de default este invalida pentru '%-.64s'" + rus "îÅËÏÒÒÅËÔÎÏÅ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ '%-.64s'" + serbian "Loša default vrednost za '%-.64s'" + slo "Chybná implicitná hodnota pre '%-.64s'" + spa "Valor por defecto invalido para '%-.64s'" + swe "Ogiltigt DEFAULT värde för '%-.64s'" + ukr "îÅצÒÎÅ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ ÄÌÑ '%-.64s'" +ER_MULTIPLE_PRI_KEY 42000 S1009 + cze "Definov-Báno více primárních klíèù" + dan "Flere primærnøgler specificeret" + nla "Meerdere primaire zoeksleutels gedefinieerd" + eng "Multiple primary key defined" + est "Mitut primaarset võtit ei saa olla" + fre "Plusieurs clefs primaires définies" + ger "Mehrfacher Primärschlüssel (PRIMARY KEY) definiert" + greek "Ðåñéóóüôåñá áðü Ýíá primary key ïñßóôçêáí" + hun "Tobbszoros elsodleges kulcs definialas." 
+ ita "Definite piu` chiave primarie" + jpn "Ê£¿ô¤Î primary key ¤¬ÄêµÁ¤µ¤ì¤Þ¤·¤¿" + kor "Multiple primary key°¡ Á¤ÀǵǾî ÀÖ½¿" + nor "Fleire primærnøkle spesifisert" + norwegian-ny "Fleire primærnyklar spesifisert" + pol "Zdefiniowano wiele kluczy podstawowych" + por "Definida mais de uma chave primária" + rum "Chei primare definite de mai multe ori" + rus "õËÁÚÁÎÏ ÎÅÓËÏÌØËÏ ÐÅÒ×ÉÞÎÙÈ ËÌÀÞÅÊ" + serbian "Definisani višestruki primarni kljuèevi" + slo "Zadefinovaných viac primárnych kµúèov" + spa "Multiples claves primarias definidas" + swe "Flera PRIMARY KEY använda" + ukr "ðÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ ×ÉÚÎÁÞÅÎÏ ÎÅÏÄÎÏÒÁÚÏ×Ï" +ER_TOO_MANY_KEYS 42000 S1009 + cze "Zad-Báno pøíli¹ mnoho klíèù, je povoleno nejvíce %d klíèù" + dan "For mange nøgler specificeret. Kun %d nøgler må bruges" + nla "Teveel zoeksleutels gedefinieerd. Maximaal zijn %d zoeksleutels toegestaan" + eng "Too many keys specified; max %d keys allowed" + est "Liiga palju võtmeid. Maksimaalselt võib olla %d võtit" + fre "Trop de clefs sont définies. Maximum de %d clefs alloué" + ger "Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt" + greek "ÐÜñá ðïëëÜ key ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé" + hun "Tul sok kulcs. Maximum %d kulcs engedelyezett." + ita "Troppe chiavi. Sono ammesse max %d chiavi" + jpn "key ¤Î»ØÄ꤬¿¤¹¤®¤Þ¤¹. key ¤ÏºÇÂç %d ¤Þ¤Ç¤Ç¤¹" + kor "³Ê¹« ¸¹Àº ۰¡ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %dÀÇ Å°°¡ °¡´ÉÇÔ" + nor "For mange nøkler spesifisert. Maks %d nøkler tillatt" + norwegian-ny "For mange nykler spesifisert. Maks %d nyklar tillatt" + pol "Okre?lono zbyt wiele kluczy. Dostêpnych jest maksymalnie %d kluczy" + por "Especificadas chaves demais. O máximo permitido são %d chaves" + rum "Prea multe chei. Numarul de chei maxim este %d" + rus "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ËÌÀÞÅÊ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ËÌÀÞÅÊ" + serbian "Navedeno je previše kljuèeva. Maksimum %d kljuèeva je dozvoljeno" + slo "Zadaných ríli¹ veµa kµúèov. Najviac %d kµúèov je povolených" + spa "Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas" + swe "För många nycklar använda. Man får ha högst %d nycklar" + ukr "úÁÂÁÇÁÔÏ ËÌÀÞ¦× ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ËÌÀÞ¦×" +ER_TOO_MANY_KEY_PARTS 42000 S1009 + cze "Zad-Báno pøíli¹ mnoho èást klíèù, je povoleno nejvíce %d èástí" + dan "For mange nøgledele specificeret. Kun %d dele må bruges" + nla "Teveel zoeksleutel onderdelen gespecificeerd. Maximaal %d onderdelen toegestaan" + eng "Too many key parts specified; max %d parts allowed" + est "Võti koosneb liiga paljudest osadest. Maksimaalselt võib olla %d osa" + fre "Trop de parties specifiées dans la clef. Maximum de %d parties" + ger "Zu viele Teilschlüssel definiert. Maximal sind %d Teilschlüssel erlaubt" + greek "ÐÜñá ðïëëÜ key parts ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé" + hun "Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett" + ita "Troppe parti di chiave specificate. Sono ammesse max %d parti" + kor "³Ê¹« ¸¹Àº Ű ºÎºÐ(parts)µéÀÌ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %d ºÎºÐÀÌ °¡´ÉÇÔ" + nor "For mange nøkkeldeler spesifisert. Maks %d deler tillatt" + norwegian-ny "For mange nykkeldelar spesifisert. Maks %d delar tillatt" + pol "Okre?lono zbyt wiele czê?ci klucza. Dostêpnych jest maksymalnie %d czê?ci" + por "Especificadas partes de chave demais. O máximo permitido são %d partes" + rum "Prea multe chei. Numarul de chei maxim este %d" + rus "õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÞÁÓÔÅÊ ÓÏÓÔÁ×ÎÏÇÏ ËÌÀÞÁ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ÞÁÓÔÅÊ" + serbian "Navedeno je previše delova kljuèa. Maksimum %d delova je dozvoljeno" + slo "Zadaných ríli¹ veµa èastí kµúèov. 
Je povolených najviac %d èastí" + spa "Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas" + swe "För många nyckeldelar använda. Man får ha högst %d nyckeldelar" + ukr "úÁÂÁÇÁÔÏ ÞÁÓÔÉÎ ËÌÀÞÁ ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ÞÁÓÔÉÎ" +ER_TOO_LONG_KEY 42000 S1009 + cze "Zadan-Bý klíè byl pøíli¹ dlouhý, nejvìt¹í délka klíèe je %d" + dan "Specificeret nøgle var for lang. Maksimal nøglelængde er %d" + nla "Gespecificeerde zoeksleutel was te lang. De maximale lengte is %d" + eng "Specified key was too long; max key length is %d bytes" + est "Võti on liiga pikk. Maksimaalne võtmepikkus on %d" + fre "La clé est trop longue. Longueur maximale: %d" + ger "Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d" + greek "Ôï êëåéäß ðïõ ïñßóèçêå åßíáé ðïëý ìåãÜëï. Ôï ìÝãéóôï ìÞêïò åßíáé %d" + hun "A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d" + ita "La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d" + jpn "key ¤¬Ä¹¤¹¤®¤Þ¤¹. key ¤ÎŤµ¤ÏºÇÂç %d ¤Ç¤¹" + kor "Á¤ÀÇµÈ Å°°¡ ³Ê¹« ±é´Ï´Ù. ÃÖ´ë ŰÀÇ ±æÀÌ´Â %dÀÔ´Ï´Ù." + nor "Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d" + norwegian-ny "Spesifisert nykkel var for lang. Maks nykkellengde er %d" + pol "Zdefinowany klucz jest zbyt d³ugi. Maksymaln? d³ugo?ci? klucza jest %d" + por "Chave especificada longa demais. O comprimento de chave máximo permitido é %d" + rum "Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d" + rus "õËÁÚÁÎ ÓÌÉÛËÏÍ ÄÌÉÎÎÙÊ ËÌÀÞ. íÁËÓÉÍÁÌØÎÁÑ ÄÌÉÎÁ ËÌÀÞÁ ÓÏÓÔÁ×ÌÑÅÔ %d ÂÁÊÔ" + serbian "Navedeni kljuè je predug. Maksimalna dužina kljuèa je %d" + slo "Zadaný kµúè je príli¹ dlhý, najväè¹ia då¾ka kµúèa je %d" + spa "Declaracion de clave demasiado larga. La maxima longitud de clave es %d" + swe "För lång nyckel. Högsta tillåtna nyckellängd är %d" + ukr "úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. îÁÊÂ¦ÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d ÂÁÊÔ¦×" +ER_KEY_COLUMN_DOES_NOT_EXITS 42000 S1009 + cze "Kl-Bíèový sloupec '%-.64s' v tabulce neexistuje" + dan "Nøglefeltet '%-.64s' eksisterer ikke i tabellen" + nla "Zoeksleutel kolom '%-.64s' bestaat niet in tabel" + eng "Key column '%-.64s' doesn't exist in table" + est "Võtme tulp '%-.64s' puudub tabelis" + fre "La clé '%-.64s' n'existe pas dans la table" + ger "In der Tabelle gibt es keine Schlüsselspalte '%-.64s'" + greek "Ôï ðåäßï êëåéäß '%-.64s' äåí õðÜñ÷åé óôïí ðßíáêá" + hun "A(z) '%-.64s'kulcsoszlop nem letezik a tablaban" + ita "La colonna chiave '%-.64s' non esiste nella tabella" + jpn "Key column '%-.64s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó." + kor "Key Ä®·³ '%-.64s'´Â Å×ÀÌºí¿¡ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." 
+ nor "Nøkkel felt '%-.64s' eksiterer ikke i tabellen" + norwegian-ny "Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen" + pol "Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli" + por "Coluna chave '%-.64s' não existe na tabela" + rum "Coloana cheie '%-.64s' nu exista in tabela" + rus "ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.64s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Kljuèna kolona '%-.64s' ne postoji u tabeli" + slo "Kµúèový ståpec '%-.64s' v tabuµke neexistuje" + spa "La columna clave '%-.64s' no existe en la tabla" + swe "Nyckelkolumn '%-.64s' finns inte" + ukr "ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ" +ER_BLOB_USED_AS_KEY 42000 S1009 + cze "Blob sloupec '%-.64s' nem-Bù¾e být pou¾it jako klíè" + dan "BLOB feltet '%-.64s' kan ikke bruges ved specifikation af indeks" + nla "BLOB kolom '%-.64s' kan niet gebruikt worden bij zoeksleutel specificatie" + eng "BLOB column '%-.64s' can't be used in key specification with the used table type" + est "BLOB-tüüpi tulpa '%-.64s' ei saa kasutada võtmena" + fre "Champ BLOB '%-.64s' ne peut être utilisé dans une clé" + ger "BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden" + greek "Ðåäßï ôýðïõ Blob '%-.64s' äåí ìðïñåß íá ÷ñçóéìïðïéçèåß óôïí ïñéóìü åíüò êëåéäéïý (key specification)" + hun "Blob objektum '%-.64s' nem hasznalhato kulcskent" + ita "La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave" + kor "BLOB Ä®·³ '%-.64s'´Â Ű Á¤ÀÇ¿¡¼ »ç¿ëµÉ ¼ö ¾ø½À´Ï´Ù." + nor "Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nøkler" + norwegian-ny "Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar" + pol "Kolumna typu Blob '%-.64s' nie mo¿e byæ u¿yta w specyfikacji klucza" + por "Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado" + rum "Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit" + rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ" + serbian "BLOB kolona '%-.64s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi" + slo "Blob pole '%-.64s' nemô¾e by» pou¾ité ako kµúè" + spa "La columna Blob '%-.64s' no puede ser usada en una declaracion de clave" + swe "En BLOB '%-.64s' kan inte vara nyckel med den använda tabelltypen" + ukr "BLOB ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ" +ER_TOO_BIG_FIELDLENGTH 42000 S1009 + cze "P-Bøíli¹ velká délka sloupce '%-.64s' (nejvíce %d). Pou¾ijte BLOB" + dan "For stor feltlængde for kolonne '%-.64s' (maks = %d). Brug BLOB i stedet" + nla "Te grote kolomlengte voor '%-.64s' (max = %d). Maak hiervoor gebruik van het type BLOB" + eng "Column length too big for column '%-.64s' (max = %d); use BLOB instead" + est "Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB väljatüüpi" + fre "Champ '%-.64s' trop long (max = %d). Utilisez un BLOB" + ger "Feldlänge für Feld '%-.64s' zu groß (maximal %d). BLOB-Feld verwenden!" + greek "Ðïëý ìåãÜëï ìÞêïò ãéá ôï ðåäßï '%-.64s' (max = %d). Ðáñáêáëþ ÷ñçóéìïðïéåßóôå ôïí ôýðï BLOB" + hun "A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb." + ita "La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB." + jpn "column '%-.64s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤." + kor "Ä®·³ '%-.64s'ÀÇ Ä®·³ ±æÀ̰¡ ³Ê¹« ±é´Ï´Ù (ÃÖ´ë = %d). ´ë½Å¿¡ BLOB¸¦ »ç¿ëÇϼ¼¿ä." 
+ nor "For stor nøkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor" + norwegian-ny "For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor" + pol "Zbyt du¿a d³ugo?æ kolumny '%-.64s' (maks. = %d). W zamian u¿yj typu BLOB" + por "Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar" + rum "Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine" + rus "óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.64s' (ÍÁËÓÉÍÕÍ = %d). éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ" + serbian "Previše podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje" + slo "Príli¹ veµká då¾ka pre pole '%-.64s' (maximum = %d). Pou¾ite BLOB" + spa "Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar" + swe "För stor kolumnlängd angiven för '%-.64s' (max= %d). Använd en BLOB instället" + ukr "úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.64s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB" +ER_WRONG_AUTO_KEY 42000 S1009 + cze "M-Bù¾ete mít pouze jedno AUTO pole a to musí být definováno jako klíè" + dan "Der kan kun specificeres eet AUTO_INCREMENT-felt, og det skal være indekseret" + nla "Er kan slechts 1 autofield zijn en deze moet als zoeksleutel worden gedefinieerd." + eng "Incorrect table definition; there can be only one auto column and it must be defined as a key" + est "Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena" + fre "Un seul champ automatique est permis et il doit être indexé" + ger "Falsche Tabellendefinition. Es darf nur ein Auto-Feld geben und dieses muss als Schlüssel definiert werden" + greek "Ìðïñåß íá õðÜñ÷åé ìüíï Ýíá auto field êáé ðñÝðåé íá Ý÷åé ïñéóèåß óáí key" + hun "Csak egy auto mezo lehetseges, es azt kulcskent kell definialni." + ita "Puo` esserci solo un campo AUTO e deve essere definito come chiave" + jpn "¥Æ¡¼¥Ö¥ë¤ÎÄêµÁ¤¬°ã¤¤¤Þ¤¹; there can be only one auto column and it must be defined as a key" + kor "ºÎÁ¤È®ÇÑ Å×À̺í Á¤ÀÇ; Å×À̺íÀº ÇϳªÀÇ auto Ä®·³ÀÌ Á¸ÀçÇϰí Ű·Î Á¤ÀǵǾîÁ®¾ß ÇÕ´Ï´Ù." + nor "Bare ett auto felt kan være definert som nøkkel." + norwegian-ny "Bare eitt auto felt kan være definert som nøkkel." + pol "W tabeli mo¿e byæ tylko jedno pole auto i musi ono byæ zdefiniowane jako klucz" + por "Definição incorreta de tabela. 
Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave" + rum "Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie" + rus "îÅËÏÒÒÅËÔÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ: ÍÏÖÅÔ ÓÕÝÅÓÔ×Ï×ÁÔØ ÔÏÌØËÏ ÏÄÉÎ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÊ ÓÔÏÌÂÅÃ, É ÏÎ ÄÏÌÖÅÎ ÂÙÔØ ÏÐÒÅÄÅÌÅÎ ËÁË ËÌÀÞ" + serbian "Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljuèa" + slo "Mô¾ete ma» iba jedno AUTO pole a to musí by» definované ako kµúè" + spa "Puede ser solamente un campo automatico y este debe ser definido como una clave" + swe "Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel" + ukr "îÅצÒÎÅ ×ÉÚÎÁÞÅÎÎÑ ÔÁÂÌÉæ; íÏÖÅ ÂÕÔÉ ÌÉÛÅ ÏÄÉÎ Á×ÔÏÍÁÔÉÞÎÉÊ ÓÔÏ×ÂÅÃØ, ÝÏ ÐÏ×ÉÎÅÎ ÂÕÔÉ ×ÉÚÎÁÞÅÎÉÊ ÑË ËÌÀÞ" +ER_READY + cze "%s: p-Bøipraven na spojení" + dan "%s: klar til tilslutninger" + nla "%s: klaar voor verbindingen" + eng "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d" + est "%s: ootab ühendusi" + fre "%s: Prêt pour des connections" + ger "%-.64s: Bereit für Verbindungen" + greek "%s: óå áíáìïíÞ óõíäÝóåùí" + hun "%s: kapcsolatra kesz" + ita "%s: Pronto per le connessioni\n" + jpn "%s: ½àÈ÷´°Î»" + kor "%s: ¿¬°á ÁغñÁßÀÔ´Ï´Ù" + nor "%s: klar for tilkoblinger" + norwegian-ny "%s: klar for tilkoblingar" + pol "%s: gotowe do po³?czenia" + por "%s: Pronto para conexões" + rum "%s: sint gata pentru conectii" + rus "%s: çÏÔÏ× ÐÒÉÎÉÍÁÔØ ÓÏÅÄÉÎÅÎÉÑ.\n÷ÅÒÓÉÑ: '%s' ÓÏËÅÔ: '%s' ÐÏÒÔ: %d" + serbian "%s: Spreman za konekcije\n" + slo "%s: pripravený na spojenie" + spa "%s: preparado para conexiones" + swe "%s: klar att ta emot klienter" + ukr "%s: çÏÔÏ×ÉÊ ÄÌÑ Ú'¤ÄÎÁÎØ!" +ER_NORMAL_SHUTDOWN + cze "%s: norm-Bální ukonèení\n" + dan "%s: Normal nedlukning\n" + nla "%s: Normaal afgesloten \n" + eng "%s: Normal shutdown\n" + est "%s: MySQL lõpetas\n" + fre "%s: Arrêt normal du serveur\n" + ger "%-.64s: Normal heruntergefahren\n" + greek "%s: ÖõóéïëïãéêÞ äéáäéêáóßá shutdown\n" + hun "%s: Normal leallitas\n" + ita "%s: Shutdown normale\n" + kor "%s: Á¤»óÀûÀÎ shutdown\n" + nor "%s: Normal avslutning\n" + norwegian-ny "%s: Normal nedkopling\n" + pol "%s: Standardowe zakoñczenie dzia³ania\n" + por "%s: 'Shutdown' normal\n" + rum "%s: Terminare normala\n" + rus "%s: ëÏÒÒÅËÔÎÁÑ ÏÓÔÁÎÏ×ËÁ\n" + serbian "%s: Normalno gašenje\n" + slo "%s: normálne ukonèenie\n" + spa "%s: Apagado normal\n" + swe "%s: Normal avslutning\n" + ukr "%s: îÏÒÍÁÌØÎÅ ÚÁ×ÅÒÛÅÎÎÑ\n" +ER_GOT_SIGNAL + cze "%s: p-Bøijat signal %d, konèím\n" + dan "%s: Fangede signal %d. Afslutter!!\n" + nla "%s: Signaal %d. Systeem breekt af!\n" + eng "%s: Got signal %d. Aborting!\n" + est "%s: sain signaali %d. Lõpetan!\n" + fre "%s: Reçu le signal %d. Abandonne!\n" + ger "%-.64s: Signal %d erhalten. Abbruch!\n" + greek "%s: ÅëÞöèç ôï ìÞíõìá %d. Ç äéáäéêáóßá åãêáôáëåßðåôáé!\n" + hun "%s: %d jelzes. Megszakitva!\n" + ita "%s: Ricevuto segnale %d. Interruzione!\n" + jpn "%s: Got signal %d. ÃæÃÇ!\n" + kor "%s: %d ½ÅÈ£°¡ µé¾î¿ÔÀ½. ÁßÁö!\n" + nor "%s: Oppdaget signal %d. Avslutter!\n" + norwegian-ny "%s: Oppdaga signal %d. Avsluttar!\n" + pol "%s: Otrzymano sygna³ %d. Koñczenie dzia³ania!\n" + por "%s: Obteve sinal %d. Abortando!\n" + rum "%s: Semnal %d obtinut. Aborting!\n" + rus "%s: ðÏÌÕÞÅÎ ÓÉÇÎÁÌ %d. ðÒÅËÒÁÝÁÅÍ!\n" + serbian "%s: Dobio signal %d. Prekidam!\n" + slo "%s: prijatý signál %d, ukonèenie (Abort)!\n" + spa "%s: Recibiendo signal %d. Abortando!\n" + swe "%s: Fick signal %d. 
Avslutar!\n" + ukr "%s: ïÔÒÉÍÁÎÏ ÓÉÇÎÁÌ %d. ðÅÒÅÒÉ×ÁÀÓØ!\n" +ER_SHUTDOWN_COMPLETE + cze "%s: ukon-Bèení práce hotovo\n" + dan "%s: Server lukket\n" + nla "%s: Afsluiten afgerond\n" + eng "%s: Shutdown complete\n" + est "%s: Lõpp\n" + fre "%s: Arrêt du serveur terminé\n" + ger "%-.64s: Heruntergefahren (shutdown)\n" + greek "%s: Ç äéáäéêáóßá Shutdown ïëïêëçñþèçêå\n" + hun "%s: A leallitas kesz\n" + ita "%s: Shutdown completato\n" + jpn "%s: Shutdown ´°Î»\n" + kor "%s: Shutdown ÀÌ ¿Ï·áµÊ!\n" + nor "%s: Avslutning komplett\n" + norwegian-ny "%s: Nedkopling komplett\n" + pol "%s: Zakoñczenie dzia³ania wykonane\n" + por "%s: 'Shutdown' completo\n" + rum "%s: Terminare completa\n" + rus "%s: ïÓÔÁÎÏ×ËÁ ÚÁ×ÅÒÛÅÎÁ\n" + serbian "%s: Gašenje završeno\n" + slo "%s: práca ukonèená\n" + spa "%s: Apagado completado\n" + swe "%s: Avslutning klar\n" + ukr "%s: òÏÂÏÔÕ ÚÁ×ÅÒÛÅÎÏ\n" +ER_FORCING_CLOSE 08S01 + cze "%s: n-Básilné uzavøení threadu %ld u¾ivatele '%-.64s'\n" + dan "%s: Forceret nedlukning af tråd: %ld bruger: '%-.64s'\n" + nla "%s: Afsluiten afgedwongen van thread %ld gebruiker: '%-.64s'\n" + eng "%s: Forcing close of thread %ld user: '%-.32s'\n" + est "%s: Sulgen jõuga lõime %ld kasutaja: '%-.32s'\n" + fre "%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.64s'\n" + ger "%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n" + greek "%s: Ôï thread èá êëåßóåé %ld user: '%-.64s'\n" + hun "%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.64s'\n" + ita "%s: Forzata la chiusura del thread %ld utente: '%-.64s'\n" + jpn "%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.64s'\n" + kor "%s: thread %ldÀÇ °Á¦ Á¾·á user: '%-.64s'\n" + nor "%s: Påtvinget avslutning av tråd %ld bruker: '%-.64s'\n" + norwegian-ny "%s: Påtvinga avslutning av tråd %ld brukar: '%-.64s'\n" + pol "%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.64s'\n" + por "%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n" + rum "%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n" + rus "%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.32s'\n" + serbian "%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n" + slo "%s: násilné ukonèenie vlákna %ld u¾ívateµa '%-.64s'\n" + spa "%s: Forzando a cerrar el thread %ld usuario: '%-.64s'\n" + swe "%s: Stänger av tråd %ld; användare: '%-.64s'\n" + ukr "%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'\n" +ER_IPSOCK_ERROR 08S01 + cze "Nemohu vytvo-Bøit IP socket" + dan "Kan ikke oprette IP socket" + nla "Kan IP-socket niet openen" + eng "Can't create IP socket" + est "Ei suuda luua IP socketit" + fre "Ne peut créer la connection IP (socket)" + ger "Kann IP-Socket nicht erzeugen" + greek "Äåí åßíáé äõíáôÞ ç äçìéïõñãßá IP socket" + hun "Az IP socket nem hozhato letre" + ita "Impossibile creare il socket IP" + jpn "IP socket ¤¬ºî¤ì¤Þ¤»¤ó" + kor "IP ¼ÒÄÏÀ» ¸¸µéÁö ¸øÇß½À´Ï´Ù." + nor "Kan ikke opprette IP socket" + norwegian-ny "Kan ikkje opprette IP socket" + pol "Nie mo¿na stworzyæ socket'u IP" + por "Não pode criar o soquete IP" + rum "Nu pot crea IP socket" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ IP-ÓÏËÅÔ" + serbian "Ne mogu da kreiram IP socket" + slo "Nemô¾em vytvori» IP socket" + spa "No puedo crear IP socket" + swe "Kan inte skapa IP-socket" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ IP ÒÏÚ'¤Í" +ER_NO_SUCH_INDEX 42S12 S1009 + cze "Tabulka '%-.64s' nem-Bá index odpovídající CREATE INDEX. Vytvoøte tabulku znovu" + dan "Tabellen '%-.64s' har ikke den nøgle, som blev brugt i CREATE INDEX. 
Genopret tabellen" + nla "Tabel '%-.64s' heeft geen INDEX zoals deze gemaakt worden met CREATE INDEX. Maak de tabel opnieuw" + eng "Table '%-.64s' has no index like the one used in CREATE INDEX; recreate the table" + est "Tabelil '%-.64s' puuduvad võtmed. Loo tabel uuesti" + fre "La table '%-.64s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table" + ger "Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Index neu anlegen" + greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé åõñåôÞñéï (index) óáí áõôü ðïõ ÷ñçóéìïðïéåßôå óôçí CREATE INDEX. Ðáñáêáëþ, îáíáäçìéïõñãÞóôå ôïí ðßíáêá" + hun "A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat" + ita "La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella" + jpn "Table '%-.64s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). ¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤" + kor "Å×À̺í '%-.64s'´Â À妽º¸¦ ¸¸µéÁö ¾Ê¾Ò½À´Ï´Ù. alter Å×À̺í¸í·ÉÀ» ÀÌ¿ëÇÏ¿© Å×À̺íÀ» ¼öÁ¤Çϼ¼¿ä..." + nor "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen" + norwegian-ny "Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt" + pol "Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelê" + por "Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela" + rum "Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela" + rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï" + serbian "Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo" + slo "Tabuµka '%-.64s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova" + spa "La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla" + swe "Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen" + ukr "ôÁÂÌÉÃÑ '%-.64s' ÍÁ¤ ¦ÎÄÅËÓ, ÝÏ ÎÅ ÓЦ×ÐÁÄÁ¤ Ú ×ËÁÚÁÎÎÉÍ Õ CREATE INDEX. óÔ×ÏÒ¦ÔØ ÔÁÂÌÉÃÀ ÚÎÏ×Õ" +ER_WRONG_FIELD_TERMINATORS 42000 S1009 + cze "Argument separ-Bátoru polo¾ek nebyl oèekáván. Pøeètìte si manuál" + dan "Felt adskiller er ikke som forventet, se dokumentationen" + nla "De argumenten om velden te scheiden zijn anders dan verwacht. Raadpleeg de handleiding" + eng "Field separator argument is not what is expected; check the manual" + est "Väljade eraldaja erineb oodatust. Tutvu kasutajajuhendiga" + fre "Séparateur de champs inconnu. Vérifiez dans le manuel" + ger "Feldbegrenzer-Argument ist nicht in der erwarteten Form. Bitte im Handbuch nachlesen" + greek "Ï äéá÷ùñéóôÞò ðåäßùí äåí åßíáé áõôüò ðïõ áíáìåíüôáí. Ðáñáêáëþ áíáôñÝîôå óôï manual" + hun "A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!" + ita "L'argomento 'Field separator' non e` quello atteso. Controlla il manuale" + kor "ÇÊµå ±¸ºÐÀÚ ÀμöµéÀÌ ¿ÏÀüÇÏÁö ¾Ê½À´Ï´Ù. ¸Þ´º¾óÀ» ã¾Æ º¸¼¼¿ä." + nor "Felt skiller argumentene er ikke som forventet, se dokumentasjonen" + norwegian-ny "Felt skiljer argumenta er ikkje som venta, sjå dokumentasjonen" + pol "Nie oczekiwano separatora. Sprawd¥ podrêcznik" + por "Argumento separador de campos não é o esperado. Cheque o manual" + rum "Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul" + rus "áÒÇÕÍÅÎÔ ÒÁÚÄÅÌÉÔÅÌÑ ÐÏÌÅÊ - ÎÅ ÔÏÔ, ËÏÔÏÒÙÊ ÏÖÉÄÁÌÓÑ. 
ïÂÒÁÝÁÊÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ" + serbian "Argument separatora polja nije ono što se oèekivalo. Proverite uputstvo MySQL server-a" + slo "Argument oddeµovaè polí nezodpovedá po¾iadavkám. Skontrolujte v manuáli" + spa "Los separadores de argumentos del campo no son los especificados. Comprueba el manual" + swe "Fältseparatorerna är vad som förväntades. Kontrollera mot manualen" + ukr "èÉÂÎÉÊ ÒÏÚĦÌÀ×ÁÞ ÐÏ̦×. ðÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ" +ER_BLOBS_AND_NO_TERMINATED 42000 S1009 + cze "Nen-Bí mo¾né pou¾ít pevný rowlength s BLOBem. Pou¾ijte 'fields terminated by'." + dan "Man kan ikke bruge faste feltlængder med BLOB. Brug i stedet 'fields terminated by'." + nla "Bij het gebruik van BLOBs is het niet mogelijk om vaste rijlengte te gebruiken. Maak s.v.p. gebruik van 'fields terminated by'." + eng "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'" + est "BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang." + fre "Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'." + ger "Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden" + greek "Äåí ìðïñåßôå íá ÷ñçóéìïðïéÞóåôå fixed rowlength óå BLOBs. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'fields terminated by'." + hun "Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' ." + ita "Non possono essere usate righe a lunghezza fissa con i BLOB. Usa 'FIELDS TERMINATED BY'." + jpn "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'." + kor "BLOB·Î´Â °íÁ¤±æÀÌÀÇ lowlength¸¦ »ç¿ëÇÒ ¼ö ¾ø½À´Ï´Ù. 'fields terminated by'¸¦ »ç¿ëÇϼ¼¿ä." + nor "En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." + norwegian-ny "Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'." + pol "Nie mo¿na u¿yæ sta³ej d³ugo?ci wiersza z polami typu BLOB. U¿yj 'fields terminated by'." + por "Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado." + rum "Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'." + rus "æÉËÓÉÒÏ×ÁÎÎÙÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ Ó ÐÏÌÑÍÉ ÔÉÐÁ BLOB ÉÓÐÏÌØÚÏ×ÁÔØ ÎÅÌØÚÑ, ÐÒÉÍÅÎÑÊÔÅ 'fields terminated by'" + serbian "Ne možete koristiti fiksnu velièinu sloga kada imate BLOB polja. Molim koristite 'fields terminated by' opciju." + slo "Nie je mo¾né pou¾i» fixnú då¾ku s BLOBom. Pou¾ite 'fields terminated by'." + spa "No puedes usar longitudes de filas fijos con BLOBs. Por favor usa 'campos terminados por '." + swe "Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'" + ukr "îÅ ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÓÔÁÌÕ ÄÏ×ÖÉÎÕ ÓÔÒÏËÉ Ú BLOB. úËÏÒÉÓÔÁÊÔÅÓÑ 'fields terminated by'" +ER_TEXTFILE_NOT_READABLE + cze "Soubor '%-.64s' mus-Bí být v adresáøi databáze nebo èitelný pro v¹echny" + dan "Filen '%-.64s' skal være i database-folderen og kunne læses af alle" + nla "Het bestand '%-.64s' dient in de database directory voor the komen of leesbaar voor iedereen te zijn." 
+ eng "The file '%-.64s' must be in the database directory or be readable by all" + est "Fail '%-.64s' peab asuma andmebaasi kataloogis või olema kõigile loetav" + fre "Le fichier '%-.64s' doit être dans le répertoire de la base et lisible par tous" + ger "Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden und lesbar für alle sein" + greek "Ôï áñ÷åßï '%-.64s' ðñÝðåé íá õðÜñ÷åé óôï database directory Þ íá ìðïñåß íá äéáâáóôåß áðü üëïõò" + hun "A(z) '%-.64s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak" + ita "Il file '%-.64s' deve essere nella directory del database e deve essere leggibile da tutti" + jpn "¥Õ¥¡¥¤¥ë '%-.64s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬ÆÉ¤á¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó." + kor "'%-.64s' ÈÀÏ´Â µ¥ÀÌŸº£À̽º µð·ºÅ丮¿¡ Á¸ÀçÇϰųª ¸ðµÎ¿¡°Ô Àб⠰¡´ÉÇÏ¿©¾ß ÇÕ´Ï´Ù." + nor "Filen '%-.64s' må være i database-katalogen for å være lesbar for alle" + norwegian-ny "Filen '%-.64s' må være i database-katalogen for å være lesbar for alle" + pol "Plik '%-.64s' musi znajdowaæ sie w katalogu bazy danych lub mieæ prawa czytania przez wszystkich" + por "Arquivo '%-.64s' tem que estar no diretório do banco de dados ou ter leitura possível para todos" + rum "Fisierul '%-.64s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)" + rus "æÁÊÌ '%-.64s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ" + serbian "File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuæa prava pristupa" + slo "Súbor '%-.64s' musí by» v adresári databázy, alebo èitateµný pre v¹etkých" + spa "El archivo '%-.64s' debe estar en el directorio de la base de datos o ser de lectura por todos" + swe "Textfilen '%.64s' måste finnas i databasbiblioteket eller vara läsbar för alla" + ukr "æÁÊÌ '%-.64s' ÐÏ×ÉÎÅÎ ÂÕÔÉ Õ ÔÅæ ÂÁÚÉ ÄÁÎÎÉÈ ÁÂÏ ÍÁÔÉ ×ÓÔÁÎÏ×ÌÅÎÅ ÐÒÁ×Ï ÎÁ ÞÉÔÁÎÎÑ ÄÌÑ ÕÓ¦È" +ER_FILE_EXISTS_ERROR + cze "Soubor '%-.64s' ji-B¾ existuje" + dan "Filen '%-.64s' eksisterer allerede" + nla "Het bestand '%-.64s' bestaat reeds" + eng "File '%-.80s' already exists" + est "Fail '%-.80s' juba eksisteerib" + fre "Le fichier '%-.64s' existe déjà" + ger "Datei '%-.64s' bereits vorhanden" + greek "Ôï áñ÷åßï '%-.64s' õðÜñ÷åé Þäç" + hun "A '%-.64s' file mar letezik." + ita "Il file '%-.64s' esiste gia`" + jpn "File '%-.64s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹" + kor "'%-.64s' ÈÀÏÀº ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù." 
+ nor "Filen '%-.64s' eksisterte allerede" + norwegian-ny "Filen '%-.64s' eksisterte allereide" + pol "Plik '%-.64s' ju¿ istnieje" + por "Arquivo '%-.80s' já existe" + rum "Fisierul '%-.80s' exista deja" + rus "æÁÊÌ '%-.80s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "File '%-.80s' veæ postoji" + slo "Súbor '%-.64s' u¾ existuje" + spa "El archivo '%-.64s' ya existe" + swe "Filen '%-.64s' existerar redan" + ukr "æÁÊÌ '%-.80s' ×ÖÅ ¦ÓÎÕ¤" +ER_LOAD_INFO + cze "Z-Báznamù: %ld Vymazáno: %ld Pøeskoèeno: %ld Varování: %ld" + dan "Poster: %ld Fjernet: %ld Sprunget over: %ld Advarsler: %ld" + nla "Records: %ld Verwijderd: %ld Overgeslagen: %ld Waarschuwingen: %ld" + eng "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld" + est "Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld" + fre "Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld" + ger "Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld" + greek "ÅããñáöÝò: %ld ÄéáãñáöÝò: %ld ÐáñåêÜìöèçóáí: %ld ÐñïåéäïðïéÞóåéò: %ld" + hun "Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld" + ita "Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld" + jpn "¥ì¥³¡¼¥É¿ô: %ld ºï½ü: %ld Skipped: %ld Warnings: %ld" + kor "·¹ÄÚµå: %ld°³ »èÁ¦: %ld°³ ½ºÅµ: %ld°³ °æ°í: %ld°³" + nor "Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld" + norwegian-ny "Poster: %ld Fjerna: %ld Hoppa over: %ld Åtvaringar: %ld" + pol "Recordów: %ld Usuniêtych: %ld Pominiêtych: %ld Ostrze¿eñ: %ld" + por "Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld" + rum "Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld" + rus "úÁÐÉÓÅÊ: %ld õÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld" + serbian "Slogova: %ld Izbrisano: %ld Preskoèeno: %ld Upozorenja: %ld" + slo "Záznamov: %ld Zmazaných: %ld Preskoèených: %ld Varovania: %ld" + spa "Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld" + swe "Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld" + ukr "úÁÐÉÓ¦×: %ld ÷ÉÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld" +ER_ALTER_INFO + cze "Z-Báznamù: %ld Zdvojených: %ld" + dan "Poster: %ld Ens: %ld" + nla "Records: %ld Dubbel: %ld" + eng "Records: %ld Duplicates: %ld" + est "Kirjeid: %ld Kattuvaid: %ld" + fre "Enregistrements: %ld Doublons: %ld" + ger "Datensätze: %ld Duplikate: %ld" + greek "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld" + hun "Rekordok: %ld Duplikalva: %ld" + ita "Records: %ld Duplicati: %ld" + jpn "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£: %ld" + kor "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³" + nor "Poster: %ld Like: %ld" + norwegian-ny "Poster: %ld Like: %ld" + pol "Rekordów: %ld Duplikatów: %ld" + por "Registros: %ld - Duplicados: %ld" + rum "Recorduri: %ld Duplicate: %ld" + rus "úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld" + serbian "Slogova: %ld Duplikata: %ld" + slo "Záznamov: %ld Opakovaných: %ld" + spa "Registros: %ld Duplicados: %ld" + swe "Rader: %ld Dubletter: %ld" + ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld" +ER_WRONG_SUB_KEY + cze "Chybn-Bá podèást klíèe -- není to øetìzec nebo je del¹í ne¾ délka èásti klíèe" + dan "Forkert indeksdel. Den anvendte nøgledel er ikke en streng eller længden er større end nøglelængden" + nla "Foutief sub-gedeelte van de zoeksleutel. De gebruikte zoeksleutel is geen onderdeel van een string of of de gebruikte lengte is langer dan de zoeksleutel" + eng "Incorrect sub part key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique sub keys" + est "Vigane võtme osa. 
Kasutatud võtmeosa ei ole string tüüpi, määratud pikkus on pikem kui võtmeosa või tabelihandler ei toeta seda tüüpi võtmeid" + fre "Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dépasse celle définie dans la clef" + ger "Falscher Unterteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder der Tabellenhandler unterstützt keine Unterteilschlüssel" + greek "ÅóöáëìÝíï sub part key. Ôï ÷ñçóéìïðïéïýìåíï key part äåí åßíáé string Þ ôï ìÞêïò ôïõ åßíáé ìåãáëýôåñï" + hun "Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz" + ita "Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave." + jpn "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part" + kor "ºÎÁ¤È®ÇÑ ¼¹ö ÆÄÆ® Ű. »ç¿ëµÈ Ű ÆÄÆ®°¡ ½ºÆ®¸µÀÌ ¾Æ´Ï°Å³ª Ű ÆÄÆ®ÀÇ ±æÀ̰¡ ³Ê¹« ±é´Ï´Ù." + nor "Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden" + norwegian-ny "Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden" + pol "B³êdna podczê?æ klucza. U¿yta czê?æ klucza nie jest ³añcuchem lub u¿yta d³ugo?æ jest wiêksza ni¿ czê?æ klucza" + por "Sub parte da chave incorreta. A parte da chave usada não é uma 'string' ou o comprimento usado é maior que parte da chave ou o manipulador de tabelas não suporta sub chaves únicas" + rum "Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii" + rus "îÅËÏÒÒÅËÔÎÁÑ ÞÁÓÔØ ËÌÀÞÁ. éÓÐÏÌØÚÕÅÍÁÑ ÞÁÓÔØ ËÌÀÞÁ ÎÅ Ñ×ÌÑÅÔÓÑ ÓÔÒÏËÏÊ, ÕËÁÚÁÎÎÁÑ ÄÌÉÎÁ ÂÏÌØÛÅ, ÞÅÍ ÄÌÉÎÁ ÞÁÓÔÉ ËÌÀÞÁ, ÉÌÉ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÕÎÉËÁÌØÎÙÅ ÞÁÓÔÉ ËÌÀÞÁ" + serbian "Pogrešan pod-kljuè dela kljuèa. Upotrebljeni deo kljuèa nije string, upotrebljena dužina je veæa od dela kljuèa ili handler tabela ne podržava jedinstvene pod-kljuèeve" + slo "Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part" + spa "Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave" + swe "Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden" + ukr "îÅצÒÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ. ÷ÉËÏÒÉÓÔÁÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ ÎÅ ¤ ÓÔÒÏËÏÀ, ÚÁÄÏ×ÇÁ ÁÂÏ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ÕΦËÁÌØÎÉÈ ÞÁÓÔÉÎ ËÌÀÞÅÊ" +ER_CANT_REMOVE_ALL_FIELDS 42000 + cze "Nen-Bí mo¾né vymazat v¹echny polo¾ky s ALTER TABLE. Pou¾ijte DROP TABLE" + dan "Man kan ikke slette alle felter med ALTER TABLE. Brug DROP TABLE i stedet." + nla "Het is niet mogelijk alle velden te verwijderen met ALTER TABLE. Gebruik a.u.b. DROP TABLE hiervoor!" + eng "You can't delete all columns with ALTER TABLE; use DROP TABLE instead" + est "ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil" + fre "Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE" + ger "Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden" + greek "Äåí åßíáé äõíáôÞ ç äéáãñáöÞ üëùí ôùí ðåäßùí ìå ALTER TABLE. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå DROP TABLE" + hun "Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette" + ita "Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE" + jpn "ALTER TABLE ¤ÇÁ´¤Æ¤Î column ¤Ïºï½ü¤Ç¤¤Þ¤»¤ó. 
DROP TABLE ¤ò»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤" + kor "ALTER TABLE ¸í·ÉÀ¸·Î´Â ¸ðµç Ä®·³À» Áö¿ï ¼ö ¾ø½À´Ï´Ù. DROP TABLE ¸í·ÉÀ» ÀÌ¿ëÇϼ¼¿ä." + nor "En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden." + norwegian-ny "Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor." + pol "Nie mo¿na usun?æ wszystkich pól wykorzystuj?c ALTER TABLE. W zamian u¿yj DROP TABLE" + por "Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar" + rum "Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb" + rus "îÅÌØÚÑ ÕÄÁÌÉÔØ ×ÓÅ ÓÔÏÌÂÃÙ Ó ÐÏÍÏÝØÀ ALTER TABLE. éÓÐÏÌØÚÕÊÔÅ DROP TABLE" + serbian "Ne možete da izbrišete sve kolone pomoæu komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako želite to da uradite" + slo "One nemô¾em zmaza» all fields with ALTER TABLE; use DROP TABLE instead" + spa "No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo" + swe "Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället" + ukr "îÅ ÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ×Ó¦ ÓÔÏ×Âæ ÚÁ ÄÏÐÏÍÏÇÏÀ ALTER TABLE. äÌÑ ÃØÏÇÏ ÓËÏÒÉÓÔÁÊÔÅÓÑ DROP TABLE" +ER_CANT_DROP_FIELD_OR_KEY 42000 + cze "Nemohu zru-B¹it '%-.64s' (provést DROP). Zkontrolujte, zda neexistují záznamy/klíèe" + dan "Kan ikke udføre DROP '%-.64s'. Undersøg om feltet/nøglen eksisterer." + nla "Kan '%-.64s' niet weggooien. Controleer of het veld of de zoeksleutel daadwerkelijk bestaat." + eng "Can't DROP '%-.64s'; check that column/key exists" + est "Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/võti eksisteerib" + fre "Ne peut effacer (DROP) '%-.64s'. Vérifiez s'il existe" + ger "Kann '%-.64s' nicht löschen. Existiert das Feld / der Schlüssel?" + greek "Áäýíáôç ç äéáãñáöÞ (DROP) '%-.64s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé" + hun "A DROP '%-.64s' nem lehetseges. Ellenorizze, hogy a mezo/kulcs letezik-e" + ita "Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista" + jpn "'%-.64s' ¤òÇË´þ¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists" + kor "'%-.64s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. Ä®·³À̳ª ۰¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä." + nor "Kan ikke DROP '%-.64s'. Undersøk om felt/nøkkel eksisterer." + norwegian-ny "Kan ikkje DROP '%-.64s'. Undersøk om felt/nøkkel eksisterar." + pol "Nie mo¿na wykonaæ operacji DROP '%-.64s'. Sprawd¥, czy to pole/klucz istnieje" + por "Não se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe" + rum "Nu pot sa DROP '%-.64s'. Verifica daca coloana/cheia exista" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ (DROP) '%-.64s'. õÂÅÄÉÔÅÓØ ÞÔÏ ÓÔÏÌÂÅÃ/ËÌÀÞ ÄÅÊÓÔ×ÉÔÅÌØÎÏ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Ne mogu da izvršim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno kljuè) postoji" + slo "Nemô¾em zru¹i» (DROP) '%-.64s'. Skontrolujte, èi neexistujú záznamy/kµúèe" + spa "No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe" + swe "Kan inte ta bort '%-.64s'. Kontrollera att fältet/nyckel finns" + ukr "îÅ ÍÏÖÕ DROP '%-.64s'. 
ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤" +ER_INSERT_INFO + cze "Z-Báznamù: %ld Zdvojených: %ld Varování: %ld" + dan "Poster: %ld Ens: %ld Advarsler: %ld" + nla "Records: %ld Dubbel: %ld Waarschuwing: %ld" + eng "Records: %ld Duplicates: %ld Warnings: %ld" + est "Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld" + fre "Enregistrements: %ld Doublons: %ld Avertissements: %ld" + ger "Datensätze: %ld Duplikate: %ld Warnungen: %ld" + greek "ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld ÐñïåéäïðïéÞóåéò: %ld" + hun "Rekordok: %ld Duplikalva: %ld Warnings: %ld" + ita "Records: %ld Duplicati: %ld Avvertimenti: %ld" + jpn "¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£¿ô: %ld Warnings: %ld" + kor "·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³ °æ°í: %ld°³" + nor "Poster: %ld Like: %ld Advarsler: %ld" + norwegian-ny "Postar: %ld Like: %ld Åtvaringar: %ld" + pol "Rekordów: %ld Duplikatów: %ld Ostrze¿eñ: %ld" + por "Registros: %ld - Duplicados: %ld - Avisos: %ld" + rum "Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld" + rus "úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld" + serbian "Slogova: %ld Duplikata: %ld Upozorenja: %ld" + slo "Záznamov: %ld Opakovaných: %ld Varovania: %ld" + spa "Registros: %ld Duplicados: %ld Peligros: %ld" + swe "Rader: %ld Dubletter: %ld Varningar: %ld" + ukr "úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld úÁÓÔÅÒÅÖÅÎØ: %ld" +ER_UPDATE_TABLE_USED + eng "You can't specify target table '%-.64s' for update in FROM clause" + ger "Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulässig." + rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ ÕËÁÚÁÎÉÅ ÔÁÂÌÉÃÙ '%-.64s' × ÓÐÉÓËÅ ÔÁÂÌÉà FROM ÄÌÑ ×ÎÅÓÅÎÉÑ × ÎÅÅ ÉÚÍÅÎÅÎÉÊ" + swe "INSERT-table '%-.64s' får inte finnas i FROM tabell-listan" + ukr "ôÁÂÌÉÃÑ '%-.64s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM" +ER_NO_SUCH_THREAD + cze "Nezn-Bámá identifikace threadu: %lu" + dan "Ukendt tråd id: %lu" + nla "Onbekend thread id: %lu" + eng "Unknown thread id: %lu" + est "Tundmatu lõim: %lu" + fre "Numéro de tâche inconnu: %lu" + ger "Unbekannte Thread-ID: %lu" + greek "Áãíùóôï thread id: %lu" + hun "Ervenytelen szal (thread) id: %lu" + ita "Thread id: %lu sconosciuto" + jpn "thread id: %lu ¤Ï¤¢¤ê¤Þ¤»¤ó" + kor "¾Ë¼ö ¾ø´Â ¾²·¹µå id: %lu" + nor "Ukjent tråd id: %lu" + norwegian-ny "Ukjent tråd id: %lu" + pol "Nieznany identyfikator w?tku: %lu" + por "'Id' de 'thread' %lu desconhecido" + rum "Id-ul: %lu thread-ului este necunoscut" + rus "îÅÉÚ×ÅÓÔÎÙÊ ÎÏÍÅÒ ÐÏÔÏËÁ: %lu" + serbian "Nepoznat thread identifikator: %lu" + slo "Neznáma identifikácia vlákna: %lu" + spa "Identificador del thread: %lu desconocido" + swe "Finns ingen tråd med id %lu" + ukr "îÅצÄÏÍÉÊ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒ Ç¦ÌËÉ: %lu" +ER_KILL_DENIED_ERROR + cze "Nejste vlastn-Bíkem threadu %lu" + dan "Du er ikke ejer af tråden %lu" + nla "U bent geen bezitter van thread %lu" + eng "You are not owner of thread %lu" + est "Ei ole lõime %lu omanik" + fre "Vous n'êtes pas propriétaire de la tâche no: %lu" + ger "Sie sind nicht Eigentümer von Thread %lu" + greek "Äåí åßóèå owner ôïõ thread %lu" + hun "A %lu thread-nek mas a tulajdonosa" + ita "Utente non proprietario del thread %lu" + jpn "thread %lu ¤Î¥ª¡¼¥Ê¡¼¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó" + kor "¾²·¹µå(Thread) %luÀÇ ¼ÒÀ¯ÀÚ°¡ ¾Æ´Õ´Ï´Ù." + nor "Du er ikke eier av tråden %lu" + norwegian-ny "Du er ikkje eigar av tråd %lu" + pol "Nie jeste? 
w³a?cicielem w?tku %lu" + por "Você não é proprietário da 'thread' %lu" + rum "Nu sinteti proprietarul threadului %lu" + rus "÷Ù ÎÅ Ñ×ÌÑÅÔÅÓØ ×ÌÁÄÅÌØÃÅÍ ÐÏÔÏËÁ %lu" + serbian "Vi niste vlasnik thread-a %lu" + slo "Nie ste vlastníkom vlákna %lu" + spa "Tu no eres el propietario del thread%lu" + swe "Du är inte ägare till tråd %lu" + ukr "÷É ÎÅ ×ÏÌÏÄÁÒ Ç¦ÌËÉ %lu" +ER_NO_TABLES_USED + cze "Nejsou pou-B¾ity ¾ádné tabulky" + dan "Ingen tabeller i brug" + nla "Geen tabellen gebruikt." + eng "No tables used" + est "Ühtegi tabelit pole kasutusel" + fre "Aucune table utilisée" + ger "Keine Tabellen verwendet" + greek "Äåí ÷ñçóéìïðïéÞèçêáí ðßíáêåò" + hun "Nincs hasznalt tabla" + ita "Nessuna tabella usata" + kor "¾î¶² Å×ÀÌºíµµ »ç¿ëµÇÁö ¾Ê¾Ò½À´Ï´Ù." + nor "Ingen tabeller i bruk" + norwegian-ny "Ingen tabellar i bruk" + pol "Nie ma ¿adej u¿ytej tabeli" + por "Nenhuma tabela usada" + rum "Nici o tabela folosita" + rus "îÉËÁËÉÅ ÔÁÂÌÉÃÙ ÎÅ ÉÓÐÏÌØÚÏ×ÁÎÙ" + serbian "Nema upotrebljenih tabela" + slo "Nie je pou¾itá ¾iadna tabuµka" + spa "No ha tablas usadas" + swe "Inga tabeller angivna" + ukr "îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ" +ER_TOO_BIG_SET + cze "P-Bøíli¹ mnoho øetìzcù pro sloupec %s a SET" + dan "For mange tekststrenge til specifikationen af SET i kolonne %-.64s" + nla "Teveel strings voor kolom %s en SET" + eng "Too many strings for column %-.64s and SET" + est "Liiga palju string tulbale %-.64s tüübile SET" + fre "Trop de chaînes dans la colonne %s avec SET" + ger "Zu viele Strings für SET-Spalte %-.64s angegeben" + greek "ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.64s êáé SET" + hun "Tul sok karakter: %-.64s es SET" + ita "Troppe stringhe per la colonna %-.64s e la SET" + kor "Ä®·³ %-.64s¿Í SET¿¡¼ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù." + nor "For mange tekststrenger kolonne %s og SET" + norwegian-ny "For mange tekststrengar felt %s og SET" + pol "Zbyt wiele ³añcuchów dla kolumny %s i polecenia SET" + por "'Strings' demais para coluna '%-.64s' e SET" + rum "Prea multe siruri pentru coloana %-.64s si SET" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.64s × SET" + serbian "Previše string-ova za kolonu '%-.64s' i komandu 'SET'" + slo "Príli¹ mnoho re»azcov pre pole %-.64s a SET" + spa "Muchas strings para columna %s y SET" + swe "För många alternativ till kolumn %s för SET" + ukr "úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.64s ÔÁ SET" +ER_NO_UNIQUE_LOGFILE + cze "Nemohu vytvo-Bøit jednoznaèné jméno logovacího souboru %s.(1-999)\n" + dan "Kan ikke lave unikt log-filnavn %s.(1-999)\n" + nla "Het is niet mogelijk een unieke naam te maken voor de logfile %s.(1-999)\n" + eng "Can't generate a unique log-filename %-.64s.(1-999)\n" + est "Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n" + fre "Ne peut générer un unique nom de journal %s.(1-999)\n" + ger "Kann keinen eindeutigen Dateinamen für die Logdatei %-.64s erzeugen (1-999)\n" + greek "Áäýíáôç ç äçìéïõñãßá unique log-filename %-.64s.(1-999)\n" + hun "Egyedi log-filenev nem generalhato: %-.64s.(1-999)\n" + ita "Impossibile generare un nome del file log unico %-.64s.(1-999)\n" + kor "Unique ·Î±×ÈÀÏ '%-.64s'¸¦ ¸¸µé¼ö ¾ø½À´Ï´Ù.(1-999)\n" + nor "Kan ikke lage unikt loggfilnavn %s.(1-999)\n" + norwegian-ny "Kan ikkje lage unikt loggfilnavn %s.(1-999)\n" + pol "Nie mo¿na stworzyæ unikalnej nazwy pliku z logiem %s.(1-999)\n" + por "Não pode gerar um nome de arquivo de 'log' único '%-.64s'.(1-999)\n" + rum "Nu pot sa generez un nume de log unic %-.64s.(1-999)\n" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÕÎÉËÁÌØÎÏÅ ÉÍÑ ÆÁÊÌÁ ÖÕÒÎÁÌÁ %-.64s.(1-999)\n" + serbian "Ne mogu da generišem jedinstveno ime log-file-a: 
'%-.64s.(1-999)'\n" + slo "Nemô¾em vytvori» unikátne meno log-súboru %-.64s.(1-999)\n" + spa "No puede crear un unico archivo log %s.(1-999)\n" + swe "Kan inte generera ett unikt filnamn %s.(1-999)\n" + ukr "îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.64s.(1-999)\n" +ER_TABLE_NOT_LOCKED_FOR_WRITE + cze "Tabulka '%-.64s' byla zam-Bèena s READ a nemù¾e být zmìnìna" + dan "Tabellen '%-.64s' var låst med READ lås og kan ikke opdateres" + nla "Tabel '%-.64s' was gelocked met een lock om te lezen. Derhalve kunnen geen wijzigingen worden opgeslagen." + eng "Table '%-.64s' was locked with a READ lock and can't be updated" + est "Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav" + fre "Table '%-.64s' verrouillée lecture (READ): modification impossible" + ger "Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden" + greek "Ï ðßíáêáò '%-.64s' Ý÷åé êëåéäùèåß ìå READ lock êáé äåí åðéôñÝðïíôáé áëëáãÝò" + hun "A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni" + ita "La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata" + jpn "Table '%-.64s' ¤Ï READ lock ¤Ë¤Ê¤Ã¤Æ¤¤¤Æ¡¢¹¹¿·¤Ï¤Ç¤¤Þ¤»¤ó" + kor "Å×À̺í '%-.64s'´Â READ ¶ôÀÌ Àá°ÜÀÖ¾î¼ °»½ÅÇÒ ¼ö ¾ø½À´Ï´Ù." + nor "Tabellen '%-.64s' var låst med READ lås og kan ikke oppdateres" + norwegian-ny "Tabellen '%-.64s' var låst med READ lås og kan ikkje oppdaterast" + pol "Tabela '%-.64s' zosta³a zablokowana przez READ i nie mo¿e zostaæ zaktualizowana" + por "Tabela '%-.64s' foi travada com trava de leitura e não pode ser atualizada" + rum "Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata" + rus "ôÁÂÌÉÃÁ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎÁ ÕÒÏ×ÎÅÍ READ lock É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ" + serbian "Tabela '%-.64s' je zakljuèana READ lock-om; iz nje se može samo èitati ali u nju se ne može pisati" + slo "Tabuµka '%-.64s' bola zamknutá s READ a nemô¾e by» zmenená" + spa "Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada" + swe "Tabell '%-.64s' kan inte uppdateras emedan den är låst för läsning" + ukr "ôÁÂÌÉÃÀ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ, ÔÏÍÕ §§ ÎÅ ÍÏÖÎÁ ÏÎÏ×ÉÔÉ" +ER_TABLE_NOT_LOCKED + cze "Tabulka '%-.64s' nebyla zam-Bèena s LOCK TABLES" + dan "Tabellen '%-.64s' var ikke låst med LOCK TABLES" + nla "Tabel '%-.64s' was niet gelocked met LOCK TABLES" + eng "Table '%-.64s' was not locked with LOCK TABLES" + est "Tabel '%-.64s' ei ole lukustatud käsuga LOCK TABLES" + fre "Table '%-.64s' non verrouillée: utilisez LOCK TABLES" + ger "Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt" + greek "Ï ðßíáêáò '%-.64s' äåí Ý÷åé êëåéäùèåß ìå LOCK TABLES" + hun "A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel" + ita "Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES" + jpn "Table '%-.64s' ¤Ï LOCK TABLES ¤Ë¤è¤Ã¤Æ¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "Å×À̺í '%-.64s'´Â LOCK TABLES ¸í·ÉÀ¸·Î Àá±âÁö ¾Ê¾Ò½À´Ï´Ù." 
+ nor "Tabellen '%-.64s' var ikke låst med LOCK TABLES" + norwegian-ny "Tabellen '%-.64s' var ikkje låst med LOCK TABLES" + pol "Tabela '%-.64s' nie zosta³a zablokowana poleceniem LOCK TABLES" + por "Tabela '%-.64s' não foi travada com LOCK TABLES" + rum "Tabela '%-.64s' nu a fost locked cu LOCK TABLES" + rus "ôÁÂÌÉÃÁ '%-.64s' ÎÅ ÂÙÌÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES" + serbian "Tabela '%-.64s' nije bila zakljuèana komandom 'LOCK TABLES'" + slo "Tabuµka '%-.64s' nebola zamknutá s LOCK TABLES" + spa "Tabla '%-.64s' no fue trabada con LOCK TABLES" + swe "Tabell '%-.64s' är inte låst med LOCK TABLES" + ukr "ôÁÂÌÉÃÀ '%-.64s' ÎÅ ÂÕÌÏ ÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES" +ER_BLOB_CANT_HAVE_DEFAULT 42000 + cze "Blob polo-B¾ka '%-.64s' nemù¾e mít defaultní hodnotu" + dan "BLOB feltet '%-.64s' kan ikke have en standard værdi" + nla "Blob veld '%-.64s' can geen standaardwaarde bevatten" + eng "BLOB/TEXT column '%-.64s' can't have a default value" + est "BLOB-tüüpi tulp '%-.64s' ei saa omada vaikeväärtust" + fre "BLOB '%-.64s' ne peut avoir de valeur par défaut" + ger "BLOB-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben" + greek "Ôá Blob ðåäßá '%-.64s' äåí ìðïñïýí íá Ý÷ïõí ðñïêáèïñéóìÝíåò ôéìÝò (default value)" + hun "A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke" + ita "Il campo BLOB '%-.64s' non puo` avere un valore di default" + jpn "BLOB column '%-.64s' can't have a default value" + kor "BLOB Ä®·³ '%-.64s' ´Â µðÆúÆ® °ªÀ» °¡Áú ¼ö ¾ø½À´Ï´Ù." + nor "Blob feltet '%-.64s' kan ikke ha en standard verdi" + norwegian-ny "Blob feltet '%-.64s' kan ikkje ha ein standard verdi" + pol "Pole typu blob '%-.64s' nie mo¿e mieæ domy?lnej warto?ci" + por "Coluna BLOB '%-.64s' não pode ter um valor padrão (default)" + rum "Coloana BLOB '%-.64s' nu poate avea o valoare default" + rus "îÅ×ÏÚÍÏÖÎÏ ÕËÁÚÙ×ÁÔØ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ ÓÔÏÌÂÃÁ BLOB '%-.64s'" + serbian "BLOB kolona '%-.64s' ne može imati default vrednost" + slo "Pole BLOB '%-.64s' nemô¾e ma» implicitnú hodnotu" + spa "Campo Blob '%-.64s' no puede tener valores patron" + swe "BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde" + ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ" +ER_WRONG_DB_NAME 42000 + cze "Nep-Bøípustné jméno databáze '%-.64s'" + dan "Ugyldigt database navn '%-.64s'" + nla "Databasenaam '%-.64s' is niet getoegestaan" + eng "Incorrect database name '%-.100s'" + est "Vigane andmebaasi nimi '%-.100s'" + fre "Nom de base de donnée illégal: '%-.64s'" + ger "Unerlaubter Datenbankname '%-.64s'" + greek "ËÜèïò üíïìá âÜóçò äåäïìÝíùí '%-.100s'" + hun "Hibas adatbazisnev: '%-.100s'" + ita "Nome database errato '%-.100s'" + jpn "»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹" + kor "'%-.100s' µ¥ÀÌŸº£À̽ºÀÇ À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù." 
+ nor "Ugyldig database navn '%-.64s'" + norwegian-ny "Ugyldig database namn '%-.64s'" + pol "Niedozwolona nazwa bazy danych '%-.64s'" + por "Nome de banco de dados '%-.100s' incorreto" + rum "Numele bazei de date este incorect '%-.100s'" + rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÂÁÚÙ ÄÁÎÎÙÈ '%-.100s'" + serbian "Pogrešno ime baze '%-.100s'" + slo "Neprípustné meno databázy '%-.100s'" + spa "Nombre de base de datos ilegal '%-.64s'" + swe "Felaktigt databasnamn '%-.64s'" + ukr "îÅצÒÎÅ ¦Í'Ñ ÂÁÚÉ ÄÁÎÎÉÈ '%-.100s'" +ER_WRONG_TABLE_NAME 42000 + cze "Nep-Bøípustné jméno tabulky '%-.64s'" + dan "Ugyldigt tabel navn '%-.64s'" + nla "Niet toegestane tabelnaam '%-.64s'" + eng "Incorrect table name '%-.100s'" + est "Vigane tabeli nimi '%-.100s'" + fre "Nom de table illégal: '%-.64s'" + ger "Unerlaubter Tabellenname '%-.64s'" + greek "ËÜèïò üíïìá ðßíáêá '%-.100s'" + hun "Hibas tablanev: '%-.100s'" + ita "Nome tabella errato '%-.100s'" + jpn "»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹" + kor "'%-.100s' Å×À̺í À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù." + nor "Ugyldig tabell navn '%-.64s'" + norwegian-ny "Ugyldig tabell namn '%-.64s'" + pol "Niedozwolona nazwa tabeli '%-.64s'..." + por "Nome de tabela '%-.100s' incorreto" + rum "Numele tabelei este incorect '%-.100s'" + rus "îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÔÁÂÌÉÃÙ '%-.100s'" + serbian "Pogrešno ime tabele '%-.100s'" + slo "Neprípustné meno tabuµky '%-.100s'" + spa "Nombre de tabla ilegal '%-.64s'" + swe "Felaktigt tabellnamn '%-.64s'" + ukr "îÅצÒÎÅ ¦Í'Ñ ÔÁÂÌÉæ '%-.100s'" +ER_TOO_BIG_SELECT 42000 + cze "Zadan-Bý SELECT by procházel pøíli¹ mnoho záznamù a trval velmi dlouho. Zkontrolujte tvar WHERE a je-li SELECT v poøádku, pou¾ijte SET SQL_BIG_SELECTS=1" + dan "SELECT ville undersøge for mange poster og ville sandsynligvis tage meget lang tid. Undersøg WHERE delen og brug SET SQL_BIG_SELECTS=1 hvis udtrykket er korrekt" + nla "Het SELECT-statement zou te veel records analyseren en dus veel tijd in beslagnemen. Kijk het WHERE-gedeelte van de query na en kies SET SQL_BIG_SELECTS=1 als het stament in orde is." + eng "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay" + est "SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1" + fre "SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien" + ger "Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen oder gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden" + greek "Ôï SELECT èá åîåôÜóåé ìåãÜëï áñéèìü åããñáöþí êáé ðéèáíþò èá êáèõóôåñÞóåé. Ðáñáêáëþ åîåôÜóôå ôéò ðáñáìÝôñïõò ôïõ WHERE êáé ÷ñçóéìïðïéåßóôå SET SQL_BIG_SELECTS=1 áí ôï SELECT åßíáé óùóôü" + hun "A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay" + ita "La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto." + kor "SELECT ¸í·É¿¡¼ ³Ê¹« ¸¹Àº ·¹Äڵ带 ã±â ¶§¹®¿¡ ¸¹Àº ½Ã°£ÀÌ ¼Ò¿äµË´Ï´Ù. µû¶ó¼ WHERE ¹®À» Á¡°ËÇϰųª, ¸¸¾à SELECT°¡ okµÇ¸é SET SQL_BIG_SELECTS=1 ¿É¼ÇÀ» »ç¿ëÇϼ¼¿ä." + nor "SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. 
Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" + norwegian-ny "SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt" + pol "Operacja SELECT bêdzie dotyczy³a zbyt wielu rekordów i prawdopodobnie zajmie bardzo du¿o czasu. Sprawd¥ warunek WHERE i u¿yj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna" + por "O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cláusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto" + rum "SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay" + rus "äÌÑ ÔÁËÏÊ ×ÙÂÏÒËÉ SELECT ÄÏÌÖÅÎ ÂÕÄÅÔ ÐÒÏÓÍÏÔÒÅÔØ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÚÁÐÉÓÅÊ É, ×ÉÄÉÍÏ, ÜÔÏ ÚÁÊÍÅÔ ÏÞÅÎØ ÍÎÏÇÏ ×ÒÅÍÅÎÉ. ðÒÏ×ÅÒØÔÅ ×ÁÛÅ ÕËÁÚÁÎÉÅ WHERE, É, ÅÓÌÉ × ÎÅÍ ×ÓÅ × ÐÏÒÑÄËÅ, ÕËÁÖÉÔÅ SET SQL_BIG_SELECTS=1" + serbian "Komanda 'SELECT' æe ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu" + slo "Zadaná po¾iadavka SELECT by prechádzala príli¹ mnoho záznamov a trvala by príli¹ dlho. Skontrolujte tvar WHERE a ak je v poriadku, pou¾ite SET SQL_BIG_SELECTS=1" + spa "El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto" + swe "Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins" + ukr "úÁÐÉÔÕ SELECT ÐÏÔÒ¦ÂÎÏ ÏÂÒÏÂÉÔÉ ÂÁÇÁÔÏ ÚÁÐÉÓ¦×, ÝÏ, ÐÅ×ÎÅ, ÚÁÊÍÅ ÄÕÖÅ ÂÁÇÁÔÏ ÞÁÓÕ. ðÅÒÅצÒÔÅ ×ÁÛÅ WHERE ÔÁ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ SET SQL_BIG_SELECTS=1, ÑËÝÏ ÃÅÊ ÚÁÐÉÔ SELECT ¤ צÒÎÉÍ" +ER_UNKNOWN_ERROR + cze "Nezn-Bámá chyba" + dan "Ukendt fejl" + nla "Onbekende Fout" + eng "Unknown error" + est "Tundmatu viga" + fre "Erreur inconnue" + ger "Unbekannter Fehler" + greek "ÐñïÝêõøå Üãíùóôï ëÜèïò" + hun "Ismeretlen hiba" + ita "Errore sconosciuto" + kor "¾Ë¼ö ¾ø´Â ¿¡·¯ÀÔ´Ï´Ù." 
+ nor "Ukjent feil" + norwegian-ny "Ukjend feil" + por "Erro desconhecido" + rum "Eroare unknown" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÏÛÉÂËÁ" + serbian "Nepoznata greška" + slo "Neznámá chyba" + spa "Error desconocido" + swe "Oidentifierat fel" + ukr "îÅצÄÏÍÁ ÐÏÍÉÌËÁ" +ER_UNKNOWN_PROCEDURE 42000 + cze "Nezn-Bámá procedura %s" + dan "Ukendt procedure %s" + nla "Onbekende procedure %s" + eng "Unknown procedure '%-.64s'" + est "Tundmatu protseduur '%-.64s'" + fre "Procédure %s inconnue" + ger "Unbekannte Prozedur '%-.64s'" + greek "Áãíùóôç äéáäéêáóßá '%-.64s'" + hun "Ismeretlen eljaras: '%-.64s'" + ita "Procedura '%-.64s' sconosciuta" + kor "¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.64s'" + nor "Ukjent prosedyre %s" + norwegian-ny "Ukjend prosedyre %s" + pol "Unkown procedure %s" + por "'Procedure' '%-.64s' desconhecida" + rum "Procedura unknown '%-.64s'" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.64s'" + serbian "Nepoznata procedura '%-.64s'" + slo "Neznámá procedúra '%-.64s'" + spa "Procedimiento desconocido %s" + swe "Okänd procedur: %s" + ukr "îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.64s'" +ER_WRONG_PARAMCOUNT_TO_PROCEDURE 42000 + cze "Chybn-Bý poèet parametrù procedury %s" + dan "Forkert antal parametre til proceduren %s" + nla "Foutief aantal parameters doorgegeven aan procedure %s" + eng "Incorrect parameter count to procedure '%-.64s'" + est "Vale parameetrite hulk protseduurile '%-.64s'" + fre "Mauvais nombre de paramètres pour la procedure %s" + ger "Falsche Parameterzahl für Prozedur '%-.64s'" + greek "ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.64s'" + hun "Rossz parameter a(z) '%-.64s'eljaras szamitasanal" + ita "Numero di parametri errato per la procedura '%-.64s'" + kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆÄ¶ó¸ÞÅÍ" + nor "Feil parameter antall til prosedyren %s" + norwegian-ny "Feil parameter tal til prosedyra %s" + pol "Incorrect parameter count to procedure %s" + por "Número de parâmetros incorreto para a 'procedure' '%-.64s'" + rum "Procedura '%-.64s' are un numar incorect de parametri" + rus "îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'" + serbian "Pogrešan broj parametara za proceduru '%-.64s'" + slo "Chybný poèet parametrov procedúry '%-.64s'" + spa "Equivocado parametro count para procedimiento %s" + swe "Felaktigt antal parametrar till procedur %s" + ukr "èÉÂÎÁ Ë¦ÌØË¦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.64s'" +ER_WRONG_PARAMETERS_TO_PROCEDURE + cze "Chybn-Bé parametry procedury %s" + dan "Forkert(e) parametre til proceduren %s" + nla "Foutieve parameters voor procedure %s" + eng "Incorrect parameters to procedure '%-.64s'" + est "Vigased parameetrid protseduurile '%-.64s'" + fre "Paramètre erroné pour la procedure %s" + ger "Falsche Parameter für Prozedur '%-.64s'" + greek "ËÜèïò ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.64s'" + hun "Rossz parameter a(z) '%-.64s' eljarasban" + ita "Parametri errati per la procedura '%-.64s'" + kor "'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆÄ¶ó¸ÞÅÍ" + nor "Feil parametre til prosedyren %s" + norwegian-ny "Feil parameter til prosedyra %s" + pol "Incorrect parameters to procedure %s" + por "Parâmetros incorretos para a 'procedure' '%-.64s'" + rum "Procedura '%-.64s' are parametrii incorecti" + rus "îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'" + serbian "Pogrešni parametri prosleðeni proceduri '%-.64s'" + slo "Chybné parametre procedúry '%-.64s'" + spa "Equivocados parametros para procedimiento %s" + swe "Felaktiga parametrar till procedur %s" + ukr "èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.64s'" +ER_UNKNOWN_TABLE 42S02 + cze "Nezn-Bámá tabulka '%-.64s' v %s" + dan "Ukendt tabel '%-.64s' i %s" + nla "Onbekende tabel '%-.64s' in %s" + 
eng "Unknown table '%-.64s' in %-.32s" + est "Tundmatu tabel '%-.64s' %-.32s-s" + fre "Table inconnue '%-.64s' dans %s" + ger "Unbekannte Tabelle '%-.64s' in '%-.64s'" + greek "Áãíùóôïò ðßíáêáò '%-.64s' óå %s" + hun "Ismeretlen tabla: '%-.64s' %s-ban" + ita "Tabella '%-.64s' sconosciuta in %s" + jpn "Unknown table '%-.64s' in %s" + kor "¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %s)" + nor "Ukjent tabell '%-.64s' i %s" + norwegian-ny "Ukjend tabell '%-.64s' i %s" + pol "Unknown table '%-.64s' in %s" + por "Tabela '%-.64s' desconhecida em '%-.32s'" + rum "Tabla '%-.64s' invalida in %-.32s" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s' × %-.32s" + serbian "Nepoznata tabela '%-.64s' u '%-.32s'" + slo "Neznáma tabuµka '%-.64s' v %s" + spa "Tabla desconocida '%-.64s' in %s" + swe "Okänd tabell '%-.64s' i '%-.64s'" + ukr "îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s' Õ %-.32s" +ER_FIELD_SPECIFIED_TWICE 42000 + cze "Polo-B¾ka '%-.64s' je zadána dvakrát" + dan "Feltet '%-.64s' er anvendt to gange" + nla "Veld '%-.64s' is dubbel gespecificeerd" + eng "Column '%-.64s' specified twice" + est "Tulp '%-.64s' on määratletud topelt" + fre "Champ '%-.64s' spécifié deux fois" + ger "Feld '%-.64s' wurde zweimal angegeben" + greek "Ôï ðåäßï '%-.64s' Ý÷åé ïñéóèåß äýï öïñÝò" + hun "A(z) '%-.64s' mezot ketszer definialta" + ita "Campo '%-.64s' specificato 2 volte" + kor "Ä®·³ '%-.64s'´Â µÎ¹ø Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù." + nor "Feltet '%-.64s' er spesifisert to ganger" + norwegian-ny "Feltet '%-.64s' er spesifisert to gangar" + pol "Field '%-.64s' specified twice" + por "Coluna '%-.64s' especificada duas vezes" + rum "Coloana '%-.64s' specificata de doua ori" + rus "óÔÏÌÂÅà '%-.64s' ÕËÁÚÁÎ Ä×ÁÖÄÙ" + serbian "Kolona '%-.64s' je navedena dva puta" + slo "Pole '%-.64s' je zadané dvakrát" + spa "Campo '%-.64s' especificado dos veces" + swe "Fält '%-.64s' är redan använt" + ukr "óÔÏ×ÂÅÃØ '%-.64s' ÚÁÚÎÁÞÅÎÏ Äצަ" +ER_INVALID_GROUP_FUNC_USE + cze "Nespr-Bávné pou¾ití funkce group" + dan "Forkert brug af grupperings-funktion" + nla "Ongeldig gebruik van GROUP-functie" + eng "Invalid use of group function" + est "Vigane grupeerimisfunktsiooni kasutus" + fre "Utilisation invalide de la clause GROUP" + ger "Falsche Verwendung einer Gruppierungsfunktion" + greek "ÅóöáëìÝíç ÷ñÞóç ôçò group function" + hun "A group funkcio ervenytelen hasznalata" + ita "Uso non valido di una funzione di raggruppamento" + kor "À߸øµÈ ±×·ì ÇÔ¼ö¸¦ »ç¿ëÇÏ¿´½À´Ï´Ù." + por "Uso inválido de função de agrupamento (GROUP)" + rum "Folosire incorecta a functiei group" + rus "îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÇÒÕÐÐÏ×ÙÈ ÆÕÎËÃÉÊ" + serbian "Pogrešna upotreba 'GROUP' funkcije" + slo "Nesprávne pou¾itie funkcie GROUP" + spa "Invalido uso de función en grupo" + swe "Felaktig användning av SQL grupp function" + ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÆÕÎËæ§ ÇÒÕÐÕ×ÁÎÎÑ" +ER_UNSUPPORTED_EXTENSION 42000 + cze "Tabulka '%-.64s' pou-B¾ívá roz¹íøení, které v této verzi MySQL není" + dan "Tabellen '%-.64s' bruger et filtypenavn som ikke findes i denne MySQL version" + nla "Tabel '%-.64s' gebruikt een extensie, die niet in deze MySQL-versie voorkomt." 
+ eng "Table '%-.64s' uses an extension that doesn't exist in this MySQL version" + est "Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis" + fre "Table '%-.64s' : utilise une extension invalide pour cette version de MySQL" + ger "Tabelle '%-.64s' verwendet eine Extension, die in dieser MySQL-Version nicht verfügbar ist" + greek "Ï ðßíáêò '%-.64s' ÷ñçóéìïðïéåß êÜðïéï extension ðïõ äåí õðÜñ÷åé óôçí Ýêäïóç áõôÞ ôçò MySQL" + hun "A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban." + ita "La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL" + kor "Å×À̺í '%-.64s'´Â È®Àå¸í·ÉÀ» ÀÌ¿ëÇÏÁö¸¸ ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." + nor "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + norwegian-ny "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + pol "Table '%-.64s' uses a extension that doesn't exist in this MySQL version" + por "Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL" + rum "Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL" + rus "÷ ÔÁÂÌÉÃÅ '%-.64s' ÉÓÐÏÌØÚÕÀÔÓÑ ×ÏÚÍÏÖÎÏÓÔÉ, ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÍÙÅ × ÜÔÏÊ ×ÅÒÓÉÉ MySQL" + serbian "Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a" + slo "Tabuµka '%-.64s' pou¾íva roz¹írenie, ktoré v tejto verzii MySQL nie je" + spa "Tabla '%-.64s' usa una extensión que no existe en esta MySQL versión" + swe "Tabell '%-.64s' har en extension som inte finns i denna version av MySQL" + ukr "ôÁÂÌÉÃÑ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ ÒÏÚÛÉÒÅÎÎÑ, ÝÏ ÎÅ ¦ÓÎÕ¤ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL" +ER_TABLE_MUST_HAVE_COLUMNS 42000 + cze "Tabulka mus-Bí mít alespoò jeden sloupec" + dan "En tabel skal have mindst een kolonne" + nla "Een tabel moet minstens 1 kolom bevatten" + eng "A table must have at least 1 column" + est "Tabelis peab olema vähemalt üks tulp" + fre "Une table doit comporter au moins une colonne" + ger "Eine Tabelle muß mindestens 1 Spalte besitzen" + greek "Åíáò ðßíáêáò ðñÝðåé íá Ý÷åé ôïõëÜ÷éóôïí Ýíá ðåäßï" + hun "A tablanak legalabb egy oszlopot tartalmazni kell" + ita "Una tabella deve avere almeno 1 colonna" + jpn "¥Æ¡¼¥Ö¥ë¤ÏºÇÄã 1 ¸Ä¤Î column ¤¬É¬ÍפǤ¹" + kor "ÇϳªÀÇ Å×ÀÌºí¿¡¼´Â Àû¾îµµ ÇϳªÀÇ Ä®·³ÀÌ Á¸ÀçÇÏ¿©¾ß ÇÕ´Ï´Ù." + por "Uma tabela tem que ter pelo menos uma (1) coluna" + rum "O tabela trebuie sa aiba cel putin o coloana" + rus "÷ ÔÁÂÌÉÃÅ ÄÏÌÖÅÎ ÂÙÔØ ËÁË ÍÉÎÉÍÕÍ ÏÄÉÎ ÓÔÏÌÂÅÃ" + serbian "Tabela mora imati najmanje jednu kolonu" + slo "Tabuµka musí ma» aspoò 1 pole" + spa "Una tabla debe tener al menos 1 columna" + swe "Tabeller måste ha minst 1 kolumn" + ukr "ôÁÂÌÉÃÑ ÐÏ×ÉÎÎÁ ÍÁÔÉ ÈÏÞÁ ÏÄÉÎ ÓÔÏ×ÂÅÃØ" +ER_RECORD_FILE_FULL + cze "Tabulka '%-.64s' je pln-Bá" + dan "Tabellen '%-.64s' er fuld" + nla "De tabel '%-.64s' is vol" + eng "The table '%-.64s' is full" + est "Tabel '%-.64s' on täis" + fre "La table '%-.64s' est pleine" + ger "Tabelle '%-.64s' ist voll" + greek "Ï ðßíáêáò '%-.64s' åßíáé ãåìÜôïò" + hun "A '%-.64s' tabla megtelt" + ita "La tabella '%-.64s' e` piena" + jpn "table '%-.64s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹" + kor "Å×À̺í '%-.64s'°¡ full³µ½À´Ï´Ù. 
" + por "Tabela '%-.64s' está cheia" + rum "Tabela '%-.64s' e plina" + rus "ôÁÂÌÉÃÁ '%-.64s' ÐÅÒÅÐÏÌÎÅÎÁ" + serbian "Tabela '%-.64s' je popunjena do kraja" + slo "Tabuµka '%-.64s' je plná" + spa "La tabla '%-.64s' está llena" + swe "Tabellen '%-.64s' är full" + ukr "ôÁÂÌÉÃÑ '%-.64s' ÚÁÐÏ×ÎÅÎÁ" +ER_UNKNOWN_CHARACTER_SET 42000 + cze "Nezn-Bámá znaková sada: '%-.64s'" + dan "Ukendt tegnsæt: '%-.64s'" + nla "Onbekende character set: '%-.64s'" + eng "Unknown character set: '%-.64s'" + est "Vigane kooditabel '%-.64s'" + fre "Jeu de caractères inconnu: '%-.64s'" + ger "Unbekannter Zeichensatz: '%-.64s'" + greek "Áãíùóôï character set: '%-.64s'" + hun "Ervenytelen karakterkeszlet: '%-.64s'" + ita "Set di caratteri '%-.64s' sconosciuto" + jpn "character set '%-.64s' ¤Ï¥µ¥Ý¡¼¥È¤·¤Æ¤¤¤Þ¤»¤ó" + kor "¾Ë¼ö¾ø´Â ¾ð¾î Set: '%-.64s'" + por "Conjunto de caracteres '%-.64s' desconhecido" + rum "Set de caractere invalid: '%-.64s'" + rus "îÅÉÚ×ÅÓÔÎÁÑ ËÏÄÉÒÏ×ËÁ '%-.64s'" + serbian "Nepoznati karakter-set: '%-.64s'" + slo "Neznáma znaková sada: '%-.64s'" + spa "Juego de caracteres desconocido: '%-.64s'" + swe "Okänd teckenuppsättning: '%-.64s'" + ukr "îÅצÄÏÍÁ ËÏÄÏ×Á ÔÁÂÌÉÃÑ: '%-.64s'" +ER_TOO_MANY_TABLES + cze "P-Bøíli¹ mnoho tabulek, MySQL jich mù¾e mít v joinu jen %d" + dan "For mange tabeller. MySQL kan kun bruge %d tabeller i et join" + nla "Teveel tabellen. MySQL kan slechts %d tabellen in een join bevatten" + eng "Too many tables; MySQL can only use %d tables in a join" + est "Liiga palju tabeleid. MySQL suudab JOINiga ühendada kuni %d tabelit" + fre "Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN" + ger "Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden" + greek "Ðïëý ìåãÜëïò áñéèìüò ðéíÜêùí. Ç MySQL ìðïñåß íá ÷ñçóéìïðïéÞóåé %d ðßíáêåò óå äéáäéêáóßá join" + hun "Tul sok tabla. A MySQL csak %d tablat tud kezelni osszefuzeskor" + ita "Troppe tabelle. MySQL puo` usare solo %d tabelle in una join" + jpn "¥Æ¡¼¥Ö¥ë¤¬Â¿¤¹¤®¤Þ¤¹; MySQL can only use %d tables in a join" + kor "³Ê¹« ¸¹Àº Å×À̺íÀÌ JoinµÇ¾ú½À´Ï´Ù. MySQL¿¡¼´Â JOIN½Ã %d°³ÀÇ Å×ÀÌºí¸¸ »ç¿ëÇÒ ¼ö ÀÖ½À´Ï´Ù." + por "Tabelas demais. O MySQL pode usar somente %d tabelas em uma junção (JOIN)" + rum "Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÔÁÂÌÉÃ. MySQL ÍÏÖÅÔ ÉÓÐÏÌØÚÏ×ÁÔØ ÔÏÌØËÏ %d ÔÁÂÌÉÃ × ÓÏÅÄÉÎÅÎÉÉ" + serbian "Previše tabela. MySQL može upotrebiti maksimum %d tabela pri 'JOIN' operaciji" + slo "Príli¹ mnoho tabuliek. MySQL mô¾e pou¾i» len %d v JOIN-e" + spa "Muchas tablas. MySQL solamente puede usar %d tablas en un join" + swe "För många tabeller. MySQL can ha högst %d tabeller i en och samma join" + ukr "úÁÂÁÇÁÔÏ ÔÁÂÌÉÃØ. MySQL ÍÏÖÅ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ %d ÔÁÂÌÉÃØ Õ ÏÂ'¤ÄÎÁÎΦ" +ER_TOO_MANY_FIELDS + cze "P-Bøíli¹ mnoho polo¾ek" + dan "For mange felter" + nla "Te veel velden" + eng "Too many columns" + est "Liiga palju tulpasid" + fre "Trop de champs" + ger "Zu viele Spalten" + greek "Ðïëý ìåãÜëïò áñéèìüò ðåäßùí" + hun "Tul sok mezo" + ita "Troppi campi" + jpn "column ¤¬Â¿¤¹¤®¤Þ¤¹" + kor "Ä®·³ÀÌ ³Ê¹« ¸¹½À´Ï´Ù." + por "Colunas demais" + rum "Prea multe coloane" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÓÔÏÌÂÃÏ×" + serbian "Previše kolona" + slo "Príli¹ mnoho polí" + spa "Muchos campos" + swe "För många fält" + ukr "úÁÂÁÇÁÔÏ ÓÔÏ×Âæ×" +ER_TOO_BIG_ROWSIZE 42000 + cze "-BØádek je pøíli¹ velký. Maximální velikost øádku, nepoèítaje polo¾ky blob, je %d. Musíte zmìnit nìkteré polo¾ky na blob" + dan "For store poster. Max post størrelse, uden BLOB's, er %d. 
Du må lave nogle felter til BLOB's" + nla "Rij-grootte is groter dan toegestaan. Maximale rij grootte, blobs niet meegeteld, is %d. U dient sommige velden in blobs te veranderen." + eng "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs" + est "Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %d. Muuda mõned väljad BLOB-tüüpi väljadeks" + fre "Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %d. Changez le type de quelques colonnes en BLOB" + ger "Zeilenlänge zu groß. Die maximale Spaltenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %d. Einige Felder müssen in BLOB oder TEXT umgewandelt werden" + greek "Ðïëý ìåãÜëï ìÝãåèïò åããñáöÞò. Ôï ìÝãéóôï ìÝãåèïò åããñáöÞò, ÷ùñßò íá õðïëïãßæïíôáé ôá blobs, åßíáé %d. ÐñÝðåé íá ïñßóåôå êÜðïéá ðåäßá óáí blobs" + hun "Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %d. Nehany mezot meg kell valtoztatnia" + ita "Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %d. Devi cambiare alcuni campi in BLOB" + jpn "row size ¤¬Â礤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %d ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤." + kor "³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %dÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä.." + por "Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %d. Você tem que mudar alguns campos para BLOBs" + rum "Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %d. Trebuie sa schimbati unele cimpuri in BLOB-uri" + rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ. íÁËÓÉÍÁÌØÎÙÊ ÒÁÚÍÅÒ ÓÔÒÏËÉ, ÉÓËÌÀÞÁÑ ÐÏÌÑ BLOB, - %d. ÷ÏÚÍÏÖÎÏ, ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÍÅÎÉÔØ ÔÉÐ ÎÅËÏÔÏÒÙÈ ÐÏÌÅÊ ÎÁ BLOB" + serbian "Prevelik slog. Maksimalna velièina sloga, ne raèunajuæi BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB" + slo "Riadok je príli¹ veµký. Maximálna veµkos» riadku, okrem 'BLOB', je %d. Musíte zmeni» niektoré polo¾ky na BLOB" + spa "Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob" + swe "För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %d. Ändra några av dina fält till BLOB" + ukr "úÁÄÏ×ÇÁ ÓÔÒÏËÁ. îÁÊÂ¦ÌØÛÏÀ ÄÏ×ÖÉÎÏÀ ÓÔÒÏËÉ, ÎÅ ÒÁÈÕÀÞÉ BLOB, ¤ %d. ÷ÁÍ ÐÏÔÒ¦ÂÎÏ ÐÒÉ×ÅÓÔÉ ÄÅÑ˦ ÓÔÏ×Âæ ÄÏ ÔÉÐÕ BLOB" +ER_STACK_OVERRUN + cze "P-Bøeteèení zásobníku threadu: pou¾ito %ld z %ld. Pou¾ijte 'mysqld -O thread_stack=#' k zadání vìt¹ího zásobníku" + dan "Thread stack brugt: Brugt: %ld af en %ld stak. Brug 'mysqld -O thread_stack=#' for at allokere en større stak om nødvendigt" + nla "Thread stapel overrun: Gebruikte: %ld van een %ld stack. Gebruik 'mysqld -O thread_stack=#' om een grotere stapel te definieren (indien noodzakelijk)." + eng "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed" + fre "Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur" + ger "Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenen, um notfalls einen größeren Stack anzulegen" + greek "Stack overrun óôï thread: Used: %ld of a %ld stack. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'mysqld -O thread_stack=#' ãéá íá ïñßóåôå Ýíá ìåãáëýôåñï stack áí ÷ñåéÜæåôáé" + hun "Thread verem tullepes: Used: %ld of a %ld stack. 
Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz" + ita "Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande." + jpn "Thread stack overrun: Used: %ld of a %ld stack. ¥¹¥¿¥Ã¥¯Îΰè¤ò¿¤¯¤È¤ê¤¿¤¤¾ì¹ç¡¢'mysqld -O thread_stack=#' ¤È»ØÄꤷ¤Æ¤¯¤À¤µ¤¤" + kor "¾²·¹µå ½ºÅÃÀÌ ³ÑÃÆ½À´Ï´Ù. »ç¿ë: %ld°³ ½ºÅÃ: %ld°³. ¸¸¾à ÇÊ¿ä½Ã ´õÅ« ½ºÅÃÀ» ¿øÇÒ¶§¿¡´Â 'mysqld -O thread_stack=#' ¸¦ Á¤ÀÇÇϼ¼¿ä" + por "Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário" + rum "Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare" + rus "óÔÅË ÐÏÔÏËÏ× ÐÅÒÅÐÏÌÎÅÎ: ÉÓÐÏÌØÚÏ×ÁÎÏ: %ld ÉÚ %ld ÓÔÅËÁ. ðÒÉÍÅÎÑÊÔÅ 'mysqld -O thread_stack=#' ÄÌÑ ÕËÁÚÁÎÉÑ ÂÏÌØÛÅÇÏ ÒÁÚÍÅÒÁ ÓÔÅËÁ, ÅÓÌÉ ÎÅÏÂÈÏÄÉÍÏ" + serbian "Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete veæi stack ako je potrebno" + slo "Preteèenie zásobníku vlákna: pou¾ité: %ld z %ld. Pou¾ite 'mysqld -O thread_stack=#' k zadaniu väè¹ieho zásobníka" + spa "Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario" + swe "Trådstacken tog slut: Har använt %ld av %ld bytes. Använd 'mysqld -O thread_stack=#' ifall du behöver en större stack" + ukr "óÔÅË Ç¦ÌÏË ÐÅÒÅÐÏ×ÎÅÎÏ: ÷ÉËÏÒÉÓÔÁÎÏ: %ld Ú %ld. ÷ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqld -O thread_stack=#' ÁÂÉ ÚÁÚÎÁÞÉÔÉ Â¦ÌØÛÉÊ ÓÔÅË, ÑËÝÏ ÎÅÏÂȦÄÎÏ" +ER_WRONG_OUTER_JOIN 42000 + cze "V OUTER JOIN byl nalezen k-Bøí¾ový odkaz. Provìøte ON podmínky" + dan "Krydsreferencer fundet i OUTER JOIN; check dine ON conditions" + nla "Gekruiste afhankelijkheid gevonden in OUTER JOIN. Controleer uw ON-conditions" + eng "Cross dependency found in OUTER JOIN; examine your ON conditions" + est "Ristsõltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi" + fre "Dépendance croisée dans une clause OUTER JOIN. Vérifiez la condition ON" + ger "OUTER JOIN enthält fehlerhafte Abhängigkeiten. In ON verwendete Bedingungen überprüfen" + greek "Cross dependency âñÝèçêå óå OUTER JOIN. Ðáñáêáëþ åîåôÜóôå ôéò óõíèÞêåò ðïõ èÝóáôå óôï ON" + hun "Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket" + ita "Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON" + por "Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'" + rum "Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON" + rus "÷ OUTER JOIN ÏÂÎÁÒÕÖÅÎÁ ÐÅÒÅËÒÅÓÔÎÁÑ ÚÁ×ÉÓÉÍÏÓÔØ. ÷ÎÉÍÁÔÅÌØÎÏ ÐÒÏÁÎÁÌÉÚÉÒÕÊÔÅ Ó×ÏÉ ÕÓÌÏ×ÉÑ ON" + serbian "Unakrsna zavisnost pronaðena u komandi 'OUTER JOIN'. Istražite vaše 'ON' uslove" + slo "V OUTER JOIN bol nájdený krí¾ový odkaz. Skontrolujte podmienky ON" + spa "Dependencia cruzada encontrada en OUTER JOIN. Examine su condición ON" + swe "Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket" + ukr "ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. 
ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON" +ER_NULL_COLUMN_IN_INDEX 42000 + cze "Sloupec '%-.32s' je pou-B¾it s UNIQUE nebo INDEX, ale není definován jako NOT NULL" + dan "Kolonne '%-.32s' bruges som UNIQUE eller INDEX men er ikke defineret som NOT NULL" + nla "Kolom '%-.64s' wordt gebruikt met UNIQUE of INDEX maar is niet gedefinieerd als NOT NULL" + eng "Column '%-.64s' is used with UNIQUE or INDEX but is not defined as NOT NULL" + est "Tulp '%-.64s' on kasutusel indeksina, kuid ei ole määratletud kui NOT NULL" + fre "La colonne '%-.32s' fait partie d'un index UNIQUE ou INDEX mais n'est pas définie comme NOT NULL" + ger "Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt, ist aber nicht als NOT NULL definiert" + greek "Ôï ðåäßï '%-.64s' ÷ñçóéìïðïéåßôáé óáí UNIQUE Þ INDEX áëëÜ äåí Ý÷åé ïñéóèåß óáí NOT NULL" + hun "A(z) '%-.64s' oszlop INDEX vagy UNIQUE (egyedi), de a definicioja szerint nem NOT NULL" + ita "La colonna '%-.64s' e` usata con UNIQUE o INDEX ma non e` definita come NOT NULL" + jpn "Column '%-.64s' ¤¬ UNIQUE ¤« INDEX ¤Ç»ÈÍѤµ¤ì¤Þ¤·¤¿. ¤³¤Î¥«¥é¥à¤Ï NOT NULL ¤ÈÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó." + kor "'%-.64s' Ä®·³ÀÌ UNIQUE³ª INDEX¸¦ »ç¿ëÇÏ¿´Áö¸¸ NOT NULLÀÌ Á¤ÀǵÇÁö ¾Ê¾Ò±º¿ä..." + nor "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL" + norwegian-ny "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL" + pol "Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL" + por "Coluna '%-.64s' é usada com única (UNIQUE) ou índice (INDEX), mas não está definida como não-nula (NOT NULL)" + rum "Coloana '%-.64s' e folosita cu UNIQUE sau INDEX dar fara sa fie definita ca NOT NULL" + rus "óÔÏÌÂÅà '%-.64s' ÉÓÐÏÌØÚÕÅÔÓÑ × UNIQUE ÉÌÉ × INDEX, ÎÏ ÎÅ ÏÐÒÅÄÅÌÅÎ ËÁË NOT NULL" + serbian "Kolona '%-.64s' je upotrebljena kao 'UNIQUE' ili 'INDEX' ali nije definisana kao 'NOT NULL'" + slo "Pole '%-.64s' je pou¾ité s UNIQUE alebo INDEX, ale nie je zadefinované ako NOT NULL" + spa "Columna '%-.32s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL" + swe "Kolumn '%-.32s' är använd med UNIQUE eller INDEX men är inte definerad med NOT NULL" + ukr "óÔÏ×ÂÅÃØ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ Ú UNIQUE ÁÂÏ INDEX, ÁÌÅ ÎÅ ×ÉÚÎÁÞÅÎÉÊ ÑË NOT NULL" +ER_CANT_FIND_UDF + cze "Nemohu na-Bèíst funkci '%-.64s'" + dan "Kan ikke læse funktionen '%-.64s'" + nla "Kan functie '%-.64s' niet laden" + eng "Can't load function '%-.64s'" + est "Ei suuda avada funktsiooni '%-.64s'" + fre "Imposible de charger la fonction '%-.64s'" + ger "Kann Funktion '%-.64s' nicht laden" + greek "Äåí åßíáé äõíáôÞ ç äéáäéêáóßá load ãéá ôç óõíÜñôçóç '%-.64s'" + hun "A(z) '%-.64s' fuggveny nem toltheto be" + ita "Impossibile caricare la funzione '%-.64s'" + jpn "function '%-.64s' ¤ò ¥í¡¼¥É¤Ç¤¤Þ¤»¤ó" + kor "'%-.64s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù." 
+ por "Não pode carregar a função '%-.64s'" + rum "Nu pot incarca functia '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÇÒÕÚÉÔØ ÆÕÎËÃÉÀ '%-.64s'" + serbian "Ne mogu da uèitam funkciju '%-.64s'" + slo "Nemô¾em naèíta» funkciu '%-.64s'" + spa "No puedo cargar función '%-.64s'" + swe "Kan inte ladda funktionen '%-.64s'" + ukr "îÅ ÍÏÖÕ ÚÁ×ÁÎÔÁÖÉÔÉ ÆÕÎËæÀ '%-.64s'" +ER_CANT_INITIALIZE_UDF + cze "Nemohu inicializovat funkci '%-.64s'; %-.80s" + dan "Kan ikke starte funktionen '%-.64s'; %-.80s" + nla "Kan functie '%-.64s' niet initialiseren; %-.80s" + eng "Can't initialize function '%-.64s'; %-.80s" + est "Ei suuda algväärtustada funktsiooni '%-.64s'; %-.80s" + fre "Impossible d'initialiser la fonction '%-.64s'; %-.80s" + ger "Kann Funktion '%-.64s' nicht initialisieren: %-.80s" + greek "Äåí åßíáé äõíáôÞ ç Ýíáñîç ôçò óõíÜñôçóçò '%-.64s'; %-.80s" + hun "A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s" + ita "Impossibile inizializzare la funzione '%-.64s'; %-.80s" + jpn "function '%-.64s' ¤ò½é´ü²½¤Ç¤¤Þ¤»¤ó; %-.80s" + kor "'%-.64s' ÇÔ¼ö¸¦ ÃʱâÈ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s" + por "Não pode inicializar a função '%-.64s' - '%-.80s'" + rum "Nu pot initializa functia '%-.64s'; %-.80s" + rus "îÅ×ÏÚÍÏÖÎÏ ÉÎÉÃÉÁÌÉÚÉÒÏ×ÁÔØ ÆÕÎËÃÉÀ '%-.64s'; %-.80s" + serbian "Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s" + slo "Nemô¾em inicializova» funkciu '%-.64s'; %-.80s" + spa "No puedo inicializar función '%-.64s'; %-.80s" + swe "Kan inte initialisera funktionen '%-.64s'; '%-.80s'" + ukr "îÅ ÍÏÖÕ ¦Î¦Ã¦Á̦ÚÕ×ÁÔÉ ÆÕÎËæÀ '%-.64s'; %-.80s" +ER_UDF_NO_PATHS + cze "Pro sd-Bílenou knihovnu nejsou povoleny cesty" + dan "Angivelse af sti ikke tilladt for delt bibliotek" + nla "Geen pad toegestaan voor shared library" + eng "No paths allowed for shared library" + est "Teegi nimes ei tohi olla kataloogi" + fre "Chemin interdit pour les bibliothèques partagées" + ger "Keine Pfade gestattet für Shared Library" + greek "Äåí âñÝèçêáí paths ãéá ôçí shared library" + hun "Nincs ut a megosztott konyvtarakhoz (shared library)" + ita "Non sono ammessi path per le librerie condivisa" + jpn "shared library ¤Ø¤Î¥Ñ¥¹¤¬Ä̤äƤ¤¤Þ¤»¤ó" + kor "°øÀ¯ ¶óÀ̹ö·¯¸®¸¦ À§ÇÑ ÆÐ½º°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù." + por "Não há caminhos (paths) permitidos para biblioteca compartilhada" + rum "Nici un paths nu e permis pentru o librarie shared" + rus "îÅÄÏÐÕÓÔÉÍÏ ÕËÁÚÙ×ÁÔØ ÐÕÔÉ ÄÌÑ ÄÉÎÁÍÉÞÅÓËÉÈ ÂÉÂÌÉÏÔÅË" + serbian "Ne postoje dozvoljene putanje do share-ovane biblioteke" + slo "Neprípustné ¾iadne cesty k zdieµanej kni¾nici" + spa "No pasos permitidos para librarias conjugadas" + swe "Man får inte ange sökväg för dynamiska bibliotek" + ukr "îÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÐÕÔ¦ ÄÌÑ ÒÏÚĦÌÀ×ÁÎÉÈ Â¦Â̦ÏÔÅË" +ER_UDF_EXISTS + cze "Funkce '%-.64s' ji-B¾ existuje" + dan "Funktionen '%-.64s' findes allerede" + nla "Functie '%-.64s' bestaat reeds" + eng "Function '%-.64s' already exists" + est "Funktsioon '%-.64s' juba eksisteerib" + fre "La fonction '%-.64s' existe déjà" + ger "Funktion '%-.64s' existiert schon" + greek "Ç óõíÜñôçóç '%-.64s' õðÜñ÷åé Þäç" + hun "A '%-.64s' fuggveny mar letezik" + ita "La funzione '%-.64s' esiste gia`" + jpn "Function '%-.64s' ¤Ï´û¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤¹" + kor "'%-.64s' ÇÔ¼ö´Â ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù." 
+ por "Função '%-.64s' já existe" + rum "Functia '%-.64s' exista deja" + rus "æÕÎËÃÉÑ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Funkcija '%-.64s' veæ postoji" + slo "Funkcia '%-.64s' u¾ existuje" + spa "Función '%-.64s' ya existe" + swe "Funktionen '%-.64s' finns redan" + ukr "æÕÎËÃ¦Ñ '%-.64s' ×ÖÅ ¦ÓÎÕ¤" +ER_CANT_OPEN_LIBRARY + cze "Nemohu otev-Bøít sdílenou knihovnu '%-.64s' (errno: %d %s)" + dan "Kan ikke åbne delt bibliotek '%-.64s' (errno: %d %s)" + nla "Kan shared library '%-.64s' niet openen (Errcode: %d %s)" + eng "Can't open shared library '%-.64s' (errno: %d %-.64s)" + est "Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.64s)" + fre "Impossible d'ouvrir la bibliothèque partagée '%-.64s' (errno: %d %s)" + ger "Kann Shared Library '%-.64s' nicht öffnen (Fehler: %d %-.64s)" + greek "Äåí åßíáé äõíáôÞ ç áíÜãíùóç ôçò shared library '%-.64s' (êùäéêüò ëÜèïõò: %d %s)" + hun "A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %s)" + ita "Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %s)" + jpn "shared library '%-.64s' ¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d %s)" + kor "'%-.64s' °øÀ¯ ¶óÀ̹ö·¯¸®¸¦ ¿¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£: %d %s)" + nor "Can't open shared library '%-.64s' (errno: %d %s)" + norwegian-ny "Can't open shared library '%-.64s' (errno: %d %s)" + pol "Can't open shared library '%-.64s' (errno: %d %s)" + por "Não pode abrir biblioteca compartilhada '%-.64s' (erro no. '%d' - '%-.64s')" + rum "Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.64s)" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÄÉÎÁÍÉÞÅÓËÕÀ ÂÉÂÌÉÏÔÅËÕ '%-.64s' (ÏÛÉÂËÁ: %d %-.64s)" + serbian "Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.64s)" + slo "Nemô¾em otvori» zdieµanú kni¾nicu '%-.64s' (chybový kód: %d %s)" + spa "No puedo abrir libraria conjugada '%-.64s' (errno: %d %s)" + swe "Kan inte öppna det dynamiska biblioteket '%-.64s' (Felkod: %d %s)" + ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÒÏÚĦÌÀ×ÁÎÕ Â¦Â̦ÏÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d %-.64s)" +ER_CANT_FIND_DL_ENTRY + cze "Nemohu naj-Bít funkci '%-.64s' v knihovnì'" + dan "Kan ikke finde funktionen '%-.64s' i bibliotek'" + nla "Kan functie '%-.64s' niet in library vinden" + eng "Can't find function '%-.64s' in library'" + est "Ei leia funktsiooni '%-.64s' antud teegis" + fre "Impossible de trouver la fonction '%-.64s' dans la bibliothèque'" + ger "Kann Funktion '%-.64s' in der Library nicht finden" + greek "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò óõíÜñôçóçò '%-.64s' óôçí âéâëéïèÞêç'" + hun "A(z) '%-.64s' fuggveny nem talalhato a konyvtarban" + ita "Impossibile trovare la funzione '%-.64s' nella libreria" + jpn "function '%-.64s' ¤ò¥é¥¤¥Ö¥é¥ê¡¼Ãæ¤Ë¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó" + kor "¶óÀ̹ö·¯¸®¿¡¼ '%-.64s' ÇÔ¼ö¸¦ ãÀ» ¼ö ¾ø½À´Ï´Ù." 
+ por "Não pode encontrar a função '%-.64s' na biblioteca" + rum "Nu pot gasi functia '%-.64s' in libraria" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÆÕÎËÃÉÀ '%-.64s' × ÂÉÂÌÉÏÔÅËÅ" + serbian "Ne mogu da pronadjem funkciju '%-.64s' u biblioteci" + slo "Nemô¾em nájs» funkciu '%-.64s' v kni¾nici'" + spa "No puedo encontrar función '%-.64s' en libraria'" + swe "Hittar inte funktionen '%-.64s' in det dynamiska biblioteket" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÕÎËæÀ '%-.64s' Õ Â¦Â̦ÏÔÅæ'" +ER_FUNCTION_NOT_DEFINED + cze "Funkce '%-.64s' nen-Bí definována" + dan "Funktionen '%-.64s' er ikke defineret" + nla "Functie '%-.64s' is niet gedefinieerd" + eng "Function '%-.64s' is not defined" + est "Funktsioon '%-.64s' ei ole defineeritud" + fre "La fonction '%-.64s' n'est pas définie" + ger "Funktion '%-.64s' ist nicht definiert" + greek "Ç óõíÜñôçóç '%-.64s' äåí Ý÷åé ïñéóèåß" + hun "A '%-.64s' fuggveny nem definialt" + ita "La funzione '%-.64s' non e` definita" + jpn "Function '%-.64s' ¤ÏÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.64s' ÇÔ¼ö°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù." + por "Função '%-.64s' não está definida" + rum "Functia '%-.64s' nu e definita" + rus "æÕÎËÃÉÑ '%-.64s' ÎÅ ÏÐÒÅÄÅÌÅÎÁ" + serbian "Funkcija '%-.64s' nije definisana" + slo "Funkcia '%-.64s' nie je definovaná" + spa "Función '%-.64s' no está definida" + swe "Funktionen '%-.64s' är inte definierad" + ukr "æÕÎËæÀ '%-.64s' ÎÅ ×ÉÚÎÁÞÅÎÏ" +ER_HOST_IS_BLOCKED + cze "Stroj '%-.64s' je zablokov-Bán kvùli mnoha chybám pøi pøipojování. Odblokujete pou¾itím 'mysqladmin flush-hosts'" + dan "Værten er blokeret på grund af mange fejlforespørgsler. Lås op med 'mysqladmin flush-hosts'" + nla "Host '%-.64s' is geblokkeeerd vanwege te veel verbindings fouten. Deblokkeer met 'mysqladmin flush-hosts'" + eng "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'" + est "Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga" + fre "L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connection. Débloquer le par 'mysqladmin flush-hosts'" + ger "Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. Aufheben der Blockierung mit 'mysqladmin flush-hosts'" + greek "Ï õðïëïãéóôÞò Ý÷åé áðïêëåéóèåß ëüãù ðïëëáðëþí ëáèþí óýíäåóçò. ÐñïóðáèÞóôå íá äéïñþóåôå ìå 'mysqladmin flush-hosts'" + hun "A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot" + ita "Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'" + jpn "Host '%-.64s' ¤Ï many connection error ¤Î¤¿¤á¡¢µñÈݤµ¤ì¤Þ¤·¤¿. 'mysqladmin flush-hosts' ¤Ç²ò½ü¤·¤Æ¤¯¤À¤µ¤¤" + kor "³Ê¹« ¸¹Àº ¿¬°á¿À·ù·Î ÀÎÇÏ¿© È£½ºÆ® '%-.64s'´Â ºí¶ôµÇ¾ú½À´Ï´Ù. 'mysqladmin flush-hosts'¸¦ ÀÌ¿ëÇÏ¿© ºí¶ôÀ» ÇØÁ¦Çϼ¼¿ä" + por "'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'" + rum "Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'" + rus "èÏÓÔ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÉÚ-ÚÁ ÓÌÉÛËÏÍ ÂÏÌØÛÏÇÏ ËÏÌÉÞÅÓÔ×Á ÏÛÉÂÏË ÓÏÅÄÉÎÅÎÉÑ. òÁÚÂÌÏËÉÒÏ×ÁÔØ ÅÇÏ ÍÏÖÎÏ Ó ÐÏÍÏÝØÀ 'mysqladmin flush-hosts'" + serbian "Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoæu komande 'mysqladmin flush-hosts'" + spa "Servidor '%-.64s' está bloqueado por muchos errores de conexión. Desbloquear con 'mysqladmin flush-hosts'" + swe "Denna dator, '%-.64s', är blockerad pga många felaktig paket. 
Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna" + ukr "èÏÓÔ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ú ÐÒÉÞÉÎÉ ×ÅÌÉËϧ Ë¦ÌØËÏÓÔ¦ ÐÏÍÉÌÏË Ú'¤ÄÎÁÎÎÑ. äÌÑ ÒÏÚÂÌÏËÕ×ÁÎÎÑ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqladmin flush-hosts'" +ER_HOST_NOT_PRIVILEGED + cze "Stroj '%-.64s' nem-Bá povoleno se k tomuto MySQL serveru pøipojit" + dan "Værten '%-.64s' kan ikke tilkoble denne MySQL-server" + nla "Het is host '%-.64s' is niet toegestaan verbinding te maken met deze MySQL server" + eng "Host '%-.64s' is not allowed to connect to this MySQL server" + est "Masinal '%-.64s' puudub ligipääs sellele MySQL serverile" + fre "Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MySQL" + ger "Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden" + greek "Ï õðïëïãéóôÞò äåí Ý÷åé äéêáßùìá óýíäåóçò ìå ôïí MySQL server" + hun "A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez" + ita "Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL" + jpn "Host '%-.64s' ¤Ï MySQL server ¤ËÀܳ¤òµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.64s' È£½ºÆ®´Â ÀÌ MySQL¼¹ö¿¡ Á¢¼ÓÇÒ Çã°¡¸¦ ¹ÞÁö ¸øÇß½À´Ï´Ù." + por "'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL" + rum "Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL" + rus "èÏÓÔÕ '%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÐÏÄËÌÀÞÁÔØÓÑ Ë ÜÔÏÍÕ ÓÅÒ×ÅÒÕ MySQL" + serbian "Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server" + spa "Servidor '%-.64s' no está permitido para conectar con este servidor MySQL" + swe "Denna dator, '%-.64s', har inte privileger att använda denna MySQL server" + ukr "èÏÓÔÕ '%-.64s' ÎÅ ÄÏ×ÏÌÅÎÏ Ú×'ÑÚÕ×ÁÔÉÓØ Ú ÃÉÍ ÓÅÒ×ÅÒÏÍ MySQL" +ER_PASSWORD_ANONYMOUS_USER 42000 + cze "Pou-B¾íváte MySQL jako anonymní u¾ivatel a anonymní u¾ivatelé nemají povoleno mìnit hesla" + dan "Du bruger MySQL som anonym bruger. Anonyme brugere må ikke ændre adgangskoder" + nla "U gebruikt MySQL als anonieme gebruiker en deze mogen geen wachtwoorden wijzigen" + eng "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords" + est "Te kasutate MySQL-i anonüümse kasutajana, kelledel pole parooli muutmise õigust" + fre "Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe" + ger "Sie benutzen MySQL als anonymer Benutzer und dürfen daher keine Passwörter ändern" + greek "×ñçóéìïðïéåßôå ôçí MySQL óáí anonymous user êáé Ýôóé äåí ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí" + hun "Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas" + ita "Impossibile cambiare la password usando MySQL come utente anonimo" + jpn "MySQL ¤ò anonymous users ¤Ç»ÈÍѤ·¤Æ¤¤¤ë¾õÂ֤Ǥϡ¢¥Ñ¥¹¥ï¡¼¥É¤ÎÊѹ¹¤Ï¤Ç¤¤Þ¤»¤ó" + kor "´ç½ÅÀº MySQL¼¹ö¿¡ À͸íÀÇ »ç¿ëÀÚ·Î Á¢¼ÓÀ» Çϼ̽À´Ï´Ù.À͸íÀÇ »ç¿ëÀÚ´Â ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ¾ø½À´Ï´Ù." 
+ por "Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas" + rum "Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele" + rus "÷Ù ÉÓÐÏÌØÚÕÅÔÅ MySQL ÏÔ ÉÍÅÎÉ ÁÎÏÎÉÍÎÏÇÏ ÐÏÌØÚÏ×ÁÔÅÌÑ, Á ÁÎÏÎÉÍÎÙÍ ÐÏÌØÚÏ×ÁÔÅÌÑÍ ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÍÅÎÑÔØ ÐÁÒÏÌÉ" + serbian "Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke" + spa "Tu estás usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves" + swe "Du använder MySQL som en anonym användare och som sådan får du inte ändra ditt lösenord" + ukr "÷É ×ÉËÏÒÉÓÔÏ×Õ¤ÔÅ MySQL ÑË ÁÎÏΦÍÎÉÊ ËÏÒÉÓÔÕ×ÁÞ, ÔÏÍÕ ×ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ÚͦÎÀ×ÁÔÉ ÐÁÒÏ̦" +ER_PASSWORD_NOT_ALLOWED 42000 + cze "Na zm-Bìnu hesel ostatním musíte mít právo provést update tabulek v databázi mysql" + dan "Du skal have tilladelse til at opdatere tabeller i MySQL databasen for at ændre andres adgangskoder" + nla "U moet tabel update priveleges hebben in de mysql database om wachtwoorden voor anderen te mogen wijzigen" + eng "You must have privileges to update tables in the mysql database to be able to change passwords for others" + est "Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis" + fre "Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres" + ger "Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können" + greek "ÐñÝðåé íá Ý÷åôå äéêáßùìá äéüñèùóçò ðéíÜêùí (update) óôç âÜóç äåäïìÝíùí mysql ãéá íá ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí" + hun "Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz" + ita "E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti" + jpn "¾¤Î¥æ¡¼¥¶¡¼¤Î¥Ñ¥¹¥ï¡¼¥É¤òÊѹ¹¤¹¤ë¤¿¤á¤Ë¤Ï, mysql ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ËÂФ·¤Æ update ¤Îµö²Ä¤¬¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó." + kor "´ç½ÅÀº ´Ù¸¥»ç¿ëÀÚµéÀÇ ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ÀÖµµ·Ï µ¥ÀÌŸº£À̽º º¯°æ±ÇÇÑÀ» °¡Á®¾ß ÇÕ´Ï´Ù." 
+ por "Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros" + rum "Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora" + rus "äÌÑ ÔÏÇÏ ÞÔÏÂÙ ÉÚÍÅÎÑÔØ ÐÁÒÏÌÉ ÄÒÕÇÉÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ, Õ ×ÁÓ ÄÏÌÖÎÙ ÂÙÔØ ÐÒÉ×ÉÌÅÇÉÉ ÎÁ ÉÚÍÅÎÅÎÉÅ ÔÁÂÌÉÃ × ÂÁÚÅ ÄÁÎÎÙÈ mysql" + serbian "Morate imati privilegije da možete da update-ujete odreðene tabele ako želite da menjate lozinke za druge korisnike" + spa "Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros" + swe "För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen" + ukr "÷É ÐÏ×ÉΦ ÍÁÔÉ ÐÒÁ×Ï ÎÁ ÏÎÏ×ÌÅÎÎÑ ÔÁÂÌÉÃØ Õ ÂÁÚ¦ ÄÁÎÎÉÈ mysql, ÁÂÉ ÍÁÔÉ ÍÏÖÌÉצÓÔØ ÚͦÎÀ×ÁÔÉ ÐÁÒÏÌØ ¦ÎÛÉÍ" +ER_PASSWORD_NO_MATCH 42000 + cze "V tabulce user nen-Bí ¾ádný odpovídající øádek" + dan "Kan ikke finde nogen tilsvarende poster i bruger tabellen" + nla "Kan geen enkele passende rij vinden in de gebruikers tabel" + eng "Can't find any matching row in the user table" + est "Ei leia vastavat kirjet kasutajate tabelis" + fre "Impossible de trouver un enregistrement correspondant dans la table user" + ger "Kann keinen passenden Datensatz in Tabelle 'user' finden" + greek "Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò áíôßóôïé÷çò åããñáöÞò óôïí ðßíáêá ôùí ÷ñçóôþí" + hun "Nincs megegyezo sor a user tablaban" + ita "Impossibile trovare la riga corrispondente nella tabella user" + kor "»ç¿ëÀÚ Å×ÀÌºí¿¡¼ ÀÏÄ¡ÇÏ´Â °ÍÀ» ãÀ» ¼ö ¾øÀ¾´Ï´Ù." + por "Não pode encontrar nenhuma linha que combine na tabela usuário (user table)" + rum "Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÄÈÏÄÑÝÕÀ ÚÁÐÉÓØ × ÔÁÂÌÉÃÅ ÐÏÌØÚÏ×ÁÔÅÌÅÊ" + serbian "Ne mogu da pronaðem odgovarajuæi slog u 'user' tabeli" + spa "No puedo encontrar una línea correponsdiente en la tabla user" + swe "Hittade inte användaren i 'user'-tabellen" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ ×¦ÄÐÏצÄÎÉÈ ÚÁÐÉÓ¦× Õ ÔÁÂÌÉæ ËÏÒÉÓÔÕ×ÁÞÁ" +ER_UPDATE_INFO + cze "Nalezen-Bých øádkù: %ld Zmìnìno: %ld Varování: %ld" + dan "Poster fundet: %ld Ændret: %ld Advarsler: %ld" + nla "Passende rijen: %ld Gewijzigd: %ld Waarschuwingen: %ld" + eng "Rows matched: %ld Changed: %ld Warnings: %ld" + est "Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld" + fre "Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld" + ger "Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld" + hun "Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld" + ita "Rows riconosciute: %ld Cambiate: %ld Warnings: %ld" + jpn "°ìÃ׿ô(Rows matched): %ld Êѹ¹: %ld Warnings: %ld" + kor "ÀÏÄ¡ÇÏ´Â Rows : %ld°³ º¯°æµÊ: %ld°³ °æ°í: %ld°³" + por "Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld" + rum "Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld" + rus "óÏ×ÐÁÌÏ ÚÁÐÉÓÅÊ: %ld éÚÍÅÎÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld" + serbian "Odgovarajuæih slogova: %ld Promenjeno: %ld Upozorenja: %ld" + spa "Líneas correspondientes: %ld Cambiadas: %ld Avisos: %ld" + swe "Rader: %ld Uppdaterade: %ld Varningar: %ld" + ukr "úÁÐÉÓ¦× ×¦ÄÐÏצÄÁ¤: %ld úͦÎÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld" +ER_CANT_CREATE_THREAD + cze "Nemohu vytvo-Bøit nový thread (errno %d). Pokud je je¹tì nìjaká volná pamì», podívejte se do manuálu na èást o chybách specifických pro jednotlivé operaèní systémy" + dan "Kan ikke danne en ny tråd (fejl nr. %d). 
Hvis computeren ikke er løbet tør for hukommelse, kan du se i brugervejledningen for en mulig operativ-system - afhængig fejl" + nla "Kan geen nieuwe thread aanmaken (Errcode: %d). Indien er geen tekort aan geheugen is kunt u de handleiding consulteren over een mogelijke OS afhankelijke fout" + eng "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug" + est "Ei suuda luua uut lõime (veakood %d). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga" + fre "Impossible de créer une nouvelle tâche (errno %d). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS" + ger "Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen" + hun "Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet" + ita "Impossibile creare un nuovo thread (errno %d). Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO" + jpn "¿·µ¬¤Ë¥¹¥ì¥Ã¥É¤¬ºî¤ì¤Þ¤»¤ó¤Ç¤·¤¿ (errno %d). ¤â¤·ºÇÂç»ÈÍѵö²Ä¥á¥â¥ê¡¼¿ô¤ò±Û¤¨¤Æ¤¤¤Ê¤¤¤Î¤Ë¥¨¥é¡¼¤¬È¯À¸¤·¤Æ¤¤¤ë¤Ê¤é, ¥Þ¥Ë¥å¥¢¥ë¤ÎÃæ¤«¤é 'possible OS-dependent bug' ¤È¤¤¤¦Ê¸»ú¤òõ¤·¤Æ¤¯¤ß¤Æ¤À¤µ¤¤." + kor "»õ·Î¿î ¾²·¹µå¸¦ ¸¸µé ¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£ %d). ¸¸¾à ¿©À¯¸Þ¸ð¸®°¡ ÀÖ´Ù¸é OS-dependent¹ö±× ÀÇ ¸Þ´º¾ó ºÎºÐÀ» ã¾Æº¸½Ã¿À." + nor "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + norwegian-ny "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + pol "Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug" + por "Não pode criar uma nova 'thread' (erro no. %d). Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional" + rum "Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÎÏ×ÙÊ ÐÏÔÏË (ÏÛÉÂËÁ %d). åÓÌÉ ÜÔÏ ÎÅ ÓÉÔÕÁÃÉÑ, Ó×ÑÚÁÎÎÁÑ Ó ÎÅÈ×ÁÔËÏÊ ÐÁÍÑÔÉ, ÔÏ ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÕÞÉÔØ ÄÏËÕÍÅÎÔÁÃÉÀ ÎÁ ÐÒÅÄÍÅÔ ÏÐÉÓÁÎÉÑ ×ÏÚÍÏÖÎÏÊ ÏÛÉÂËÉ ÒÁÂÏÔÙ × ËÏÎËÒÅÔÎÏÊ ïó" + serbian "Ne mogu da kreiram novi thread (errno %d). Ako imate još slobodne memorije, trebali biste da pogledate u priruèniku da li je ovo specifièna greška vašeg operativnog sistema" + spa "No puedo crear un nuevo thread (errno %d). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO" + swe "Kan inte skapa en ny tråd (errno %d)" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÎÏ×Õ Ç¦ÌËÕ (ÐÏÍÉÌËÁ %d). 
ñËÝÏ ×É ÎÅ ×ÉËÏÒÉÓÔÁÌÉ ÕÓÀ ÐÁÍ'ÑÔØ, ÔÏ ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÄÏ ×ÁÛϧ ïó - ÍÏÖÌÉ×Ï ÃÅ ÐÏÍÉÌËÁ ïó" +ER_WRONG_VALUE_COUNT_ON_ROW 21S01 + cze "Po-Bèet sloupcù neodpovídá poètu hodnot na øádku %ld" + dan "Kolonne antallet stemmer ikke overens med antallet af værdier i post %ld" + nla "Kolom aantal komt niet overeen met waarde aantal in rij %ld" + eng "Column count doesn't match value count at row %ld" + est "Tulpade hulk erineb väärtuste hulgast real %ld" + ger "Anzahl der Spalten stimmt nicht mit der Anzahl der Werte in Zeile %ld überein" + hun "Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel" + ita "Il numero delle colonne non corrisponde al conteggio alla riga %ld" + kor "Row %ld¿¡¼ Ä®·³ Ä«¿îÆ®¿Í value Ä«¿îÅÍ¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù." + por "Contagem de colunas não confere com a contagem de valores na linha %ld" + rum "Numarul de coloane nu corespunde cu numarul de valori la linia %ld" + rus "ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ × ÚÁÐÉÓÉ %ld" + serbian "Broj kolona ne odgovara broju vrednosti u slogu %ld" + spa "El número de columnas no corresponde al número en la línea %ld" + swe "Antalet kolumner motsvarar inte antalet värden på rad: %ld" + ukr "ë¦ÌØË¦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØË¦ÓÔÀ ÚÎÁÞÅÎØ Õ ÓÔÒÏæ %ld" +ER_CANT_REOPEN_TABLE + cze "Nemohu znovuotev-Bøít tabulku: '%-.64s" + dan "Kan ikke genåbne tabel '%-.64s" + nla "Kan tabel niet opnieuw openen: '%-.64s" + eng "Can't reopen table: '%-.64s'" + est "Ei suuda taasavada tabelit '%-.64s'" + fre "Impossible de réouvrir la table: '%-.64s" + ger "Kann Tabelle'%-.64s' nicht erneut öffnen" + hun "Nem lehet ujra-megnyitni a tablat: '%-.64s" + ita "Impossibile riaprire la tabella: '%-.64s'" + kor "Å×À̺íÀ» ´Ù½Ã ¿¼ö ¾ø±º¿ä: '%-.64s" + nor "Can't reopen table: '%-.64s" + norwegian-ny "Can't reopen table: '%-.64s" + pol "Can't reopen table: '%-.64s" + por "Não pode reabrir a tabela '%-.64s" + rum "Nu pot redeschide tabela: '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÎÏ×Ï ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ '%-.64s'" + serbian "Ne mogu da ponovo otvorim tabelu '%-.64s'" + slo "Can't reopen table: '%-.64s" + spa "No puedo reabrir tabla: '%-.64s" + swe "Kunde inte stänga och öppna tabell '%-.64s" + ukr "îÅ ÍÏÖÕ ÐÅÒÅצÄËÒÉÔÉ ÔÁÂÌÉÃÀ: '%-.64s'" +ER_INVALID_USE_OF_NULL 22004 + cze "Neplatn-Bé u¾ití hodnoty NULL" + dan "Forkert brug af nulværdi (NULL)" + nla "Foutief gebruik van de NULL waarde" + eng "Invalid use of NULL value" + est "NULL väärtuse väärkasutus" + fre "Utilisation incorrecte de la valeur NULL" + ger "Unerlaubte Verwendung eines NULL-Werts" + hun "A NULL ervenytelen hasznalata" + ita "Uso scorretto del valore NULL" + jpn "NULL ÃͤλÈÍÑÊýË¡¤¬ÉÔŬÀڤǤ¹" + kor "NULL °ªÀ» À߸ø »ç¿ëÇϼ̱º¿ä..." + por "Uso inválido do valor NULL" + rum "Folosirea unei value NULL e invalida" + rus "îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ×ÅÌÉÞÉÎÙ NULL" + serbian "Pogrešna upotreba vrednosti NULL" + spa "Invalido uso de valor NULL" + swe "Felaktig använding av NULL" + ukr "èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÚÎÁÞÅÎÎÑ NULL" +ER_REGEXP_ERROR 42000 + cze "Regul-Bární výraz vrátil chybu '%-.64s'" + dan "Fik fejl '%-.64s' fra regexp" + nla "Fout '%-.64s' ontvangen van regexp" + eng "Got error '%-.64s' from regexp" + est "regexp tagastas vea '%-.64s'" + fre "Erreur '%-.64s' provenant de regexp" + ger "regexp lieferte Fehler '%-.64s'" + hun "'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)" + ita "Errore '%-.64s' da regexp" + kor "regexp¿¡¼ '%-.64s'°¡ ³µ½À´Ï´Ù." 
+ por "Obteve erro '%-.64s' em regexp" + rum "Eroarea '%-.64s' obtinuta din expresia regulara (regexp)" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ '%-.64s' ÏÔ ÒÅÇÕÌÑÒÎÏÇÏ ×ÙÒÁÖÅÎÉÑ" + serbian "Funkcija regexp je vratila grešku '%-.64s'" + spa "Obtenido error '%-.64s' de regexp" + swe "Fick fel '%-.64s' från REGEXP" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ '%-.64s' ×¦Ä ÒÅÇÕÌÑÒÎÏÇÏ ×ÉÒÁÚÕ" +ER_MIX_OF_GROUP_FUNC_AND_FIELDS 42000 + cze "Pokud nen-Bí ¾ádná GROUP BY klauzule, není dovoleno souèasné pou¾ití GROUP polo¾ek (MIN(),MAX(),COUNT()...) s ne GROUP polo¾kami" + dan "Sammenblanding af GROUP kolonner (MIN(),MAX(),COUNT()...) uden GROUP kolonner er ikke tilladt, hvis der ikke er noget GROUP BY prædikat" + nla "Het mixen van GROUP kolommen (MIN(),MAX(),COUNT()...) met no-GROUP kolommen is foutief indien er geen GROUP BY clausule is" + eng "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause" + est "GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud" + fre "Mélanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY" + ger "Das Vermischen von GROUP-Spalten (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Spalten ist nicht zulässig, wenn keine GROUP BY-Klausel vorhanden ist" + hun "A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul" + ita "Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY" + kor "Mixing of GROUP Ä®·³s (MIN(),MAX(),COUNT(),...) with no GROUP Ä®·³s is illegal if there is no GROUP BY clause" + por "Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)" + rum "Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY" + rus "ïÄÎÏ×ÒÅÍÅÎÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÈ (GROUP) ÓÔÏÌÂÃÏ× (MIN(),MAX(),COUNT(),...) Ó ÎÅÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÍÉ ÓÔÏÌÂÃÁÍÉ Ñ×ÌÑÅÔÓÑ ÎÅËÏÒÒÅËÔÎÙÍ, ÅÓÌÉ × ×ÙÒÁÖÅÎÉÉ ÅÓÔØ GROUP BY" + serbian "Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz" + spa "Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) con no GROUP columnas es ilegal si no hat la clausula GROUP BY" + swe "Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) och fält i en fråga om man inte har en GROUP BY-del" + ukr "úͦÛÕ×ÁÎÎÑ GROUP ÓÔÏ×ÂÃ¦× (MIN(),MAX(),COUNT()...) Ú ÎÅ GROUP ÓÔÏ×ÂÃÑÍÉ ¤ ÚÁÂÏÒÏÎÅÎÉÍ, ÑËÝÏ ÎÅ ÍÁ¤ GROUP BY" +ER_NONEXISTING_GRANT 42000 + cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s'" + dan "Denne tilladelse findes ikke for brugeren '%-.32s' på vært '%-.64s'" + nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s'" + eng "There is no such grant defined for user '%-.32s' on host '%-.64s'" + est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'" + fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s'" + ger "Für Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung" + hun "A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on" + ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'" + jpn "¥æ¡¼¥¶¡¼ '%-.32s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "»ç¿ëÀÚ '%-.32s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù." 
+ por "Não existe tal permissão (grant) definida para o usuário '%-.32s' no 'host' '%-.64s'" + rum "Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'" + rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ÈÏÓÔÅ '%-.64s'" + serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'" + spa "No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'" + swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s'" + ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s'" +ER_TABLEACCESS_DENIED_ERROR 42000 + cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro tabulku '%-.64s'" + dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for tabellen '%-.64s'" + nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor tabel '%-.64s'" + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'" + est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'" + fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'" + ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und für Tabelle '%-.64s'" + hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban" + ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'" + jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Å×À̺í '%-.64s'" + por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na tabela '%-.64s'" + rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'" + rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'" + serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'" + spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'" + swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för tabell '%-.64s'" + ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.64s'" +ER_COLUMNACCESS_DENIED_ERROR 42000 + cze "%-.16s p-Bøíkaz nepøístupný pro u¾ivatele: '%-.32s'@'%-.64s' pro sloupec '%-.64s' v tabulce '%-.64s'" + dan "%-.16s-kommandoen er ikke tilladt for brugeren '%-.32s'@'%-.64s' for kolonne '%-.64s' in tabellen '%-.64s'" + nla "%-.16s commando geweigerd voor gebruiker: '%-.32s'@'%-.64s' voor kolom '%-.64s' in tabel '%-.64s'" + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'" + est "%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'" + fre "La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'" + ger "%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und Spalte '%-.64s' in Tabelle '%-.64s'" + hun "%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban" + ita "Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'" + jpn "¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó" + kor "'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. 
: '%-.32s'@'%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'" + por "Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'" + rum "Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'" + rus "ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'" + serbian "%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'" + spa "%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'" + swe "%-.16s ej tillåtet för '%-.32s'@'%-.64s' för kolumn '%-.64s' i tabell '%-.64s'" + ukr "%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'" +ER_ILLEGAL_GRANT_FOR_TABLE 42000 + cze "Neplatn-Bý pøíkaz GRANT/REVOKE. Prosím, pøeètìte si v manuálu, jaká privilegia je mo¾né pou¾ít." + dan "Forkert GRANT/REVOKE kommando. Se i brugervejledningen hvilke privilegier der kan specificeres." + nla "Foutief GRANT/REVOKE commando. Raadpleeg de handleiding welke priveleges gebruikt kunnen worden." + eng "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used" + est "Vigane GRANT/REVOKE käsk. Tutvu kasutajajuhendiga" + fre "Commande GRANT/REVOKE incorrecte. Consultez le manuel." + ger "Unzulässiger GRANT- oder REVOKE-Befehl. Verfügbare Berechtigungen sind im Handbuch aufgeführt" + greek "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used." + hun "Ervenytelen GRANT/REVOKE parancs. Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek" + ita "Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati." + jpn "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + kor "À߸øµÈ GRANT/REVOKE ¸í·É. ¾î¶² ±Ç¸®¿Í ½ÂÀÎÀÌ »ç¿ëµÇ¾î Áú ¼ö ÀÖ´ÂÁö ¸Þ´º¾óÀ» º¸½Ã¿À." + nor "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + norwegian-ny "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + pol "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + por "Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados." + rum "Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite." + rus "îÅ×ÅÒÎÁÑ ËÏÍÁÎÄÁ GRANT ÉÌÉ REVOKE. ïÂÒÁÔÉÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ, ÞÔÏÂÙ ×ÙÑÓÎÉÔØ, ËÁËÉÅ ÐÒÉ×ÉÌÅÇÉÉ ÍÏÖÎÏ ÉÓÐÏÌØÚÏ×ÁÔØ" + serbian "Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priruèniku koje vrednosti mogu biti upotrebljene." + slo "Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used." + spa "Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados." 
+ swe "Felaktigt GRANT-privilegium använt" + ukr "èÉÂÎÁ GRANT/REVOKE ËÏÍÁÎÄÁ; ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÓÔÏÓÏ×ÎÏ ÔÏÇÏ, Ñ˦ ÐÒÁ×Á ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ" +ER_GRANT_WRONG_HOST_OR_USER 42000 + cze "Argument p-Bøíkazu GRANT u¾ivatel nebo stroj je pøíli¹ dlouhý" + dan "Værts- eller brugernavn for langt til GRANT" + nla "De host of gebruiker parameter voor GRANT is te lang" + eng "The host or user argument to GRANT is too long" + est "Masina või kasutaja nimi GRANT lauses on liiga pikk" + fre "L'hôte ou l'utilisateur donné en argument à GRANT est trop long" + ger "Das Host- oder User-Argument für GRANT ist zu lang" + hun "A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban" + ita "L'argomento host o utente per la GRANT e` troppo lungo" + kor "½ÂÀÎ(GRANT)À» À§ÇÏ¿© »ç¿ëÇÑ »ç¿ëÀÚ³ª È£½ºÆ®ÀÇ °ªµéÀÌ ³Ê¹« ±é´Ï´Ù." + por "Argumento de 'host' ou de usuário para o GRANT é longo demais" + rum "Argumentul host-ului sau utilizatorului pentru GRANT e prea lung" + rus "óÌÉÛËÏÍ ÄÌÉÎÎÏÅ ÉÍÑ ÐÏÌØÚÏ×ÁÔÅÌÑ/ÈÏÓÔÁ ÄÌÑ GRANT" + serbian "Argument 'host' ili 'korisnik' prosleðen komandi 'GRANT' je predugaèak" + spa "El argumento para servidor o usuario para GRANT es demasiado grande" + swe "Felaktigt maskinnamn eller användarnamn använt med GRANT" + ukr "áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ" +ER_NO_SUCH_TABLE 42S02 + cze "Tabulka '%-.64s.%s' neexistuje" + dan "Tabellen '%-.64s.%-.64s' eksisterer ikke" + nla "Tabel '%-.64s.%s' bestaat niet" + eng "Table '%-.64s.%-.64s' doesn't exist" + est "Tabelit '%-.64s.%-.64s' ei eksisteeri" + fre "La table '%-.64s.%s' n'existe pas" + ger "Tabelle '%-.64s.%-.64s' existiert nicht" + hun "A '%-.64s.%s' tabla nem letezik" + ita "La tabella '%-.64s.%s' non esiste" + jpn "Table '%-.64s.%s' doesn't exist" + kor "Å×À̺í '%-.64s.%s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù." + nor "Table '%-.64s.%s' doesn't exist" + norwegian-ny "Table '%-.64s.%s' doesn't exist" + pol "Table '%-.64s.%s' doesn't exist" + por "Tabela '%-.64s.%-.64s' não existe" + rum "Tabela '%-.64s.%-.64s' nu exista" + rus "ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ" + serbian "Tabela '%-.64s.%-.64s' ne postoji" + slo "Table '%-.64s.%s' doesn't exist" + spa "Tabla '%-.64s.%s' no existe" + swe "Det finns ingen tabell som heter '%-.64s.%s'" + ukr "ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤" +ER_NONEXISTING_TABLE_GRANT 42000 + cze "Neexistuje odpov-Bídající grant pro u¾ivatele '%-.32s' na stroji '%-.64s' pro tabulku '%-.64s'" + dan "Denne tilladelse eksisterer ikke for brugeren '%-.32s' på vært '%-.64s' for tabellen '%-.64s'" + nla "Deze toegang (GRANT) is niet toegekend voor gebruiker '%-.32s' op host '%-.64s' op tabel '%-.64s'" + eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'" + est "Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'" + fre "Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s' sur la table '%-.64s'" + ger "Keine solche Berechtigung für User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s'" + hun "A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett" + ita "GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'" + kor "»ç¿ëÀÚ '%-.32s'(È£½ºÆ® '%-.64s')´Â Å×À̺í '%-.64s'¸¦ »ç¿ëÇϱâ À§ÇÏ¿© Á¤ÀÇµÈ ½ÂÀÎÀº ¾ø½À´Ï´Ù. 
" + por "Não existe tal permissão (grant) definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'" + rum "Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'" + rus "ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ËÏÍÐØÀÔÅÒÅ '%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'" + serbian "Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'" + spa "No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'" + swe "Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s' för tabell '%-.64s'" + ukr "ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.64s'" +ER_NOT_ALLOWED_COMMAND 42000 + cze "Pou-B¾itý pøíkaz není v této verzi MySQL povolen" + dan "Den brugte kommando er ikke tilladt med denne udgave af MySQL" + nla "Het used commando is niet toegestaan in deze MySQL versie" + eng "The used command is not allowed with this MySQL version" + est "Antud käsk ei ole lubatud käesolevas MySQL versioonis" + fre "Cette commande n'existe pas dans cette version de MySQL" + ger "Der verwendete Befehl ist in dieser MySQL-Version nicht zulässig" + hun "A hasznalt parancs nem engedelyezett ebben a MySQL verzioban" + ita "Il comando utilizzato non e` supportato in questa versione di MySQL" + kor "»ç¿ëµÈ ¸í·ÉÀº ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â ÀÌ¿ëµÇÁö ¾Ê½À´Ï´Ù." + por "Comando usado não é permitido para esta versão do MySQL" + rum "Comanda folosita nu este permisa pentru aceasta versiune de MySQL" + rus "üÔÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÐÕÓËÁÅÔÓÑ × ÄÁÎÎÏÊ ×ÅÒÓÉÉ MySQL" + serbian "Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera" + spa "El comando usado no es permitido con esta versión de MySQL" + swe "Du kan inte använda detta kommando med denna MySQL version" + ukr "÷ÉËÏÒÉÓÔÏ×Õ×ÁÎÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL" +ER_SYNTAX_ERROR 42000 + cze "Va-B¹e syntaxe je nìjaká divná" + dan "Der er en fejl i SQL syntaksen" + nla "Er is iets fout in de gebruikte syntax" + eng "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use" + est "Viga SQL süntaksis" + fre "Erreur de syntaxe" + ger "Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen (diese kann für verschiedene Server-Versionen unterschiedlich sein)" + greek "You have an error in your SQL syntax" + hun "Szintaktikai hiba" + ita "Errore di sintassi nella query SQL" + jpn "Something is wrong in your syntax" + kor "SQL ±¸¹®¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù." + nor "Something is wrong in your syntax" + norwegian-ny "Something is wrong in your syntax" + pol "Something is wrong in your syntax" + por "Você tem um erro de sintaxe no seu SQL" + rum "Aveti o eroare in sintaxa RSQL" + rus "õ ×ÁÓ ÏÛÉÂËÁ × ÚÁÐÒÏÓÅ. 
éÚÕÞÉÔÅ ÄÏËÕÍÅÎÔÁÃÉÀ ÐÏ ÉÓÐÏÌØÚÕÅÍÏÊ ×ÅÒÓÉÉ MySQL ÎÁ ÐÒÅÄÍÅÔ ËÏÒÒÅËÔÎÏÇÏ ÓÉÎÔÁËÓÉÓÁ" + serbian "Imate grešku u vašoj SQL sintaksi" + slo "Something is wrong in your syntax" + spa "Algo está equivocado en su sintax" + swe "Du har något fel i din syntax" + ukr "õ ×ÁÓ ÐÏÍÉÌËÁ Õ ÓÉÎÔÁËÓÉÓ¦ SQL" +ER_DELAYED_CANT_CHANGE_LOCK + cze "Zpo-B¾dìný insert threadu nebyl schopen získat po¾adovaný zámek pro tabulku %-.64s" + dan "Forsinket indsættelse tråden (delayed insert thread) kunne ikke opnå lås på tabellen %-.64s" + nla "'Delayed insert' thread kon de aangevraagde 'lock' niet krijgen voor tabel %-.64s" + eng "Delayed insert thread couldn't get requested lock for table %-.64s" + est "INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.64s" + fre "La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.64s" + ger "Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.64s' nicht erhalten" + hun "A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz" + ita "Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s" + kor "Áö¿¬µÈ insert ¾²·¹µå°¡ Å×À̺í %-.64sÀÇ ¿ä±¸µÈ ¶ôÅ·À» ó¸®ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù." + por "'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.64s'" + rum "Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s" + rus "ðÏÔÏË, ÏÂÓÌÕÖÉ×ÁÀÝÉÊ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert), ÎÅ ÓÍÏÇ ÐÏÌÕÞÉÔØ ÚÁÐÒÁÛÉ×ÁÅÍÕÀ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÔÁÂÌÉÃÕ %-.64s" + serbian "Prolongirani 'INSERT' thread nije mogao da dobije traženo zakljuèavanje tabele '%-.64s'" + spa "Thread de inserción retarda no pudiendo bloquear para la tabla %-.64s" + swe "DELAYED INSERT-tråden kunde inte låsa tabell '%-.64s'" + ukr "ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s" +ER_TOO_MANY_DELAYED_THREADS + cze "P-Bøíli¹ mnoho zpo¾dìných threadù" + dan "For mange slettede tråde (threads) i brug" + nla "Te veel 'delayed' threads in gebruik" + eng "Too many delayed threads in use" + est "Liiga palju DELAYED lõimesid kasutusel" + fre "Trop de tâche 'delayed' en cours" + ger "Zu viele verzögerte (DELAYED) Threads in Verwendung" + hun "Tul sok kesletetett thread (delayed)" + ita "Troppi threads ritardati in uso" + kor "³Ê¹« ¸¹Àº Áö¿¬ ¾²·¹µå¸¦ »ç¿ëÇϰí ÀÖ½À´Ï´Ù." + por "Excesso de 'threads' retardadas (atrasadas) em uso" + rum "Prea multe threaduri aminate care sint in uz" + rus "óÌÉÛËÏÍ ÍÎÏÇÏ ÐÏÔÏËÏ×, ÏÂÓÌÕÖÉ×ÁÀÝÉÈ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert)" + serbian "Previše prolongiranih thread-ova je u upotrebi" + spa "Muchos threads retardados en uso" + swe "Det finns redan 'max_delayed_threads' trådar i använding" + ukr "úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ" +ER_ABORTING_CONNECTION 08S01 + cze "Zru-B¹eno spojení %ld do databáze: '%-.64s' u¾ivatel: '%-.64s' (%s)" + dan "Afbrudt forbindelse %ld til database: '%-.64s' bruger: '%-.64s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.64s' (%s)" + eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)" + est "Ühendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)" + fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.64s' (%s)" + ger "Abbruch der Verbindung %ld zur Datenbank '%-.64s'. 
Benutzer: '%-.64s' (%-.64s)" + hun "Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.64s' (%s)" + ita "Interrotta la connessione %ld al db: '%-.64s' utente: '%-.64s' (%s)" + jpn "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + kor "µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.64s' (%s)" + nor "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + norwegian-ny "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + pol "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + por "Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)" + rum "Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)" + rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' (%-.64s)" + serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)" + slo "Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)" + spa "Conexión abortada %ld para db: '%-.64s' usuario: '%-.64s' (%s)" + swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.64s' (%s)" + ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)" +ER_NET_PACKET_TOO_LARGE 08S01 + cze "Zji-B¹tìn pøíchozí packet del¹í ne¾ 'max_allowed_packet'" + dan "Modtog en datapakke som var større end 'max_allowed_packet'" + nla "Groter pakket ontvangen dan 'max_allowed_packet'" + eng "Got a packet bigger than 'max_allowed_packet' bytes" + est "Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga" + fre "Paquet plus grand que 'max_allowed_packet' reçu" + ger "Empfangenes Paket ist größer als 'max_allowed_packet'" + hun "A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'" + ita "Ricevuto un pacchetto piu` grande di 'max_allowed_packet'" + kor "'max_allowed_packet'º¸´Ù ´õÅ« ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù." + por "Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)" + rum "Un packet mai mare decit 'max_allowed_packet' a fost primit" + rus "ðÏÌÕÞÅÎÎÙÊ ÐÁËÅÔ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'" + serbian "Primio sam mrežni paket veæi od definisane vrednosti 'max_allowed_packet'" + spa "Obtenido un paquete mayor que 'max_allowed_packet'" + swe "Kommunkationspaketet är större än 'max_allowed_packet'" + ukr "ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö max_allowed_packet" +ER_NET_READ_ERROR_FROM_PIPE 08S01 + cze "Zji-B¹tìna chyba pøi ètení z roury spojení" + dan "Fik læsefejl fra forbindelse (connection pipe)" + nla "Kreeg leesfout van de verbindings pipe" + eng "Got a read error from the connection pipe" + est "Viga ühendustoru lugemisel" + fre "Erreur de lecture reçue du pipe de connection" + ger "Lese-Fehler bei einer Kommunikations-Pipe" + hun "Olvasasi hiba a kapcsolat soran" + ita "Rilevato un errore di lettura dalla pipe di connessione" + kor "¿¬°á ÆÄÀÌÇÁ·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù." 
+ por "Obteve um erro de leitura no 'pipe' da conexão" + rum "Eroare la citire din cauza lui 'connection pipe'" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ ÏÔ ÐÏÔÏËÁ ÓÏÅÄÉÎÅÎÉÑ (connection pipe)" + serbian "Greška pri èitanju podataka sa pipe-a" + spa "Obtenido un error de lectura de la conexión pipe" + swe "Fick läsfel från klienten vid läsning från 'PIPE'" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ Ú ËÏÍÕΦËÁæÊÎÏÇÏ ËÁÎÁÌÕ" +ER_NET_FCNTL_ERROR 08S01 + cze "Zji-B¹tìna chyba fcntl()" + dan "Fik fejlmeddelelse fra fcntl()" + nla "Kreeg fout van fcntl()" + eng "Got an error from fcntl()" + est "fcntl() tagastas vea" + fre "Erreur reçue de fcntl() " + ger "fcntl() lieferte einen Fehler" + hun "Hiba a fcntl() fuggvenyben" + ita "Rilevato un errore da fcntl()" + kor "fcntl() ÇÔ¼ö·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù." + por "Obteve um erro em fcntl()" + rum "Eroare obtinuta de la fcntl()" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÏÔ fcntl()" + serbian "Greška pri izvršavanju funkcije fcntl()" + spa "Obtenido un error de fcntl()" + swe "Fick fatalt fel från 'fcntl()'" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËËÕ ×¦Ä fcntl()" +ER_NET_PACKETS_OUT_OF_ORDER 08S01 + cze "P-Bøíchozí packety v chybném poøadí" + dan "Modtog ikke datapakker i korrekt rækkefølge" + nla "Pakketten in verkeerde volgorde ontvangen" + eng "Got packets out of order" + est "Paketid saabusid vales järjekorras" + fre "Paquets reçus dans le désordre" + ger "Pakete nicht in der richtigen Reihenfolge empfangen" + hun "Helytelen sorrendben erkezett adatcsomagok" + ita "Ricevuti pacchetti non in ordine" + kor "¼ø¼°¡ ¸ÂÁö¾Ê´Â ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù." + por "Obteve pacotes fora de ordem" + rum "Packets care nu sint ordonati au fost gasiti" + rus "ðÁËÅÔÙ ÐÏÌÕÞÅÎÙ × ÎÅ×ÅÒÎÏÍ ÐÏÒÑÄËÅ" + serbian "Primio sam mrežne pakete van reda" + spa "Obtenido paquetes desordenados" + swe "Kommunikationspaketen kom i fel ordning" + ukr "ïÔÒÉÍÁÎÏ ÐÁËÅÔÉ Õ ÎÅÎÁÌÅÖÎÏÍÕ ÐÏÒÑÄËÕ" +ER_NET_UNCOMPRESS_ERROR 08S01 + cze "Nemohu rozkomprimovat komunika-Bèní packet" + dan "Kunne ikke dekomprimere kommunikations-pakke (communication packet)" + nla "Communicatiepakket kon niet worden gedecomprimeerd" + eng "Couldn't uncompress communication packet" + est "Viga andmepaketi lahtipakkimisel" + fre "Impossible de décompresser le paquet reçu" + ger "Kommunikationspaket lässt sich nicht entpacken" + hun "A kommunikacios adatcsomagok nem tomorithetok ki" + ita "Impossibile scompattare i pacchetti di comunicazione" + kor "Åë½Å ÆÐŶÀÇ ¾ÐÃàÇØÁ¦¸¦ ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù." + por "Não conseguiu descomprimir pacote de comunicação" + rum "Nu s-a putut decompresa pachetul de comunicatie (communication packet)" + rus "îÅ×ÏÚÍÏÖÎÏ ÒÁÓÐÁËÏ×ÁÔØ ÐÁËÅÔ, ÐÏÌÕÞÅÎÎÙÊ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ" + serbian "Ne mogu da dekompresujem mrežne pakete" + spa "No puedo descomprimir paquetes de comunicación" + swe "Kunde inte packa up kommunikationspaketet" + ukr "îÅ ÍÏÖÕ ÄÅËÏÍÐÒÅÓÕ×ÁÔÉ ËÏÍÕΦËÁæÊÎÉÊ ÐÁËÅÔ" +ER_NET_READ_ERROR 08S01 + cze "Zji-B¹tìna chyba pøi ètení komunikaèního packetu" + dan "Fik fejlmeddelelse ved læsning af kommunikations-pakker (communication packets)" + nla "Fout bij het lezen van communicatiepakketten" + eng "Got an error reading communication packets" + est "Viga andmepaketi lugemisel" + fre "Erreur de lecture des paquets reçus" + ger "Fehler beim Lesen eines Kommunikationspakets" + hun "HIba a kommunikacios adatcsomagok olvasasa soran" + ita "Rilevato un errore ricevendo i pacchetti di comunicazione" + kor "Åë½Å ÆÐŶÀ» Àд Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù." 
+ por "Obteve um erro na leitura de pacotes de comunicação" + rum "Eroare obtinuta citind pachetele de comunicatie (communication packets)" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ " + serbian "Greška pri primanju mrežnih paketa" + spa "Obtenido un error leyendo paquetes de comunicación" + swe "Fick ett fel vid läsning från klienten" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×" +ER_NET_READ_INTERRUPTED 08S01 + cze "Zji-B¹tìn timeout pøi ètení komunikaèního packetu" + dan "Timeout-fejl ved læsning af kommunukations-pakker (communication packets)" + nla "Timeout bij het lezen van communicatiepakketten" + eng "Got timeout reading communication packets" + est "Kontrollaja ületamine andmepakettide lugemisel" + fre "Timeout en lecture des paquets reçus" + ger "Zeitüberschreitung beim Lesen eines Kommunikationspakets" + hun "Idotullepes a kommunikacios adatcsomagok olvasasa soran" + ita "Rilevato un timeout ricevendo i pacchetti di comunicazione" + kor "Åë½Å ÆÐŶÀ» Àд Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù." + por "Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação" + rum "Timeout obtinut citind pachetele de comunicatie (communication packets)" + rus "ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ " + serbian "Vremenski limit za èitanje mrežnih paketa je istekao" + spa "Obtenido timeout leyendo paquetes de comunicación" + swe "Fick 'timeout' vid läsning från klienten" + ukr "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×" +ER_NET_ERROR_ON_WRITE 08S01 + cze "Zji-B¹tìna chyba pøi zápisu komunikaèního packetu" + dan "Fik fejlmeddelelse ved skrivning af kommunukations-pakker (communication packets)" + nla "Fout bij het schrijven van communicatiepakketten" + eng "Got an error writing communication packets" + est "Viga andmepaketi kirjutamisel" + fre "Erreur d'écriture des paquets envoyés" + ger "Fehler beim Schreiben eines Kommunikationspakets" + hun "Hiba a kommunikacios csomagok irasa soran" + ita "Rilevato un errore inviando i pacchetti di comunicazione" + kor "Åë½Å ÆÐŶÀ» ±â·ÏÇÏ´Â Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù." + por "Obteve um erro na escrita de pacotes de comunicação" + rum "Eroare in scrierea pachetelor de comunicatie (communication packets)" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÐÒÉ ÐÅÒÅÄÁÞÅ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ " + serbian "Greška pri slanju mrežnih paketa" + spa "Obtenido un error de escribiendo paquetes de comunicación" + swe "Fick ett fel vid skrivning till klienten" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×" +ER_NET_WRITE_INTERRUPTED 08S01 + cze "Zji-B¹tìn timeout pøi zápisu komunikaèního packetu" + dan "Timeout-fejl ved skrivning af kommunukations-pakker (communication packets)" + nla "Timeout bij het schrijven van communicatiepakketten" + eng "Got timeout writing communication packets" + est "Kontrollaja ületamine andmepakettide kirjutamisel" + fre "Timeout d'écriture des paquets envoyés" + ger "Zeitüberschreitung beim Schreiben eines Kommunikationspakets" + hun "Idotullepes a kommunikacios csomagok irasa soran" + ita "Rilevato un timeout inviando i pacchetti di comunicazione" + kor "Åë½Å ÆÐÆÂÀ» ±â·ÏÇÏ´Â Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù." 
+ por "Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação" + rum "Timeout obtinut scriind pachetele de comunicatie (communication packets)" + rus "ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ × ÐÒÏÃÅÓÓÅ ÐÅÒÅÄÁÞÉ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ " + serbian "Vremenski limit za slanje mrežnih paketa je istekao" + spa "Obtenido timeout escribiendo paquetes de comunicación" + swe "Fick 'timeout' vid skrivning till klienten" + ukr "ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×" +ER_TOO_LONG_STRING 42000 + cze "V-Býsledný øetìzec je del¹í ne¾ 'max_allowed_packet'" + dan "Strengen med resultater er større end 'max_allowed_packet'" + nla "Resultaat string is langer dan 'max_allowed_packet'" + eng "Result string is longer than 'max_allowed_packet' bytes" + est "Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga" + fre "La chaîne résultat est plus grande que 'max_allowed_packet'" + ger "Ergebnis ist länger als 'max_allowed_packet'" + hun "Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'" + ita "La stringa di risposta e` piu` lunga di 'max_allowed_packet'" + por "'String' resultante é mais longa do que 'max_allowed_packet'" + rum "Sirul rezultat este mai lung decit 'max_allowed_packet'" + rus "òÅÚÕÌØÔÉÒÕÀÝÁÑ ÓÔÒÏËÁ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'" + serbian "Rezultujuèi string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'" + spa "La string resultante es mayor que max_allowed_packet" + swe "Resultatsträngen är längre än max_allowed_packet" + ukr "óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö max_allowed_packet" +ER_TABLE_CANT_HANDLE_BLOB 42000 + cze "Typ pou-B¾ité tabulky nepodporuje BLOB/TEXT sloupce" + dan "Denne tabeltype understøtter ikke brug af BLOB og TEXT kolonner" + nla "Het gebruikte tabel type ondersteunt geen BLOB/TEXT kolommen" + eng "The used table type doesn't support BLOB/TEXT columns" + est "Valitud tabelitüüp ei toeta BLOB/TEXT tüüpi välju" + fre "Ce type de table ne supporte pas les colonnes BLOB/TEXT" + ger "Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Spalten" + hun "A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket" + ita "Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT" + por "Tipo de tabela usado não permite colunas BLOB/TEXT" + rum "Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT" + rus "éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÔÉÐÙ BLOB/TEXT" + serbian "Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'" + spa "El tipo de tabla usada no permite soporte para columnas BLOB/TEXT" + swe "Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ BLOB/TEXT ÓÔÏ×Âæ" +ER_TABLE_CANT_HANDLE_AUTO_INCREMENT 42000 + cze "Typ pou-B¾ité tabulky nepodporuje AUTO_INCREMENT sloupce" + dan "Denne tabeltype understøtter ikke brug af AUTO_INCREMENT kolonner" + nla "Het gebruikte tabel type ondersteunt geen AUTO_INCREMENT kolommen" + eng "The used table type doesn't support AUTO_INCREMENT columns" + est "Valitud tabelitüüp ei toeta AUTO_INCREMENT tüüpi välju" + fre "Ce type de table ne supporte pas les colonnes AUTO_INCREMENT" + ger "Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Spalten" + hun "A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket" + ita "Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT" + por "Tipo de tabela usado não permite colunas AUTO_INCREMENT" + rum "Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT" + rus "éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÅ 
ÓÔÏÌÂÃÙ" + serbian "Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'" + spa "El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT" + swe "Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ" +ER_DELAYED_INSERT_TABLE_LOCKED + cze "INSERT DELAYED nen-Bí mo¾no s tabulkou '%-.64s' pou¾ít, proto¾e je zamèená pomocí LOCK TABLES" + dan "INSERT DELAYED kan ikke bruges med tabellen '%-.64s', fordi tabellen er låst med LOCK TABLES" + nla "INSERT DELAYED kan niet worden gebruikt bij table '%-.64s', vanwege een 'lock met LOCK TABLES" + eng "INSERT DELAYED can't be used with table '%-.64s' because it is locked with LOCK TABLES" + est "INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES käsuga" + fre "INSERT DELAYED ne peut être utilisé avec la table '%-.64s', car elle est verrouée avec LOCK TABLES" + ger "INSERT DELAYED kann nicht auf Tabelle '%-.64s' angewendet werden, da diese mit LOCK TABLES gesperrt ist" + greek "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + hun "Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)" + ita "L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'" + jpn "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + kor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + nor "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + norwegian-ny "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + pol "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + por "INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque ela está travada com LOCK TABLES" + rum "INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES" + rus "îÅÌØÚÑ ÉÓÐÏÌØÚÏ×ÁÔØ INSERT DELAYED ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s', ÐÏÔÏÍÕ ÞÔÏ ÏÎÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES" + serbian "Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.64s', zbog toga što je zakljuèana komandom 'LOCK TABLES'" + slo "INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES" + spa "INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES" + swe "INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES" + ukr "INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES" +ER_WRONG_COLUMN_NAME 42000 + cze "Nespr-Bávné jméno sloupce '%-.100s'" + dan "Forkert kolonnenavn '%-.100s'" + nla "Incorrecte kolom naam '%-.100s'" + eng "Incorrect column name '%-.100s'" + est "Vigane tulba nimi '%-.100s'" + fre "Nom de colonne '%-.100s' incorrect" + ger "Falscher Spaltenname '%-.100s'" + hun "Ervenytelen mezonev: '%-.100s'" + ita "Nome colonna '%-.100s' non corretto" + por "Nome de coluna '%-.100s' incorreto" + rum "Nume increct de coloana '%-.100s'" + rus "îÅ×ÅÒÎÏÅ ÉÍÑ ÓÔÏÌÂÃÁ '%-.100s'" + serbian "Pogrešno ime kolone '%-.100s'" + spa "Incorrecto nombre de columna '%-.100s'" + swe "Felaktigt kolumnnamn '%-.100s'" + ukr "îÅצÒÎÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.100s'" +ER_WRONG_KEY_COLUMN 42000 + cze "Handler pou-B¾ité tabulky neumí indexovat sloupce '%-.64s'" + dan "Den brugte tabeltype kan ikke 
indeksere kolonnen '%-.64s'" + nla "De gebruikte tabel 'handler' kan kolom '%-.64s' niet indexeren" + eng "The used storage engine can't index column '%-.64s'" + est "Tabelihandler ei oska indekseerida tulpa '%-.64s'" + fre "Le handler de la table ne peut indexé la colonne '%-.64s'" + ger "Der verwendete Tabellen-Handler kann die Spalte '%-.64s' nicht indizieren" + greek "The used table handler can't index column '%-.64s'" + hun "A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni" + ita "Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'" + jpn "The used table handler can't index column '%-.64s'" + kor "The used table handler can't index column '%-.64s'" + nor "The used table handler can't index column '%-.64s'" + norwegian-ny "The used table handler can't index column '%-.64s'" + pol "The used table handler can't index column '%-.64s'" + por "O manipulador de tabela usado não pode indexar a coluna '%-.64s'" + rum "Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'" + rus "éÓÐÏÌØÚÏ×ÁÎÎÙÊ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÍÏÖÅÔ ÐÒÏÉÎÄÅËÓÉÒÏ×ÁÔØ ÓÔÏÌÂÅà '%-.64s'" + serbian "Handler tabele ne može da indeksira kolonu '%-.64s'" + slo "The used table handler can't index column '%-.64s'" + spa "El manipulador de tabla usado no puede indexar columna '%-.64s'" + swe "Den använda tabelltypen kan inte indexera kolumn '%-.64s'" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ÎÅ ÍÏÖÅ ¦ÎÄÅËÓÕ×ÁÔÉ ÓÔÏ×ÂÅÃØ '%-.64s'" +ER_WRONG_MRG_TABLE + cze "V-B¹echny tabulky v MERGE tabulce nejsou definovány stejnì" + dan "Tabellerne i MERGE er ikke defineret ens" + nla "Niet alle tabellen in de MERGE tabel hebben identieke gedefinities" + eng "All tables in the MERGE table are not identically defined" + est "Kõik tabelid MERGE tabeli määratluses ei ole identsed" + fre "Toutes les tables de la table de type MERGE n'ont pas la même définition" + ger "Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert" + hun "A MERGE tablaban talalhato tablak definicioja nem azonos" + ita "Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica" + jpn "All tables in the MERGE table are not defined identically" + kor "All tables in the MERGE table are not defined identically" + nor "All tables in the MERGE table are not defined identically" + norwegian-ny "All tables in the MERGE table are not defined identically" + pol "All tables in the MERGE table are not defined identically" + por "Todas as tabelas contidas na tabela fundida (MERGE) não estão definidas identicamente" + rum "Toate tabelele din tabela MERGE nu sint definite identic" + rus "îÅ ×ÓÅ ÔÁÂÌÉÃÙ × MERGE ÏÐÒÅÄÅÌÅÎÙ ÏÄÉÎÁËÏ×Ï" + serbian "Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti naèin" + slo "All tables in the MERGE table are not defined identically" + spa "Todas las tablas en la MERGE tabla no estan definidas identicamente" + swe "Tabellerna i MERGE-tabellen är inte identiskt definierade" + ukr "ôÁÂÌÉæ Õ MERGE TABLE ÍÁÀÔØ Ò¦ÚÎÕ ÓÔÒÕËÔÕÒÕ" +ER_DUP_UNIQUE 23000 + cze "Kv-Bùli unique constraintu nemozu zapsat do tabulky '%-.64s'" + dan "Kan ikke skrive til tabellen '%-.64s' fordi det vil bryde CONSTRAINT regler" + nla "Kan niet opslaan naar table '%-.64s' vanwege 'unique' beperking" + eng "Can't write, because of unique constraint, to table '%-.64s'" + est "Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub ühesuse kitsendust" + fre "Écriture impossible à cause d'un index UNIQUE sur la table '%-.64s'" + ger "Schreiben in Tabelle '%-.64s' nicht möglich wegen einer eindeutigen Beschränkung (unique constraint)" + hun 
"A '%-.64s' nem irhato, az egyedi mezok miatt" + ita "Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`" + por "Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'" + rum "Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'" + rus "îÅ×ÏÚÍÏÖÎÏ ÚÁÐÉÓÁÔØ × ÔÁÂÌÉÃÕ '%-.64s' ÉÚ-ÚÁ ÏÇÒÁÎÉÞÅÎÉÊ ÕÎÉËÁÌØÎÏÇÏ ËÌÀÞÁ" + serbian "Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.64s'" + spa "No puedo escribir, debido al único constraint, para tabla '%-.64s'" + swe "Kan inte skriva till tabell '%-.64s'; UNIQUE-test" + ukr "îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÄÏ ÔÁÂÌÉæ '%-.64s', Ú ÐÒÉÞÉÎÉ ×ÉÍÏÇ ÕΦËÁÌØÎÏÓÔ¦" +ER_BLOB_KEY_WITHOUT_LENGTH 42000 + cze "BLOB sloupec '%-.64s' je pou-B¾it ve specifikaci klíèe bez délky" + dan "BLOB kolonnen '%-.64s' brugt i nøglespecifikation uden nøglelængde" + nla "BLOB kolom '%-.64s' gebruikt in zoeksleutel specificatie zonder zoeksleutel lengte" + eng "BLOB/TEXT column '%-.64s' used in key specification without a key length" + est "BLOB-tüüpi tulp '%-.64s' on kasutusel võtmes ilma pikkust määratlemata" + fre "La colonne '%-.64s' de type BLOB est utilisée dans une définition d'index sans longueur d'index" + ger "BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet" + greek "BLOB column '%-.64s' used in key specification without a key length" + hun "BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul" + ita "La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza" + jpn "BLOB column '%-.64s' used in key specification without a key length" + kor "BLOB column '%-.64s' used in key specification without a key length" + nor "BLOB column '%-.64s' used in key specification without a key length" + norwegian-ny "BLOB column '%-.64s' used in key specification without a key length" + pol "BLOB column '%-.64s' used in key specification without a key length" + por "Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave" + rum "Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita" + rus "óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÂÙÌ ÕËÁÚÁÎ × ÏÐÒÅÄÅÌÅÎÉÉ ËÌÀÞÁ ÂÅÚ ÕËÁÚÁÎÉÑ ÄÌÉÎÙ ËÌÀÞÁ" + serbian "BLOB kolona '%-.64s' je upotrebljena u specifikaciji kljuèa bez navoðenja dužine kljuèa" + slo "BLOB column '%-.64s' used in key specification without a key length" + spa "Columna BLOB column '%-.64s' usada en especificación de clave sin tamaño de la clave" + swe "Du har inte angett någon nyckellängd för BLOB '%-.64s'" + ukr "óÔÏ×ÂÅÃØ BLOB '%-.64s' ×ÉËÏÒÉÓÔÁÎÏ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ ÂÅÚ ×ËÁÚÁÎÎÑ ÄÏ×ÖÉÎÉ ËÌÀÞÁ" +ER_PRIMARY_CANT_HAVE_NULL 42000 + cze "V-B¹echny èásti primárního klíèe musejí být NOT NULL; pokud potøebujete NULL, pou¾ijte UNIQUE" + dan "Alle dele af en PRIMARY KEY skal være NOT NULL; Hvis du skal bruge NULL i nøglen, brug UNIQUE istedet" + nla "Alle delen van een PRIMARY KEY moeten NOT NULL zijn; Indien u NULL in een zoeksleutel nodig heeft kunt u UNIQUE gebruiken" + eng "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead" + est "Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi võtit" + fre "Toutes les parties d'un index PRIMARY KEY doivent être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE" + ger "Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. 
Wenn NULL in einem Schlüssel verwendet wird, muss ein UNIQUE-Schlüssel verwendet werden" + hun "Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot" + ita "Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE" + por "Todas as partes de uma chave primária devem ser não-nulas. Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar" + rum "Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb" + rus "÷ÓÅ ÞÁÓÔÉ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ (PRIMARY KEY) ÄÏÌÖÎÙ ÂÙÔØ ÏÐÒÅÄÅÌÅÎÙ ËÁË NOT NULL; åÓÌÉ ×ÁÍ ÎÕÖÎÁ ÐÏÄÄÅÒÖËÁ ×ÅÌÉÞÉÎ NULL × ËÌÀÞÅ, ×ÏÓÐÏÌØÚÕÊÔÅÓØ ÉÎÄÅËÓÏÍ UNIQUE" + serbian "Svi delovi primarnog kljuèa moraju biti razlièiti od NULL; Ako Vam ipak treba NULL vrednost u kljuèu, upotrebite 'UNIQUE'" + spa "Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE" + swe "Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället" + ukr "õÓ¦ ÞÁÓÔÉÎÉ PRIMARY KEY ÐÏ×ÉÎΦ ÂÕÔÉ NOT NULL; ñËÝÏ ×É ÐÏÔÒÅÂÕ¤ÔÅ NULL Õ ËÌÀÞ¦, ÓËÏÒÉÓÔÁÊÔÅÓÑ UNIQUE" +ER_TOO_MANY_ROWS 42000 + cze "V-Býsledek obsahuje více ne¾ jeden øádek" + dan "Resultatet bestod af mere end een række" + nla "Resultaat bevatte meer dan een rij" + eng "Result consisted of more than one row" + est "Tulemis oli rohkem kui üks kirje" + fre "Le résultat contient plus d'un enregistrement" + ger "Ergebnis besteht aus mehr als einer Zeile" + hun "Az eredmeny tobb, mint egy sort tartalmaz" + ita "Il risultato consiste di piu` di una riga" + por "O resultado consistiu em mais do que uma linha" + rum "Resultatul constista din mai multe linii" + rus "÷ ÒÅÚÕÌØÔÁÔÅ ×ÏÚ×ÒÁÝÅÎÁ ÂÏÌÅÅ ÞÅÍ ÏÄÎÁ ÓÔÒÏËÁ" + serbian "Rezultat je saèinjen od više slogova" + spa "Resultado compuesto de mas que una línea" + swe "Resultet bestod av mera än en rad" + ukr "òÅÚÕÌØÔÁÔ ÚÎÁÈÏÄÉÔØÓÑ Õ Â¦ÌØÛÅ Î¦Ö ÏÄÎ¦Ê ÓÔÒÏæ" +ER_REQUIRES_PRIMARY_KEY 42000 + cze "Tento typ tabulky vy-B¾aduje primární klíè" + dan "Denne tabeltype kræver en primærnøgle" + nla "Dit tabel type heeft een primaire zoeksleutel nodig" + eng "This table type requires a primary key" + est "Antud tabelitüüp nõuab primaarset võtit" + fre "Ce type de table nécessite une clé primaire (PRIMARY KEY)" + ger "Dieser Tabellentyp benötigt einen PRIMARY KEY" + hun "Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo" + ita "Questo tipo di tabella richiede una chiave primaria" + por "Este tipo de tabela requer uma chave primária" + rum "Aceast tip de tabela are nevoie de o cheie primara" + rus "üÔÏÔ ÔÉÐ ÔÁÂÌÉÃÙ ÔÒÅÂÕÅÔ ÏÐÒÅÄÅÌÅÎÉÑ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ" + serbian "Ovaj tip tabele zahteva da imate definisan primarni kljuè" + spa "Este tipo de tabla necesita de una primary key" + swe "Denna tabelltyp kräver en PRIMARY KEY" + ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ÐÏÔÒÅÂÕ¤ ÐÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ" +ER_NO_RAID_COMPILED + cze "Tato verze MySQL nen-Bí zkompilována s podporou RAID" + dan "Denne udgave af MySQL er ikke oversat med understøttelse af RAID" + nla "Deze versie van MySQL is niet gecompileerd met RAID ondersteuning" + eng "This version of MySQL is not compiled with RAID support" + est "Antud MySQL versioon on kompileeritud ilma RAID toeta" + fre "Cette version de MySQL n'est pas compilée avec le support RAID" + ger "Diese MySQL-Version ist nicht mit RAID-Unterstützung kompiliert" + hun "Ezen leforditott 
MySQL verzio nem tartalmaz RAID support-ot" + ita "Questa versione di MYSQL non e` compilata con il supporto RAID" + por "Esta versão do MySQL não foi compilada com suporte a RAID" + rum "Aceasta versiune de MySQL, nu a fost compilata cu suport pentru RAID" + rus "üÔÁ ×ÅÒÓÉÑ MySQL ÓËÏÍÐÉÌÉÒÏ×ÁÎÁ ÂÅÚ ÐÏÄÄÅÒÖËÉ RAID" + serbian "Ova verzija MySQL servera nije kompajlirana sa podrškom za RAID ureðaje" + spa "Esta versión de MySQL no es compilada con soporte RAID" + swe "Denna version av MySQL är inte kompilerad med RAID" + ukr "ãÑ ×ÅÒÓ¦Ñ MySQL ÎÅ ÚËÏÍÐ¦ÌØÏ×ÁÎÁ Ú Ð¦ÄÔÒÉÍËÏÀ RAID" +ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE + cze "Update tabulky bez WHERE s kl-Bíèem není v módu bezpeèných update dovoleno" + dan "Du bruger sikker opdaterings modus ('safe update mode') og du forsøgte at opdatere en tabel uden en WHERE klausul, der gør brug af et KEY felt" + nla "U gebruikt 'safe update mode' en u probeerde een tabel te updaten zonder een WHERE met een KEY kolom" + eng "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column" + est "Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita" + fre "Vous êtes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index" + ger "MySQL läuft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel eine KEY-Spalte anzugeben" + hun "On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column" + ita "In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave" + por "Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave" + rus "÷Ù ÒÁÂÏÔÁÅÔÅ × ÒÅÖÉÍÅ ÂÅÚÏÐÁÓÎÙÈ ÏÂÎÏ×ÌÅÎÉÊ (safe update mode) É ÐÏÐÒÏÂÏ×ÁÌÉ ÉÚÍÅÎÉÔØ ÔÁÂÌÉÃÕ ÂÅÚ ÉÓÐÏÌØÚÏ×ÁÎÉÑ ËÌÀÞÅ×ÏÇÏ ÓÔÏÌÂÃÁ × ÞÁÓÔÉ WHERE" + serbian "Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu kljuèa" + spa "Tu estás usando modo de actualización segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna" + swe "Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel" + ukr "÷É Õ ÒÅÖÉͦ ÂÅÚÐÅÞÎÏÇÏ ÏÎÏ×ÌÅÎÎÑ ÔÁ ÎÁÍÁÇÁ¤ÔÅÓØ ÏÎÏ×ÉÔÉ ÔÁÂÌÉÃÀ ÂÅÚ ÏÐÅÒÁÔÏÒÁ WHERE, ÝÏ ×ÉËÏÒÉÓÔÏ×Õ¤ KEY ÓÔÏ×ÂÅÃØ" +ER_KEY_DOES_NOT_EXITS + cze "Kl-Bíè '%-.64s' v tabulce '%-.64s' neexistuje" + dan "Nøglen '%-.64s' eksisterer ikke i tabellen '%-.64s'" + nla "Zoeksleutel '%-.64s' bestaat niet in tabel '%-.64s'" + eng "Key '%-.64s' doesn't exist in table '%-.64s'" + est "Võti '%-.64s' ei eksisteeri tabelis '%-.64s'" + fre "L'index '%-.64s' n'existe pas sur la table '%-.64s'" + ger "Schlüssel '%-.64s' existiert in der Tabelle '%-.64s' nicht" + hun "A '%-.64s' kulcs nem letezik a '%-.64s' tablaban" + ita "La chiave '%-.64s' non esiste nella tabella '%-.64s'" + por "Chave '%-.64s' não existe na tabela '%-.64s'" + rus "ëÌÀÞ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ × ÔÁÂÌÉÃÅ '%-.64s'" + serbian "Kljuè '%-.64s' ne postoji u tabeli '%-.64s'" + spa "Clave '%-.64s' no existe en la tabla '%-.64s'" + swe "Nyckel '%-.64s' finns inte in tabell '%-.64s'" + ukr "ëÌÀÞ '%-.64s' ÎÅ ¦ÓÎÕ¤ × ÔÁÂÌÉæ '%-.64s'" +ER_CHECK_NO_SUCH_TABLE 42000 + cze "Nemohu otev-Bøít tabulku" + dan "Kan ikke åbne tabellen" + nla "Kan tabel niet openen" + eng "Can't open table" + est "Ei suuda avada tabelit" + fre "Impossible d'ouvrir la table" + ger "Kann Tabelle nicht öffnen" + hun "Nem tudom megnyitni a 
tablat" + ita "Impossibile aprire la tabella" + por "Não pode abrir a tabela" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ" + serbian "Ne mogu da otvorim tabelu" + spa "No puedo abrir tabla" + swe "Kan inte öppna tabellen" + ukr "îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÔÁÂÌÉÃÀ" +ER_CHECK_NOT_IMPLEMENTED 42000 + cze "Handler tabulky nepodporuje %s" + dan "Denne tabeltype understøtter ikke %s" + nla "De 'handler' voor de tabel ondersteund geen %s" + eng "The storage engine for the table doesn't support %s" + est "Antud tabelitüüp ei toeta %s käske" + fre "Ce type de table ne supporte pas les %s" + ger "Die Speicher-Engine für diese Tabelle unterstützt kein %s" + greek "The handler for the table doesn't support %s" + hun "A tabla kezeloje (handler) nem tamogatja az %s" + ita "Il gestore per la tabella non supporta il %s" + jpn "The handler for the table doesn't support %s" + kor "The handler for the table doesn't support %s" + nor "The handler for the table doesn't support %s" + norwegian-ny "The handler for the table doesn't support %s" + pol "The handler for the table doesn't support %s" + por "O manipulador de tabela não suporta %s" + rum "The handler for the table doesn't support %s" + rus "ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÏÇÏ: %s" + serbian "Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande" + slo "The handler for the table doesn't support %s" + spa "El manipulador de la tabla no permite soporte para %s" + swe "Tabellhanteraren för denna tabell kan inte göra %s" + ukr "÷ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕÅ %s" +ER_CANT_DO_THIS_DURING_AN_TRANSACTION 25000 + cze "Proveden-Bí tohoto pøíkazu není v transakci dovoleno" + dan "Du må ikke bruge denne kommando i en transaktion" + nla "Het is u niet toegestaan dit commando uit te voeren binnen een transactie" + eng "You are not allowed to execute this command in a transaction" + est "Seda käsku ei saa kasutada transaktsiooni sees" + fre "Vous n'êtes pas autorisé à exécute cette commande dans une transaction" + ger "Sie dürfen diesen Befehl nicht in einer Transaktion ausführen" + hun "Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban" + ita "Non puoi eseguire questo comando in una transazione" + por "Não lhe é permitido executar este comando em uma transação" + rus "÷ÁÍ ÎÅ ÒÁÚÒÅÛÅÎÏ ×ÙÐÏÌÎÑÔØ ÜÔÕ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËÃÉÉ" + serbian "Nije Vam dozvoljeno da izvršite ovu komandu u transakciji" + spa "No tienes el permiso para ejecutar este comando en una transición" + swe "Du får inte utföra detta kommando i en transaktion" + ukr "÷ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÎÕ×ÁÔÉ ÃÀ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËæ§" +ER_ERROR_DURING_COMMIT + cze "Chyba %d p-Bøi COMMIT" + dan "Modtog fejl %d mens kommandoen COMMIT blev udført" + nla "Kreeg fout %d tijdens COMMIT" + eng "Got error %d during COMMIT" + est "Viga %d käsu COMMIT täitmisel" + fre "Erreur %d lors du COMMIT" + ger "Fehler %d beim COMMIT" + hun "%d hiba a COMMIT vegrehajtasa soran" + ita "Rilevato l'errore %d durante il COMMIT" + por "Obteve erro %d durante COMMIT" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ COMMIT" + serbian "Greška %d za vreme izvršavanja komande 'COMMIT'" + spa "Obtenido error %d durante COMMIT" + swe "Fick fel %d vid COMMIT" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ COMMIT" +ER_ERROR_DURING_ROLLBACK + cze "Chyba %d p-Bøi ROLLBACK" + dan "Modtog fejl %d mens kommandoen ROLLBACK blev udført" + nla "Kreeg fout %d tijdens ROLLBACK" + eng "Got error %d during ROLLBACK" + est "Viga %d käsu ROLLBACK täitmisel" + fre "Erreur %d lors du ROLLBACK" + ger "Fehler %d beim ROLLBACK" + hun "%d hiba a ROLLBACK vegrehajtasa soran" + ita 
"Rilevato l'errore %d durante il ROLLBACK" + por "Obteve erro %d durante ROLLBACK" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ ROLLBACK" + serbian "Greška %d za vreme izvršavanja komande 'ROLLBACK'" + spa "Obtenido error %d durante ROLLBACK" + swe "Fick fel %d vid ROLLBACK" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ ROLLBACK" +ER_ERROR_DURING_FLUSH_LOGS + cze "Chyba %d p-Bøi FLUSH_LOGS" + dan "Modtog fejl %d mens kommandoen FLUSH_LOGS blev udført" + nla "Kreeg fout %d tijdens FLUSH_LOGS" + eng "Got error %d during FLUSH_LOGS" + est "Viga %d käsu FLUSH_LOGS täitmisel" + fre "Erreur %d lors du FLUSH_LOGS" + ger "Fehler %d bei FLUSH_LOGS" + hun "%d hiba a FLUSH_LOGS vegrehajtasa soran" + ita "Rilevato l'errore %d durante il FLUSH_LOGS" + por "Obteve erro %d durante FLUSH_LOGS" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ FLUSH_LOGS" + serbian "Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'" + spa "Obtenido error %d durante FLUSH_LOGS" + swe "Fick fel %d vid FLUSH_LOGS" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ FLUSH_LOGS" +ER_ERROR_DURING_CHECKPOINT + cze "Chyba %d p-Bøi CHECKPOINT" + dan "Modtog fejl %d mens kommandoen CHECKPOINT blev udført" + nla "Kreeg fout %d tijdens CHECKPOINT" + eng "Got error %d during CHECKPOINT" + est "Viga %d käsu CHECKPOINT täitmisel" + fre "Erreur %d lors du CHECKPOINT" + ger "Fehler %d bei CHECKPOINT" + hun "%d hiba a CHECKPOINT vegrehajtasa soran" + ita "Rilevato l'errore %d durante il CHECKPOINT" + por "Obteve erro %d durante CHECKPOINT" + rus "ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ CHECKPOINT" + serbian "Greška %d za vreme izvršavanja komande 'CHECKPOINT'" + spa "Obtenido error %d durante CHECKPOINT" + swe "Fick fel %d vid CHECKPOINT" + ukr "ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT" +ER_NEW_ABORTING_CONNECTION 08S01 + cze "Spojen-Bí %ld do databáze: '%-.64s' u¾ivatel: '%-.32s' stroj: `%-.64s' (%-.64s) bylo pøeru¹eno" + dan "Afbrød forbindelsen %ld til databasen '%-.64s' bruger: '%-.32s' vært: `%-.64s' (%-.64s)" + nla "Afgebroken verbinding %ld naar db: '%-.64s' gebruiker: '%-.32s' host: `%-.64s' (%-.64s)" + eng "Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)" + est "Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: `%-.64s' (%-.64s)" + fre "Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: `%-.64s' (%-.64s)" + ger "Verbindungsabbruch %ld zur Datenbank '%-.64s'. 
Benutzer: '%-.32s', Host: `%-.64s' (%-.64s)" + ita "Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)" + por "Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' `%-.64s' ('%-.64s')" + rus "ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ `%-.64s' (%-.64s)" + serbian "Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: `%-.64s' (%-.64s)" + spa "Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: `%-.64s' (%-.64s)" + swe "Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)" + ukr "ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: `%-.64s' (%-.64s)" +ER_DUMP_NOT_IMPLEMENTED + cze "Handler tabulky nepodporuje bin-Bární dump" + dan "Denne tabeltype unserstøtter ikke binært tabeldump" + nla "De 'handler' voor de tabel ondersteund geen binaire tabel dump" + eng "The storage engine for the table does not support binary table dump" + fre "Ce type de table ne supporte pas les copies binaires" + ger "Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump" + ita "Il gestore per la tabella non supporta il dump binario" + jpn "The handler for the table does not support binary table dump" + por "O manipulador de tabela não suporta 'dump' binário de tabela" + rum "The handler for the table does not support binary table dump" + rus "ïÂÒÁÂÏÔÞÉË ÜÔÏÊ ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Ä×ÏÉÞÎÏÇÏ ÓÏÈÒÁÎÅÎÉÑ ÏÂÒÁÚÁ ÔÁÂÌÉÃÙ (dump)" + serbian "Handler tabele ne podržava binarni dump tabele" + spa "El manipulador de tabla no soporta dump para tabla binaria" + swe "Tabellhanteraren klarar inte en binär kopiering av tabellen" + ukr "ãÅÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ¦ÎÁÒÎÕ ÐÅÒÅÄÁÞÕ ÔÁÂÌÉæ" +ER_FLUSH_MASTER_BINLOG_CLOSED + cze "Binlog uzav-Bøen pøi pokusu o FLUSH MASTER" + dan "Binlog blev lukket mens kommandoen FLUSH MASTER blev udført" + nla "Binlog gesloten tijdens FLUSH MASTER poging" + eng "Binlog closed, cannot RESET MASTER" + est "Binlog closed while trying to FLUSH MASTER" + fre "Le 'binlog' a été fermé pendant l'exécution du FLUSH MASTER" + ger "Binlog geschlossen. Kann RESET MASTER nicht ausführen" + greek "Binlog closed while trying to FLUSH MASTER" + hun "Binlog closed while trying to FLUSH MASTER" + ita "Binlog e` stato chiuso durante l'esecuzione del FLUSH MASTER" + jpn "Binlog closed while trying to FLUSH MASTER" + kor "Binlog closed while trying to FLUSH MASTER" + nor "Binlog closed while trying to FLUSH MASTER" + norwegian-ny "Binlog closed while trying to FLUSH MASTER" + pol "Binlog closed while trying to FLUSH MASTER" + por "Binlog fechado. 
Não pode fazer RESET MASTER" + rum "Binlog closed while trying to FLUSH MASTER" + rus "ä×ÏÉÞÎÙÊ ÖÕÒÎÁÌ ÏÂÎÏ×ÌÅÎÉÑ ÚÁËÒÙÔ, ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ RESET MASTER" + serbian "Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET MASTER'" + slo "Binlog closed while trying to FLUSH MASTER" + spa "Binlog cerrado mientras tentaba el FLUSH MASTER" + swe "Binärloggen stängdes medan FLUSH MASTER utfördes" + ukr "òÅÐ̦ËÁæÊÎÉÊ ÌÏÇ ÚÁËÒÉÔÏ, ÎÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ RESET MASTER" +ER_INDEX_REBUILD + cze "P-Bøebudování indexu dumpnuté tabulky '%-.64s' nebylo úspì¹né" + dan "Kunne ikke genopbygge indekset for den dumpede tabel '%-.64s'" + nla "Gefaald tijdens heropbouw index van gedumpte tabel '%-.64s'" + eng "Failed rebuilding the index of dumped table '%-.64s'" + fre "La reconstruction de l'index de la table copiée '%-.64s' a échoué" + ger "Neuerstellung des Indizes der Dump-Tabelle '%-.64s' fehlgeschlagen" + greek "Failed rebuilding the index of dumped table '%-.64s'" + hun "Failed rebuilding the index of dumped table '%-.64s'" + ita "Fallita la ricostruzione dell'indice della tabella copiata '%-.64s'" + por "Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'" + rus "ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.64s'" + serbian "Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela" + spa "Falla reconstruyendo el indice de la tabla dumped '%-.64s'" + ukr "îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.64s'" +ER_MASTER + cze "Chyba masteru: '%-.64s'" + dan "Fejl fra master: '%-.64s'" + nla "Fout van master: '%-.64s'" + eng "Error from master: '%-.64s'" + fre "Erreur reçue du maître: '%-.64s'" + ger "Fehler vom Master: '%-.64s'" + ita "Errore dal master: '%-.64s" + por "Erro no 'master' '%-.64s'" + rus "ïÛÉÂËÁ ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ: '%-.64s'" + serbian "Greška iz glavnog servera '%-.64s' u klasteru" + spa "Error del master: '%-.64s'" + swe "Fick en master: '%-.64s'" + ukr "ðÏÍÉÌËÁ ×¦Ä ÇÏÌÏ×ÎÏÇÏ: '%-.64s'" +ER_MASTER_NET_READ 08S01 + cze "S-Bí»ová chyba pøi ètení z masteru" + dan "Netværksfejl ved læsning fra master" + nla "Net fout tijdens lezen van master" + eng "Net error reading from master" + fre "Erreur de lecture réseau reçue du maître" + ger "Netzfehler beim Lesen vom Master" + ita "Errore di rete durante la ricezione dal master" + por "Erro de rede lendo do 'master'" + rus "÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ" + serbian "Greška u primanju mrežnih paketa sa glavnog servera u klasteru" + spa "Error de red leyendo del master" + swe "Fick nätverksfel vid läsning från master" + ukr "íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÞÉÔÁÎÎÑ ×¦Ä ÇÏÌÏ×ÎÏÇÏ" +ER_MASTER_NET_WRITE 08S01 + cze "S-Bí»ová chyba pøi zápisu na master" + dan "Netværksfejl ved skrivning til master" + nla "Net fout tijdens schrijven naar master" + eng "Net error writing to master" + fre "Erreur d'écriture réseau reçue du maître" + ger "Netzfehler beim Schreiben zum Master" + ita "Errore di rete durante l'invio al master" + por "Erro de rede gravando no 'master'" + rus "÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÚÁÐÉÓÉ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ" + serbian "Greška u slanju mrežnih paketa na glavni server u klasteru" + spa "Error de red escribiendo para el master" + swe "Fick nätverksfel vid skrivning till master" + ukr "íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÚÁÐÉÓÕ ÄÏ ÇÏÌÏ×ÎÏÇÏ" +ER_FT_MATCHING_KEY_NOT_FOUND + cze "-B®ádný sloupec nemá vytvoøen fulltextový index" + dan "Kan ikke finde en FULLTEXT nøgle som svarer til kolonne listen" + nla "Kan geen FULLTEXT index vinden passend bij de kolom lijst" + eng "Can't find FULLTEXT index matching the column list" 
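The five-character codes after the symbolic names are standard SQLSTATE values: class 42 covers syntax errors and access violations (ER_SYNTAX_ERROR, ER_TABLEACCESS_DENIED_ERROR), class 08 covers connection failures (the ER_NET_* and ER_MASTER_NET_* entries), 23000 is a constraint violation (ER_DUP_UNIQUE) and 25000 an invalid transaction state (ER_CANT_DO_THIS_DURING_AN_TRANSACTION); entries declared without a SQLSTATE fall back to the generic "HY000". The sketch below shows how a client built against the MySQL C API could branch on the SQLSTATE class after a failed statement; the connection handle and the retry policy are assumptions for illustration, not part of this change.

    #include <cstdio>
    #include <cstring>
    #include <mysql.h>        // MySQL C API; link with libmysqlclient

    // 'conn' is assumed to be an already-connected handle on which a
    // statement has just failed.
    void report_failure(MYSQL *conn) {
      const char *state = mysql_sqlstate(conn);  // e.g. "42000", "08S01", "HY000"
      if (std::strncmp(state, "08", 2) == 0) {
        // Connection-class errors (ER_NET_*): reconnecting may help.
        std::fprintf(stderr, "lost connection (%s): %s\n",
                     state, mysql_error(conn));
      } else if (std::strncmp(state, "42", 2) == 0) {
        // Syntax or privilege errors: retrying the same statement will not help.
        std::fprintf(stderr, "rejected statement (%s): %s\n",
                     state, mysql_error(conn));
      } else {
        std::fprintf(stderr, "error %u (%s): %s\n",
                     mysql_errno(conn), state, mysql_error(conn));
      }
    }
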
+ est "Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega" + fre "Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes" + ger "Kann keinen FULLTEXT-Index finden, der der Spaltenliste entspricht" + ita "Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne" + por "Não pode encontrar um índice para o texto todo que combine com a lista de colunas" + rus "îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÌÎÏÔÅËÓÔÏ×ÙÊ (FULLTEXT) ÉÎÄÅËÓ, ÓÏÏÔ×ÅÔÓÔ×ÕÀÝÉÊ ÓÐÉÓËÕ ÓÔÏÌÂÃÏ×" + serbian "Ne mogu da pronaðem 'FULLTEXT' indeks koli odgovara listi kolona" + spa "No puedo encontrar índice FULLTEXT correspondiendo a la lista de columnas" + swe "Hittar inte ett FULLTEXT-index i kolumnlistan" + ukr "îÅ ÍÏÖÕ ÚÎÁÊÔÉ FULLTEXT ¦ÎÄÅËÓ, ÝÏ ×¦ÄÐÏצÄÁ¤ ÐÅÒÅ̦ËÕ ÓÔÏ×Âæ×" +ER_LOCK_OR_ACTIVE_TRANSACTION + cze "Nemohu prov-Bést zadaný pøíkaz, proto¾e existují aktivní zamèené tabulky nebo aktivní transakce" + dan "Kan ikke udføre den givne kommando fordi der findes aktive, låste tabeller eller fordi der udføres en transaktion" + nla "Kan het gegeven commando niet uitvoeren, want u heeft actieve gelockte tabellen of een actieve transactie" + eng "Can't execute the given command because you have active locked tables or an active transaction" + est "Ei suuda täita antud käsku kuna on aktiivseid lukke või käimasolev transaktsioon" + fre "Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active" + ger "Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen" + ita "Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto" + por "Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa" + rus "îÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÕËÁÚÁÎÎÕÀ ËÏÍÁÎÄÕ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÐÒÉÓÕÔÓÔ×ÕÀÔ ÁËÔÉ×ÎÏ ÚÁÂÌÏËÉÒÏ×ÁÎÎÙÅ ÔÁÂÌÉÃÁ ÉÌÉ ÏÔËÒÙÔÁÑ ÔÒÁÎÚÁËÃÉÑ" + serbian "Ne mogu da izvršim datu komandu zbog toga što su tabele zakljuèane ili je transakcija u toku" + spa "No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transición activa" + swe "Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion" + ukr "îÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ ÐÏÄÁÎÕ ËÏÍÁÎÄÕ ÔÏÍÕ, ÝÏ ÔÁÂÌÉÃÑ ÚÁÂÌÏËÏ×ÁÎÁ ÁÂÏ ×ÉËÏÎÕ¤ÔØÓÑ ÔÒÁÎÚÁËæÑ" +ER_UNKNOWN_SYSTEM_VARIABLE + cze "Nezn-Bámá systémová promìnná '%-.64s'" + dan "Ukendt systemvariabel '%-.64s'" + nla "Onbekende systeem variabele '%-.64s'" + eng "Unknown system variable '%-.64s'" + est "Tundmatu süsteemne muutuja '%-.64s'" + fre "Variable système '%-.64s' inconnue" + ger "Unbekannte Systemvariable '%-.64s'" + ita "Variabile di sistema '%-.64s' sconosciuta" + por "Variável de sistema '%-.64s' desconhecida" + rus "îÅÉÚ×ÅÓÔÎÁÑ ÓÉÓÔÅÍÎÁÑ ÐÅÒÅÍÅÎÎÁÑ '%-.64s'" + serbian "Nepoznata sistemska promenljiva '%-.64s'" + spa "Desconocida variable de sistema '%-.64s'" + swe "Okänd systemvariabel: '%-.64s'" + ukr "îÅצÄÏÍÁ ÓÉÓÔÅÍÎÁ ÚͦÎÎÁ '%-.64s'" +ER_CRASHED_ON_USAGE + cze "Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a mìla by být opravena" + dan "Tabellen '%-.64s' er markeret med fejl og bør repareres" + nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en dient te worden gerepareerd" + eng "Table '%-.64s' is marked as crashed and should be repaired" + est "Tabel '%-.64s' on märgitud vigaseks ja tuleb parandada" + fre "La table '%-.64s' est marquée 'crashed' et devrait être réparée" + ger "Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden" + ita "La tabella '%-.64s' e` segnalata come corrotta e deve essere 
riparata" + por "Tabela '%-.64s' está marcada como danificada e deve ser reparada" + rus "ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÄÏÌÖÎÁ ÐÒÏÊÔÉ ÐÒÏ×ÅÒËÕ É ÒÅÍÏÎÔ" + serbian "Tabela '%-.64s' je markirana kao ošteæena i trebala bi biti popravljena" + spa "Tabla '%-.64s' está marcada como crashed y debe ser reparada" + swe "Tabell '%-.64s' är trasig och bör repareras med REPAIR TABLE" + ukr "ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ §§ ÐÏÔÒ¦ÂÎÏ ×¦ÄÎÏ×ÉÔÉ" +ER_CRASHED_ON_REPAIR + cze "Tabulka '%-.64s' je ozna-Bèena jako poru¹ená a poslední (automatická?) oprava se nezdaøila" + dan "Tabellen '%-.64s' er markeret med fejl og sidste (automatiske?) REPAIR fejlede" + nla "Tabel '%-.64s' staat als gecrashed gemarkeerd en de laatste (automatische?) reparatie poging mislukte" + eng "Table '%-.64s' is marked as crashed and last (automatic?) repair failed" + est "Tabel '%-.64s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus" + fre "La table '%-.64s' est marquée 'crashed' et le dernier 'repair' a échoué" + ger "Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl" + ita "La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita" + por "Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou" + rus "ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÐÏÓÌÅÄÎÉÊ (Á×ÔÏÍÁÔÉÞÅÓËÉÊ?) ÒÅÍÏÎÔ ÎÅ ÂÙÌ ÕÓÐÅÛÎÙÍ" + serbian "Tabela '%-.64s' je markirana kao ošteæena, a zadnja (automatska?) popravka je bila neuspela" + spa "Tabla '%-.64s' está marcada como crashed y la última reparación (automactica?) falló" + swe "Tabell '%-.64s' är trasig och senast (automatiska?) reparation misslyckades" + ukr "ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ ÏÓÔÁÎΤ (Á×ÔÏÍÁÔÉÞÎÅ?) צÄÎÏ×ÌÅÎÎÑ ÎÅ ×ÄÁÌÏÓÑ" +ER_WARNING_NOT_COMPLETE_ROLLBACK + dan "Advarsel: Visse data i tabeller der ikke understøtter transaktioner kunne ikke tilbagestilles" + nla "Waarschuwing: Roll back mislukt voor sommige buiten transacties gewijzigde tabellen" + eng "Some non-transactional changed tables couldn't be rolled back" + est "Hoiatus: mõnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida" + fre "Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées" + ger "Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden" + ita "Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)" + por "Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)" + rus "÷ÎÉÍÁÎÉÅ: ÐÏ ÎÅËÏÔÏÒÙÍ ÉÚÍÅÎÅÎÎÙÍ ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍ ÔÁÂÌÉÃÁÍ ÎÅ×ÏÚÍÏÖÎÏ ÂÕÄÅÔ ÐÒÏÉÚ×ÅÓÔÉ ÏÔËÁÔ ÔÒÁÎÚÁËÃÉÉ" + serbian "Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'" + spa "Aviso: Algunas tablas no transancionales no pueden tener rolled back" + swe "Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK" + ukr "úÁÓÔÅÒÅÖÅÎÎÑ: äÅÑ˦ ÎÅÔÒÁÎÚÁËæÊΦ ÚͦÎÉ ÔÁÂÌÉÃØ ÎÅ ÍÏÖÎÁ ÂÕÄÅ ÐÏ×ÅÒÎÕÔÉ" +ER_TRANS_CACHE_FULL + dan "Fler-udtryks transaktion krævede mere plads en 'max_binlog_cache_size' bytes. Forhøj værdien af denne variabel og prøv igen" + nla "Multi-statement transactie vereist meer dan 'max_binlog_cache_size' bytes opslag. 
Verhoog deze mysqld variabele en probeer opnieuw" + eng "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again" + est "Mitme lausendiga transaktsioon nõudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. Suurenda muutuja väärtust ja proovi uuesti" + fre "Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez" + ger "Transaktionen, die aus mehreren Befehlen bestehen, benötigen mehr als 'max_binlog_cache_size' Bytes an Speicher. Diese mysqld-Variable bitte vergrössern und erneut versuchen" + ita "La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare" + por "Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente" + rus "ôÒÁÎÚÁËÃÉÉ, ×ËÌÀÞÁÀÝÅÊ ÂÏÌØÛÏÅ ËÏÌÉÞÅÓÔ×Ï ËÏÍÁÎÄ, ÐÏÔÒÅÂÏ×ÁÌÏÓØ ÂÏÌÅÅ ÞÅÍ 'max_binlog_cache_size' ÂÁÊÔ. õ×ÅÌÉÞØÔÅ ÜÔÕ ÐÅÒÅÍÅÎÎÕÀ ÓÅÒ×ÅÒÁ mysqld É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ" + spa "Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo" + swe "Transaktionen krävde mera än 'max_binlog_cache_size' minne. Öka denna mysqld-variabel och försök på nytt" + ukr "ôÒÁÎÚÁËÃ¦Ñ Ú ÂÁÇÁÔØÍÁ ×ÉÒÁÚÁÍÉ ×ÉÍÁÇÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_binlog_cache_size' ÂÁÊÔ¦× ÄÌÑ ÚÂÅÒ¦ÇÁÎÎÑ. úÂ¦ÌØÛÔÅ ÃÀ ÚͦÎÎÕ mysqld ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ" +ER_SLAVE_MUST_STOP + dan "Denne handling kunne ikke udføres med kørende slave, brug først kommandoen STOP SLAVE" + nla "Deze operatie kan niet worden uitgevoerd met een actieve slave, doe eerst STOP SLAVE" + eng "This operation cannot be performed with a running slave; run STOP SLAVE first" + fre "Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord" + ger "Diese Operation kann nicht bei einem aktiven Slave durchgeführt werden. Bitte zuerst STOP SLAVE ausführen" + ita "Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE" + por "Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro" + rus "üÔÕ ÏÐÅÒÁÃÉÀ ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÐÒÉ ÒÁÂÏÔÁÀÝÅÍ ÐÏÔÏËÅ ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ STOP SLAVE" + serbian "Ova operacija ne može biti izvršena dok je aktivan podreðeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podreðeni server." + spa "Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE" + swe "Denna operation kan inte göras under replikering; Gör STOP SLAVE först" + ukr "ïÐÅÒÁÃ¦Ñ ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÎÁÎÁ Ú ÚÁÐÕÝÅÎÉÍ Ð¦ÄÌÅÇÌÉÍ, ÓÐÏÞÁÔËÕ ×ÉËÏÎÁÊÔÅ STOP SLAVE" +ER_SLAVE_NOT_RUNNING + dan "Denne handling kræver en kørende slave. Konfigurer en slave og brug kommandoen START SLAVE" + nla "Deze operatie vereist een actieve slave, configureer slave en doe dan START SLAVE" + eng "This operation requires a running slave; configure slave and do START SLAVE" + fre "Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE" + ger "Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren" + ita "Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE" + por "Esta operação requer um 'slave' em execução. 
Configure o 'slave' e execute START SLAVE" + rus "äÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ ÔÒÅÂÕÅÔÓÑ ÒÁÂÏÔÁÀÝÉÊ ÐÏÄÞÉÎÅÎÎÙÊ ÓÅÒ×ÅÒ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ START SLAVE" + serbian "Ova operacija zahteva da je aktivan podreðeni server. Konfigurišite prvo podreðeni server i onda izvršite komandu 'START SLAVE'" + spa "Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE" + swe "Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE" + ukr "ïÐÅÒÁÃ¦Ñ ×ÉÍÁÇÁ¤ ÚÁÐÕÝÅÎÏÇÏ Ð¦ÄÌÅÇÌÏÇÏ, ÚËÏÎÆ¦ÇÕÒÕÊÔŠЦÄÌÅÇÌÏÇÏ ÔÁ ×ÉËÏÎÁÊÔÅ START SLAVE" +ER_BAD_SLAVE + dan "Denne server er ikke konfigureret som slave. Ret in config-filen eller brug kommandoen CHANGE MASTER TO" + nla "De server is niet geconfigureerd als slave, fix in configuratie bestand of met CHANGE MASTER TO" + eng "The server is not configured as slave; fix in config file or with CHANGE MASTER TO" + fre "Le server n'est pas configuré comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO" + ger "Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben" + ita "Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO" + por "O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO" + rus "üÔÏÔ ÓÅÒ×ÅÒ ÎÅ ÎÁÓÔÒÏÅÎ ËÁË ÐÏÄÞÉÎÅÎÎÙÊ. ÷ÎÅÓÉÔÅ ÉÓÐÒÁ×ÌÅÎÉÑ × ËÏÎÆÉÇÕÒÁÃÉÏÎÎÏÍ ÆÁÊÌÅ ÉÌÉ Ó ÐÏÍÏÝØÀ CHANGE MASTER TO" + serbian "Server nije konfigurisan kao podreðeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'" + spa "El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO" + swe "Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO" + ukr "óÅÒ×ÅÒ ÎÅ ÚËÏÎÆ¦ÇÕÒÏ×ÁÎÏ ÑË Ð¦ÄÌÅÇÌÉÊ, ×ÉÐÒÁ×ÔÅ ÃÅ Õ ÆÁÊ̦ ËÏÎÆ¦ÇÕÒÁæ§ ÁÂÏ Ú CHANGE MASTER TO" +ER_MASTER_INFO + eng "Could not initialize master info structure; more error messages can be found in the MySQL error log" + fre "Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL" + serbian "Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'" + swe "Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil för mera information" +ER_SLAVE_THREAD + dan "Kunne ikke danne en slave-tråd; check systemressourcerne" + nla "Kon slave thread niet aanmaken, controleer systeem resources" + eng "Could not create slave thread; check system resources" + fre "Impossible de créer une tâche esclave, vérifiez les ressources système" + ger "Konnte keinen Slave-Thread starten. Bitte System-Ressourcen überprüfen" + ita "Impossibile creare il thread 'slave', controllare le risorse di sistema" + por "Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema" + rus "îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÐÏÔÏË ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. 
ðÒÏ×ÅÒØÔÅ ÓÉÓÔÅÍÎÙÅ ÒÅÓÕÒÓÙ" + slo "Could not create slave thread, check system resources" + serbian "Nisam mogao da startujem thread za podreðeni server, proverite sistemske resurse" + spa "No puedo crear el thread esclavo, verifique recursos del sistema" + swe "Kunde inte starta en tråd för replikering" + ukr "îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ Ð¦ÄÌÅÇÌÕ Ç¦ÌËÕ, ÐÅÒÅצÒÔÅ ÓÉÓÔÅÍΦ ÒÅÓÕÒÓÉ" +ER_TOO_MANY_USER_CONNECTIONS 42000 + dan "Brugeren %-.64s har allerede mere end 'max_user_connections' aktive forbindelser" + nla "Gebruiker %-.64s heeft reeds meer dan 'max_user_connections' actieve verbindingen" + eng "User %-.64s has already more than 'max_user_connections' active connections" + est "Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga" + fre "L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connections actives" + ger "Benutzer '%-.64s' hat mehr als max_user_connections aktive Verbindungen" + ita "L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive" + por "Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas" + rus "õ ÐÏÌØÚÏ×ÁÔÅÌÑ %-.64s ÕÖÅ ÂÏÌØÛÅ ÞÅÍ 'max_user_connections' ÁËÔÉ×ÎÙÈ ÓÏÅÄÉÎÅÎÉÊ" + serbian "Korisnik %-.64s veæ ima više aktivnih konekcija nego što je to odreðeno 'max_user_connections' promenljivom" + spa "Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas" + swe "Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar" + ukr "ëÏÒÉÓÔÕ×ÁÞ %-.64s ×ÖÅ ÍÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_user_connections' ÁËÔÉ×ÎÉÈ Ú'¤ÄÎÁÎØ" +ER_SET_CONSTANTS_ONLY + dan "Du må kun bruge konstantudtryk med SET" + nla "U mag alleen constante expressies gebruiken bij SET" + eng "You may only use constant expressions with SET" + est "Ainult konstantsed suurused on lubatud SET klauslis" + fre "Seules les expressions constantes sont autorisées avec SET" + ger "Bei SET dürfen nur konstante Ausdrücke verwendet werden" + ita "Si possono usare solo espressioni costanti con SET" + por "Você pode usar apenas expressões constantes com SET" + rus "÷Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ × SET ÔÏÌØËÏ ËÏÎÓÔÁÎÔÎÙÅ ×ÙÒÁÖÅÎÉÑ" + serbian "Možete upotrebiti samo konstantan iskaz sa komandom 'SET'" + spa "Tu solo debes usar expresiones constantes con SET" + swe "Man kan endast använda konstantuttryck med SET" + ukr "íÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ ×ÉÒÁÚÉ Ú¦ ÓÔÁÌÉÍÉ Õ SET" +ER_LOCK_WAIT_TIMEOUT + dan "Lock wait timeout overskredet" + nla "Lock wacht tijd overschreden" + eng "Lock wait timeout exceeded; try restarting transaction" + est "Kontrollaeg ületatud luku järel ootamisel; Proovi transaktsiooni otsast alata" + fre "Timeout sur l'obtention du verrou" + ger "Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten" + ita "E' scaduto il timeout per l'attesa del lock" + por "Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação." 
+ rus "ôÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÂÌÏËÉÒÏ×ËÉ ÉÓÔÅË; ÐÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ" + serbian "Vremenski limit za zakljuèavanje tabele je istekao; Probajte da ponovo startujete transakciju" + spa "Tiempo de bloqueo de espera excedido" + swe "Fick inte ett lås i tid ; Försök att starta om transaktionen" + ukr "úÁÔÒÉÍËÕ ÏÞ¦ËÕ×ÁÎÎÑ ÂÌÏËÕ×ÁÎÎÑ ×ÉÞÅÒÐÁÎÏ" +ER_LOCK_TABLE_FULL + dan "Det totale antal låse overstiger størrelsen på låse-tabellen" + nla "Het totale aantal locks overschrijdt de lock tabel grootte" + eng "The total number of locks exceeds the lock table size" + est "Lukkude koguarv ületab lukutabeli suuruse" + fre "Le nombre total de verrou dépasse la taille de la table des verrous" + ger "Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle" + ita "Il numero totale di lock e' maggiore della grandezza della tabella di lock" + por "O número total de travamentos excede o tamanho da tabela de travamentos" + rus "ïÂÝÅÅ ËÏÌÉÞÅÓÔ×Ï ÂÌÏËÉÒÏ×ÏË ÐÒÅ×ÙÓÉÌÏ ÒÁÚÍÅÒÙ ÔÁÂÌÉÃÙ ÂÌÏËÉÒÏ×ÏË" + serbian "Broj totalnih zakljuèavanja tabele premašuje velièinu tabele zakljuèavanja" + spa "El número total de bloqueos excede el tamaño de bloqueo de la tabla" + swe "Antal lås överskrider antalet reserverade lås" + ukr "úÁÇÁÌØÎÁ Ë¦ÌØË¦ÓÔØ ÂÌÏËÕ×ÁÎØ ÐÅÒÅ×ÉÝÉÌÁ ÒÏÚÍ¦Ò ÂÌÏËÕ×ÁÎØ ÄÌÑ ÔÁÂÌÉæ" +ER_READ_ONLY_TRANSACTION 25000 + dan "Update lås kan ikke opnås under en READ UNCOMMITTED transaktion" + nla "Update locks kunnen niet worden verkregen tijdens een READ UNCOMMITTED transactie" + eng "Update locks cannot be acquired during a READ UNCOMMITTED transaction" + est "Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni käigus" + fre "Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED" + ger "Während einer READ UNCOMMITED-Transaktion können keine UPDATE-Sperren angefordert werden" + ita "I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'" + por "Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED" + rus "âÌÏËÉÒÏ×ËÉ ÏÂÎÏ×ÌÅÎÉÊ ÎÅÌØÚÑ ÐÏÌÕÞÉÔØ × ÐÒÏÃÅÓÓÅ ÞÔÅÎÉÑ ÎÅ ÐÒÉÎÑÔÏÊ (× ÒÅÖÉÍÅ READ UNCOMMITTED) ÔÒÁÎÚÁËÃÉÉ" + serbian "Zakljuèavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija" + spa "Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED" + swe "Updateringslås kan inte göras när man använder READ UNCOMMITTED" + ukr "ïÎÏ×ÉÔÉ ÂÌÏËÕ×ÁÎÎÑ ÎÅ ÍÏÖÌÉ×Ï ÎÁ ÐÒÏÔÑÚ¦ ÔÒÁÎÚÁËæ§ READ UNCOMMITTED" +ER_DROP_DB_WITH_READ_LOCK + dan "DROP DATABASE er ikke tilladt mens en tråd holder på globalt read lock" + nla "DROP DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit" + eng "DROP DATABASE not allowed while thread is holding global read lock" + est "DROP DATABASE ei ole lubatud kui lõim omab globaalset READ lukku" + fre "DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture" + ger "DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält" + ita "DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura" + por "DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura" + rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ DROP DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ" + serbian "Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka" + spa "DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global" + swe "DROP DATABASE är inte tillåtet när man har 
ett globalt läslås" + ukr "DROP DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ" +ER_CREATE_DB_WITH_READ_LOCK + dan "CREATE DATABASE er ikke tilladt mens en tråd holder på globalt read lock" + nla "CREATE DATABASE niet toegestaan terwijl thread een globale 'read lock' bezit" + eng "CREATE DATABASE not allowed while thread is holding global read lock" + est "CREATE DATABASE ei ole lubatud kui lõim omab globaalset READ lukku" + fre "CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture" + ger "CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält" + ita "CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura" + por "CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura" + rus "îÅ ÄÏÐÕÓËÁÅÔÓÑ CREATE DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ" + serbian "Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zakljuèava èitanje podataka" + spa "CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global" + swe "CREATE DATABASE är inte tillåtet när man har ett globalt läslås" + ukr "CREATE DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ" +ER_WRONG_ARGUMENTS + nla "Foutieve parameters voor %s" + eng "Incorrect arguments to %s" + est "Vigased parameetrid %s-le" + fre "Mauvais arguments à %s" + ger "Falsche Argumente für %s" + ita "Argomenti errati a %s" + por "Argumentos errados para %s" + rus "îÅ×ÅÒÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ %s" + serbian "Pogrešni argumenti prosleðeni na %s" + spa "Argumentos errados para %s" + swe "Felaktiga argument till %s" + ukr "èÉÂÎÉÊ ÁÒÇÕÍÅÎÔ ÄÌÑ %s" +ER_NO_PERMISSION_TO_CREATE_USER 42000 + nla "'%-.32s'@'%-.64s' mag geen nieuwe gebruikers creeren" + eng "'%-.32s'@'%-.64s' is not allowed to create new users" + est "Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid" + fre "'%-.32s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs" + ger "'%-.32s'@'%-.64s' is nicht berechtigt, neue Benutzer hinzuzufügen" + ita "A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti" + por "Não é permitido a '%-.32s'@'%-.64s' criar novos usuários" + rus "'%-.32s'@'%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ" + serbian "Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike" + spa "'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios" + swe "'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare" + ukr "ëÏÒÉÓÔÕ×ÁÞÕ '%-.32s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×" +ER_UNION_TABLES_IN_DIFFERENT_DIR + nla "Incorrecte tabel definitie; alle MERGE tabellen moeten tot dezelfde database behoren" + eng "Incorrect table definition; all MERGE tables must be in the same database" + est "Vigane tabelimääratlus; kõik MERGE tabeli liikmed peavad asuma samas andmebaasis" + fre "Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée" + ger "Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden" + ita "Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database" + por "Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados." 
+ rus "îÅ×ÅÒÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ; ÷ÓÅ ÔÁÂÌÉÃÙ × MERGE ÄÏÌÖÎÙ ÐÒÉÎÁÄÌÅÖÁÔØ ÏÄÎÏÊ É ÔÏÊ ÖÅ ÂÁÚÅ ÄÁÎÎÙÈ" + serbian "Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka" + spa "Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos" + swe "Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas" +ER_LOCK_DEADLOCK 40001 + nla "Deadlock gevonden tijdens lock-aanvraag poging; Probeer herstart van de transactie" + eng "Deadlock found when trying to get lock; try restarting transaction" + est "Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast" + fre "Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction" + ger "Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. Versuchen Sie, die Transaktion erneut zu starten" + ita "Trovato deadlock durante il lock; Provare a far ripartire la transazione" + por "Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação." + rus "÷ÏÚÎÉËÌÁ ÔÕÐÉËÏ×ÁÑ ÓÉÔÕÁÃÉÑ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÂÌÏËÉÒÏ×ËÉ; ðÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ" + serbian "Unakrsno zakljuèavanje pronaðeno kada sam pokušao da dobijem pravo na zakljuèavanje; Probajte da restartujete transakciju" + spa "Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición" + swe "Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen" +ER_TABLE_CANT_HANDLE_FT + nla "Het gebruikte tabel type ondersteund geen FULLTEXT indexen" + eng "The used table type doesn't support FULLTEXT indexes" + est "Antud tabelitüüp ei toeta FULLTEXT indekseid" + fre "Le type de table utilisé ne supporte pas les index FULLTEXT" + ger "Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes" + ita "La tabella usata non supporta gli indici FULLTEXT" + por "O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)" + rus "éÓÐÏÌØÚÕÅÍÙÊ ÔÉÐ ÔÁÂÌÉà ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÐÏÌÎÏÔÅËÓÔÏ×ÙÈ ÉÎÄÅËÓÏ×" + serbian "Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse" + spa "El tipo de tabla usada no soporta índices FULLTEXT" + swe "Tabelltypen har inte hantering av FULLTEXT-index" + ukr "÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ FULLTEXT ¦ÎÄÅËÓ¦×" +ER_CANNOT_ADD_FOREIGN + nla "Kan foreign key beperking niet toevoegen" + eng "Cannot add foreign key constraint" + fre "Impossible d'ajouter des contraintes d'index externe" + ger "Fremdschlüssel-Beschränkung konnte nicht hinzugefügt werden" + ita "Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)" + por "Não pode acrescentar uma restrição de chave estrangeira" + rus "îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÏÇÒÁÎÉÞÅÎÉÑ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ" + serbian "Ne mogu da dodam proveru spoljnog kljuèa" + spa "No puede adicionar clave extranjera constraint" + swe "Kan inte lägga till 'FOREIGN KEY constraint'" +ER_NO_REFERENCED_ROW 23000 + nla "Kan onderliggende rij niet toevoegen: foreign key beperking gefaald" + eng "Cannot add or update a child row: a foreign key constraint fails" + fre "Impossible d'ajouter un enregistrement fils : une constrainte externe l'empèche" + ger "Hinzufügen eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl" + greek "Cannot add a child row: a foreign key constraint fails" + hun "Cannot add a child row: a foreign key constraint fails" + ita "Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto" + norwegian-ny "Cannot add a child row: a 
foreign key constraint fails" + por "Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou" + rus "îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÄÏÞÅÒÎÀÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ" + spa "No puede adicionar una línea hijo: falla de clave extranjera constraint" + swe "FOREIGN KEY-konflikt: Kan inte skriva barn" +ER_ROW_IS_REFERENCED 23000 + eng "Cannot delete or update a parent row: a foreign key constraint fails" + fre "Impossible de supprimer un enregistrement père : une constrainte externe l'empèche" + ger "Löschen eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl" + greek "Cannot delete a parent row: a foreign key constraint fails" + hun "Cannot delete a parent row: a foreign key constraint fails" + ita "Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto" + por "Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou" + rus "îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÒÏÄÉÔÅÌØÓËÕÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ" + serbian "Ne mogu da izbrišem roditeljski slog: provera spoljnog kljuèa je neuspela" + spa "No puede deletar una línea padre: falla de clave extranjera constraint" + swe "FOREIGN KEY-konflikt: Kan inte radera fader" +ER_CONNECT_TO_MASTER 08S01 + nla "Fout bij opbouwen verbinding naar master: %-.128s" + eng "Error connecting to master: %-.128s" + ger "Fehler bei der Verbindung zum Master: %-.128s" + ita "Errore durante la connessione al master: %-.128s" + por "Erro conectando com o master: %-.128s" + rus "ïÛÉÂËÁ ÓÏÅÄÉÎÅÎÉÑ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ: %-.128s" + spa "Error de coneccion a master: %-.128s" + swe "Fick fel vid anslutning till master: %-.128s" +ER_QUERY_ON_MASTER + nla "Fout bij uitvoeren query op master: %-.128s" + eng "Error running query on master: %-.128s" + ger "Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s" + ita "Errore eseguendo una query sul master: %-.128s" + por "Erro rodando consulta no master: %-.128s" + rus "ïÛÉÂËÁ ×ÙÐÏÌÎÅÎÉÑ ÚÁÐÒÏÓÁ ÎÁ ÇÏÌÏ×ÎÏÍ ÓÅÒ×ÅÒÅ: %-.128s" + spa "Error executando el query en master: %-.128s" + swe "Fick fel vid utförande av command på mastern: %-.128s" +ER_ERROR_WHEN_EXECUTING_COMMAND + nla "Fout tijdens uitvoeren van commando %s: %-.128s" + eng "Error when executing command %s: %-.128s" + est "Viga käsu %s täitmisel: %-.128s" + ger "Fehler beim Ausführen des Befehls %s: %-.128s" + ita "Errore durante l'esecuzione del comando %s: %-.128s" + por "Erro quando executando comando %s: %-.128s" + rus "ïÛÉÂËÁ ÐÒÉ ×ÙÐÏÌÎÅÎÉÉ ËÏÍÁÎÄÙ %s: %-.128s" + serbian "Greška pri izvršavanju komande %s: %-.128s" + spa "Error de %s: %-.128s" + swe "Fick fel vid utförande av %s: %-.128s" +ER_WRONG_USAGE + nla "Foutief gebruik van %s en %s" + eng "Incorrect usage of %s and %s" + est "Vigane %s ja %s kasutus" + ger "Falsche Verwendung von %s und %s" + ita "Uso errato di %s e %s" + por "Uso errado de %s e %s" + rus "îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ %s É %s" + serbian "Pogrešna upotreba %s i %s" + spa "Equivocado uso de %s y %s" + swe "Felaktig använding av %s and %s" + ukr "Wrong usage of %s and %s" +ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT 21000 + nla "De gebruikte SELECT commando's hebben een verschillend aantal kolommen" + eng "The used SELECT statements have a different number of columns" + est "Tulpade arv kasutatud SELECT lausetes ei kattu" + ger "Die verwendeten SELECT-Befehle liefern eine unterschiedliche Anzahl von Spalten zurück" + ita "La SELECT utilizzata ha un numero di colonne differente" + 
por "Os comandos SELECT usados têm diferente número de colunas" + rus "éÓÐÏÌØÚÏ×ÁÎÎÙÅ ÏÐÅÒÁÔÏÒÙ ×ÙÂÏÒËÉ (SELECT) ÄÁÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×" + serbian "Upotrebljene 'SELECT' komande adresiraju razlièit broj kolona" + spa "El comando SELECT usado tiene diferente número de columnas" + swe "SELECT-kommandona har olika antal kolumner" +ER_CANT_UPDATE_WITH_READLOCK + nla "Kan de query niet uitvoeren vanwege een conflicterende read lock" + eng "Can't execute the query because you have a conflicting read lock" + est "Ei suuda täita päringut konfliktse luku tõttu" + ger "Augrund eines READ LOCK-Konflikts kann die Abfrage nicht ausgeführt werden" + ita "Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura" + por "Não posso executar a consulta porque você tem um conflito de travamento de leitura" + rus "îÅ×ÏÚÍÏÖÎÏ ÉÓÐÏÌÎÉÔØ ÚÁÐÒÏÓ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÕÓÔÁÎÏ×ÌÅÎÙ ËÏÎÆÌÉËÔÕÀÝÉÅ ÂÌÏËÉÒÏ×ËÉ ÞÔÅÎÉÑ" + serbian "Ne mogu da izvršim upit zbog toga što imate zakljuèavanja èitanja podataka u konfliktu" + spa "No puedo ejecutar el query porque usted tiene conflicto de traba de lectura" + swe "Kan inte utföra kommandot emedan du har ett READ-lås" +ER_MIXING_NOT_ALLOWED + nla "Het combineren van transactionele en niet-transactionele tabellen is uitgeschakeld." + eng "Mixing of transactional and non-transactional tables is disabled" + est "Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud" + ger "Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert" + ita "E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali" + por "Mistura de tabelas transacional e não-transacional está desabilitada" + rus "éÓÐÏÌØÚÏ×ÁÎÉÅ ÔÒÁÎÚÁËÃÉÏÎÎÙÈ ÔÁÂÌÉà ÎÁÒÑÄÕ Ó ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍÉ ÚÁÐÒÅÝÅÎÏ" + serbian "Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je iskljuèeno" + spa "Mezla de transancional y no-transancional tablas está deshabilitada" + swe "Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat" +ER_DUP_ARGUMENT + nla "Optie '%s' tweemaal gebruikt in opdracht" + eng "Option '%s' used twice in statement" + est "Määrangut '%s' on lauses kasutatud topelt" + ger "Option '%s' wird im Befehl zweimal verwendet" + ita "L'opzione '%s' e' stata usata due volte nel comando" + por "Opção '%s' usada duas vezes no comando" + rus "ïÐÃÉÑ '%s' Ä×ÁÖÄÙ ÉÓÐÏÌØÚÏ×ÁÎÁ × ×ÙÒÁÖÅÎÉÉ" + spa "Opción '%s' usada dos veces en el comando" + swe "Option '%s' användes två gånger" +ER_USER_LIMIT_REACHED 42000 + nla "Gebruiker '%-.64s' heeft het maximale gebruik van de '%s' faciliteit overschreden (huidige waarde: %ld)" + eng "User '%-.64s' has exceeded the '%s' resource (current value: %ld)" + ger "Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)" + ita "L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)" + por "Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)" + rus "ðÏÌØÚÏ×ÁÔÅÌØ '%-.64s' ÐÒÅ×ÙÓÉÌ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÒÅÓÕÒÓÁ '%s' (ÔÅËÕÝÅÅ ÚÎÁÞÅÎÉÅ: %ld)" + spa "Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)" + swe "Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)" +ER_SPECIFIC_ACCESS_DENIED_ERROR + nla "Toegang geweigerd. U moet het %-.128s privilege hebben voor deze operatie" + eng "Access denied; you need the %-.128s privilege for this operation" + ger "Befehl nicht zulässig. Hierfür wird die Berechtigung %-.128s benötigt" + ita "Accesso non consentito. 
Serve il privilegio %-.128s per questa operazione" + por "Acesso negado. Você precisa o privilégio %-.128s para essa operação" + rus "÷ ÄÏÓÔÕÐÅ ÏÔËÁÚÁÎÏ. ÷ÁÍ ÎÕÖÎÙ ÐÒÉ×ÉÌÅÇÉÉ %-.128s ÄÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ" + spa "Acceso negado. Usted necesita el privilegio %-.128s para esta operación" + swe "Du har inte privlegiet '%-.128s' som behövs för denna operation" + ukr "Access denied. You need the %-.128s privilege for this operation" +ER_LOCAL_VARIABLE + nla "Variabele '%-.64s' is SESSION en kan niet worden gebruikt met SET GLOBAL" + eng "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL" + ger "Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden" + ita "La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL" + por "Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÐÏÔÏËÏ×ÏÊ (SESSION) ÐÅÒÅÍÅÎÎÏÊ É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ Ó ÐÏÍÏÝØÀ SET GLOBAL" + spa "Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL" + swe "Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL" +ER_GLOBAL_VARIABLE + nla "Variabele '%-.64s' is GLOBAL en dient te worden gewijzigd met SET GLOBAL" + eng "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL" + ger "Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden" + ita "La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL" + por "Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÇÌÏÂÁÌØÎÏÊ (GLOBAL) ÐÅÒÅÍÅÎÎÏÊ, É ÅÅ ÓÌÅÄÕÅÔ ÉÚÍÅÎÑÔØ Ó ÐÏÍÏÝØÀ SET GLOBAL" + spa "Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL" + swe "Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL" +ER_NO_DEFAULT 42000 + nla "Variabele '%-.64s' heeft geen standaard waarde" + eng "Variable '%-.64s' doesn't have a default value" + ger "Variable '%-.64s' hat keinen Vorgabewert" + ita "La variabile '%-.64s' non ha un valore di default" + por "Variável '%-.64s' não tem um valor padrão" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÉÍÅÅÔ ÚÎÁÞÅÎÉÑ ÐÏ ÕÍÏÌÞÁÎÉÀ" + spa "Variable '%-.64s' no tiene un valor patrón" + swe "Variabel '%-.64s' har inte ett DEFAULT-värde" +ER_WRONG_VALUE_FOR_VAR 42000 + nla "Variabele '%-.64s' kan niet worden gewijzigd naar de waarde '%-.64s'" + eng "Variable '%-.64s' can't be set to the value of '%-.64s'" + ger "Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden" + ita "Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'" + por "Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÕÓÔÁÎÏ×ÌÅÎÁ × ÚÎÁÞÅÎÉÅ '%-.64s'" + spa "Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'" + swe "Variabel '%-.64s' kan inte sättas till '%-.64s'" +ER_WRONG_TYPE_FOR_VAR 42000 + nla "Foutief argumenttype voor variabele '%-.64s'" + eng "Incorrect argument type to variable '%-.64s'" + ger "Falscher Argumenttyp für Variable '%-.64s'" + ita "Tipo di valore errato per la variabile '%-.64s'" + por "Tipo errado de argumento para variável '%-.64s'" + rus "îÅ×ÅÒÎÙÊ ÔÉÐ ÁÒÇÕÍÅÎÔÁ ÄÌÑ ÐÅÒÅÍÅÎÎÏÊ '%-.64s'" + spa "Tipo de argumento equivocado para variable '%-.64s'" + swe "Fel typ av argument till variabel '%-.64s'" +ER_VAR_CANT_BE_READ + nla "Variabele '%-.64s' kan alleen worden gewijzigd, niet gelezen" + eng "Variable 
'%-.64s' can only be set, not read" + ger "Variable '%-.64s' kann nur verändert, nicht gelesen werden" + ita "Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto" + por "Variável '%-.64s' somente pode ser configurada, não lida" + rus "ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÍÏÖÅÔ ÂÙÔØ ÔÏÌØËÏ ÕÓÔÁÎÏ×ÌÅÎÁ, ÎÏ ÎÅ ÓÞÉÔÁÎÁ" + spa "Variable '%-.64s' solamente puede ser configurada, no leída" + swe "Variabeln '%-.64s' kan endast sättas, inte läsas" +ER_CANT_USE_OPTION_HERE 42000 + nla "Foutieve toepassing/plaatsing van '%s'" + eng "Incorrect usage/placement of '%s'" + ger "Falsche Verwendung oder Platzierung von '%s'" + ita "Uso/posizione di '%s' sbagliato" + por "Errado uso/colocação de '%s'" + rus "îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÉÌÉ × ÎÅ×ÅÒÎÏÍ ÍÅÓÔÅ ÕËÁÚÁÎ '%s'" + spa "Equivocado uso/colocación de '%s'" + swe "Fel använding/placering av '%s'" +ER_NOT_SUPPORTED_YET 42000 + nla "Deze versie van MySQL ondersteunt nog geen '%s'" + eng "This version of MySQL doesn't yet support '%s'" + ger "Diese MySQL-Version unterstützt '%s' nicht" + ita "Questa versione di MySQL non supporta ancora '%s'" + por "Esta versão de MySQL não suporta ainda '%s'" + rus "üÔÁ ×ÅÒÓÉÑ MySQL ÐÏËÁ ÅÝÅ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ '%s'" + spa "Esta versión de MySQL no soporta todavia '%s'" + swe "Denna version av MySQL kan ännu inte utföra '%s'" +ER_MASTER_FATAL_ERROR_READING_BINLOG + nla "Kreeg fatale fout %d: '%-.128s' van master tijdens lezen van data uit binaire log" + eng "Got fatal error %d: '%-.128s' from master when reading data from binary log" + ger "Schwerer Fehler %d: '%-.128s' vom Master beim Lesen des binären Logs aufgetreten" + ita "Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario" + por "Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log" + rus "ðÏÌÕÞÅÎÁ ÎÅÉÓÐÒÁ×ÉÍÁÑ ÏÛÉÂËÁ %d: '%-.128s' ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ × ÐÒÏÃÅÓÓÅ ×ÙÂÏÒËÉ ÄÁÎÎÙÈ ÉÚ Ä×ÏÉÞÎÏÇÏ ÖÕÒÎÁÌÁ" + spa "Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log" + swe "Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen" +ER_SLAVE_IGNORED_TABLE + eng "Slave SQL thread ignored the query because of replicate-*-table rules" + ger "Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert" + por "Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela" + spa "Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla" + swe "Slav SQL tråden ignorerade frågan pga en replicate-*-table regel" +ER_INCORRECT_GLOBAL_LOCAL_VAR + eng "Variable '%-.64s' is a %s variable" + serbian "Variable '%-.64s' is a %s variable" + spa "Variable '%-.64s' es una %s variable" + swe "Variabel '%-.64s' är av typ %s" +ER_WRONG_FK_DEF 42000 + eng "Incorrect foreign key definition for '%-.64s': %s" + ger "Falsche Fremdschlüssel-Definition für '%-.64s': %s" + por "Definição errada da chave estrangeira para '%-.64s': %s" + spa "Equivocada definición de llave extranjera para '%-.64s': %s" + swe "Felaktig FOREIGN KEY-definition för '%-.64s': %s" +ER_KEY_REF_DO_NOT_MATCH_TABLE_REF + eng "Key reference and table reference don't match" + ger "Schlüssel- und Tabellenverweis passen nicht zusammen" + por "Referência da chave e referência da tabela não coincidem" + spa "Referencia de llave y referencia de tabla no coinciden" + swe "Nyckelreferensen och tabellreferensen stämmer inte överens" +ER_OPERAND_COLUMNS 21000 + eng "Operand should contain %d column(s)" + ger "Operand sollte %d Spalte(n) enthalten" + rus "ïÐÅÒÁÎÄ ÄÏÌÖÅÎ
ÓÏÄÅÒÖÁÔØ %d ËÏÌÏÎÏË" + spa "Operando debe tener %d columna(s)" + ukr "ïÐÅÒÁÎÄ ÍÁ¤ ÓËÌÁÄÁÔÉÓÑ Ú %d ÓÔÏ×Âæ×" +ER_SUBQUERY_NO_1_ROW 21000 + eng "Subquery returns more than 1 row" + ger "Unterabfrage lieferte mehr als einen Datensatz zurück" + por "Subconsulta retorna mais que 1 registro" + rus "ðÏÄÚÁÐÒÏÓ ×ÏÚ×ÒÁÝÁÅÔ ÂÏÌÅÅ ÏÄÎÏÊ ÚÁÐÉÓÉ" + spa "Subconsulta retorna mas que 1 línea" + swe "Subquery returnerade mer än 1 rad" + ukr "ð¦ÄÚÁÐÉÔ ÐÏ×ÅÒÔÁ¤ Â¦ÌØÛ ÎiÖ 1 ÚÁÐÉÓ" +ER_UNKNOWN_STMT_HANDLER + dan "Unknown prepared statement handler (%ld) given to %s" + eng "Unknown prepared statement handler (%.*s) given to %s" + ger "Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben" + por "Desconhecido manipulador de declaração preparado (%.*s) determinado para %s" + spa "Desconocido preparado comando handler (%ld) dado para %s" + swe "Okänd PREPARED STATEMENT id (%ld) var given till %s" + ukr "Unknown prepared statement handler (%ld) given to %s" +ER_CORRUPT_HELP_DB + eng "Help database is corrupt or does not exist" + ger "Die Hilfe-Datenbank ist beschädigt oder existiert nicht" + por "Banco de dado de ajuda corrupto ou não existente" + spa "Base de datos Help está corrupto o no existe" + swe "Hjälpdatabasen finns inte eller är skadad" +ER_CYCLIC_REFERENCE + eng "Cyclic reference on subqueries" + ger "Zyklischer Verweis in Unterabfragen" + por "Referência cíclica em subconsultas" + rus "ãÉËÌÉÞÅÓËÁÑ ÓÓÙÌËÁ ÎÁ ÐÏÄÚÁÐÒÏÓ" + spa "Cíclica referencia en subconsultas" + swe "Cyklisk referens i subqueries" + ukr "ãÉË̦ÞÎÅ ÐÏÓÉÌÁÎÎÑ ÎÁ ЦÄÚÁÐÉÔ" +ER_AUTO_CONVERT + eng "Converting column '%s' from %s to %s" + ger "Spalte '%s' wird von %s nach %s umgewandelt" + por "Convertendo coluna '%s' de %s para %s" + rus "ðÒÅÏÂÒÁÚÏ×ÁÎÉÅ ÐÏÌÑ '%s' ÉÚ %s × %s" + spa "Convirtiendo columna '%s' de %s para %s" + swe "Konvertar kolumn '%s' från %s till %s" + ukr "ðÅÒÅÔ×ÏÒÅÎÎÑ ÓÔÏ×ÂÃÁ '%s' Ú %s Õ %s" +ER_ILLEGAL_REFERENCE 42S22 + eng "Reference '%-.64s' not supported (%s)" + ger "Verweis '%-.64s' wird nicht unterstützt (%s)" + por "Referência '%-.64s' não suportada (%s)" + rus "óÓÙÌËÁ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔÓÑ (%s)" + spa "Referencia '%-.64s' no soportada (%s)" + swe "Referens '%-.64s' stöds inte (%s)" + ukr "ðÏÓÉÌÁÎÎÑ '%-.64s' ÎÅ ÐiÄÔÒÉÍÕÅÔÓÑ (%s)" +ER_DERIVED_MUST_HAVE_ALIAS 42000 + eng "Every derived table must have its own alias" + ger "Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden" + por "Cada tabela derivada deve ter seu próprio alias" + spa "Cada tabla derivada debe tener su propio alias" + swe "Varje 'derived table' måste ha sitt eget alias" +ER_SELECT_REDUCED 01000 + eng "Select %u was reduced during optimization" + ger "Select %u wurde während der Optimierung reduziert" + por "Select %u foi reduzido durante otimização" + rus "Select %u ÂÙÌ ÕÐÒÁÚÄÎÅÎ × ÐÒÏÃÅÓÓÅ ÏÐÔÉÍÉÚÁÃÉÉ" + spa "Select %u fué reducido durante optimización" + swe "Select %u reducerades vid optimiering" + ukr "Select %u was ÓËÁÓÏ×ÁÎÏ ÐÒÉ ÏÐÔÉÍiÚÁÃii" +ER_TABLENAME_NOT_ALLOWED_HERE 42000 + eng "Table '%-.64s' from one of the SELECTs cannot be used in %-.32s" + ger "Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden" + por "Tabela '%-.64s' de um dos SELECTs não pode ser usada em %-.32s" + spa "Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s" + swe "Tabell '%-.64s' från en SELECT kan inte användas i %-.32s" +ER_NOT_SUPPORTED_AUTH_MODE 08004 + eng "Client does not support authentication protocol requested by server; consider upgrading MySQL client" + ger "Client 
unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client" + por "Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL" + spa "Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL" + swe "Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet." +ER_SPATIAL_CANT_HAVE_NULL 42000 + eng "All parts of a SPATIAL index must be NOT NULL" + ger "Alle Teile eines SPATIAL index müssen als NOT NULL deklariert sein" + por "Todas as partes de uma SPATIAL index devem ser NOT NULL" + spa "Todas las partes de una SPATIAL index deben ser NOT NULL" + swe "Alla delar av en SPATIAL index måste vara NOT NULL" +ER_COLLATION_CHARSET_MISMATCH 42000 + eng "COLLATION '%s' is not valid for CHARACTER SET '%s'" + ger "COLLATION '%s' ist für CHARACTER SET '%s' ungültig" + por "COLLATION '%s' não é válida para CHARACTER SET '%s'" + spa "COLLATION '%s' no es válido para CHARACTER SET '%s'" + swe "COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'" +ER_SLAVE_WAS_RUNNING + eng "Slave is already running" + ger "Slave läuft bereits" + por "O slave já está rodando" + spa "Slave ya está funcionando" + swe "Slaven har redan startat" +ER_SLAVE_WAS_NOT_RUNNING + eng "Slave has already been stopped" + ger "Slave wurde bereits angehalten" + por "O slave já está parado" + spa "Slave ya fué parado" + swe "Slaven har redan stoppat" +ER_TOO_BIG_FOR_UNCOMPRESS + eng "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)" + ger "Unkomprimierte Daten sind zu groß. Die maximale Größe beträgt %d" + por "Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)" + spa "Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. 
(probablemente, extensión de datos descomprimidos fué corrompida)" +ER_ZLIB_Z_MEM_ERROR + eng "ZLIB: Not enough memory" + ger "ZLIB: Steht nicht genug Speicher zur Verfügung" + por "ZLIB: Não suficiente memória disponível" + spa "Z_MEM_ERROR: No suficiente memoria para zlib" +ER_ZLIB_Z_BUF_ERROR + eng "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)" + ger "ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)" + por "ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)" + spa "Z_BUF_ERROR: No suficiente espacio en el búfer de salida para zlib (probablemente, extensión de datos descomprimidos fué corrompida)" +ER_ZLIB_Z_DATA_ERROR + eng "ZLIB: Input data corrupted" + ger "ZLIB: Eingabedaten beschädigt" + por "ZLIB: Dados de entrada está corrupto" + spa "Z_DATA_ERROR: Dato de entrada fué corrompido para zlib" + swe "Z_DATA_ERROR: Input data was corrupted for zlib" + ukr "Z_DATA_ERROR: Input data was corrupted for zlib" +ER_CUT_VALUE_GROUP_CONCAT + eng "%d line(s) were cut by GROUP_CONCAT()" + ger "%d Zeile(n) durch GROUP_CONCAT() abgeschnitten" + por "%d linha(s) foram cortada(s) por GROUP_CONCAT()" + spa "%d línea(s) fue(fueron) cortadas por group_concat()" + swe "%d rad(er) kapades av group_concat()" + ukr "%d line(s) was(were) cut by group_concat()" +ER_WARN_TOO_FEW_RECORDS 01000 + eng "Row %ld doesn't contain data for all columns" + ger "Anzahl der Datensätze in Zeile %ld geringer als Anzahl der Spalten" + por "Conta de registro é menor que a conta de coluna na linha %ld" + spa "Línea %ld no contiene datos para todas las columnas" +ER_WARN_TOO_MANY_RECORDS 01000 + eng "Row %ld was truncated; it contained more data than there were input columns" + ger "Anzahl der Datensätze in Zeile %ld größer als Anzahl der Spalten" + por "Conta de registro é maior que a conta de coluna na linha %ld" + spa "Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada" +ER_WARN_NULL_TO_NOTNULL 22004 + eng "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld" + ger "Daten abgeschnitten, NULL für NOT NULL-Spalte '%s' in Zeile %ld angegeben" + por "Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld" + spa "Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld" +ER_WARN_DATA_OUT_OF_RANGE 22003 + eng "Out of range value adjusted for column '%s' at row %ld" + ger "Daten abgeschnitten, außerhalb des Wertebereichs für Spalte '%s' in Zeile %ld" + por "Dado truncado, fora de alcance para coluna '%s' na linha %ld" + spa "Datos truncados, fuera de gama para columna '%s' en la línea %ld" +ER_WARN_DATA_TRUNCATED 01000 + eng "Data truncated for column '%s' at row %ld" + ger "Daten abgeschnitten für Spalte '%s' in Zeile %ld" + por "Dado truncado para coluna '%s' na linha %ld" + spa "Datos truncados para columna '%s' en la línea %ld" +ER_WARN_USING_OTHER_HANDLER + eng "Using storage engine %s for table '%s'" + ger "Für Tabelle '%s' wird Speicher-Engine %s benutzt" + por "Usando engine de armazenamento %s para tabela '%s'" + spa "Usando motor de almacenamiento %s para tabla '%s'" + swe "Använder handler %s för tabell '%s'" +ER_CANT_AGGREGATE_2COLLATIONS + eng "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'" + ger "Unerlaubte Vermischung der Kollationen (%s,%s) und (%s,%s) für die Operation '%s'" + por "Combinação ilegal de 
collations (%s,%s) e (%s,%s) para operação '%s'" + spa "Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'" +ER_DROP_USER + eng "Cannot drop one or more of the requested users" + ger "Kann einen oder mehrere der angegebenen Benutzer nicht löschen" +ER_REVOKE_GRANTS + eng "Can't revoke all privileges, grant for one or more of the requested users" + ger "Kann nicht alle Berechtigungen widerrufen, grant for one or more of the requested users" + por "Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos" + spa "No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados" +ER_CANT_AGGREGATE_3COLLATIONS + eng "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'" + ger "Unerlaubte Vermischung der Kollationen (%s,%s), (%s,%s), (%s,%s) für die Operation '%s'" + por "Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'" + spa "Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'" +ER_CANT_AGGREGATE_NCOLLATIONS + eng "Illegal mix of collations for operation '%s'" + ger "Unerlaubte Vermischung der Kollationen für die Operation '%s'" + por "Ilegal combinação de collations para operação '%s'" + spa "Ilegal mezcla de collations para operación '%s'" +ER_VARIABLE_IS_NOT_STRUCT + eng "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)" + ger "Variable '%-.64s' ist keine Variablen-Komponenten (kann nicht als XXXX.variablen_name verwendet werden)" + por "Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)" + spa "Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)" +ER_UNKNOWN_COLLATION + eng "Unknown collation: '%-.64s'" + ger "Unbekannte Kollation: '%-.64s'" + por "Collation desconhecida: '%-.64s'" + spa "Collation desconocida: '%-.64s'" +ER_SLAVE_IGNORED_SSL_PARAMS + eng "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started" + ger "SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn der MySQL-Slave mit SSL gestartet wird" + por "SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado." + spa "Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado" + swe "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later when MySQL slave with SSL will be started" + ukr "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later when MySQL slave with SSL will be started" +ER_SERVER_IS_IN_SECURE_AUTH_MODE + eng "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format" + ger "Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. 
Bitte Passwort ins neue Format ändern" + por "Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato" + rus "óÅÒ×ÅÒ ÚÁÐÕÝÅÎ × ÒÅÖÉÍÅ --secure-auth (ÂÅÚÏÐÁÓÎÏÊ Á×ÔÏÒÉÚÁÃÉÉ), ÎÏ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%s'@'%s' ÐÁÒÏÌØ ÓÏÈÒÁÎ£Î × ÓÔÁÒÏÍ ÆÏÒÍÁÔÅ; ÎÅÏÂÈÏÄÉÍÏ ÏÂÎÏ×ÉÔØ ÆÏÒÍÁÔ ÐÁÒÏÌÑ" + spa "Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato" +ER_WARN_FIELD_RESOLVED + eng "Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d" + ger "Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst" + por "Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d" + rus "ðÏÌÅ ÉÌÉ ÓÓÙÌËÁ '%-.64s%s%-.64s%s%-.64s' ÉÚ SELECTÁ #%d ÂÙÌÁ ÎÁÊÄÅÎÁ × SELECTÅ #%d" + spa "Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d" + ukr "óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.64s%s%-.64s%s%-.64s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d" +ER_BAD_SLAVE_UNTIL_COND + eng "Incorrect parameter or combination of parameters for START SLAVE UNTIL" + ger "Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL" + por "Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL" + spa "Parametro equivocado o combinación de parametros para START SLAVE UNTIL" +ER_MISSING_SKIP_SLAVE + eng "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart" + ger "Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn der Slave-Server unerwartet neu startet" + por "É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo" + spa "Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave" +ER_UNTIL_COND_IGNORED + eng "SQL thread is not to be started so UNTIL options are ignored" + ger "SQL-Thread soll nicht gestartet werden. 
Daher werden UNTIL-Optionen ignoriert" + por "Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas" + spa "SQL thread no es inicializado tal que opciones UNTIL son ignoradas" +ER_WRONG_NAME_FOR_INDEX 42000 + eng "Incorrect index name '%-.100s'" + por "Incorreto nome de índice '%-.100s'" + spa "Nombre de índice incorrecto '%-.100s'" + swe "Felaktigt index namn '%-.100s'" +ER_WRONG_NAME_FOR_CATALOG 42000 + eng "Incorrect catalog name '%-.100s'" + por "Incorreto nome de catálogo '%-.100s'" + spa "Nombre de catalog incorrecto '%-.100s'" + swe "Felaktigt katalog namn '%-.100s'" +ER_WARN_QC_RESIZE + eng "Query cache failed to set size %lu; new query cache size is %lu" + por "Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu" + rus "ëÅÛ ÚÁÐÒÏÓÏ× ÎÅ ÍÏÖÅÔ ÕÓÔÁÎÏ×ÉÔØ ÒÁÚÍÅÒ %lu, ÎÏ×ÙÊ ÒÁÚÍÅÒ ËÅÛÁ ÚÐÒÏÓÏ× - %lu" + spa "Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu" + swe "Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu" + ukr "ëÅÛ ÚÁÐÉÔ¦× ÎÅÓÐÒÏÍÏÖÅÎ ×ÓÔÁÎÏ×ÉÔÉ ÒÏÚÍ¦Ò %lu, ÎÏ×ÉÊ ÒÏÚÍ¦Ò ËÅÛÁ ÚÁÐÉÔ¦× - %lu" +ER_BAD_FT_COLUMN + eng "Column '%-.64s' cannot be part of FULLTEXT index" + por "Coluna '%-.64s' não pode ser parte de índice FULLTEXT" + spa "Columna '%-.64s' no puede ser parte de FULLTEXT index" + swe "Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index" +ER_UNKNOWN_KEY_CACHE + eng "Unknown key cache '%-.100s'" + por "Key cache desconhecida '%-.100s'" + spa "Desconocida key cache '%-.100s'" + swe "Okänd nyckel cache '%-.100s'" +ER_WARN_HOSTNAME_WONT_WORK + eng "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work" + por "MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar" + spa "MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar" +ER_UNKNOWN_STORAGE_ENGINE 42000 + eng "Unknown table engine '%s'" + por "Motor de tabela desconhecido '%s'" + spa "Desconocido motor de tabla '%s'" +ER_WARN_DEPRECATED_SYNTAX + eng "'%s' is deprecated; use '%s' instead" + por "'%s' é desatualizado. Use '%s' em seu lugar" + spa "'%s' está desaprobado, use '%s' en su lugar" +ER_NON_UPDATABLE_TABLE + eng "The target table %-.100s of the %s is not updatable" + por "A tabela destino %-.100s do %s não é atualizável" + rus "ôÁÂÌÉÃÁ %-.100s × %s ÎÅ ÍÏÖÅÔ ÉÚÍÅÎÑÔÓÑ" + spa "La tabla destino %-.100s del %s no es actualizable" + swe "Tabel %-.100s använd med '%s' är inte uppdateringsbar" + ukr "ôÁÂÌÉÃÑ %-.100s Õ %s ÎÅ ÍÏÖÅ ÏÎÏ×ÌÀ×ÁÔÉÓØ" +ER_FEATURE_DISABLED + eng "The '%s' feature is disabled; you need MySQL built with '%s' to have it working" + por "O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando" + spa "El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando" + swe "'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad" +ER_OPTION_PREVENTS_STATEMENT + eng "The MySQL server is running with the %s option so it cannot execute this statement" + por "O servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando" + spa "El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando" + swe "MySQL är startad med --skip-grant-tables. 
Pga av detta kan du inte använda detta kommando" +ER_DUPLICATED_VALUE_IN_TYPE + eng "Column '%-.100s' has duplicated value '%-.64s' in %s" + por "Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" + spa "Columna '%-.100s' tiene valor doblado '%-.64s' en %s" +ER_TRUNCATED_WRONG_VALUE 22007 + eng "Truncated incorrect %-.32s value: '%-.128s'" + por "Truncado errado %-.32s valor: '%-.128s'" + spa "Equivocado truncado %-.32s valor: '%-.128s'" +ER_TOO_MUCH_AUTO_TIMESTAMP_COLS + eng "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" + por "Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula" + spa "Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula" + swe "Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" + ukr "Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" +ER_INVALID_ON_UPDATE + eng "Invalid ON UPDATE clause for '%-.64s' column" + por "Inválida cláusula ON UPDATE para campo '%-.64s'" + spa "Inválido ON UPDATE cláusula para campo '%-.64s'" + swe "Invalid ON UPDATE clause for '%-.64s' field" + ukr "Invalid ON UPDATE clause for '%-.64s' field" +ER_UNSUPPORTED_PS + eng "This command is not supported in the prepared statement protocol yet" +ER_GET_ERRMSG + dan "Modtog fejl %d '%-.100s' fra %s" + eng "Got error %d '%-.100s' from %s" + jpn "Got NDB error %d '%-.100s'" + nor "Mottok feil %d '%-.100s' fa %s" + norwegian-ny "Mottok feil %d '%-.100s' fra %s" +ER_GET_TEMPORARY_ERRMSG + dan "Modtog temporary fejl %d '%-.100s' fra %s" + eng "Got temporary error %d '%-.100s' from %s" + jpn "Got temporary NDB error %d '%-.100s'" + nor "Mottok temporary feil %d '%-.100s' fra %s" + norwegian-ny "Mottok temporary feil %d '%-.100s' fra %s" +ER_UNKNOWN_TIME_ZONE + eng "Unknown or incorrect time zone: '%-.64s'" +ER_WARN_INVALID_TIMESTAMP + eng "Invalid TIMESTAMP value in column '%s' at row %ld" +ER_INVALID_CHARACTER_STRING + eng "Invalid %s character string: '%.64s'" +ER_WARN_ALLOWED_PACKET_OVERFLOWED + eng "Result of %s() was larger than max_allowed_packet (%ld) - truncated" +ER_CONFLICTING_DECLARATIONS + eng "Conflicting declarations: '%s%s' and '%s%s'" +ER_SP_NO_RECURSIVE_CREATE 2F003 + eng "Can't create a %s from within another stored routine" +ER_SP_ALREADY_EXISTS 42000 + eng "%s %s already exists" +ER_SP_DOES_NOT_EXIST 42000 + eng "%s %s does not exist" +ER_SP_DROP_FAILED + eng "Failed to DROP %s %s" +ER_SP_STORE_FAILED + eng "Failed to CREATE %s %s" +ER_SP_LILABEL_MISMATCH 42000 + eng "%s with no matching label: %s" +ER_SP_LABEL_REDEFINE 42000 + eng "Redefining label %s" +ER_SP_LABEL_MISMATCH 42000 + eng "End-label %s without match" +ER_SP_UNINIT_VAR 01000 + eng "Referring to uninitialized variable %s" +ER_SP_BADSELECT 0A000 + eng "SELECT in a stored procedure must have INTO" +ER_SP_BADRETURN 42000 + eng "RETURN is only allowed in a FUNCTION" +ER_SP_BADSTATEMENT 0A000 + eng "Statements like SELECT, INSERT, UPDATE (and others) are not allowed in a FUNCTION" +ER_UPDATE_LOG_DEPRECATED_IGNORED 42000 + eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored" +ER_UPDATE_LOG_DEPRECATED_TRANSLATED 42000 + eng "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been 
translated to SET SQL_LOG_BIN" +ER_QUERY_INTERRUPTED 70100 + eng "Query execution was interrupted" +ER_SP_WRONG_NO_OF_ARGS 42000 + eng "Incorrect number of arguments for %s %s; expected %u, got %u" +ER_SP_COND_MISMATCH 42000 + eng "Undefined CONDITION: %s" +ER_SP_NORETURN 42000 + eng "No RETURN found in FUNCTION %s" +ER_SP_NORETURNEND 2F005 + eng "FUNCTION %s ended without RETURN" +ER_SP_BAD_CURSOR_QUERY 42000 + eng "Cursor statement must be a SELECT" +ER_SP_BAD_CURSOR_SELECT 42000 + eng "Cursor SELECT must not have INTO" +ER_SP_CURSOR_MISMATCH 42000 + eng "Undefined CURSOR: %s" +ER_SP_CURSOR_ALREADY_OPEN 24000 + eng "Cursor is already open" +ER_SP_CURSOR_NOT_OPEN 24000 + eng "Cursor is not open" +ER_SP_UNDECLARED_VAR 42000 + eng "Undeclared variable: %s" +ER_SP_WRONG_NO_OF_FETCH_ARGS + eng "Incorrect number of FETCH variables" +ER_SP_FETCH_NO_DATA 02000 + eng "No data to FETCH" +ER_SP_DUP_PARAM 42000 + eng "Duplicate parameter: %s" +ER_SP_DUP_VAR 42000 + eng "Duplicate variable: %s" +ER_SP_DUP_COND 42000 + eng "Duplicate condition: %s" +ER_SP_DUP_CURS 42000 + eng "Duplicate cursor: %s" +ER_SP_CANT_ALTER + eng "Failed to ALTER %s %s" +ER_SP_SUBSELECT_NYI 0A000 + eng "Subselect value not supported" +ER_SP_NO_USE 42000 + eng "USE is not allowed in a stored procedure" +ER_SP_VARCOND_AFTER_CURSHNDLR 42000 + eng "Variable or condition declaration after cursor or handler declaration" +ER_SP_CURSOR_AFTER_HANDLER 42000 + eng "Cursor declaration after handler declaration" +ER_SP_CASE_NOT_FOUND 20000 + eng "Case not found for CASE statement" +ER_FPARSER_TOO_BIG_FILE + eng "Configuration file '%-.64s' is too big" + rus "óÌÉÛËÏÍ ÂÏÌØÛÏÊ ËÏÎÆÉÇÕÒÁÃÉÏÎÎÙÊ ÆÁÊÌ '%-.64s'" + ukr "úÁÎÁÄÔÏ ×ÅÌÉËÉÊ ËÏÎÆ¦ÇÕÒÁæÊÎÉÊ ÆÁÊÌ '%-.64s'" +ER_FPARSER_BAD_HEADER + eng "Malformed file type header in file '%-.64s'" + rus "îÅ×ÅÒÎÙÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÁ ÆÁÊÌÁ '%-.64s'" + ukr "îÅצÒÎÉÊ ÚÁÇÏÌÏ×ÏË ÔÉÐÕ Õ ÆÁÊ̦ '%-.64s'" +ER_FPARSER_EOF_IN_COMMENT + eng "Unexpected end of file while parsing comment '%-.64s'" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ × ËÏÍÅÎÔÁÒÉÉ '%-.64s'" + ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ËÏÍÅÎÔÁÒ¦ '%-.64s'" +ER_FPARSER_ERROR_IN_PARAMETER + eng "Error while parsing parameter '%-.64s' (line: '%-.64s')" + rus "ïÛÉÂËÁ ÐÒÉ ÒÁÓÐÏÚÎÁ×ÁÎÉÉ ÐÁÒÁÍÅÔÒÁ '%-.64s' (ÓÔÒÏËÁ: '%-.64s')" + ukr "ðÏÍÉÌËÁ × ÒÏÓЦÚÎÁ×ÁÎΦ ÐÁÒÁÍÅÔÒÕ '%-.64s' (ÒÑÄÏË: '%-.64s')" +ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER + eng "Unexpected end of file while skipping unknown parameter '%-.64s'" + rus "îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ ÐÒÉ ÐÒÏÐÕÓËÅ ÎÅÉÚ×ÅÓÔÎÏÇÏ ÐÁÒÁÍÅÔÒÁ '%-.64s'" + ukr "îÅÓÐÏĦ×ÁÎÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ Õ ÓÐÒϦ ÐÒÏÍÉÎÕÔÉ ÎÅצÄÏÍÉÊ ÐÁÒÁÍÅÔÒ '%-.64s'" +ER_VIEW_NO_EXPLAIN + eng "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table" + rus "EXPLAIN/SHOW ÎÅ ÍÏÖÅÔ ÂÙÔØ ×ÙÐÏÌÎÅÎÎÏ; ÎÅÄÏÓÔÁÔÏÞÎÏ ÐÒÁ× ÎÁ ÔÁËÂÌÉÃÙ ÚÁÐÒÏÓÁ" + ukr "EXPLAIN/SHOW ÎÅ ÍÏÖÅ ÂÕÔÉ ×¦ËÏÎÁÎÏ; ÎÅÍÁ¤ ÐÒÁ× ÎÁ ÔÉÂÌÉæ ÚÁÐÉÔÕ" +ER_FRM_UNKNOWN_TYPE + eng "File '%-.64s' has unknown type '%-.64s' in its header" + rus "æÁÊÌ '%-.64s' ÓÏÄÅÒÖÉÔ ÎÅÉÚ×ÅÓÔÎÙÊ ÔÉÐ '%-.64s' × ÚÁÇÏÌÏ×ËÅ" + ukr "æÁÊÌ '%-.64s' ÍÁ¤ ÎÅצÄÏÍÉÊ ÔÉÐ '%-.64s' Õ ÚÁÇÏÌÏ×ËÕ" +ER_WRONG_OBJECT + eng "'%-.64s.%-.64s' is not %s" + rus "'%-.64s.%-.64s' - ÎÅ %s" + ukr "'%-.64s.%-.64s' ÎÅ ¤ %s" +ER_NONUPDATEABLE_COLUMN + eng "Column '%-.64s' is not updatable" + rus "óÔÏÌÂÅà '%-.64s' ÎÅ ÏÂÎÏ×ÌÑÅÍÙÊ" + ukr "óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÚÍÉÎÅÎÉÊ" +ER_VIEW_SELECT_DERIVED + eng "View's SELECT contains a subquery in the FROM clause" + rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÏÄÚÁÐÒÏÓ × ËÏÎÓÔÒÕËÃÉÉ FROM" + ukr "View SELECT ÍÁ¤ ЦÄÚÁÐÉÔ Õ ËÏÎÓÔÒÕËæ§ FROM" 
+ER_VIEW_SELECT_CLAUSE + eng "View's SELECT contains a '%s' clause" + rus "View SELECT ÓÏÄÅÒÖÉÔ ËÏÎÓÔÒÕËÃÉÀ '%s'" + ukr "View SELECT ÍÁ¤ ËÏÎÓÔÒÕËæÀ '%s'" +ER_VIEW_SELECT_VARIABLE + eng "View's SELECT contains a variable or parameter" + rus "View SELECT ÓÏÄÅÒÖÉÔ ÐÅÒÅÍÅÎÎÕÀ ÉÌÉ ÐÁÒÁÍÅÔÒ" + ukr "View SELECT ÍÁ¤ ÚÍÉÎÎÕ ÁÂÏ ÐÁÒÁÍÅÔÅÒ" +ER_VIEW_SELECT_TMPTABLE + eng "View's SELECT contains a temporary table '%-.64s'" + rus "View SELECT ÓÏÄÅÒÖÉÔ ÓÓÙÌËÕ ÎÁ ×ÒÅÍÅÎÎÕÀ ÔÁÂÌÉÃÕ '%-.64s'" + ukr "View SELECT ×ÉËÏÒÉÓÔÏ×Õ¤ ÔÉÍÞÁÓÏ×Õ ÔÁÂÌÉÃÀ '%-.64s'" +ER_VIEW_WRONG_LIST + eng "View's SELECT and view's field list have different column counts" + rus "View SELECT É ÓÐÉÓÏË ÐÏÌÅÊ view ÉÍÅÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×" + ukr "View SELECT ¦ ÐÅÒÅÌ¦Ë ÓÔÏ×ÂÃ¦× view ÍÁÀÔØ Ò¦ÚÎÕ Ë¦ÌØË¦ÓÔØ ÓËÏ×Âæ×" +ER_WARN_VIEW_MERGE + eng "View merge algorithm can't be used here for now (assumed undefined algorithm)" + rus "áÌÇÏÒÉÔÍ ÓÌÉÑÎÉÑ view ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ÓÅÊÞÁÓ (ÁÌÇÏÒÉÔÍ ÂÕÄÅÔ ÎÅÏÐÅÒÅÄÅÌÅÎÎÙÍ)" + ukr "áÌÇÏÒÉÔÍ ÚÌÉ×ÁÎÎÑ view ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ ÚÁÒÁÚ (ÁÌÇÏÒÉÔÍ ÂÕÄÅ ÎÅ×ÉÚÎÁÞÅÎÉÊ)" +ER_WARN_VIEW_WITHOUT_KEY + eng "View being updated does not have complete key of underlying table in it" + rus "ïÂÎÏ×ÌÑÅÍÙÊ view ÎÅ ÓÏÄÅÒÖÉÔ ËÌÀÞÁ ÉÓÐÏÌØÚÏ×ÁÎÎÙÈ(ÏÊ) × ÎÅÍ ÔÁÂÌÉÃ(Ù)" + ukr "View, ÝÏ ÏÎÏ×ÌÀÅÔØÓÑ, ΊͦÓÔÉÔØ ÐÏ×ÎÏÇÏ ËÌÀÞÁ ÔÁÂÌÉæ(Ø), ÝÏ ×ÉËÏÒ¦ÓÔÁÎÁ × ÎØÀÏÍÕ" +ER_VIEW_INVALID + eng "View '%-.64s.%-.64s' references invalid table(s) or column(s) or function(s)" + rus "View '%-.64s.%-.64s' ÓÓÙÌÁÅÔÓÑ ÎÁ ÎÅÓÕÝÅÓÔ×ÕÀÝÉÅ ÔÁÂÌÉÃÙ ÉÌÉ ÓÔÏÌÂÃÙ ÉÌÉ ÆÕÎËÃÉÉ" +ER_SP_NO_DROP_SP + eng "Can't drop a %s from within another stored routine" +ER_SP_GOTO_IN_HNDLR + eng "GOTO is not allowed in a stored procedure handler" +ER_TRG_ALREADY_EXISTS + eng "Trigger already exists" +ER_TRG_DOES_NOT_EXIST + eng "Trigger does not exist" +ER_TRG_ON_VIEW_OR_TEMP_TABLE + eng "Trigger's '%-.64s' is view or temporary table" +ER_TRG_CANT_CHANGE_ROW + eng "Updating of %s row is not allowed in %strigger" +ER_TRG_NO_SUCH_ROW_IN_TRG + eng "There is no %s row in %s trigger" +ER_NO_DEFAULT_FOR_FIELD + eng "Field '%-.64s' doesn't have a default value" +ER_DIVISION_BY_ZERO 22012 + eng "Division by 0" +ER_TRUNCATED_WRONG_VALUE_FOR_FIELD + eng "Incorrect %-.32s value: '%-.128s' for column '%.64s' at row %ld" +ER_ILLEGAL_VALUE_FOR_TYPE 22007 + eng "Illegal %s '%-.64s' value found during parsing" +ER_VIEW_NONUPD_CHECK + eng "CHECK OPTION on non-updatable view '%-.64s.%-.64s'" + rus "CHECK OPTION ÄÌÑ ÎÅÏÂÎÏ×ÌÑÅÍÏÇÏ VIEW '%-.64s.%-.64s'" + ukr "CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÝÏ ÎÅ ÍÏÖÅ ÂÕÔÉ ÏÎÏ×ÌÅÎÎÉÍ" +ER_VIEW_CHECK_FAILED + eng "CHECK OPTION failed '%-.64s.%-.64s'" + rus "ÐÒÏ×ÅÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÐÒÏ×ÁÌÉÌÁÓØ" + ukr "ðÅÒÅצÒËÁ CHECK OPTION ÄÌÑ VIEW '%-.64s.%-.64s' ÎÅ ÐÒÏÊÛÌÁ" +ER_PROCACCESS_DENIED_ERROR 42000 + eng "%-.16s command denied to user '%-.32s'@'%-.64s' for routine '%-.64s'" +ER_RELAY_LOG_FAIL + eng "Failed purging old relay logs: %s" +ER_PASSWD_LENGTH + eng "Password hash should be a %d-digit hexadecimal number" +ER_UNKNOWN_TARGET_BINLOG + eng "Target log not found in binlog index" +ER_IO_ERR_LOG_INDEX_READ + eng "I/O error reading log index file" +ER_BINLOG_PURGE_PROHIBITED + eng "Server configuration does not permit binlog purge" +ER_FSEEK_FAIL + eng "Failed on fseek()" +ER_BINLOG_PURGE_FATAL_ERR + eng "Fatal error during log purge" +ER_LOG_IN_USE + eng "A purgeable log is in use, will not purge" +ER_LOG_PURGE_UNKNOWN_ERR + eng "Unknown error during log purge" +ER_RELAY_LOG_INIT + eng "Failed initializing relay log position: %s" 
+ER_NO_BINARY_LOGGING + eng "You are not using binary logging" +ER_RESERVED_SYNTAX + eng "The '%-.64s' syntax is reserved for purposes internal to the MySQL server" +ER_WSAS_FAILED + eng "WSAStartup Failed" +ER_DIFF_GROUPS_PROC + eng "Can't handle procedures with differents groups yet" +ER_NO_GROUP_FOR_PROC + eng "Select must have a group with this procedure" +ER_ORDER_WITH_PROC + eng "Can't use ORDER clause with this procedure" +ER_LOGING_PROHIBIT_CHANGING_OF + eng "Binary logging and replication forbid changing the global server %s" +ER_NO_FILE_MAPPING + eng "Can't map file: %-.64s, errno: %d" +ER_WRONG_MAGIC + eng "Wrong magic in %-.64s" +ER_PS_MANY_PARAM + eng "Prepared statement contains too many placeholders" +ER_KEY_PART_0 + eng "Key part '%-.64s' length cannot be 0" +ER_VIEW_CHECKSUM + eng "View text checksum failed" + rus "ðÒÏ×ÅÒËÁ ËÏÎÔÒÏÌØÎÏÊ ÓÕÍÍÙ ÔÅËÓÔÁ VIEW ÐÒÏ×ÁÌÉÌÁÓØ" + ukr "ðÅÒÅצÒËÁ ËÏÎÔÒÏÌØÎϧ ÓÕÍÉ ÔÅËÓÔÕ VIEW ÎÅ ÐÒÏÊÛÌÁ" +ER_VIEW_MULTIUPDATE + eng "Can not modify more than one base table through a join view '%-.64s.%-.64s'" + rus "îÅÌØÚÑ ÉÚÍÅÎÉÔØ ÂÏÌØÛÅ ÞÅÍ ÏÄÎÕ ÂÁÚÏ×ÕÀ ÔÁÂÌÉÃÕ ÉÓÐÏÌØÚÕÑ ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s'" + ukr "îÅÍÏÖÌÉ×Ï ÏÎÏ×ÉÔÉ Â¦ÌØÛ ÎÉÖ ÏÄÎÕ ÂÁÚÏ×Õ ÔÁÂÌÉÃÀ ×ÙËÏÒÉÓÔÏ×ÕÀÞÉ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔ¦ÔØ ÄÅË¦ÌØËÁ ÔÁÂÌÉÃØ" +ER_VIEW_NO_INSERT_FIELD_LIST + eng "Can not insert into join view '%-.64s.%-.64s' without fields list" + rus "îÅÌØÚÑ ×ÓÔÁ×ÌÑÔØ ÚÁÐÉÓÉ × ÍÎÏÇÏÔÁÂÌÉÞÎÙÊ VIEW '%-.64s.%-.64s' ÂÅÚ ÓÐÉÓËÁ ÐÏÌÅÊ" + ukr "îÅÍÏÖÌÉ×Ï ÕÓÔÁ×ÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅË¦ÌØËÁ ÔÁÂÌÉÃØ, ÂÅÚ ÓÐÉÓËÕ ÓÔÏ×Âæ×" +ER_VIEW_DELETE_MERGE_VIEW + eng "Can not delete from join view '%-.64s.%-.64s'" + rus "îÅÌØÚÑ ÕÄÁÌÑÔØ ÉÚ ÍÎÏÇÏÔÁÂÌÉÞÎÏÇÏ VIEW '%-.64s.%-.64s'" + ukr "îÅÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ÒÑÄËÉ Õ VIEW '%-.64s.%-.64s', ÝÏ Í¦ÓÔÉÔØ ÄÅË¦ÌØËÁ ÔÁÂÌÉÃØ" +ER_CANNOT_USER + eng "Operation %s failed for %.256s" + ger "Das Kommando %s scheiterte für %.256s" + norwegian-ny "Operation %s failed for '%.256s'" +ER_NONEXISTING_PROC_GRANT 42000 + eng "There is no such grant defined for user '%-.32s' on host '%-.64s' on routine '%-.64s'" +ER_PROC_AUTO_GRANT_FAIL + eng "Failed to grant EXECUTE and ALTER ROUTINE privileges" +ER_PROC_AUTO_REVOKE_FAIL + eng "Failed to revoke all privileges to dropped routine" diff --git a/sql/share/estonian/errmsg.txt b/sql/share/estonian/errmsg.txt deleted file mode 100644 index 5aab524e0d9..00000000000 --- a/sql/share/estonian/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Esialgne tõlge: Tõnu Samuel (tonu@spam.ee) - Parandanud ja täiendanud: Indrek Siitan (tfr@mysql.com) -*/ - -character-set=latin7 - -"hashchk", -"isamchk", -"EI", -"JAH", -"Ei suuda luua faili '%-.64s' (veakood: %d)", -"Ei suuda luua tabelit '%-.64s' (veakood: %d)", -"Ei suuda luua andmebaasi '%-.64s' (veakood: %d)", -"Ei suuda luua andmebaasi '%-.64s': andmebaas juba eksisteerib", -"Ei suuda kustutada andmebaasi '%-.64s': andmebaasi ei eksisteeri", -"Viga andmebaasi kustutamisel (ei suuda kustutada faili '%-.64s', veakood: %d)", -"Viga andmebaasi kustutamisel (ei suuda kustutada kataloogi '%-.64s', veakood: %d)", -"Viga '%-.64s' kustutamisel (veakood: %d)", -"Ei suuda lugeda kirjet süsteemsest tabelist", -"Ei suuda lugeda '%-.64s' olekut (veakood: %d)", -"Ei suuda identifitseerida jooksvat kataloogi (veakood: %d)", -"Ei suuda lukustada faili (veakood: %d)", -"Ei suuda avada faili '%-.64s' (veakood: %d)", -"Ei suuda leida faili '%-.64s' (veakood: %d)", -"Ei suuda lugeda kataloogi '%-.64s' (veakood: %d)", -"Ei suuda siseneda kataloogi '%-.64s' (veakood: %d)", -"Kirje tabelis '%-.64s' on muutunud viimasest lugemisest saadik", -"Ketas täis (%s). Ootame kuni tekib vaba ruumi...", -"Ei saa kirjutada, korduv võti tabelis '%-.64s'", -"Viga faili '%-.64s' sulgemisel (veakood: %d)", -"Viga faili '%-.64s' lugemisel (veakood: %d)", -"Viga faili '%-.64s' ümbernimetamisel '%-.64s'-ks (veakood: %d)", -"Viga faili '%-.64s' kirjutamisel (veakood: %d)", -"'%-.64s' on lukustatud muudatuste vastu", -"Sorteerimine katkestatud", -"Vaade '%-.64s' ei eksisteeri '%-.64s' jaoks", -"Tabeli handler tagastas vea %d", -"Tabeli '%-.64s' handler ei toeta antud operatsiooni", -"Ei suuda leida kirjet '%-.64s'-s", -"Vigane informatsioon failis '%-.64s'", -"Tabeli '%-.64s' võtmefail on vigane; proovi seda parandada", -"Tabeli '%-.64s' võtmefail on aegunud; paranda see!", -"Tabel '%-.64s' on ainult lugemiseks", -"Mälu sai otsa. Proovi MySQL uuesti käivitada (puudu jäi %d baiti)", -"Mälu sai sorteerimisel otsa. Suurenda MySQL-i sorteerimispuhvrit", -"Ootamatu faililõpumärgend faili '%-.64s' lugemisel (veakood: %d)", -"Liiga palju samaaegseid ühendusi", -"Mälu sai otsa. 
Võimalik, et aitab swap-i lisamine või käsu 'ulimit' abil MySQL-le rohkema mälu kasutamise lubamine", -"Ei suuda lahendada IP aadressi masina nimeks", -"Väär handshake", -"Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' andmebaasile '%-.64s'", -"Ligipääs keelatud kasutajale '%-.32s'@'%-.64s' (kasutab parooli: %s)", -"Andmebaasi ei ole valitud", -"Tundmatu käsk", -"Tulp '%-.64s' ei saa omada nullväärtust", -"Tundmatu andmebaas '%-.64s'", -"Tabel '%-.64s' juba eksisteerib", -"Tundmatu tabel '%-.64s'", -"Väli '%-.64s' %-.64s-s ei ole ühene", -"Serveri seiskamine käib", -"Tundmatu tulp '%-.64s' '%-.64s'-s", -"'%-.64s' puudub GROUP BY klauslis", -"Ei saa grupeerida '%-.64s' järgi", -"Lauses on korraga nii tulbad kui summeerimisfunktsioonid", -"Tulpade arv erineb väärtuste arvust", -"Identifikaatori '%-.100s' nimi on liiga pikk", -"Kattuv tulba nimi '%-.64s'", -"Kattuv võtme nimi '%-.64s'", -"Kattuv väärtus '%-.64s' võtmele %d", -"Vigane tulba kirjeldus tulbale '%-.64s'", -"%s '%-.80s' ligidal real %d", -"Tühi päring", -"Ei ole unikaalne tabel/alias '%-.64s'", -"Vigane vaikeväärtus '%-.64s' jaoks", -"Mitut primaarset võtit ei saa olla", -"Liiga palju võtmeid. Maksimaalselt võib olla %d võtit", -"Võti koosneb liiga paljudest osadest. Maksimaalselt võib olla %d osa", -"Võti on liiga pikk. Maksimaalne võtmepikkus on %d", -"Võtme tulp '%-.64s' puudub tabelis", -"BLOB-tüüpi tulpa '%-.64s' ei saa kasutada võtmena", -"Tulba '%-.64s' pikkus on liiga pikk (maksimaalne pikkus: %d). Kasuta BLOB väljatüüpi", -"Vigane tabelikirjeldus; Tabelis tohib olla üks auto_increment tüüpi tulp ning see peab olema defineeritud võtmena", -"%s: ootab ühendusi", -"%s: MySQL lõpetas\n", -"%s: sain signaali %d. Lõpetan!\n", -"%s: Lõpp\n", -"%s: Sulgen jõuga lõime %ld kasutaja: '%-.32s'\n", -"Ei suuda luua IP socketit", -"Tabelil '%-.64s' puuduvad võtmed. Loo tabel uuesti", -"Väljade eraldaja erineb oodatust. Tutvu kasutajajuhendiga", -"BLOB-tüüpi väljade olemasolul ei saa kasutada fikseeritud väljapikkust. Vajalik 'fields terminated by' määrang.", -"Fail '%-.64s' peab asuma andmebaasi kataloogis või olema kõigile loetav", -"Fail '%-.80s' juba eksisteerib", -"Kirjeid: %ld Kustutatud: %ld Vahele jäetud: %ld Hoiatusi: %ld", -"Kirjeid: %ld Kattuvaid: %ld", -"Vigane võtme osa. Kasutatud võtmeosa ei ole string tüüpi, määratud pikkus on pikem kui võtmeosa või tabelihandler ei toeta seda tüüpi võtmeid", -"ALTER TABLE kasutades ei saa kustutada kõiki tulpasid. Kustuta tabel DROP TABLE abil", -"Ei suuda kustutada '%-.64s'. Kontrolli kas tulp/võti eksisteerib", -"Kirjeid: %ld Kattuvaid: %ld Hoiatusi: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Tundmatu lõim: %lu", -"Ei ole lõime %lu omanik", -"Ühtegi tabelit pole kasutusel", -"Liiga palju string tulbale %-.64s tüübile SET", -"Ei suuda luua unikaalset logifaili nime %-.64s.(1-999)\n", -"Tabel '%-.64s' on lukustatud READ lukuga ning ei ole muudetav", -"Tabel '%-.64s' ei ole lukustatud käsuga LOCK TABLES", -"BLOB-tüüpi tulp '%-.64s' ei saa omada vaikeväärtust", -"Vigane andmebaasi nimi '%-.100s'", -"Vigane tabeli nimi '%-.100s'", -"SELECT lause peab läbi vaatama suure hulga kirjeid ja võtaks tõenäoliselt liiga kaua aega. 
Tasub kontrollida WHERE klauslit ja vajadusel kasutada käsku SET SQL_BIG_SELECTS=1", -"Tundmatu viga", -"Tundmatu protseduur '%-.64s'", -"Vale parameetrite hulk protseduurile '%-.64s'", -"Vigased parameetrid protseduurile '%-.64s'", -"Tundmatu tabel '%-.64s' %-.32s-s", -"Tulp '%-.64s' on määratletud topelt", -"Vigane grupeerimisfunktsiooni kasutus", -"Tabel '%-.64s' kasutab laiendust, mis ei eksisteeri antud MySQL versioonis", -"Tabelis peab olema vähemalt üks tulp", -"Tabel '%-.64s' on täis", -"Vigane kooditabel '%-.64s'", -"Liiga palju tabeleid. MySQL suudab JOINiga ühendada kuni %d tabelit", -"Liiga palju tulpasid", -"Liiga pikk kirje. Kirje maksimumpikkus arvestamata BLOB-tüüpi välju on %d. Muuda mõned väljad BLOB-tüüpi väljadeks", -"Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Ristsõltuvus OUTER JOIN klauslis. Kontrolli oma ON tingimusi", -"Tulp '%-.64s' on kasutusel indeksina, kuid ei ole määratletud kui NOT NULL", -"Ei suuda avada funktsiooni '%-.64s'", -"Ei suuda algväärtustada funktsiooni '%-.64s'; %-.80s", -"Teegi nimes ei tohi olla kataloogi", -"Funktsioon '%-.64s' juba eksisteerib", -"Ei suuda avada jagatud teeki '%-.64s' (veakood: %d %-.64s)", -"Ei leia funktsiooni '%-.64s' antud teegis", -"Funktsioon '%-.64s' ei ole defineeritud", -"Masin '%-.64s' on blokeeritud hulgaliste ühendusvigade tõttu. Blokeeringu saab tühistada 'mysqladmin flush-hosts' käsuga", -"Masinal '%-.64s' puudub ligipääs sellele MySQL serverile", -"Te kasutate MySQL-i anonüümse kasutajana, kelledel pole parooli muutmise õigust", -"Teiste paroolide muutmiseks on nõutav tabelite muutmisõigus 'mysql' andmebaasis", -"Ei leia vastavat kirjet kasutajate tabelis", -"Sobinud kirjeid: %ld Muudetud: %ld Hoiatusi: %ld", -"Ei suuda luua uut lõime (veakood %d). Kui mälu ei ole otsas, on tõenäoliselt tegemist operatsioonisüsteemispetsiifilise veaga", -"Tulpade hulk erineb väärtuste hulgast real %ld", -"Ei suuda taasavada tabelit '%-.64s'", -"NULL väärtuse väärkasutus", -"regexp tagastas vea '%-.64s'", -"GROUP tulpade (MIN(),MAX(),COUNT()...) kooskasutamine tavaliste tulpadega ilma GROUP BY klauslita ei ole lubatud", -"Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s'", -"%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tabelis '%-.64s'", -"%-.16s käsk ei ole lubatud kasutajale '%-.32s'@'%-.64s' tulbale '%-.64s' tabelis '%-.64s'", -"Vigane GRANT/REVOKE käsk. 
Tutvu kasutajajuhendiga", -"Masina või kasutaja nimi GRANT lauses on liiga pikk", -"Tabelit '%-.64s.%-.64s' ei eksisteeri", -"Sellist õigust ei ole defineeritud kasutajale '%-.32s' masinast '%-.64s' tabelile '%-.64s'", -"Antud käsk ei ole lubatud käesolevas MySQL versioonis", -"Viga SQL süntaksis", -"INSERT DELAYED lõim ei suutnud saada soovitud lukku tabelile %-.64s", -"Liiga palju DELAYED lõimesid kasutusel", -"Ühendus katkestatud %ld andmebaasile: '%-.64s' kasutajale: '%-.32s' (%-.64s)", -"Saabus suurem pakett kui lubatud 'max_allowed_packet' muutujaga", -"Viga ühendustoru lugemisel", -"fcntl() tagastas vea", -"Paketid saabusid vales järjekorras", -"Viga andmepaketi lahtipakkimisel", -"Viga andmepaketi lugemisel", -"Kontrollaja ületamine andmepakettide lugemisel", -"Viga andmepaketi kirjutamisel", -"Kontrollaja ületamine andmepakettide kirjutamisel", -"Tulemus on pikem kui lubatud 'max_allowed_packet' muutujaga", -"Valitud tabelitüüp ei toeta BLOB/TEXT tüüpi välju", -"Valitud tabelitüüp ei toeta AUTO_INCREMENT tüüpi välju", -"INSERT DELAYED ei saa kasutada tabeli '%-.64s' peal, kuna see on lukustatud LOCK TABLES käsuga", -"Vigane tulba nimi '%-.100s'", -"Tabelihandler ei oska indekseerida tulpa '%-.64s'", -"Kõik tabelid MERGE tabeli määratluses ei ole identsed", -"Ei suuda kirjutada tabelisse '%-.64s', kuna see rikub ühesuse kitsendust", -"BLOB-tüüpi tulp '%-.64s' on kasutusel võtmes ilma pikkust määratlemata", -"Kõik PRIMARY KEY peavad olema määratletud NOT NULL piiranguga; vajadusel kasuta UNIQUE tüüpi võtit", -"Tulemis oli rohkem kui üks kirje", -"Antud tabelitüüp nõuab primaarset võtit", -"Antud MySQL versioon on kompileeritud ilma RAID toeta", -"Katse muuta tabelit turvalises rezhiimis ilma WHERE klauslita", -"Võti '%-.64s' ei eksisteeri tabelis '%-.64s'", -"Ei suuda avada tabelit", -"Antud tabelitüüp ei toeta %s käske", -"Seda käsku ei saa kasutada transaktsiooni sees", -"Viga %d käsu COMMIT täitmisel", -"Viga %d käsu ROLLBACK täitmisel", -"Viga %d käsu FLUSH_LOGS täitmisel", -"Viga %d käsu CHECKPOINT täitmisel", -"Ühendus katkestatud %ld andmebaas: '%-.64s' kasutaja: '%-.32s' masin: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Ei suutnud leida FULLTEXT indeksit, mis kattuks kasutatud tulpadega", -"Ei suuda täita antud käsku kuna on aktiivseid lukke või käimasolev transaktsioon", -"Tundmatu süsteemne muutuja '%-.64s'", -"Tabel '%-.64s' on märgitud vigaseks ja tuleb parandada", -"Tabel '%-.64s' on märgitud vigaseks ja viimane (automaatne?) parandus ebaõnnestus", -"Hoiatus: mõnesid transaktsioone mittetoetavaid tabeleid ei suudetud tagasi kerida", -"Mitme lausendiga transaktsioon nõudis rohkem ruumi kui lubatud 'max_binlog_cache_size' muutujaga. 
Suurenda muutuja väärtust ja proovi uuesti", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"Kasutajal %-.64s on juba rohkem ühendusi kui lubatud 'max_user_connections' muutujaga", -"Ainult konstantsed suurused on lubatud SET klauslis", -"Kontrollaeg ületatud luku järel ootamisel; Proovi transaktsiooni otsast alata", -"Lukkude koguarv ületab lukutabeli suuruse", -"Uuenduslukke ei saa kasutada READ UNCOMMITTED transaktsiooni käigus", -"DROP DATABASE ei ole lubatud kui lõim omab globaalset READ lukku", -"CREATE DATABASE ei ole lubatud kui lõim omab globaalset READ lukku", -"Vigased parameetrid %s-le", -"Kasutajal '%-.32s'@'%-.64s' ei ole lubatud luua uusi kasutajaid", -"Vigane tabelimääratlus; kõik MERGE tabeli liikmed peavad asuma samas andmebaasis", -"Lukustamisel tekkis tupik (deadlock); alusta transaktsiooni otsast", -"Antud tabelitüüp ei toeta FULLTEXT indekseid", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Viga käsu %s täitmisel: %-.128s", -"Vigane %s ja %s kasutus", -"Tulpade arv kasutatud SELECT lausetes ei kattu", -"Ei suuda täita päringut konfliktse luku tõttu", -"Transaktsioone toetavate ning mittetoetavate tabelite kooskasutamine ei ole lubatud", -"Määrangut '%s' on lauses kasutatud topelt", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", 
-"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/french/errmsg.txt b/sql/share/french/errmsg.txt deleted file mode 100644 index bb2bd29171a..00000000000 --- a/sql/share/french/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NON", -"OUI", -"Ne peut créer le fichier '%-.64s' (Errcode: %d)", -"Ne peut créer la table '%-.64s' (Errcode: %d)", -"Ne peut créer la base '%-.64s' (Erreur %d)", -"Ne peut créer la base '%-.64s'; elle existe déjà", -"Ne peut effacer la base '%-.64s'; elle n'existe pas", -"Ne peut effacer la base '%-.64s' (erreur %d)", -"Erreur en effaçant la base (rmdir '%-.64s', erreur %d)", -"Erreur en effaçant '%-.64s' (Errcode: %d)", -"Ne peut lire un enregistrement de la table 'system'", -"Ne peut obtenir le status de '%-.64s' (Errcode: %d)", -"Ne peut obtenir le répertoire de travail (Errcode: %d)", -"Ne peut verrouiller le fichier (Errcode: %d)", -"Ne peut ouvrir le fichier: '%-.64s' (Errcode: %d)", -"Ne peut trouver le fichier: '%-.64s' (Errcode: %d)", -"Ne peut lire le répertoire de '%-.64s' (Errcode: %d)", -"Ne peut changer le répertoire pour '%-.64s' (Errcode: %d)", -"Enregistrement modifié depuis sa dernière lecture dans la table '%-.64s'", -"Disque plein (%s). J'attend que quelqu'un libère de l'espace...", -"Ecriture impossible, doublon dans une clé de la table '%-.64s'", -"Erreur a la fermeture de '%-.64s' (Errcode: %d)", -"Erreur en lecture du fichier '%-.64s' (Errcode: %d)", -"Erreur en renommant '%-.64s' en '%-.64s' (Errcode: %d)", -"Erreur d'écriture du fichier '%-.64s' (Errcode: %d)", -"'%-.64s' est verrouillé contre les modifications", -"Tri alphabétique abandonné", -"La vue (View) '%-.64s' n'existe pas pour '%-.64s'", -"Reçu l'erreur %d du handler de la table", -"Le handler de la table '%-.64s' n'a pas cette option", -"Ne peut trouver l'enregistrement dans '%-.64s'", -"Information erronnée dans le fichier: '%-.64s'", -"Index corrompu dans la table: '%-.64s'; essayez de le réparer", -"Vieux fichier d'index pour la table '%-.64s'; réparez le!", -"'%-.64s' est en lecture seulement", -"Manque de mémoire. Redémarrez le démon et ré-essayez (%d octets nécessaires)", -"Manque de mémoire pour le tri. Augmentez-la.", -"Fin de fichier inattendue en lisant '%-.64s' (Errcode: %d)", -"Trop de connections", -"Manque de 'threads'/mémoire", -"Ne peut obtenir de hostname pour votre adresse", -"Mauvais 'handshake'", -"Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s'. 
Base '%-.64s'", -"Accès refusé pour l'utilisateur: '%-.32s'@'@%-.64s' (mot de passe: %s)", -"Aucune base n'a été sélectionnée", -"Commande inconnue", -"Le champ '%-.64s' ne peut être vide (null)", -"Base '%-.64s' inconnue", -"La table '%-.64s' existe déjà", -"Table '%-.64s' inconnue", -"Champ: '%-.64s' dans %s est ambigu", -"Arrêt du serveur en cours", -"Champ '%-.64s' inconnu dans %s", -"'%-.64s' n'est pas dans 'group by'", -"Ne peut regrouper '%-.64s'", -"Vous demandez la fonction sum() et des champs dans la même commande", -"Column count doesn't match value count", -"Le nom de l'identificateur '%-.64s' est trop long", -"Nom du champ '%-.64s' déjà utilisé", -"Nom de clef '%-.64s' déjà utilisé", -"Duplicata du champ '%-.64s' pour la clef %d", -"Mauvais paramètre de champ pour le champ '%-.64s'", -"%s près de '%-.64s' à la ligne %d", -"Query est vide", -"Table/alias: '%-.64s' non unique", -"Valeur par défaut invalide pour '%-.64s'", -"Plusieurs clefs primaires définies", -"Trop de clefs sont définies. Maximum de %d clefs alloué", -"Trop de parties specifiées dans la clef. Maximum de %d parties", -"La clé est trop longue. Longueur maximale: %d", -"La clé '%-.64s' n'existe pas dans la table", -"Champ BLOB '%-.64s' ne peut être utilisé dans une clé", -"Champ '%-.64s' trop long (max = %d). Utilisez un BLOB", -"Un seul champ automatique est permis et il doit être indexé", -"%s: Prêt pour des connections", -"%s: Arrêt normal du serveur\n", -"%s: Reçu le signal %d. Abandonne!\n", -"%s: Arrêt du serveur terminé\n", -"%s: Arrêt forcé de la tâche (thread) %ld utilisateur: '%-.64s'\n", -"Ne peut créer la connection IP (socket)", -"La table '%-.64s' n'a pas d'index comme celle utilisée dans CREATE INDEX. Recréez la table", -"Séparateur de champs inconnu. Vérifiez dans le manuel", -"Vous ne pouvez utiliser des lignes de longueur fixe avec des BLOBs. Utiliser 'fields terminated by'.", -"Le fichier '%-.64s' doit être dans le répertoire de la base et lisible par tous", -"Le fichier '%-.64s' existe déjà", -"Enregistrements: %ld Effacés: %ld Non traités: %ld Avertissements: %ld", -"Enregistrements: %ld Doublons: %ld", -"Mauvaise sous-clef. Ce n'est pas un 'string' ou la longueur dépasse celle définie dans la clef", -"Vous ne pouvez effacer tous les champs avec ALTER TABLE. Utilisez DROP TABLE", -"Ne peut effacer (DROP) '%-.64s'. Vérifiez s'il existe", -"Enregistrements: %ld Doublons: %ld Avertissements: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Numéro de tâche inconnu: %lu", -"Vous n'êtes pas propriétaire de la tâche no: %lu", -"Aucune table utilisée", -"Trop de chaînes dans la colonne %s avec SET", -"Ne peut générer un unique nom de journal %s.(1-999)\n", -"Table '%-.64s' verrouillée lecture (READ): modification impossible", -"Table '%-.64s' non verrouillée: utilisez LOCK TABLES", -"BLOB '%-.64s' ne peut avoir de valeur par défaut", -"Nom de base de donnée illégal: '%-.64s'", -"Nom de table illégal: '%-.64s'", -"SELECT va devoir examiner beaucoup d'enregistrements ce qui va prendre du temps. 
Vérifiez la clause WHERE et utilisez SET SQL_BIG_SELECTS=1 si SELECT se passe bien", -"Erreur inconnue", -"Procédure %s inconnue", -"Mauvais nombre de paramètres pour la procedure %s", -"Paramètre erroné pour la procedure %s", -"Table inconnue '%-.64s' dans %s", -"Champ '%-.64s' spécifié deux fois", -"Utilisation invalide de la clause GROUP", -"Table '%-.64s' : utilise une extension invalide pour cette version de MySQL", -"Une table doit comporter au moins une colonne", -"La table '%-.64s' est pleine", -"Jeu de caractères inconnu: '%-.64s'", -"Trop de tables. MySQL ne peut utiliser que %d tables dans un JOIN", -"Trop de champs", -"Ligne trop grande. Le taille maximale d'une ligne, sauf les BLOBs, est %d. Changez le type de quelques colonnes en BLOB", -"Débordement de la pile des tâches (Thread stack). Utilisées: %ld pour une pile de %ld. Essayez 'mysqld -O thread_stack=#' pour indiquer une plus grande valeur", -"Dépendance croisée dans une clause OUTER JOIN. Vérifiez la condition ON", -"La colonne '%-.32s' fait partie d'un index UNIQUE ou INDEX mais n'est pas définie comme NOT NULL", -"Imposible de charger la fonction '%-.64s'", -"Impossible d'initialiser la fonction '%-.64s'; %-.80s", -"Chemin interdit pour les bibliothèques partagées", -"La fonction '%-.64s' existe déjà", -"Impossible d'ouvrir la bibliothèque partagée '%-.64s' (errno: %d %s)", -"Impossible de trouver la fonction '%-.64s' dans la bibliothèque'", -"La fonction '%-.64s' n'est pas définie", -"L'hôte '%-.64s' est bloqué à cause d'un trop grand nombre d'erreur de connection. Débloquer le par 'mysqladmin flush-hosts'", -"Le hôte '%-.64s' n'est pas authorisé à se connecter à ce serveur MySQL", -"Vous utilisez un utilisateur anonyme et les utilisateurs anonymes ne sont pas autorisés à changer les mots de passe", -"Vous devez avoir le privilège update sur les tables de la base de donnée mysql pour pouvoir changer les mots de passe des autres", -"Impossible de trouver un enregistrement correspondant dans la table user", -"Enregistrements correspondants: %ld Modifiés: %ld Warnings: %ld", -"Impossible de créer une nouvelle tâche (errno %d). S'il reste de la mémoire libre, consultez le manual pour trouver un éventuel bug dépendant de l'OS", -"Column count doesn't match value count at row %ld", -"Impossible de réouvrir la table: '%-.64s", -"Utilisation incorrecte de la valeur NULL", -"Erreur '%-.64s' provenant de regexp", -"Mélanger les colonnes GROUP (MIN(),MAX(),COUNT()...) avec des colonnes normales est interdit s'il n'y a pas de clause GROUP BY", -"Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s'", -"La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la table '%-.64s'", -"La commande '%-.16s' est interdite à l'utilisateur: '%-.32s'@'@%-.64s' sur la colonne '%-.64s' de la table '%-.64s'", -"Commande GRANT/REVOKE incorrecte. 
Consultez le manuel.", -"L'hôte ou l'utilisateur donné en argument à GRANT est trop long", -"La table '%-.64s.%s' n'existe pas", -"Un tel droit n'est pas défini pour l'utilisateur '%-.32s' sur l'hôte '%-.64s' sur la table '%-.64s'", -"Cette commande n'existe pas dans cette version de MySQL", -"Erreur de syntaxe", -"La tâche 'delayed insert' n'a pas pu obtenir le verrou démandé sur la table %-.64s", -"Trop de tâche 'delayed' en cours", -"Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.64s' (%s)", -"Paquet plus grand que 'max_allowed_packet' reçu", -"Erreur de lecture reçue du pipe de connection", -"Erreur reçue de fcntl() ", -"Paquets reçus dans le désordre", -"Impossible de décompresser le paquet reçu", -"Erreur de lecture des paquets reçus", -"Timeout en lecture des paquets reçus", -"Erreur d'écriture des paquets envoyés", -"Timeout d'écriture des paquets envoyés", -"La chaîne résultat est plus grande que 'max_allowed_packet'", -"Ce type de table ne supporte pas les colonnes BLOB/TEXT", -"Ce type de table ne supporte pas les colonnes AUTO_INCREMENT", -"INSERT DELAYED ne peut être utilisé avec la table '%-.64s', car elle est verrouée avec LOCK TABLES", -"Nom de colonne '%-.100s' incorrect", -"Le handler de la table ne peut indexé la colonne '%-.64s'", -"Toutes les tables de la table de type MERGE n'ont pas la même définition", -"Écriture impossible à cause d'un index UNIQUE sur la table '%-.64s'", -"La colonne '%-.64s' de type BLOB est utilisée dans une définition d'index sans longueur d'index", -"Toutes les parties d'un index PRIMARY KEY doivent être NOT NULL; Si vous avez besoin d'un NULL dans l'index, utilisez un index UNIQUE", -"Le résultat contient plus d'un enregistrement", -"Ce type de table nécessite une clé primaire (PRIMARY KEY)", -"Cette version de MySQL n'est pas compilée avec le support RAID", -"Vous êtes en mode 'safe update' et vous essayez de faire un UPDATE sans clause WHERE utilisant un index", -"L'index '%-.64s' n'existe pas sur la table '%-.64s'", -"Impossible d'ouvrir la table", -"Ce type de table ne supporte pas les %s", -"Vous n'êtes pas autorisé à exécute cette commande dans une transaction", -"Erreur %d lors du COMMIT", -"Erreur %d lors du ROLLBACK", -"Erreur %d lors du FLUSH_LOGS", -"Erreur %d lors du CHECKPOINT", -"Connection %ld avortée vers la bd: '%-.64s' utilisateur: '%-.32s' hôte: `%-.64s' (%-.64s)", -"Ce type de table ne supporte pas les copies binaires", -"Le 'binlog' a été fermé pendant l'exécution du FLUSH MASTER", -"La reconstruction de l'index de la table copiée '%-.64s' a échoué", -"Erreur reçue du maître: '%-.64s'", -"Erreur de lecture réseau reçue du maître", -"Erreur d'écriture réseau reçue du maître", -"Impossible de trouver un index FULLTEXT correspondant à cette liste de colonnes", -"Impossible d'exécuter la commande car vous avez des tables verrouillées ou une transaction active", -"Variable système '%-.64s' inconnue", -"La table '%-.64s' est marquée 'crashed' et devrait être réparée", -"La table '%-.64s' est marquée 'crashed' et le dernier 'repair' a échoué", -"Attention: certaines tables ne supportant pas les transactions ont été changées et elles ne pourront pas être restituées", -"Cette transaction à commandes multiples nécessite plus de 'max_binlog_cache_size' octets de stockage, augmentez cette variable de mysqld et réessayez", -"Cette opération ne peut être réalisée avec un esclave actif, faites STOP SLAVE d'abord", -"Cette opération nécessite un esclave actif, configurez les esclaves et faites START SLAVE", -"Le server 
n'est pas configuré comme un esclave, changez le fichier de configuration ou utilisez CHANGE MASTER TO", -"Impossible d'initialiser les structures d'information de maître, vous trouverez des messages d'erreur supplémentaires dans le journal des erreurs de MySQL", -"Impossible de créer une tâche esclave, vérifiez les ressources système", -"L'utilisateur %-.64s possède déjà plus de 'max_user_connections' connections actives", -"Seules les expressions constantes sont autorisées avec SET", -"Timeout sur l'obtention du verrou", -"Le nombre total de verrou dépasse la taille de la table des verrous", -"Un verrou en update ne peut être acquit pendant une transaction READ UNCOMMITTED", -"DROP DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture", -"CREATE DATABASE n'est pas autorisée pendant qu'une tâche possède un verrou global en lecture", -"Mauvais arguments à %s", -"'%-.32s'@'%-.64s' n'est pas autorisé à créer de nouveaux utilisateurs", -"Définition de table incorrecte; toutes les tables MERGE doivent être dans la même base de donnée", -"Deadlock découvert en essayant d'obtenir les verrous : essayez de redémarrer la transaction", -"Le type de table utilisé ne supporte pas les index FULLTEXT", -"Impossible d'ajouter des contraintes d'index externe", -"Impossible d'ajouter un enregistrement fils : une constrainte externe l'empèche", -"Impossible de supprimer un enregistrement père : une constrainte externe l'empèche", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is 
%d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/german/errmsg.txt b/sql/share/german/errmsg.txt deleted file mode 100644 index 03e838dd805..00000000000 --- a/sql/share/german/errmsg.txt +++ /dev/null @@ -1,334 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Dirk Munzinger (dmun@4t2.com) - 2001-06-07 - - Georg Richter (georg@php.net) - fixed typos and translation - translated new error messages - 2002-12-11 - - Stefan Hinz (stefan@mysql.com) - 2003-10-01 -*/ - -character-set=latin1 - -"hashchk", -"isamchk", -"Nein", -"Ja", -"Kann Datei '%-.64s' nicht erzeugen (Fehler: %d)", -"Kann Tabelle '%-.64s' nicht erzeugen (Fehler: %d)", -"Kann Datenbank '%-.64s' nicht erzeugen (Fehler: %d)", -"Kann Datenbank '%-.64s' nicht erzeugen. Datenbank '%-.64s' existiert bereits", -"Kann Datenbank '%-.64s' nicht löschen. Keine Datenbank '%-.64s' vorhanden", -"Fehler beim Löschen der Datenbank ('%-.64s' kann nicht gelöscht werden, Fehlernuumer: %d)", -"Fehler beim Löschen der Datenbank (Verzeichnis '%-.64s' kann nicht gelöscht werden, Fehlernummer: %d)", -"Fehler beim Löschen von '%-.64s' (Fehler: %d)", -"Datensatz in der Systemtabelle nicht lesbar", -"Kann Status von '%-.64s' nicht ermitteln (Fehler: %d)", -"Kann Arbeitsverzeichnis nicht ermitteln (Fehler: %d)", -"Datei kann nicht gesperrt werden (Fehler: %d)", -"Datei '%-.64s' nicht öffnen (Fehler: %d)", -"Kann Datei '%-.64s' nicht finden (Fehler: %d)", -"Verzeichnis von '%-.64s' nicht lesbar (Fehler: %d)", -"Kann nicht in das Verzeichnis '%-.64s' wechseln (Fehler: %d)", -"Datensatz hat sich seit dem letzten Zugriff auf Tabelle '%-.64s' geändert", -"Festplatte voll (%-.64s). 
Warte, bis jemand Platz schafft ...", -"Kann nicht speichern, Grund: doppelter Schlüssel in Tabelle '%-.64s'", -"Fehler beim Schließen von '%-.64s' (Fehler: %d)", -"Fehler beim Lesen der Datei '%-.64s' (Fehler: %d)", -"Fehler beim Umbenennen von '%-.64s' in '%-.64s' (Fehler: %d)", -"Fehler beim Speichern der Datei '%-.64s' (Fehler: %d)", -"'%-.64s' ist für Änderungen gesperrt", -"Sortiervorgang abgebrochen", -"View '%-.64s' existiert für '%-.64s' nicht", -"Fehler %d (Tabellenhandler)", -"Diese Option gibt es nicht (Tabellenhandler)", -"Kann Datensatz nicht finden", -"Falsche Information in Datei '%-.64s'", -"Falsche Schlüssel-Datei für Tabelle '%-.64s'. versuche zu reparieren", -"Alte Schlüssel-Datei für Tabelle '%-.64s'. Bitte reparieren", -"'%-.64s' ist nur lesbar", -"Kein Speicher vorhanden (%d Bytes benötigt). Bitte Server neu starten", -"Kein Speicher zum Sortieren vorhanden. sort_buffer_size sollte erhöht werden", -"Unerwartetes Ende beim Lesen der Datei '%-.64s' (Fehler: %d)", -"Zu viele Verbindungen", -"Kein Speicher mehr vorhanden. Prüfen Sie, ob mysqld oder ein anderer Prozess allen Speicher verbraucht. Wenn nicht, sollten Sie mit 'ulimit' dafür sorgen, dass mysqld mehr Speicher benutzen darf, oder mehr Swap-Speicher einrichten", -"Kann Hostnamen für diese Adresse nicht erhalten", -"Schlechter Handshake", -"Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung für Datenbank '%-.64s'", -"Benutzer '%-.32s'@'%-.64s' hat keine Zugriffsberechtigung (verwendetes Passwort: %-.64s)", -"Keine Datenbank ausgewählt", -"Unbekannter Befehl", -"Feld '%-.64s' darf nicht NULL sein", -"Unbekannte Datenbank '%-.64s'", -"Tabelle '%-.64s' bereits vorhanden", -"Unbekannte Tabelle '%-.64s'", -"Spalte '%-.64s' in %-.64s ist nicht eindeutig", -"Der Server wird heruntergefahren", -"Unbekanntes Tabellenfeld '%-.64s' in %-.64s", -"'%-.64s' ist nicht in GROUP BY vorhanden", -"Gruppierung über '%-.64s' nicht möglich", -"Die Verwendung von Summierungsfunktionen und Spalten im selben Befehl ist nicht erlaubt", -"Die Anzahl der Spalten entspricht nicht der Anzahl der Werte", -"Name des Bezeichners '%-.64s' ist zu lang", -"Doppelter Spaltenname vorhanden: '%-.64s'", -"Doppelter Name für Schlüssel (Key) vorhanden: '%-.64s'", -"Doppelter Eintrag '%-.64s' für Schlüssel %d", -"Falsche Spaltenangaben für Spalte '%-.64s'", -"%s bei '%-.80s' in Zeile %d", -"Leere Abfrage", -"Tabellenname/Alias '%-.64s' nicht eindeutig", -"Fehlerhafter Vorgabewert (DEFAULT): '%-.64s'", -"Mehrfacher Primärschlüssel (PRIMARY KEY) definiert", -"Zu viele Schlüssel definiert. Maximal %d Schlüssel erlaubt", -"Zu viele Teilschlüssel definiert. Maximal sind %d Teilschlüssel erlaubt", -"Schlüssel ist zu lang. Die maximale Schlüssellänge beträgt %d", -"In der Tabelle gibt es keine Schlüsselspalte '%-.64s'", -"BLOB-Feld '%-.64s' kann beim verwendeten Tabellentyp nicht als Schlüssel verwendet werden", -"Feldlänge für Feld '%-.64s' zu groß (maximal %d). BLOB-Feld verwenden!", -"Falsche Tabellendefinition. Es darf nur ein Auto-Feld geben und dieses muss als Schlüssel definiert werden", -"%-.64s: Bereit für Verbindungen", -"%-.64s: Normal heruntergefahren\n", -"%-.64s: Signal %d erhalten. Abbruch!\n", -"%-.64s: Heruntergefahren (shutdown)\n", -"%s: Thread %ld zwangsweise beendet. Benutzer: '%-.32s'\n", -"Kann IP-Socket nicht erzeugen", -"Tabelle '%-.64s' besitzt keinen wie den in CREATE INDEX verwendeten Index. Index neu anlegen", -"Feldbegrenzer-Argument ist nicht in der erwarteten Form. 
Bitte im Handbuch nachlesen", -"Eine feste Zeilenlänge kann für BLOB-Felder nicht verwendet werden. Bitte 'fields terminated by' verwenden", -"Datei '%-.64s' muss im Datenbank-Verzeichnis vorhanden und lesbar für alle sein", -"Datei '%-.64s' bereits vorhanden", -"Datensätze: %ld Gelöscht: %ld Ausgelassen: %ld Warnungen: %ld", -"Datensätze: %ld Duplikate: %ld", -"Falscher Unterteilschlüssel. Der verwendete Schlüsselteil ist entweder kein String, die verwendete Länge ist länger als der Teilschlüssel oder der Tabellenhandler unterstützt keine Unterteilschlüssel", -"Mit ALTER TABLE können nicht alle Felder auf einmal gelöscht werden. Dafür DROP TABLE verwenden", -"Kann '%-.64s' nicht löschen. Existiert das Feld / der Schlüssel?", -"Datensätze: %ld Duplikate: %ld Warnungen: %ld", -"Die Verwendung der zu aktualisierenden Zieltabelle '%-.64s' ist in der FROM-Klausel nicht zulässig.", -"Unbekannte Thread-ID: %lu", -"Sie sind nicht Eigentümer von Thread %lu", -"Keine Tabellen verwendet", -"Zu viele Strings für SET-Spalte %-.64s angegeben", -"Kann keinen eindeutigen Dateinamen für die Logdatei %-.64s erzeugen (1-999)\n", -"Tabelle '%-.64s' ist mit Lesesperre versehen und kann nicht aktualisiert werden", -"Tabelle '%-.64s' wurde nicht mit LOCK TABLES gesperrt", -"BLOB-Feld '%-.64s' darf keinen Vorgabewert (DEFAULT) haben", -"Unerlaubter Datenbankname '%-.64s'", -"Unerlaubter Tabellenname '%-.64s'", -"Die Ausführung des SELECT würde zu viele Datensätze untersuchen und wahrscheinlich sehr lange dauern. Bitte WHERE-Klausel überprüfen oder gegebenenfalls SET SQL_BIG_SELECTS=1 oder SET SQL_MAX_JOIN_SIZE=# verwenden", -"Unbekannter Fehler", -"Unbekannte Prozedur '%-.64s'", -"Falsche Parameterzahl für Prozedur '%-.64s'", -"Falsche Parameter für Prozedur '%-.64s'", -"Unbekannte Tabelle '%-.64s' in '%-.64s'", -"Feld '%-.64s' wurde zweimal angegeben", -"Falsche Verwendung einer Gruppierungsfunktion", -"Tabelle '%-.64s' verwendet eine Extension, die in dieser MySQL-Version nicht verfügbar ist", -"Eine Tabelle muß mindestens 1 Spalte besitzen", -"Tabelle '%-.64s' ist voll", -"Unbekannter Zeichensatz: '%-.64s'", -"Zu viele Tabellen. MySQL kann in einem Join maximal %d Tabellen verwenden", -"Zu viele Spalten", -"Zeilenlänge zu groß. Die maximale Spaltenlänge für den verwendeten Tabellentyp (ohne BLOB-Felder) beträgt %d. Einige Felder müssen in BLOB oder TEXT umgewandelt werden", -"Thread-Stack-Überlauf. Benutzt: %ld von %ld Stack. 'mysqld -O thread_stack=#' verwenen, um notfalls einen größeren Stack anzulegen", -"OUTER JOIN enthält fehlerhafte Abhängigkeiten. In ON verwendete Bedingungen überprüfen", -"Spalte '%-.64s' wurde mit UNIQUE oder INDEX benutzt, ist aber nicht als NOT NULL definiert", -"Kann Funktion '%-.64s' nicht laden", -"Kann Funktion '%-.64s' nicht initialisieren: %-.80s", -"Keine Pfade gestattet für Shared Library", -"Funktion '%-.64s' existiert schon", -"Kann Shared Library '%-.64s' nicht öffnen (Fehler: %d %-.64s)", -"Kann Funktion '%-.64s' in der Library nicht finden", -"Funktion '%-.64s' ist nicht definiert", -"Host '%-.64s' blockiert wegen zu vieler Verbindungsfehler. 
Aufheben der Blockierung mit 'mysqladmin flush-hosts'", -"Host '%-.64s' hat keine Berechtigung, sich mit diesem MySQL-Server zu verbinden", -"Sie benutzen MySQL als anonymer Benutzer und dürfen daher keine Passwörter ändern", -"Sie benötigen die Berechtigung zum Aktualisieren von Tabellen in der Datenbank 'mysql', um die Passwörter anderer Benutzer ändern zu können", -"Kann keinen passenden Datensatz in Tabelle 'user' finden", -"Datensätze gefunden: %ld Geändert: %ld Warnungen: %ld", -"Kann keinen neuen Thread erzeugen (Fehler: %d). Sollte noch Speicher verfügbar sein, bitte im Handbuch wegen möglicher Fehler im Betriebssystem nachschlagen", -"Anzahl der Spalten stimmt nicht mit der Anzahl der Werte in Zeile %ld überein", -"Kann Tabelle '%-.64s' nicht erneut öffnen", -"Unerlaubte Verwendung eines NULL-Werts", -"regexp lieferte Fehler '%-.64s'", -"Das Vermischen von GROUP-Spalten (MIN(),MAX(),COUNT()...) mit Nicht-GROUP-Spalten ist nicht zulässig, wenn keine GROUP BY-Klausel vorhanden ist", -"Für Benutzer '%-.32s' auf Host '%-.64s' gibt es keine solche Berechtigung", -"%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und für Tabelle '%-.64s'", -"%-.16s Befehl nicht erlaubt für Benutzer '%-.32s'@'%-.64s' und Spalte '%-.64s' in Tabelle '%-.64s'", -"Unzulässiger GRANT- oder REVOKE-Befehl. Verfügbare Berechtigungen sind im Handbuch aufgeführt", -"Das Host- oder User-Argument für GRANT ist zu lang", -"Tabelle '%-.64s.%-.64s' existiert nicht", -"Keine solche Berechtigung für User '%-.32s' auf Host '%-.64s' an Tabelle '%-.64s'", -"Der verwendete Befehl ist in dieser MySQL-Version nicht zulässig", -"Fehler in der SQL-Syntax. Bitte die korrekte Syntax im Handbuch nachschlagen (diese kann für verschiedene Server-Versionen unterschiedlich sein)", -"Verzögerter (DELAYED) Einfüge-Thread konnte die angeforderte Sperre für Tabelle '%-.64s' nicht erhalten", -"Zu viele verzögerte (DELAYED) Threads in Verwendung", -"Abbruch der Verbindung %ld zur Datenbank '%-.64s'. Benutzer: '%-.64s' (%-.64s)", -"Empfangenes Paket ist größer als 'max_allowed_packet'", -"Lese-Fehler bei einer Kommunikations-Pipe", -"fcntl() lieferte einen Fehler", -"Pakete nicht in der richtigen Reihenfolge empfangen", -"Kommunikationspaket lässt sich nicht entpacken", -"Fehler beim Lesen eines Kommunikationspakets", -"Zeitüberschreitung beim Lesen eines Kommunikationspakets", -"Fehler beim Schreiben eines Kommunikationspakets", -"Zeitüberschreitung beim Schreiben eines Kommunikationspakets", -"Ergebnis ist länger als 'max_allowed_packet'", -"Der verwendete Tabellentyp unterstützt keine BLOB- und TEXT-Spalten", -"Der verwendete Tabellentyp unterstützt keine AUTO_INCREMENT-Spalten", -"INSERT DELAYED kann nicht auf Tabelle '%-.64s' angewendet werden, da diese mit LOCK TABLES gesperrt ist", -"Falscher Spaltenname '%-.100s'", -"Der verwendete Tabellen-Handler kann die Spalte '%-.64s' nicht indizieren", -"Nicht alle Tabellen in der MERGE-Tabelle sind gleich definiert", -"Schreiben in Tabelle '%-.64s' nicht möglich wegen einer eindeutigen Beschränkung (unique constraint)", -"BLOB- oder TEXT-Spalte '%-.64s' wird in der Schlüsseldefinition ohne Schlüssellängenangabe verwendet", -"Alle Teile eines PRIMARY KEY müssen als NOT NULL definiert sein. 
Wenn NULL in einem Schlüssel verwendet wird, muss ein UNIQUE-Schlüssel verwendet werden", -"Ergebnis besteht aus mehr als einer Zeile", -"Dieser Tabellentyp benötigt einen PRIMARY KEY", -"Diese MySQL-Version ist nicht mit RAID-Unterstützung kompiliert", -"MySQL läuft im sicheren Aktualisierungsmodus (safe update mode). Sie haben versucht, eine Tabelle zu aktualisieren, ohne in der WHERE-Klausel eine KEY-Spalte anzugeben", -"Schlüssel '%-.64s' existiert in der Tabelle '%-.64s' nicht", -"Kann Tabelle nicht öffnen", -"Die Speicher-Engine für diese Tabelle unterstützt kein %s", -"Sie dürfen diesen Befehl nicht in einer Transaktion ausführen", -"Fehler %d beim COMMIT", -"Fehler %d beim ROLLBACK", -"Fehler %d bei FLUSH_LOGS", -"Fehler %d bei CHECKPOINT", -"Verbindungsabbruch %ld zur Datenbank '%-.64s'. Benutzer: '%-.32s', Host: `%-.64s' (%-.64s)", -"Die Speicher-Engine für die Tabelle unterstützt keinen binären Tabellen-Dump", -"Binlog geschlossen. Kann RESET MASTER nicht ausführen", -"Neuerstellung des Index der Dump-Tabelle '%-.64s' fehlgeschlagen", -"Fehler vom Master: '%-.64s'", -"Netzfehler beim Lesen vom Master", -"Netzfehler beim Schreiben zum Master", -"Kann keinen FULLTEXT-Index finden, der der Spaltenliste entspricht", -"Kann den angegebenen Befehl wegen einer aktiven Tabellensperre oder einer aktiven Transaktion nicht ausführen", -"Unbekannte Systemvariable '%-.64s'", -"Tabelle '%-.64s' ist als defekt markiert und sollte repariert werden", -"Tabelle '%-.64s' ist als defekt markiert und der letzte (automatische?) Reparaturversuch schlug fehl", -"Änderungen an einigen nicht transaktionalen Tabellen konnten nicht zurückgerollt werden", -"Transaktionen, die aus mehreren Befehlen bestehen, benötigen mehr als 'max_binlog_cache_size' Bytes an Speicher. Diese mysqld-Variable bitte vergrössern und erneut versuchen", -"Diese Operation kann nicht bei einem aktiven Slave durchgeführt werden. Bitte zuerst STOP SLAVE ausführen", -"Diese Operation benötigt einen aktiven Slave. Bitte Slave konfigurieren und mittels START SLAVE aktivieren", -"Der Server ist nicht als Slave konfiguriert. Bitte in der Konfigurationsdatei oder mittels CHANGE MASTER TO beheben", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Konnte keinen Slave-Thread starten. Bitte System-Ressourcen überprüfen", -"Benutzer '%-.64s' hat mehr als max_user_connections aktive Verbindungen", -"Bei SET dürfen nur konstante Ausdrücke verwendet werden", -"Beim Warten auf eine Sperre wurde die zulässige Wartezeit überschritten. Bitte versuchen Sie, die Transaktion neu zu starten", -"Die Gesamtzahl der Sperren überschreitet die Größe der Sperrtabelle", -"Während einer READ UNCOMMITTED-Transaktion können keine UPDATE-Sperren angefordert werden", -"DROP DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält", -"CREATE DATABASE ist nicht erlaubt, solange der Thread eine globale Lesesperre hält", -"Falsche Argumente für %s", -"'%-.32s'@'%-.64s' ist nicht berechtigt, neue Benutzer hinzuzufügen", -"Falsche Tabellendefinition. Alle MERGE-Tabellen müssen sich in derselben Datenbank befinden", -"Beim Versuch, eine Sperre anzufordern, ist ein Deadlock aufgetreten. 
Versuchen Sie, die Transaktion erneut zu starten", -"Der verwendete Tabellentyp unterstützt keine FULLTEXT-Indizes", -"Fremdschlüssel-Beschränkung konnte nicht hinzugefügt werden", -"Hinzufügen eines Kind-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl", -"Löschen eines Eltern-Datensatzes schlug aufgrund einer Fremdschlüssel-Beschränkung fehl", -"Fehler bei der Verbindung zum Master: %-.128s", -"Beim Ausführen einer Abfrage auf dem Master trat ein Fehler auf: %-.128s", -"Fehler beim Ausführen des Befehls %s: %-.128s", -"Falsche Verwendung von %s und %s", -"Die verwendeten SELECT-Befehle liefern eine unterschiedliche Anzahl von Spalten zurück", -"Aufgrund eines READ LOCK-Konflikts kann die Abfrage nicht ausgeführt werden", -"Die gleichzeitige Verwendung von Tabellen mit und ohne Transaktionsunterstützung ist deaktiviert", -"Option '%s' wird im Befehl zweimal verwendet", -"Benutzer '%-.64s' hat die Ressourcenbeschränkung '%s' überschritten (aktueller Wert: %ld)", -"Befehl nicht zulässig. Hierfür wird die Berechtigung %-.128s benötigt", -"Variable '%-.64s' ist eine lokale Variable und kann nicht mit SET GLOBAL verändert werden", -"Variable '%-.64s' ist eine globale Variable und muss mit SET GLOBAL verändert werden", -"Variable '%-.64s' hat keinen Vorgabewert", -"Variable '%-.64s' kann nicht auf '%-.64s' gesetzt werden", -"Falscher Argumenttyp für Variable '%-.64s'", -"Variable '%-.64s' kann nur verändert, nicht gelesen werden", -"Falsche Verwendung oder Platzierung von '%s'", -"Diese MySQL-Version unterstützt '%s' nicht", -"Schwerer Fehler %d: '%-.128s' vom Master beim Lesen des binären Logs aufgetreten", -"Slave-SQL-Thread hat die Abfrage aufgrund von replicate-*-table-Regeln ignoriert", -"Variable '%-.64s' is a %s variable", -"Falsche Fremdschlüssel-Definition für '%-.64s': %s", -"Schlüssel- und Tabellenverweis passen nicht zusammen", -"Operand sollte %d Spalte(n) enthalten", -"Unterabfrage lieferte mehr als einen Datensatz zurück", -"Unbekannter Prepared-Statement-Handler (%.*s) für %s angegeben", -"Die Hilfe-Datenbank ist beschädigt oder existiert nicht", -"Zyklischer Verweis in Unterabfragen", -"Spalte '%s' wird von %s nach %s umgewandelt", -"Verweis '%-.64s' wird nicht unterstützt (%s)", -"Für jede abgeleitete Tabelle muss ein eigener Alias angegeben werden", -"Select %u wurde während der Optimierung reduziert", -"Tabelle '%-.64s', die in einem der SELECT-Befehle verwendet wurde, kann nicht in %-.32s verwendet werden", -"Client unterstützt das vom Server erwartete Authentifizierungsprotokoll nicht. Bitte aktualisieren Sie Ihren MySQL-Client", -"Alle Teile eines SPATIAL KEY müssen als NOT NULL deklariert sein", -"COLLATION '%s' ist für CHARACTER SET '%s' ungültig", -"Slave läuft bereits", -"Slave wurde bereits angehalten", -"Unkomprimierte Daten sind zu groß. 
Die maximale Größe beträgt %d", -"ZLIB: Es steht nicht genug Speicher zur Verfügung", -"ZLIB: Im Ausgabepuffer ist nicht genug Platz vorhanden (wahrscheinlich wurde die Länge der unkomprimierten Daten beschädigt)", -"ZLIB: Eingabedaten beschädigt", -"%d Zeile(n) durch GROUP_CONCAT() abgeschnitten", -"Anzahl der Datensätze in Zeile %ld geringer als Anzahl der Spalten", -"Anzahl der Datensätze in Zeile %ld größer als Anzahl der Spalten", -"Daten abgeschnitten, NULL für NOT NULL-Spalte '%s' in Zeile %ld angegeben", -"Daten abgeschnitten, außerhalb des Wertebereichs für Spalte '%s' in Zeile %ld", -"Daten abgeschnitten für Spalte '%s' in Zeile %ld", -"Für Tabelle '%s' wird Speicher-Engine %s benutzt", -"Unerlaubte Vermischung der Kollationen (%s,%s) und (%s,%s) für die Operation '%s'", -"Kann einen oder mehrere der angegebenen Benutzer nicht löschen", -"Kann nicht alle Berechtigungen und GRANT-Rechte für einen oder mehrere der angegebenen Benutzer widerrufen", -"Unerlaubte Vermischung der Kollationen (%s,%s), (%s,%s), (%s,%s) für die Operation '%s'", -"Unerlaubte Vermischung der Kollationen für die Operation '%s'", -"Variable '%-.64s' ist keine Variablen-Komponente (kann nicht als XXXX.variablen_name verwendet werden)", -"Unbekannte Kollation: '%-.64s'", -"SSL-Parameter in CHANGE MASTER werden ignoriert, weil dieser MySQL-Slave ohne SSL-Unterstützung kompiliert wurde. Sie können aber später verwendet werden, wenn der MySQL-Slave mit SSL gestartet wird", -"Server läuft im Modus --secure-auth, aber '%s'@'%s' hat ein Passwort im alten Format. Bitte Passwort ins neue Format ändern", -"Feld oder Verweis '%-.64s%s%-.64s%s%-.64s' im SELECT-Befehl Nr. %d wurde im SELECT-Befehl Nr. %d aufgelöst", -"Falscher Parameter oder falsche Kombination von Parametern für START SLAVE UNTIL", -"Es wird empfohlen, mit --skip-slave-start zu starten, wenn mit START SLAVE UNTIL eine Schritt-für-Schritt-Replikation ausgeführt wird. Ansonsten gibt es Probleme, wenn der Slave-Server unerwartet neu startet", -"SQL-Thread soll nicht gestartet werden. Daher werden UNTIL-Optionen ignoriert", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/greek/errmsg.txt b/sql/share/greek/errmsg.txt deleted file mode 100644 index 87168431595..00000000000 --- a/sql/share/greek/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=greek - -"hashchk", -"isamchk", -"Ï×É", -"ÍÁÉ", -"Áäýíáôç ç äçìéïõñãßá ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç äçìéïõñãßá ôïõ ðßíáêá '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç äçìéïõñãßá ôçò âÜóçò äåäïìÝíùí '%-.64s'; Ç âÜóç äåäïìÝíùí õðÜñ÷åé Þäç", -"Áäýíáôç ç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí '%-.64s'. Ç âÜóç äåäïìÝíùí äåí õðÜñ÷åé", -"ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ '%-.64s', êùäéêüò ëÜèïõò: %d)", -"ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ ôçò âÜóçò äåäïìÝíùí (áäýíáôç ç äéáãñáöÞ ôïõ öáêÝëëïõ '%-.64s', êùäéêüò ëÜèïõò: %d)", -"ÐáñïõóéÜóôçêå ðñüâëçìá êáôÜ ôç äéáãñáöÞ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç áíÜãíùóç åããñáöÞò áðü ðßíáêá ôïõ óõóôÞìáôïò", -"Áäýíáôç ç ëÞøç ðëçñïöïñéþí ãéá ôçí êáôÜóôáóç ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ï öÜêåëëïò åñãáóßáò äåí âñÝèçêå (êùäéêüò ëÜèïõò: %d)", -"Ôï áñ÷åßï äåí ìðïñåß íá êëåéäùèåß (êùäéêüò ëÜèïõò: %d)", -"Äåí åßíáé äõíáôü íá áíïé÷ôåß ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Äåí âñÝèçêå ôï áñ÷åßï: '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Äåí åßíáé äõíáôü íá äéáâáóôåß ï öÜêåëëïò ôïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Áäýíáôç ç áëëáãÞ ôïõ ôñÝ÷ïíôïò êáôáëüãïõ óå '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ç åããñáöÞ Ý÷åé áëëÜîåé áðü ôçí ôåëåõôáßá öïñÜ ðïõ áíáóýñèçêå áðü ôïí ðßíáêá '%-.64s'", -"Äåí õðÜñ÷åé ÷þñïò óôï äßóêï (%s). 
Ðáñáêáëþ, ðåñéìÝíåôå íá åëåõèåñùèåß ÷þñïò...", -"Äåí åßíáé äõíáôÞ ç êáôá÷þñçóç, ç ôéìÞ õðÜñ÷åé Þäç óôïí ðßíáêá '%-.64s'", -"ÐáñïõóéÜóôçêå ðñüâëçìá êëåßíïíôáò ôï '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ðñüâëçìá êáôÜ ôçí áíÜãíùóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ðñüâëçìá êáôÜ ôçí ìåôïíïìáóßá ôïõ áñ÷åßïõ '%-.64s' to '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"Ðñüâëçìá êáôÜ ôçí áðïèÞêåõóç ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"'%-.64s' äåí åðéôñÝðïíôáé áëëáãÝò", -"Ç äéáäéêáóßá ôáîéíüìéóçò áêõñþèçêå", -"Ôï View '%-.64s' äåí õðÜñ÷åé ãéá '%-.64s'", -"ÅëÞöèç ìÞíõìá ëÜèïõò %d áðü ôïí ÷åéñéóôÞ ðßíáêá (table handler)", -"Ï ÷åéñéóôÞò ðßíáêá (table handler) ãéá '%-.64s' äåí äéáèÝôåé áõôÞ ôçí åðéëïãÞ", -"Áäýíáôç ç áíåýñåóç åããñáöÞò óôï '%-.64s'", -"ËÜèïò ðëçñïöïñßåò óôï áñ÷åßï: '%-.64s'", -"ËÜèïò áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá: '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!", -"Ðáëáéü áñ÷åßï ôáîéíüìéóçò (key file) ãéá ôïí ðßíáêá '%-.64s'; Ðáñáêáëþ, äéïñèþóôå ôï!", -"'%-.64s' åðéôñÝðåôáé ìüíï ç áíÜãíùóç", -"Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç. ÐñïóðáèÞóôå ðÜëé, åðáíåêéíþíôáò ôç äéáäéêáóßá (demon) (÷ñåéÜæïíôáé %d bytes)", -"Äåí õðÜñ÷åé äéáèÝóéìç ìíÞìç ãéá ôáîéíüìéóç. ÁõîÞóôå ôï sort buffer size ãéá ôç äéáäéêáóßá (demon)", -"ÊáôÜ ôç äéÜñêåéá ôçò áíÜãíùóçò, âñÝèçêå áðñïóäüêçôá ôï ôÝëïò ôïõ áñ÷åßïõ '%-.64s' (êùäéêüò ëÜèïõò: %d)", -"ÕðÜñ÷ïõí ðïëëÝò óõíäÝóåéò...", -"Ðñüâëçìá ìå ôç äéáèÝóéìç ìíÞìç (Out of thread space/memory)", -"Äåí Ýãéíå ãíùóôü ôï hostname ãéá ôçí address óáò", -"Ç áíáãíþñéóç (handshake) äåí Ýãéíå óùóôÜ", -"Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' óôç âÜóç äåäïìÝíùí '%-.64s'", -"Äåí åðéôÝñåôáé ç ðñüóâáóç óôï ÷ñÞóôç: '%-.32s'@'%-.64s' (÷ñÞóç password: %s)", -"Äåí åðéëÝ÷èçêå âÜóç äåäïìÝíùí", -"Áãíùóôç åíôïëÞ", -"Ôï ðåäßï '%-.64s' äåí ìðïñåß íá åßíáé êåíü (null)", -"Áãíùóôç âÜóç äåäïìÝíùí '%-.64s'", -"Ï ðßíáêáò '%-.64s' õðÜñ÷åé Þäç", -"Áãíùóôïò ðßíáêáò '%-.64s'", -"Ôï ðåäßï: '%-.64s' óå %-.64s äåí Ý÷åé êáèïñéóôåß", -"Åíáñîç äéáäéêáóßáò áðïóýíäåóçò ôïõ åîõðçñåôçôÞ (server shutdown)", -"Áãíùóôï ðåäßï '%-.64s' óå '%-.64s'", -"×ñçóéìïðïéÞèçêå '%-.64s' ðïõ äåí õðÞñ÷å óôï group by", -"Áäýíáôç ç ïìáäïðïßçóç (group on) '%-.64s'", -"Ç äéáôýðùóç ðåñéÝ÷åé sum functions êáé columns óôçí ßäéá äéáôýðùóç", -"Ôï Column count äåí ôáéñéÜæåé ìå ôï value count", -"Ôï identifier name '%-.100s' åßíáé ðïëý ìåãÜëï", -"ÅðáíÜëçøç column name '%-.64s'", -"ÅðáíÜëçøç key name '%-.64s'", -"ÄéðëÞ åããñáöÞ '%-.64s' ãéá ôï êëåéäß %d", -"ÅóöáëìÝíï column specifier ãéá ôï ðåäßï '%-.64s'", -"%s ðëçóßïí '%-.80s' óôç ãñáììÞ %d", -"Ôï åñþôçìá (query) ðïõ èÝóáôå Þôáí êåíü", -"Áäýíáôç ç áíåýñåóç unique table/alias: '%-.64s'", -"ÅóöáëìÝíç ðñïêáèïñéóìÝíç ôéìÞ (default value) ãéá '%-.64s'", -"Ðåñéóóüôåñá áðü Ýíá primary key ïñßóôçêáí", -"ÐÜñá ðïëëÜ key ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé", -"ÐÜñá ðïëëÜ key parts ïñßóèçêáí. Ôï ðïëý %d åðéôñÝðïíôáé", -"Ôï êëåéäß ðïõ ïñßóèçêå åßíáé ðïëý ìåãÜëï. Ôï ìÝãéóôï ìÞêïò åßíáé %d", -"Ôï ðåäßï êëåéäß '%-.64s' äåí õðÜñ÷åé óôïí ðßíáêá", -"Ðåäßï ôýðïõ Blob '%-.64s' äåí ìðïñåß íá ÷ñçóéìïðïéçèåß óôïí ïñéóìü åíüò êëåéäéïý (key specification)", -"Ðïëý ìåãÜëï ìÞêïò ãéá ôï ðåäßï '%-.64s' (max = %d). Ðáñáêáëþ ÷ñçóéìïðïéåßóôå ôïí ôýðï BLOB", -"Ìðïñåß íá õðÜñ÷åé ìüíï Ýíá auto field êáé ðñÝðåé íá Ý÷åé ïñéóèåß óáí key", -"%s: óå áíáìïíÞ óõíäÝóåùí", -"%s: ÖõóéïëïãéêÞ äéáäéêáóßá shutdown\n", -"%s: ÅëÞöèç ôï ìÞíõìá %d. 
Ç äéáäéêáóßá åãêáôáëåßðåôáé!\n", -"%s: Ç äéáäéêáóßá Shutdown ïëïêëçñþèçêå\n", -"%s: Ôï thread èá êëåßóåé %ld user: '%-.64s'\n", -"Äåí åßíáé äõíáôÞ ç äçìéïõñãßá IP socket", -"Ï ðßíáêáò '%-.64s' äåí Ý÷åé åõñåôÞñéï (index) óáí áõôü ðïõ ÷ñçóéìïðïéåßôå óôçí CREATE INDEX. Ðáñáêáëþ, îáíáäçìéïõñãÞóôå ôïí ðßíáêá", -"Ï äéá÷ùñéóôÞò ðåäßùí äåí åßíáé áõôüò ðïõ áíáìåíüôáí. Ðáñáêáëþ áíáôñÝîôå óôï manual", -"Äåí ìðïñåßôå íá ÷ñçóéìïðïéÞóåôå fixed rowlength óå BLOBs. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'fields terminated by'.", -"Ôï áñ÷åßï '%-.64s' ðñÝðåé íá õðÜñ÷åé óôï database directory Þ íá ìðïñåß íá äéáâáóôåß áðü üëïõò", -"Ôï áñ÷åßï '%-.64s' õðÜñ÷åé Þäç", -"ÅããñáöÝò: %ld ÄéáãñáöÝò: %ld ÐáñåêÜìöèçóáí: %ld ÐñïåéäïðïéÞóåéò: %ld", -"ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld", -"ÅóöáëìÝíï sub part key. Ôï ÷ñçóéìïðïéïýìåíï key part äåí åßíáé string Þ ôï ìÞêïò ôïõ åßíáé ìåãáëýôåñï", -"Äåí åßíáé äõíáôÞ ç äéáãñáöÞ üëùí ôùí ðåäßùí ìå ALTER TABLE. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå DROP TABLE", -"Áäýíáôç ç äéáãñáöÞ (DROP) '%-.64s'. Ðáñáêáëþ åëÝãîôå áí ôï ðåäßï/êëåéäß õðÜñ÷åé", -"ÅããñáöÝò: %ld ÅðáíáëÞøåéò: %ld ÐñïåéäïðïéÞóåéò: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Áãíùóôï thread id: %lu", -"Äåí åßóèå owner ôïõ thread %lu", -"Äåí ÷ñçóéìïðïéÞèçêáí ðßíáêåò", -"ÐÜñá ðïëëÜ strings ãéá ôï ðåäßï %-.64s êáé SET", -"Áäýíáôç ç äçìéïõñãßá unique log-filename %-.64s.(1-999)\n", -"Ï ðßíáêáò '%-.64s' Ý÷åé êëåéäùèåß ìå READ lock êáé äåí åðéôñÝðïíôáé áëëáãÝò", -"Ï ðßíáêáò '%-.64s' äåí Ý÷åé êëåéäùèåß ìå LOCK TABLES", -"Ôá Blob ðåäßá '%-.64s' äåí ìðïñïýí íá Ý÷ïõí ðñïêáèïñéóìÝíåò ôéìÝò (default value)", -"ËÜèïò üíïìá âÜóçò äåäïìÝíùí '%-.100s'", -"ËÜèïò üíïìá ðßíáêá '%-.100s'", -"Ôï SELECT èá åîåôÜóåé ìåãÜëï áñéèìü åããñáöþí êáé ðéèáíþò èá êáèõóôåñÞóåé. Ðáñáêáëþ åîåôÜóôå ôéò ðáñáìÝôñïõò ôïõ WHERE êáé ÷ñçóéìïðïéåßóôå SET SQL_BIG_SELECTS=1 áí ôï SELECT åßíáé óùóôü", -"ÐñïÝêõøå Üãíùóôï ëÜèïò", -"Áãíùóôç äéáäéêáóßá '%-.64s'", -"ËÜèïò áñéèìüò ðáñáìÝôñùí óôç äéáäéêáóßá '%-.64s'", -"ËÜèïò ðáñÜìåôñïé óôçí äéáäéêáóßá '%-.64s'", -"Áãíùóôïò ðßíáêáò '%-.64s' óå %s", -"Ôï ðåäßï '%-.64s' Ý÷åé ïñéóèåß äýï öïñÝò", -"ÅóöáëìÝíç ÷ñÞóç ôçò group function", -"Ï ðßíáêò '%-.64s' ÷ñçóéìïðïéåß êÜðïéï extension ðïõ äåí õðÜñ÷åé óôçí Ýêäïóç áõôÞ ôçò MySQL", -"Åíáò ðßíáêáò ðñÝðåé íá Ý÷åé ôïõëÜ÷éóôïí Ýíá ðåäßï", -"Ï ðßíáêáò '%-.64s' åßíáé ãåìÜôïò", -"Áãíùóôï character set: '%-.64s'", -"Ðïëý ìåãÜëïò áñéèìüò ðéíÜêùí. Ç MySQL ìðïñåß íá ÷ñçóéìïðïéÞóåé %d ðßíáêåò óå äéáäéêáóßá join", -"Ðïëý ìåãÜëïò áñéèìüò ðåäßùí", -"Ðïëý ìåãÜëï ìÝãåèïò åããñáöÞò. Ôï ìÝãéóôï ìÝãåèïò åããñáöÞò, ÷ùñßò íá õðïëïãßæïíôáé ôá blobs, åßíáé %d. ÐñÝðåé íá ïñßóåôå êÜðïéá ðåäßá óáí blobs", -"Stack overrun óôï thread: Used: %ld of a %ld stack. Ðáñáêáëþ ÷ñçóéìïðïéåßóôå 'mysqld -O thread_stack=#' ãéá íá ïñßóåôå Ýíá ìåãáëýôåñï stack áí ÷ñåéÜæåôáé", -"Cross dependency âñÝèçêå óå OUTER JOIN. Ðáñáêáëþ åîåôÜóôå ôéò óõíèÞêåò ðïõ èÝóáôå óôï ON", -"Ôï ðåäßï '%-.64s' ÷ñçóéìïðïéåßôáé óáí UNIQUE Þ INDEX áëëÜ äåí Ý÷åé ïñéóèåß óáí NOT NULL", -"Äåí åßíáé äõíáôÞ ç äéáäéêáóßá load ãéá ôç óõíÜñôçóç '%-.64s'", -"Äåí åßíáé äõíáôÞ ç Ýíáñîç ôçò óõíÜñôçóçò '%-.64s'; %-.80s", -"Äåí âñÝèçêáí paths ãéá ôçí shared library", -"Ç óõíÜñôçóç '%-.64s' õðÜñ÷åé Þäç", -"Äåí åßíáé äõíáôÞ ç áíÜãíùóç ôçò shared library '%-.64s' (êùäéêüò ëÜèïõò: %d %s)", -"Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò óõíÜñôçóçò '%-.64s' óôçí âéâëéïèÞêç'", -"Ç óõíÜñôçóç '%-.64s' äåí Ý÷åé ïñéóèåß", -"Ï õðïëïãéóôÞò Ý÷åé áðïêëåéóèåß ëüãù ðïëëáðëþí ëáèþí óýíäåóçò. 
ÐñïóðáèÞóôå íá äéïñþóåôå ìå 'mysqladmin flush-hosts'", -"Ï õðïëïãéóôÞò äåí Ý÷åé äéêáßùìá óýíäåóçò ìå ôïí MySQL server", -"×ñçóéìïðïéåßôå ôçí MySQL óáí anonymous user êáé Ýôóé äåí ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí", -"ÐñÝðåé íá Ý÷åôå äéêáßùìá äéüñèùóçò ðéíÜêùí (update) óôç âÜóç äåäïìÝíùí mysql ãéá íá ìðïñåßôå íá áëëÜîåôå ôá passwords Üëëùí ÷ñçóôþí", -"Äåí åßíáé äõíáôÞ ç áíåýñåóç ôçò áíôßóôïé÷çò åããñáöÞò óôïí ðßíáêá ôùí ÷ñçóôþí", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s'", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%-.64s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"You have an error in your SQL syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' (%-.64s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not identically defined", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table 
'%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not 
supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/hungarian/errmsg.txt b/sql/share/hungarian/errmsg.txt deleted file mode 100644 index af10c33ee2d..00000000000 --- a/sql/share/hungarian/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Translated by Feher Peter. Forditotta Feher Peter (feherp@mail.matav.hu) 1998 - Updated May, 2000 -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NEM", -"IGEN", -"A '%-.64s' file nem hozhato letre (hibakod: %d)", -"A '%-.64s' tabla nem hozhato letre (hibakod: %d)", -"Az '%-.64s' adatbazis nem hozhato letre (hibakod: %d)", -"Az '%-.64s' adatbazis nem hozhato letre Az adatbazis mar letezik", -"A(z) '%-.64s' adatbazis nem szuntetheto meg. Az adatbazis nem letezik", -"Adatbazis megszuntetesi hiba ('%-.64s' nem torolheto, hibakod: %d)", -"Adatbazis megszuntetesi hiba ('%-.64s' nem szuntetheto meg, hibakod: %d)", -"Torlesi hiba: '%-.64s' (hibakod: %d)", -"Nem olvashato rekord a rendszertablaban", -"A(z) '%-.64s' statusza nem allapithato meg (hibakod: %d)", -"A munkakonyvtar nem allapithato meg (hibakod: %d)", -"A file nem zarolhato. (hibakod: %d)", -"A '%-.64s' file nem nyithato meg (hibakod: %d)", -"A(z) '%-.64s' file nem talalhato (hibakod: %d)", -"A(z) '%-.64s' konyvtar nem olvashato. (hibakod: %d)", -"Konyvtarvaltas nem lehetseges a(z) '%-.64s'-ba. (hibakod: %d)", -"A(z) '%-.64s' tablaban talalhato rekord megvaltozott az utolso olvasas ota", -"A lemez megtelt (%s).", -"Irasi hiba, duplikalt kulcs a '%-.64s' tablaban.", -"Hiba a(z) '%-.64s' zarasakor. (hibakod: %d)", -"Hiba a '%-.64s'file olvasasakor. (hibakod: %d)", -"Hiba a '%-.64s' file atnevezesekor. (hibakod: %d)", -"Hiba a '%-.64s' file irasakor. 
(hibakod: %d)", -"'%-.64s' a valtoztatas ellen zarolva", -"Sikertelen rendezes", -"A(z) '%-.64s' nezet nem letezik a(z) '%-.64s'-hoz", -"%d hibajelzes a tablakezelotol", -"A(z) '%-.64s' tablakezelonek nincs ilyen opcioja", -"Nem talalhato a rekord '%-.64s'-ben", -"Ervenytelen info a file-ban: '%-.64s'", -"Ervenytelen kulcsfile a tablahoz: '%-.64s'; probalja kijavitani!", -"Regi kulcsfile a '%-.64s'tablahoz; probalja kijavitani!", -"'%-.64s' irasvedett", -"Nincs eleg memoria. Inditsa ujra a demont, es probalja ismet. (%d byte szukseges.)", -"Nincs eleg memoria a rendezeshez. Novelje a rendezo demon puffermeretet", -"Varatlan filevege-jel a '%-.64s'olvasasakor. (hibakod: %d)", -"Tul sok kapcsolat", -"Elfogyott a thread-memoria", -"A gepnev nem allapithato meg a cimbol", -"A kapcsolatfelvetel nem sikerult (Bad handshake)", -"A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres az '%-.64s' adabazishoz.", -"A(z) '%-.32s'@'%-.64s' felhasznalo szamara tiltott eleres. (Hasznalja a jelszot: %s)", -"Nincs kivalasztott adatbazis", -"Ervenytelen parancs", -"A(z) '%-.64s' oszlop erteke nem lehet nulla", -"Ervenytelen adatbazis: '%-.64s'", -"A(z) '%-.64s' tabla mar letezik", -"Ervenytelen tabla: '%-.64s'", -"A(z) '%-.64s' oszlop %-.64s-ben ketertelmu", -"A szerver leallitasa folyamatban", -"A(z) '%-.64s' oszlop ervenytelen '%-.64s'-ben", -"Used '%-.64s' with wasn't in group by", -"A group nem hasznalhato: '%-.64s'", -"Statement has sum functions and columns in same statement", -"Az oszlopban levo ertek nem egyezik meg a szamitott ertekkel", -"A(z) '%-.100s' azonositonev tul hosszu.", -"Duplikalt oszlopazonosito: '%-.64s'", -"Duplikalt kulcsazonosito: '%-.64s'", -"Duplikalt bejegyzes '%-.64s' a %d kulcs szerint.", -"Rossz oszlopazonosito: '%-.64s'", -"A %s a '%-.80s'-hez kozeli a %d sorban", -"Ures lekerdezes.", -"Nem egyedi tabla/alias: '%-.64s'", -"Ervenytelen ertek: '%-.64s'", -"Tobbszoros elsodleges kulcs definialas.", -"Tul sok kulcs. Maximum %d kulcs engedelyezett.", -"Tul sok kulcsdarabot definialt. Maximum %d resz engedelyezett", -"A megadott kulcs tul hosszu. Maximalis kulcshosszusag: %d", -"A(z) '%-.64s'kulcsoszlop nem letezik a tablaban", -"Blob objektum '%-.64s' nem hasznalhato kulcskent", -"A(z) '%-.64s' oszlop tul hosszu. (maximum = %d). Hasznaljon BLOB tipust inkabb.", -"Csak egy auto mezo lehetseges, es azt kulcskent kell definialni.", -"%s: kapcsolatra kesz", -"%s: Normal leallitas\n", -"%s: %d jelzes. Megszakitva!\n", -"%s: A leallitas kesz\n", -"%s: A(z) %ld thread kenyszeritett zarasa. Felhasznalo: '%-.64s'\n", -"Az IP socket nem hozhato letre", -"A(z) '%-.64s' tablahoz nincs meg a CREATE INDEX altal hasznalt index. Alakitsa at a tablat", -"A mezoelvalaszto argumentumok nem egyeznek meg a varttal. Nezze meg a kezikonyvben!", -"Fix hosszusagu BLOB-ok nem hasznalhatok. Hasznalja a 'mezoelvalaszto jelet' .", -"A(z) '%-.64s'-nak az adatbazis konyvtarban kell lennie, vagy mindenki szamara olvashatonak", -"A '%-.64s' file mar letezik.", -"Rekordok: %ld Torolve: %ld Skipped: %ld Warnings: %ld", -"Rekordok: %ld Duplikalva: %ld", -"Rossz alkulcs. A hasznalt kulcsresz nem karaktersorozat vagy hosszabb, mint a kulcsresz", -"Az osszes mezo nem torolheto az ALTER TABLE-lel. Hasznalja a DROP TABLE-t helyette", -"A DROP '%-.64s' nem lehetseges. 
Ellenorizze, hogy a mezo/kulcs letezik-e", -"Rekordok: %ld Duplikalva: %ld Warnings: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Ervenytelen szal (thread) id: %lu", -"A %lu thread-nek mas a tulajdonosa", -"Nincs hasznalt tabla", -"Tul sok karakter: %-.64s es SET", -"Egyedi log-filenev nem generalhato: %-.64s.(1-999)\n", -"A(z) '%-.64s' tabla zarolva lett (READ lock) es nem lehet frissiteni", -"A(z) '%-.64s' tabla nincs zarolva a LOCK TABLES-szel", -"A(z) '%-.64s' blob objektumnak nem lehet alapertelmezett erteke", -"Hibas adatbazisnev: '%-.100s'", -"Hibas tablanev: '%-.100s'", -"A SELECT tul sok rekordot fog megvizsgalni es nagyon sokaig fog tartani. Ellenorizze a WHERE-t es hasznalja a SET SQL_BIG_SELECTS=1 beallitast, ha a SELECT okay", -"Ismeretlen hiba", -"Ismeretlen eljaras: '%-.64s'", -"Rossz parameter a(z) '%-.64s'eljaras szamitasanal", -"Rossz parameter a(z) '%-.64s' eljarasban", -"Ismeretlen tabla: '%-.64s' %s-ban", -"A(z) '%-.64s' mezot ketszer definialta", -"A group funkcio ervenytelen hasznalata", -"A(z) '%-.64s' tabla olyan bovitest hasznal, amely nem letezik ebben a MySQL versioban.", -"A tablanak legalabb egy oszlopot tartalmazni kell", -"A '%-.64s' tabla megtelt", -"Ervenytelen karakterkeszlet: '%-.64s'", -"Tul sok tabla. A MySQL csak %d tablat tud kezelni osszefuzeskor", -"Tul sok mezo", -"Tul nagy sormeret. A maximalis sormeret (nem szamolva a blob objektumokat) %d. Nehany mezot meg kell valtoztatnia", -"Thread verem tullepes: Used: %ld of a %ld stack. Hasznalja a 'mysqld -O thread_stack=#' nagyobb verem definialasahoz", -"Keresztfuggoseg van az OUTER JOIN-ban. Ellenorizze az ON felteteleket", -"A(z) '%-.64s' oszlop INDEX vagy UNIQUE (egyedi), de a definicioja szerint nem NOT NULL", -"A(z) '%-.64s' fuggveny nem toltheto be", -"A(z) '%-.64s' fuggveny nem inicializalhato; %-.80s", -"Nincs ut a megosztott konyvtarakhoz (shared library)", -"A '%-.64s' fuggveny mar letezik", -"A(z) '%-.64s' megosztott konyvtar nem hasznalhato (hibakod: %d %s)", -"A(z) '%-.64s' fuggveny nem talalhato a konyvtarban", -"A '%-.64s' fuggveny nem definialt", -"A '%-.64s' host blokkolodott, tul sok kapcsolodasi hiba miatt. Hasznalja a 'mysqladmin flush-hosts' parancsot", -"A '%-.64s' host szamara nem engedelyezett a kapcsolodas ehhez a MySQL szerverhez", -"Nevtelen (anonymous) felhasznalokent nem negedelyezett a jelszovaltoztatas", -"Onnek tabla-update joggal kell rendelkeznie a mysql adatbazisban masok jelszavanak megvaltoztatasahoz", -"Nincs megegyezo sor a user tablaban", -"Megegyezo sorok szama: %ld Valtozott: %ld Warnings: %ld", -"Uj thread letrehozasa nem lehetseges (Hibakod: %d). Amenyiben van meg szabad memoria, olvassa el a kezikonyv operacios rendszerfuggo hibalehetosegekrol szolo reszet", -"Az oszlopban talalhato ertek nem egyezik meg a %ld sorban szamitott ertekkel", -"Nem lehet ujra-megnyitni a tablat: '%-.64s", -"A NULL ervenytelen hasznalata", -"'%-.64s' hiba a regularis kifejezes hasznalata soran (regexp)", -"A GROUP mezok (MIN(),MAX(),COUNT()...) kevert hasznalata nem lehetseges GROUP BY hivatkozas nelkul", -"A '%-.32s' felhasznalonak nincs ilyen joga a '%-.64s' host-on", -"%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' tablaban", -"%-.16s parancs a '%-.32s'@'%-.64s' felhasznalo szamara nem engedelyezett a '%-.64s' mezo eseten a '%-.64s' tablaban", -"Ervenytelen GRANT/REVOKE parancs. 
Kerem, nezze meg a kezikonyvben, milyen jogok lehetsegesek", -"A host vagy felhasznalo argumentuma tul hosszu a GRANT parancsban", -"A '%-.64s.%s' tabla nem letezik", -"A '%-.32s' felhasznalo szamara a '%-.64s' host '%-.64s' tablajaban ez a parancs nem engedelyezett", -"A hasznalt parancs nem engedelyezett ebben a MySQL verzioban", -"Szintaktikai hiba", -"A kesleltetett beillesztes (delayed insert) thread nem kapott zatolast a %-.64s tablahoz", -"Tul sok kesletetett thread (delayed)", -"Megszakitott kapcsolat %ld db: '%-.64s' adatbazishoz, felhasznalo: '%-.64s' (%s)", -"A kapott csomag nagyobb, mint a maximalisan engedelyezett: 'max_allowed_packet'", -"Olvasasi hiba a kapcsolat soran", -"Hiba a fcntl() fuggvenyben", -"Helytelen sorrendben erkezett adatcsomagok", -"A kommunikacios adatcsomagok nem tomorithetok ki", -"HIba a kommunikacios adatcsomagok olvasasa soran", -"Idotullepes a kommunikacios adatcsomagok olvasasa soran", -"Hiba a kommunikacios csomagok irasa soran", -"Idotullepes a kommunikacios csomagok irasa soran", -"Ez eredmeny sztring nagyobb, mint a lehetseges maximum: 'max_allowed_packet'", -"A hasznalt tabla tipus nem tamogatja a BLOB/TEXT mezoket", -"A hasznalt tabla tipus nem tamogatja az AUTO_INCREMENT tipusu mezoket", -"Az INSERT DELAYED nem hasznalhato a '%-.64s' tablahoz, mert a tabla zarolt (LOCK TABLES)", -"Ervenytelen mezonev: '%-.100s'", -"A hasznalt tablakezelo nem tudja a '%-.64s' mezot indexelni", -"A MERGE tablaban talalhato tablak definicioja nem azonos", -"A '%-.64s' nem irhato, az egyedi mezok miatt", -"BLOB mezo '%-.64s' hasznalt a mezo specifikacioban, a mezohossz megadasa nelkul", -"Az elsodleges kulcs teljes egeszeben csak NOT NULL tipusu lehet; Ha NULL mezot szeretne a kulcskent, hasznalja inkabb a UNIQUE-ot", -"Az eredmeny tobb, mint egy sort tartalmaz", -"Az adott tablatipushoz elsodleges kulcs hasznalata kotelezo", -"Ezen leforditott MySQL verzio nem tartalmaz RAID support-ot", -"On a biztonsagos update modot hasznalja, es WHERE that uses a KEY column", -"A '%-.64s' kulcs nem letezik a '%-.64s' tablaban", -"Nem tudom megnyitni a tablat", -"A tabla kezeloje (handler) nem tamogatja az %s", -"Az On szamara nem engedelyezett a parancs vegrehajtasa a tranzakcioban", -"%d hiba a COMMIT vegrehajtasa soran", -"%d hiba a ROLLBACK vegrehajtasa soran", -"%d hiba a FLUSH_LOGS vegrehajtasa soran", -"%d hiba a CHECKPOINT vegrehajtasa soran", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has 
already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/italian/errmsg.txt b/sql/share/italian/errmsg.txt deleted file mode 100644 index cd66f15db5f..00000000000 --- a/sql/share/italian/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NO", -"SI", -"Impossibile creare il file '%-.64s' (errno: %d)", -"Impossibile creare la tabella '%-.64s' (errno: %d)", -"Impossibile creare il database '%-.64s' (errno: %d)", -"Impossibile creare il database '%-.64s'; il database esiste", -"Impossibile cancellare '%-.64s'; il database non esiste", -"Errore durante la cancellazione del database (impossibile cancellare '%-.64s', errno: %d)", -"Errore durante la cancellazione del database (impossibile rmdir '%-.64s', errno: %d)", -"Errore durante la cancellazione di '%-.64s' (errno: %d)", -"Impossibile leggere il record dalla tabella di sistema", -"Impossibile leggere lo stato di '%-.64s' (errno: %d)", -"Impossibile leggere la directory di lavoro (errno: %d)", -"Impossibile il locking il file (errno: %d)", -"Impossibile aprire il file: '%-.64s' (errno: %d)", -"Impossibile trovare il file: '%-.64s' (errno: %d)", -"Impossibile leggere la directory di '%-.64s' (errno: %d)", -"Impossibile cambiare la directory in '%-.64s' (errno: %d)", -"Il record e` cambiato dall'ultima lettura della tabella '%-.64s'", -"Disco pieno (%s). 
In attesa che qualcuno liberi un po' di spazio...", -"Scrittura impossibile: chiave duplicata nella tabella '%-.64s'", -"Errore durante la chiusura di '%-.64s' (errno: %d)", -"Errore durante la lettura del file '%-.64s' (errno: %d)", -"Errore durante la rinominazione da '%-.64s' a '%-.64s' (errno: %d)", -"Errore durante la scrittura del file '%-.64s' (errno: %d)", -"'%-.64s' e` soggetto a lock contro i cambiamenti", -"Operazione di ordinamento abbandonata", -"La view '%-.64s' non esiste per '%-.64s'", -"Rilevato l'errore %d dal gestore delle tabelle", -"Il gestore delle tabelle per '%-.64s' non ha questa opzione", -"Impossibile trovare il record in '%-.64s'", -"Informazione errata nel file: '%-.64s'", -"File chiave errato per la tabella : '%-.64s'; prova a riparalo", -"File chiave vecchio per la tabella '%-.64s'; riparalo!", -"'%-.64s' e` di sola lettura", -"Memoria esaurita. Fai ripartire il demone e riprova (richiesti %d bytes)", -"Memoria per gli ordinamenti esaurita. Incrementare il 'sort_buffer' al demone", -"Fine del file inaspettata durante la lettura del file '%-.64s' (errno: %d)", -"Troppe connessioni", -"Fine dello spazio/memoria per i thread", -"Impossibile risalire al nome dell'host dall'indirizzo (risoluzione inversa)", -"Negoziazione impossibile", -"Accesso non consentito per l'utente: '%-.32s'@'%-.64s' al database '%-.64s'", -"Accesso non consentito per l'utente: '%-.32s'@'%-.64s' (Password: %s)", -"Nessun database selezionato", -"Comando sconosciuto", -"La colonna '%-.64s' non puo` essere nulla", -"Database '%-.64s' sconosciuto", -"La tabella '%-.64s' esiste gia`", -"Tabella '%-.64s' sconosciuta", -"Colonna: '%-.64s' di %-.64s e` ambigua", -"Shutdown del server in corso", -"Colonna sconosciuta '%-.64s' in '%-.64s'", -"Usato '%-.64s' che non e` nel GROUP BY", -"Impossibile raggruppare per '%-.64s'", -"Il comando ha una funzione SUM e una colonna non specificata nella GROUP BY", -"Il numero delle colonne non e` uguale al numero dei valori", -"Il nome dell'identificatore '%-.100s' e` troppo lungo", -"Nome colonna duplicato '%-.64s'", -"Nome chiave duplicato '%-.64s'", -"Valore duplicato '%-.64s' per la chiave %d", -"Specifica errata per la colonna '%-.64s'", -"%s vicino a '%-.80s' linea %d", -"La query e` vuota", -"Tabella/alias non unico: '%-.64s'", -"Valore di default non valido per '%-.64s'", -"Definite piu` chiave primarie", -"Troppe chiavi. Sono ammesse max %d chiavi", -"Troppe parti di chiave specificate. Sono ammesse max %d parti", -"La chiave specificata e` troppo lunga. La max lunghezza della chiave e` %d", -"La colonna chiave '%-.64s' non esiste nella tabella", -"La colonna BLOB '%-.64s' non puo` essere usata nella specifica della chiave", -"La colonna '%-.64s' e` troppo grande (max=%d). Utilizza un BLOB.", -"Puo` esserci solo un campo AUTO e deve essere definito come chiave", -"%s: Pronto per le connessioni\n", -"%s: Shutdown normale\n", -"%s: Ricevuto segnale %d. Interruzione!\n", -"%s: Shutdown completato\n", -"%s: Forzata la chiusura del thread %ld utente: '%-.64s'\n", -"Impossibile creare il socket IP", -"La tabella '%-.64s' non ha nessun indice come quello specificatato dalla CREATE INDEX. Ricrea la tabella", -"L'argomento 'Field separator' non e` quello atteso. Controlla il manuale", -"Non possono essere usate righe a lunghezza fissa con i BLOB. 
Usa 'FIELDS TERMINATED BY'.", -"Il file '%-.64s' deve essere nella directory del database e deve essere leggibile da tutti", -"Il file '%-.64s' esiste gia`", -"Records: %ld Cancellati: %ld Saltati: %ld Avvertimenti: %ld", -"Records: %ld Duplicati: %ld", -"Sotto-parte della chiave errata. La parte di chiave utilizzata non e` una stringa o la lunghezza e` maggiore della parte di chiave.", -"Non si possono cancellare tutti i campi con una ALTER TABLE. Utilizzare DROP TABLE", -"Impossibile cancellare '%-.64s'. Controllare che il campo chiave esista", -"Records: %ld Duplicati: %ld Avvertimenti: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Thread id: %lu sconosciuto", -"Utente non proprietario del thread %lu", -"Nessuna tabella usata", -"Troppe stringhe per la colonna %-.64s e la SET", -"Impossibile generare un nome del file log unico %-.64s.(1-999)\n", -"La tabella '%-.64s' e` soggetta a lock in lettura e non puo` essere aggiornata", -"Non e` stato impostato il lock per la tabella '%-.64s' con LOCK TABLES", -"Il campo BLOB '%-.64s' non puo` avere un valore di default", -"Nome database errato '%-.100s'", -"Nome tabella errato '%-.100s'", -"La SELECT dovrebbe esaminare troppi record e usare troppo tempo. Controllare la WHERE e usa SET SQL_BIG_SELECTS=1 se e` tutto a posto.", -"Errore sconosciuto", -"Procedura '%-.64s' sconosciuta", -"Numero di parametri errato per la procedura '%-.64s'", -"Parametri errati per la procedura '%-.64s'", -"Tabella '%-.64s' sconosciuta in %s", -"Campo '%-.64s' specificato 2 volte", -"Uso non valido di una funzione di raggruppamento", -"La tabella '%-.64s' usa un'estensione che non esiste in questa versione di MySQL", -"Una tabella deve avere almeno 1 colonna", -"La tabella '%-.64s' e` piena", -"Set di caratteri '%-.64s' sconosciuto", -"Troppe tabelle. MySQL puo` usare solo %d tabelle in una join", -"Troppi campi", -"Riga troppo grande. La massima grandezza di una riga, non contando i BLOB, e` %d. Devi cambiare alcuni campi in BLOB", -"Thread stack overrun: Usati: %ld di uno stack di %ld. Usa 'mysqld -O thread_stack=#' per specificare uno stack piu` grande.", -"Trovata una dipendenza incrociata nella OUTER JOIN. Controlla le condizioni ON", -"La colonna '%-.64s' e` usata con UNIQUE o INDEX ma non e` definita come NOT NULL", -"Impossibile caricare la funzione '%-.64s'", -"Impossibile inizializzare la funzione '%-.64s'; %-.80s", -"Non sono ammessi path per le librerie condivisa", -"La funzione '%-.64s' esiste gia`", -"Impossibile aprire la libreria condivisa '%-.64s' (errno: %d %s)", -"Impossibile trovare la funzione '%-.64s' nella libreria", -"La funzione '%-.64s' non e` definita", -"Sistema '%-.64s' bloccato a causa di troppi errori di connessione. Per sbloccarlo: 'mysqladmin flush-hosts'", -"Al sistema '%-.64s' non e` consentita la connessione a questo server MySQL", -"Impossibile cambiare la password usando MySQL come utente anonimo", -"E` necessario il privilegio di update sulle tabelle del database mysql per cambiare le password per gli altri utenti", -"Impossibile trovare la riga corrispondente nella tabella user", -"Rows riconosciute: %ld Cambiate: %ld Warnings: %ld", -"Impossibile creare un nuovo thread (errno %d). 
Se non ci sono problemi di memoria disponibile puoi consultare il manuale per controllare possibili problemi dipendenti dal SO", -"Il numero delle colonne non corrisponde al conteggio alla riga %ld", -"Impossibile riaprire la tabella: '%-.64s'", -"Uso scorretto del valore NULL", -"Errore '%-.64s' da regexp", -"Il mescolare funzioni di aggregazione (MIN(),MAX(),COUNT()...) e non e` illegale se non c'e` una clausula GROUP BY", -"GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s'", -"Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla tabella '%-.64s'", -"Comando %-.16s negato per l'utente: '%-.32s'@'%-.64s' sulla colonna '%-.64s' della tabella '%-.64s'", -"Comando GRANT/REVOKE illegale. Prego consultare il manuale per sapere quali privilegi possono essere usati.", -"L'argomento host o utente per la GRANT e` troppo lungo", -"La tabella '%-.64s.%s' non esiste", -"GRANT non definita per l'utente '%-.32s' dalla macchina '%-.64s' sulla tabella '%-.64s'", -"Il comando utilizzato non e` supportato in questa versione di MySQL", -"Errore di sintassi nella query SQL", -"Il thread di inserimento ritardato non riesce ad ottenere il lock per la tabella %-.64s", -"Troppi threads ritardati in uso", -"Interrotta la connessione %ld al db: '%-.64s' utente: '%-.64s' (%s)", -"Ricevuto un pacchetto piu` grande di 'max_allowed_packet'", -"Rilevato un errore di lettura dalla pipe di connessione", -"Rilevato un errore da fcntl()", -"Ricevuti pacchetti non in ordine", -"Impossibile scompattare i pacchetti di comunicazione", -"Rilevato un errore ricevendo i pacchetti di comunicazione", -"Rilevato un timeout ricevendo i pacchetti di comunicazione", -"Rilevato un errore inviando i pacchetti di comunicazione", -"Rilevato un timeout inviando i pacchetti di comunicazione", -"La stringa di risposta e` piu` lunga di 'max_allowed_packet'", -"Il tipo di tabella usata non supporta colonne di tipo BLOB/TEXT", -"Il tipo di tabella usata non supporta colonne di tipo AUTO_INCREMENT", -"L'inserimento ritardato (INSERT DELAYED) non puo` essere usato con la tabella '%-.64s', perche` soggetta a lock da 'LOCK TABLES'", -"Nome colonna '%-.100s' non corretto", -"Il gestore delle tabelle non puo` indicizzare la colonna '%-.64s'", -"Non tutte le tabelle nella tabella di MERGE sono definite in maniera identica", -"Impossibile scrivere nella tabella '%-.64s' per limitazione di unicita`", -"La colonna '%-.64s' di tipo BLOB e` usata in una chiave senza specificarne la lunghezza", -"Tutte le parti di una chiave primaria devono essere dichiarate NOT NULL; se necessitano valori NULL nelle chiavi utilizzare UNIQUE", -"Il risultato consiste di piu` di una riga", -"Questo tipo di tabella richiede una chiave primaria", -"Questa versione di MYSQL non e` compilata con il supporto RAID", -"In modalita` 'safe update' si e` cercato di aggiornare una tabella senza clausola WHERE su una chiave", -"La chiave '%-.64s' non esiste nella tabella '%-.64s'", -"Impossibile aprire la tabella", -"Il gestore per la tabella non supporta il %s", -"Non puoi eseguire questo comando in una transazione", -"Rilevato l'errore %d durante il COMMIT", -"Rilevato l'errore %d durante il ROLLBACK", -"Rilevato l'errore %d durante il FLUSH_LOGS", -"Rilevato l'errore %d durante il CHECKPOINT", -"Interrotta la connessione %ld al db: ''%-.64s' utente: '%-.32s' host: '%-.64s' (%-.64s)", -"Il gestore per la tabella non supporta il dump binario", -"Binlog e` stato chiuso durante l'esecuzione del FLUSH MASTER", -"Fallita la ricostruzione dell'indice della tabella 
copiata '%-.64s'", -"Errore dal master: '%-.64s", -"Errore di rete durante la ricezione dal master", -"Errore di rete durante l'invio al master", -"Impossibile trovare un indice FULLTEXT che corrisponda all'elenco delle colonne", -"Impossibile eseguire il comando richiesto: tabelle sotto lock o transazione in atto", -"Variabile di sistema '%-.64s' sconosciuta", -"La tabella '%-.64s' e` segnalata come corrotta e deve essere riparata", -"La tabella '%-.64s' e` segnalata come corrotta e l'ultima ricostruzione (automatica?) e` fallita", -"Attenzione: Alcune delle modifiche alle tabelle non transazionali non possono essere ripristinate (roll back impossibile)", -"La transazione a comandi multipli (multi-statement) ha richiesto piu` di 'max_binlog_cache_size' bytes di disco: aumentare questa variabile di mysqld e riprovare", -"Questa operazione non puo' essere eseguita con un database 'slave' che gira, lanciare prima STOP SLAVE", -"Questa operaione richiede un database 'slave', configurarlo ed eseguire START SLAVE", -"Il server non e' configurato come 'slave', correggere il file di configurazione cambiando CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Impossibile creare il thread 'slave', controllare le risorse di sistema", -"L'utente %-.64s ha gia' piu' di 'max_user_connections' connessioni attive", -"Si possono usare solo espressioni costanti con SET", -"E' scaduto il timeout per l'attesa del lock", -"Il numero totale di lock e' maggiore della grandezza della tabella di lock", -"I lock di aggiornamento non possono essere acquisiti durante una transazione 'READ UNCOMMITTED'", -"DROP DATABASE non e' permesso mentre il thread ha un lock globale di lettura", -"CREATE DATABASE non e' permesso mentre il thread ha un lock globale di lettura", -"Argomenti errati a %s", -"A '%-.32s'@'%-.64s' non e' permesso creare nuovi utenti", -"Definizione della tabella errata; tutte le tabelle di tipo MERGE devono essere nello stesso database", -"Trovato deadlock durante il lock; Provare a far ripartire la transazione", -"La tabella usata non supporta gli indici FULLTEXT", -"Impossibile aggiungere il vincolo di integrita' referenziale (foreign key constraint)", -"Impossibile aggiungere la riga: un vincolo d'integrita' referenziale non e' soddisfatto", -"Impossibile cancellare la riga: un vincolo d'integrita' referenziale non e' soddisfatto", -"Errore durante la connessione al master: %-.128s", -"Errore eseguendo una query sul master: %-.128s", -"Errore durante l'esecuzione del comando %s: %-.128s", -"Uso errato di %s e %s", -"La SELECT utilizzata ha un numero di colonne differente", -"Impossibile eseguire la query perche' c'e' un conflitto con in lock di lettura", -"E' disabilitata la possibilita' di mischiare tabelle transazionali e non-transazionali", -"L'opzione '%s' e' stata usata due volte nel comando", -"L'utente '%-.64s' ha ecceduto la risorsa '%s' (valore corrente: %ld)", -"Accesso non consentito. 
Serve il privilegio %-.128s per questa operazione", -"La variabile '%-.64s' e' una variabile locale ( SESSION ) e non puo' essere cambiata usando SET GLOBAL", -"La variabile '%-.64s' e' una variabile globale ( GLOBAL ) e deve essere cambiata usando SET GLOBAL", -"La variabile '%-.64s' non ha un valore di default", -"Alla variabile '%-.64s' non puo' essere assegato il valore '%-.64s'", -"Tipo di valore errato per la variabile '%-.64s'", -"Alla variabile '%-.64s' e' di sola scrittura quindi puo' essere solo assegnato un valore, non letto", -"Uso/posizione di '%s' sbagliato", -"Questa versione di MySQL non supporta ancora '%s'", -"Errore fatale %d: '%-.128s' dal master leggendo i dati dal log binario", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name 
'%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/japanese/errmsg.txt b/sql/share/japanese/errmsg.txt deleted file mode 100644 index eaab58d8403..00000000000 --- a/sql/share/japanese/errmsg.txt +++ /dev/null @@ -1,325 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - 3.22.10-beta euc-japanese (ujis) text -*/ - -character-set=ujis - -"hashchk", -"isamchk", -"NO", -"YES", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Æ¡¼¥Ö¥ë¤¬ºî¤ì¤Þ¤»¤ó.(errno: %d)", -"'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ºî¤ì¤Þ¤»¤ó.´û¤Ë¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬Â¸ºß¤·¤Þ¤¹", -"'%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤òÇË´þ¤Ç¤¤Þ¤»¤ó. ¤½¤Î¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬¤Ê¤¤¤Î¤Ç¤¹.", -"¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤òºï½ü¤Ç¤¤Þ¤»¤ó, errno: %d)", -"¥Ç¡¼¥¿¥Ù¡¼¥¹ÇË´þ¥¨¥é¡¼ ('%-.64s' ¤ò rmdir ¤Ç¤¤Þ¤»¤ó, errno: %d)", -"'%-.64s' ¤Îºï½ü¤¬¥¨¥é¡¼ (errno: %d)", -"system table ¤Î¥ì¥³¡¼¥É¤òÆÉ¤à»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿", -"'%-.64s' ¤Î¥¹¥Æ¥¤¥¿¥¹¤¬ÆÀ¤é¤ì¤Þ¤»¤ó. (errno: %d)", -"working directory ¤òÆÀ¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿ (errno: %d)", -"¥Õ¥¡¥¤¥ë¤ò¥í¥Ã¥¯¤Ç¤¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ò¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó.(errno: %d)", -"'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤¬ÆÉ¤á¤Þ¤»¤ó.(errno: %d)", -"'%-.64s' ¥Ç¥£¥ì¥¯¥È¥ê¤Ë chdir ¤Ç¤¤Þ¤»¤ó.(errno: %d)", -"Record has changed since last read in table '%-.64s'", -"Disk full (%s). 
狼¤¬²¿¤«¤ò¸º¤é¤¹¤Þ¤Ç¤Þ¤Ã¤Æ¤¯¤À¤µ¤¤...", -"table '%-.64s' ¤Ë key ¤¬½ÅÊ£¤·¤Æ¤¤¤Æ½ñ¤¤³¤á¤Þ¤»¤ó", -"Error on close of '%-.64s' (errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ÎÆÉ¤ß¹þ¤ß¥¨¥é¡¼ (errno: %d)", -"'%-.64s' ¤ò '%-.64s' ¤Ë rename ¤Ç¤¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤ò½ñ¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d)", -"'%-.64s' ¤Ï¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤¹", -"Sort ÃæÃÇ", -"View '%-.64s' ¤¬ '%-.64s' ¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"Got error %d from table handler", -"Table handler for '%-.64s' doesn't have this option", -"'%-.64s'¤Î¤Ê¤«¤Ë¥ì¥³¡¼¥É¤¬¸«ÉÕ¤«¤ê¤Þ¤»¤ó", -"¥Õ¥¡¥¤¥ë '%-.64s' ¤Î info ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹", -"'%-.64s' ¥Æ¡¼¥Ö¥ë¤Î key file ¤¬´Ö°ã¤Ã¤Æ¤¤¤ë¤è¤¦¤Ç¤¹. ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤", -"'%-.64s' ¥Æ¡¼¥Ö¥ë¤Ï¸Å¤¤·Á¼°¤Î key file ¤Î¤è¤¦¤Ç¤¹; ½¤Éü¤ò¤·¤Æ¤¯¤À¤µ¤¤", -"'%-.64s' ¤ÏÆÉ¤ß¹þ¤ßÀìÍѤǤ¹", -"Out of memory. ¥Ç¡¼¥â¥ó¤ò¥ê¥¹¥¿¡¼¥È¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤ (%d bytes ɬÍ×)", -"Out of sort memory. sort buffer size ¤¬Â¤ê¤Ê¤¤¤è¤¦¤Ç¤¹.", -"'%-.64s' ¥Õ¥¡¥¤¥ë¤òÆÉ¤ß¹þ¤ßÃæ¤Ë EOF ¤¬Í½´ü¤»¤Ì½ê¤Ç¸½¤ì¤Þ¤·¤¿. (errno: %d)", -"Àܳ¤¬Â¿¤¹¤®¤Þ¤¹", -"Out of memory; mysqld ¤«¤½¤Î¾¤Î¥×¥í¥»¥¹¤¬¥á¥â¥ê¡¼¤òÁ´¤Æ»È¤Ã¤Æ¤¤¤ë¤«³Îǧ¤·¤Æ¤¯¤À¤µ¤¤. ¥á¥â¥ê¡¼¤ò»È¤¤ÀڤäƤ¤¤Ê¤¤¾ì¹ç¡¢'ulimit' ¤òÀßÄꤷ¤Æ mysqld ¤Î¥á¥â¥ê¡¼»ÈÍѸ³¦Î̤ò¿¤¯¤¹¤ë¤«¡¢swap space ¤òÁý¤ä¤·¤Æ¤ß¤Æ¤¯¤À¤µ¤¤", -"¤½¤Î address ¤Î hostname ¤¬°ú¤±¤Þ¤»¤ó.", -"Bad handshake", -"¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤Î '%-.64s' ¥Ç¡¼¥¿¥Ù¡¼¥¹¤Ø¤Î¥¢¥¯¥»¥¹¤òµñÈݤ·¤Þ¤¹", -"¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ¤òµñÈݤ·¤Þ¤¹.uUsing password: %s)", -"¥Ç¡¼¥¿¥Ù¡¼¥¹¤¬ÁªÂò¤µ¤ì¤Æ¤¤¤Þ¤»¤ó.", -"¤½¤Î¥³¥Þ¥ó¥É¤Ï²¿¡©", -"Column '%-.64s' ¤Ï null ¤Ë¤Ï¤Ç¤¤Ê¤¤¤Î¤Ç¤¹", -"'%-.64s' ¤Ê¤ó¤Æ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ÏÃΤê¤Þ¤»¤ó.", -"Table '%-.64s' ¤Ï´û¤Ë¤¢¤ê¤Þ¤¹", -"table '%-.64s' ¤Ï¤¢¤ê¤Þ¤»¤ó.", -"Column: '%-.64s' in %-.64s is ambiguous", -"Server ¤ò shutdown Ãæ...", -"'%-.64s' column ¤Ï '%-.64s' ¤Ë¤Ï¤¢¤ê¤Þ¤»¤ó.", -"'%-.64s' isn't in GROUP BY", -"Can't group on '%-.64s'", -"Statement has sum functions and columns in same statement", -"Column count doesn't match value count", -"Identifier name '%-.100s' ¤ÏŤ¹¤®¤Þ¤¹", -"'%-.64s' ¤È¤¤¤¦ column ̾¤Ï½ÅÊ£¤·¤Æ¤Þ¤¹", -"'%-.64s' ¤È¤¤¤¦ key ¤Î̾Á°¤Ï½ÅÊ£¤·¤Æ¤¤¤Þ¤¹", -"'%-.64s' ¤Ï key %d ¤Ë¤ª¤¤¤Æ½ÅÊ£¤·¤Æ¤¤¤Þ¤¹", -"Incorrect column specifier for column '%-.64s'", -"%s : '%-.80s' ÉÕ¶á : %d ¹ÔÌÜ", -"Query ¤¬¶õ¤Ç¤¹.", -"'%-.64s' ¤Ï°ì°Õ¤Î table/alias ̾¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó", -"Invalid default value for '%-.64s'", -"Ê£¿ô¤Î primary key ¤¬ÄêµÁ¤µ¤ì¤Þ¤·¤¿", -"key ¤Î»ØÄ꤬¿¤¹¤®¤Þ¤¹. key ¤ÏºÇÂç %d ¤Þ¤Ç¤Ç¤¹", -"Too many key parts specified; max %d parts allowed", -"key ¤¬Ä¹¤¹¤®¤Þ¤¹. key ¤ÎŤµ¤ÏºÇÂç %d ¤Ç¤¹", -"Key column '%-.64s' ¤¬¥Æ¡¼¥Ö¥ë¤Ë¤¢¤ê¤Þ¤»¤ó.", -"BLOB column '%-.64s' can't be used in key specification with the used table type", -"column '%-.64s' ¤Ï,³ÎÊݤ¹¤ë column ¤ÎÂ礤µ¤¬Â¿¤¹¤®¤Þ¤¹. (ºÇÂç %d ¤Þ¤Ç). BLOB ¤ò¤«¤ï¤ê¤Ë»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤.", -"¥Æ¡¼¥Ö¥ë¤ÎÄêµÁ¤¬°ã¤¤¤Þ¤¹; there can be only one auto column and it must be defined as a key", -"%s: ½àÈ÷´°Î»", -"%s: Normal shutdown\n", -"%s: Got signal %d. ÃæÃÇ!\n", -"%s: Shutdown ´°Î»\n", -"%s: ¥¹¥ì¥Ã¥É %ld ¶¯À©½ªÎ» user: '%-.64s'\n", -"IP socket ¤¬ºî¤ì¤Þ¤»¤ó", -"Table '%-.64s' ¤Ï¤½¤Î¤è¤¦¤Ê index ¤ò»ý¤Ã¤Æ¤¤¤Þ¤»¤ó(CREATE INDEX ¼Â¹Ô»þ¤Ë»ØÄꤵ¤ì¤Æ¤¤¤Þ¤»¤ó). 
¥Æ¡¼¥Ö¥ë¤òºî¤êľ¤·¤Æ¤¯¤À¤µ¤¤", -"Field separator argument is not what is expected; check the manual", -"You can't use fixed rowlength with BLOBs; please use 'fields terminated by'.", -"¥Õ¥¡¥¤¥ë '%-.64s' ¤Ï databse ¤Î directory ¤Ë¤¢¤ë¤«Á´¤Æ¤Î¥æ¡¼¥¶¡¼¤¬ÆÉ¤á¤ë¤è¤¦¤Ëµö²Ä¤µ¤ì¤Æ¤¤¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó.", -"File '%-.64s' ¤Ï´û¤Ë¸ºß¤·¤Þ¤¹", -"¥ì¥³¡¼¥É¿ô: %ld ºï½ü: %ld Skipped: %ld Warnings: %ld", -"¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£: %ld", -"Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part", -"ALTER TABLE ¤ÇÁ´¤Æ¤Î column ¤Ïºï½ü¤Ç¤¤Þ¤»¤ó. DROP TABLE ¤ò»ÈÍѤ·¤Æ¤¯¤À¤µ¤¤", -"'%-.64s' ¤òÇË´þ¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿; check that column/key exists", -"¥ì¥³¡¼¥É¿ô: %ld ½ÅÊ£¿ô: %ld Warnings: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"thread id: %lu ¤Ï¤¢¤ê¤Þ¤»¤ó", -"thread %lu ¤Î¥ª¡¼¥Ê¡¼¤Ç¤Ï¤¢¤ê¤Þ¤»¤ó", -"No tables used", -"Too many strings for column %-.64s and SET", -"Can't generate a unique log-filename %-.64s.(1-999)\n", -"Table '%-.64s' ¤Ï READ lock ¤Ë¤Ê¤Ã¤Æ¤¤¤Æ¡¢¹¹¿·¤Ï¤Ç¤¤Þ¤»¤ó", -"Table '%-.64s' ¤Ï LOCK TABLES ¤Ë¤è¤Ã¤Æ¥í¥Ã¥¯¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"BLOB column '%-.64s' can't have a default value", -"»ØÄꤷ¤¿ database ̾ '%-.100s' ¤¬´Ö°ã¤Ã¤Æ¤¤¤Þ¤¹", -"»ØÄꤷ¤¿ table ̾ '%-.100s' ¤Ï¤Þ¤Á¤¬¤Ã¤Æ¤¤¤Þ¤¹", -"The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET SQL_MAX_JOIN_SIZE=# if the SELECT is okay", -"Unknown error", -"Unknown procedure '%-.64s'", -"Incorrect parameter count to procedure '%-.64s'", -"Incorrect parameters to procedure '%-.64s'", -"Unknown table '%-.64s' in %s", -"Column '%-.64s' specified twice", -"Invalid use of group function", -"Table '%-.64s' uses an extension that doesn't exist in this MySQL version", -"¥Æ¡¼¥Ö¥ë¤ÏºÇÄã 1 ¸Ä¤Î column ¤¬É¬ÍפǤ¹", -"table '%-.64s' ¤Ï¤¤¤Ã¤Ñ¤¤¤Ç¤¹", -"character set '%-.64s' ¤Ï¥µ¥Ý¡¼¥È¤·¤Æ¤¤¤Þ¤»¤ó", -"¥Æ¡¼¥Ö¥ë¤¬Â¿¤¹¤®¤Þ¤¹; MySQL can only use %d tables in a join", -"column ¤¬Â¿¤¹¤®¤Þ¤¹", -"row size ¤¬Â礤¹¤®¤Þ¤¹. BLOB ¤ò´Þ¤Þ¤Ê¤¤¾ì¹ç¤Î row size ¤ÎºÇÂç¤Ï %d ¤Ç¤¹. ¤¤¤¯¤Ä¤«¤Î field ¤ò BLOB ¤ËÊѤ¨¤Æ¤¯¤À¤µ¤¤.", -"Thread stack overrun: Used: %ld of a %ld stack. ¥¹¥¿¥Ã¥¯Îΰè¤ò¿¤¯¤È¤ê¤¿¤¤¾ì¹ç¡¢'mysqld -O thread_stack=#' ¤È»ØÄꤷ¤Æ¤¯¤À¤µ¤¤", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.64s' ¤¬ UNIQUE ¤« INDEX ¤Ç»ÈÍѤµ¤ì¤Þ¤·¤¿. ¤³¤Î¥«¥é¥à¤Ï NOT NULL ¤ÈÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó.", -"function '%-.64s' ¤ò ¥í¡¼¥É¤Ç¤¤Þ¤»¤ó", -"function '%-.64s' ¤ò½é´ü²½¤Ç¤¤Þ¤»¤ó; %-.80s", -"shared library ¤Ø¤Î¥Ñ¥¹¤¬Ä̤äƤ¤¤Þ¤»¤ó", -"Function '%-.64s' ¤Ï´û¤ËÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤¹", -"shared library '%-.64s' ¤ò³«¤¯»ö¤¬¤Ç¤¤Þ¤»¤ó (errno: %d %s)", -"function '%-.64s' ¤ò¥é¥¤¥Ö¥é¥ê¡¼Ãæ¤Ë¸«ÉÕ¤±¤ë»ö¤¬¤Ç¤¤Þ¤»¤ó", -"Function '%-.64s' ¤ÏÄêµÁ¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"Host '%-.64s' ¤Ï many connection error ¤Î¤¿¤á¡¢µñÈݤµ¤ì¤Þ¤·¤¿. 'mysqladmin flush-hosts' ¤Ç²ò½ü¤·¤Æ¤¯¤À¤µ¤¤", -"Host '%-.64s' ¤Ï MySQL server ¤ËÀܳ¤òµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"MySQL ¤ò anonymous users ¤Ç»ÈÍѤ·¤Æ¤¤¤ë¾õÂ֤Ǥϡ¢¥Ñ¥¹¥ï¡¼¥É¤ÎÊѹ¹¤Ï¤Ç¤¤Þ¤»¤ó", -"¾¤Î¥æ¡¼¥¶¡¼¤Î¥Ñ¥¹¥ï¡¼¥É¤òÊѹ¹¤¹¤ë¤¿¤á¤Ë¤Ï, mysql ¥Ç¡¼¥¿¥Ù¡¼¥¹¤ËÂФ·¤Æ update ¤Îµö²Ä¤¬¤Ê¤±¤ì¤Ð¤Ê¤ê¤Þ¤»¤ó.", -"Can't find any matching row in the user table", -"°ìÃ׿ô(Rows matched): %ld Êѹ¹: %ld Warnings: %ld", -"¿·µ¬¤Ë¥¹¥ì¥Ã¥É¤¬ºî¤ì¤Þ¤»¤ó¤Ç¤·¤¿ (errno %d). 
¤â¤·ºÇÂç»ÈÍѵö²Ä¥á¥â¥ê¡¼¿ô¤ò±Û¤¨¤Æ¤¤¤Ê¤¤¤Î¤Ë¥¨¥é¡¼¤¬È¯À¸¤·¤Æ¤¤¤ë¤Ê¤é, ¥Þ¥Ë¥å¥¢¥ë¤ÎÃæ¤«¤é 'possible OS-dependent bug' ¤È¤¤¤¦Ê¸»ú¤òõ¤·¤Æ¤¯¤ß¤Æ¤À¤µ¤¤.", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s'", -"NULL ÃͤλÈÍÑÊýË¡¤¬ÉÔŬÀڤǤ¹", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", -"¥æ¡¼¥¶¡¼ '%-.32s' (¥Û¥¹¥È '%-.64s' ¤Î¥æ¡¼¥¶¡¼) ¤Ïµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s' ,¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"¥³¥Þ¥ó¥É %-.16s ¤Ï ¥æ¡¼¥¶¡¼ '%-.32s'@'%-.64s'\n ¥«¥é¥à '%-.64s' ¥Æ¡¼¥Ö¥ë '%-.64s' ¤ËÂФ·¤Æµö²Ä¤µ¤ì¤Æ¤¤¤Þ¤»¤ó", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has 
already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; There can only be one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got NDB error %d '%-.100s'", -"Got temporary NDB error %d '%-.100s'", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/korean/errmsg.txt b/sql/share/korean/errmsg.txt deleted file mode 100644 index 1cff50432e9..00000000000 --- a/sql/share/korean/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=euckr - -"hashchk", -"isamchk", -"¾Æ´Ï¿À", -"¿¹", -"ÈÀÏ '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"Å×À̺í '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. (¿¡·¯¹øÈ£: %d)", -"µ¥ÀÌŸº£À̽º '%-.64s'¸¦ ¸¸µéÁö ¸øÇß½À´Ï´Ù.. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÔ", -"µ¥ÀÌŸº£À̽º '%-.64s'¸¦ Á¦°ÅÇÏÁö ¸øÇß½À´Ï´Ù. µ¥ÀÌŸº£À̽º°¡ Á¸ÀçÇÏÁö ¾ÊÀ½ ", -"µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯('%-.64s'¸¦ »èÁ¦ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)", -"µ¥ÀÌŸº£À̽º Á¦°Å ¿¡·¯(rmdir '%-.64s'¸¦ ÇÒ ¼ö ¾øÀ¾´Ï´Ù, ¿¡·¯¹øÈ£: %d)", -"'%-.64s' »èÁ¦ Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", -"system Å×ÀÌºí¿¡¼ ·¹Äڵ带 ÀÐÀ» ¼ö ¾ø½À´Ï´Ù.", -"'%-.64s'ÀÇ »óŸ¦ ¾òÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"¼öÇà µð·ºÅ丮¸¦ ãÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"ÈÀÏÀ» Àá±×Áö(lock) ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"ÈÀÏÀ» ¿Áö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)", -"ÈÀÏÀ» ãÁö ¸øÇß½À´Ï´Ù.: '%-.64s' (¿¡·¯¹øÈ£: %d)", -"'%-.64s'µð·ºÅ丮¸¦ ÀÐÁö ¸øÇß½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"'%-.64s'µð·ºÅ丮·Î À̵¿ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù. (¿¡·¯¹øÈ£: %d)", -"Å×À̺í '%-.64s'¿¡¼ ¸¶Áö¸·À¸·Î ÀÐÀº ÈÄ Record°¡ º¯°æµÇ¾ú½À´Ï´Ù.", -"Disk full (%s). 
´Ù¸¥ »ç¶÷ÀÌ Áö¿ï¶§±îÁö ±â´Ù¸³´Ï´Ù...", -"±â·ÏÇÒ ¼ö ¾øÀ¾´Ï´Ù., Å×À̺í '%-.64s'¿¡¼ Áߺ¹ Ű", -"'%-.64s'´Ý´Â Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", -"'%-.64s'ÈÀÏ Àб⠿¡·¯ (¿¡·¯¹øÈ£: %d)", -"'%-.64s'¸¦ '%-.64s'·Î À̸§ º¯°æÁß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", -"'%-.64s'ÈÀÏ ±â·Ï Áß ¿¡·¯ (¿¡·¯¹øÈ£: %d)", -"'%-.64s'°¡ º¯°æÇÒ ¼ö ¾øµµ·Ï Àá°ÜÀÖÀ¾´Ï´Ù.", -"¼ÒÆ®°¡ ÁߴܵǾú½À´Ï´Ù.", -"ºä '%-.64s'°¡ '%-.64s'¿¡¼´Â Á¸ÀçÇÏÁö ¾ÊÀ¾´Ï´Ù.", -"Å×À̺í handler¿¡¼ %d ¿¡·¯°¡ ¹ß»ý ÇÏ¿´½À´Ï´Ù.", -"'%-.64s'ÀÇ Å×À̺í handler´Â ÀÌ·¯ÇÑ ¿É¼ÇÀ» Á¦°øÇÏÁö ¾ÊÀ¾´Ï´Ù.", -"'%-.64s'¿¡¼ ·¹Äڵ带 ãÀ» ¼ö ¾øÀ¾´Ï´Ù.", -"ÈÀÏÀÇ ºÎÁ¤È®ÇÑ Á¤º¸: '%-.64s'", -"'%-.64s' Å×À̺íÀÇ ºÎÁ¤È®ÇÑ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!", -"'%-.64s' Å×À̺íÀÇ ÀÌÀü¹öÁ¯ÀÇ Å° Á¸Àç. ¼öÁ¤ÇϽÿÀ!", -"Å×À̺í '%-.64s'´Â ÀбâÀü¿ë ÀÔ´Ï´Ù.", -"Out of memory. µ¥¸óÀ» Àç ½ÇÇà ÈÄ ´Ù½Ã ½ÃÀÛÇϽÿÀ (needed %d bytes)", -"Out of sort memory. daemon sort bufferÀÇ Å©±â¸¦ Áõ°¡½ÃŰ¼¼¿ä", -"'%-.64s' ÈÀÏÀ» Àд µµÁß À߸øµÈ eofÀ» ¹ß°ß (¿¡·¯¹øÈ£: %d)", -"³Ê¹« ¸¹Àº ¿¬°á... max_connectionÀ» Áõ°¡ ½ÃŰ½Ã¿À...", -"Out of memory; mysqld³ª ¶Ç´Ù¸¥ ÇÁ·Î¼¼¼¿¡¼ »ç¿ë°¡´ÉÇÑ ¸Þ¸ð¸®¸¦ »ç¿ëÇÑÁö äũÇϽÿÀ. ¸¸¾à ±×·¸Áö ¾Ê´Ù¸é ulimit ¸í·ÉÀ» ÀÌ¿¿ëÇÏ¿© ´õ¸¹Àº ¸Þ¸ð¸®¸¦ »ç¿ëÇÒ ¼ö ÀÖµµ·Ï Çϰųª ½º¿Ò ½ºÆÐÀ̽º¸¦ Áõ°¡½ÃŰ½Ã¿À", -"´ç½ÅÀÇ ÄÄÇ»ÅÍÀÇ È£½ºÆ®À̸§À» ¾òÀ» ¼ö ¾øÀ¾´Ï´Ù.", -"Bad handshake", -"'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â '%-.64s' µ¥ÀÌŸº£À̽º¿¡ Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù.", -"'%-.32s'@'%-.64s' »ç¿ëÀÚ´Â Á¢±ÙÀÌ °ÅºÎ µÇ¾ú½À´Ï´Ù. (using password: %s)", -"¼±ÅÃµÈ µ¥ÀÌŸº£À̽º°¡ ¾ø½À´Ï´Ù.", -"¸í·É¾î°¡ ¹ºÁö ¸ð¸£°Ú¾î¿ä...", -"Ä®·³ '%-.64s'´Â ³Î(Null)ÀÌ µÇ¸é ¾ÈµË´Ï´Ù. ", -"µ¥ÀÌŸº£À̽º '%-.64s'´Â ¾Ë¼ö ¾øÀ½", -"Å×À̺í '%-.64s'´Â ÀÌ¹Ì Á¸ÀçÇÔ", -"Å×À̺í '%-.64s'´Â ¾Ë¼ö ¾øÀ½", -"Ä®·³: '%-.64s' in '%-.64s' ÀÌ ¸ðÈ£ÇÔ", -"Server°¡ ¼Ë´Ù¿î ÁßÀÔ´Ï´Ù.", -"Unknown Ä®·³ '%-.64s' in '%-.64s'", -"'%-.64s'Àº GROUP BY¼Ó¿¡ ¾øÀ½", -"'%-.64s'¸¦ ±×·ìÇÒ ¼ö ¾øÀ½", -"Statement °¡ sum±â´ÉÀ» µ¿ÀÛÁßÀ̰í Ä®·³µµ µ¿ÀÏÇÑ statementÀÔ´Ï´Ù.", -"Ä®·³ÀÇ Ä«¿îÆ®°¡ °ªÀÇ Ä«¿îÆ®¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù.", -"Identifier '%-.100s'´Â ³Ê¹« ±æ±º¿ä.", -"Áߺ¹µÈ Ä®·³ À̸§: '%-.64s'", -"Áߺ¹µÈ Ű À̸§ : '%-.64s'", -"Áߺ¹µÈ ÀÔ·Â °ª '%-.64s': key %d", -"Ä®·³ '%-.64s'ÀÇ ºÎÁ¤È®ÇÑ Ä®·³ Á¤ÀÇÀÚ", -"'%-.64s' ¿¡·¯ °°À¾´Ï´Ù. ('%-.80s' ¸í·É¾î ¶óÀÎ %d)", -"Äõ¸®°á°ú°¡ ¾ø½À´Ï´Ù.", -"Unique ÇÏÁö ¾ÊÀº Å×À̺í/alias: '%-.64s'", -"'%-.64s'ÀÇ À¯È¿ÇÏÁö ¸øÇÑ µðÆúÆ® °ªÀ» »ç¿ëÇϼ̽À´Ï´Ù.", -"Multiple primary key°¡ Á¤ÀǵǾî ÀÖ½¿", -"³Ê¹« ¸¹Àº ۰¡ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %dÀÇ Å°°¡ °¡´ÉÇÔ", -"³Ê¹« ¸¹Àº Ű ºÎºÐ(parts)µéÀÌ Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.. ÃÖ´ë %d ºÎºÐÀÌ °¡´ÉÇÔ", -"Á¤ÀÇµÈ Å°°¡ ³Ê¹« ±é´Ï´Ù. ÃÖ´ë ŰÀÇ ±æÀÌ´Â %dÀÔ´Ï´Ù.", -"Key Ä®·³ '%-.64s'´Â Å×ÀÌºí¿¡ Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù.", -"BLOB Ä®·³ '%-.64s'´Â Ű Á¤ÀÇ¿¡¼ »ç¿ëµÉ ¼ö ¾ø½À´Ï´Ù.", -"Ä®·³ '%-.64s'ÀÇ Ä®·³ ±æÀ̰¡ ³Ê¹« ±é´Ï´Ù (ÃÖ´ë = %d). ´ë½Å¿¡ BLOB¸¦ »ç¿ëÇϼ¼¿ä.", -"ºÎÁ¤È®ÇÑ Å×À̺í Á¤ÀÇ; Å×À̺íÀº ÇϳªÀÇ auto Ä®·³ÀÌ Á¸ÀçÇϰí Ű·Î Á¤ÀǵǾîÁ®¾ß ÇÕ´Ï´Ù.", -"%s: ¿¬°á ÁغñÁßÀÔ´Ï´Ù", -"%s: Á¤»óÀûÀÎ shutdown\n", -"%s: %d ½ÅÈ£°¡ µé¾î¿ÔÀ½. ÁßÁö!\n", -"%s: Shutdown ÀÌ ¿Ï·áµÊ!\n", -"%s: thread %ldÀÇ °Á¦ Á¾·á user: '%-.64s'\n", -"IP ¼ÒÄÏÀ» ¸¸µéÁö ¸øÇß½À´Ï´Ù.", -"Å×À̺í '%-.64s'´Â À妽º¸¦ ¸¸µéÁö ¾Ê¾Ò½À´Ï´Ù. alter Å×À̺í¸í·ÉÀ» ÀÌ¿ëÇÏ¿© Å×À̺íÀ» ¼öÁ¤Çϼ¼¿ä...", -"ÇÊµå ±¸ºÐÀÚ ÀμöµéÀÌ ¿ÏÀüÇÏÁö ¾Ê½À´Ï´Ù. ¸Þ´º¾óÀ» ã¾Æ º¸¼¼¿ä.", -"BLOB·Î´Â °íÁ¤±æÀÌÀÇ lowlength¸¦ »ç¿ëÇÒ ¼ö ¾ø½À´Ï´Ù. 'fields terminated by'¸¦ »ç¿ëÇϼ¼¿ä.", -"'%-.64s' ÈÀÏ´Â µ¥ÀÌŸº£À̽º µð·ºÅ丮¿¡ Á¸ÀçÇϰųª ¸ðµÎ¿¡°Ô Àб⠰¡´ÉÇÏ¿©¾ß ÇÕ´Ï´Ù.", -"'%-.64s' ÈÀÏÀº ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù.", -"·¹ÄÚµå: %ld°³ »èÁ¦: %ld°³ ½ºÅµ: %ld°³ °æ°í: %ld°³", -"·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³", -"ºÎÁ¤È®ÇÑ ¼¹ö ÆÄÆ® Ű. »ç¿ëµÈ Ű ÆÄÆ®°¡ ½ºÆ®¸µÀÌ ¾Æ´Ï°Å³ª Ű ÆÄÆ®ÀÇ ±æÀ̰¡ ³Ê¹« ±é´Ï´Ù.", -"ALTER TABLE ¸í·ÉÀ¸·Î´Â ¸ðµç Ä®·³À» Áö¿ï ¼ö ¾ø½À´Ï´Ù. 
DROP TABLE ¸í·ÉÀ» ÀÌ¿ëÇϼ¼¿ä.", -"'%-.64s'¸¦ DROPÇÒ ¼ö ¾ø½À´Ï´Ù. Ä®·³À̳ª ۰¡ Á¸ÀçÇÏ´ÂÁö äũÇϼ¼¿ä.", -"·¹ÄÚµå: %ld°³ Áߺ¹: %ld°³ °æ°í: %ld°³", -"You can't specify target table '%-.64s' for update in FROM clause", -"¾Ë¼ö ¾ø´Â ¾²·¹µå id: %lu", -"¾²·¹µå(Thread) %luÀÇ ¼ÒÀ¯ÀÚ°¡ ¾Æ´Õ´Ï´Ù.", -"¾î¶² Å×ÀÌºíµµ »ç¿ëµÇÁö ¾Ê¾Ò½À´Ï´Ù.", -"Ä®·³ %-.64s¿Í SET¿¡¼ ½ºÆ®¸µÀÌ ³Ê¹« ¸¹½À´Ï´Ù.", -"Unique ·Î±×ÈÀÏ '%-.64s'¸¦ ¸¸µé¼ö ¾ø½À´Ï´Ù.(1-999)\n", -"Å×À̺í '%-.64s'´Â READ ¶ôÀÌ Àá°ÜÀÖ¾î¼ °»½ÅÇÒ ¼ö ¾ø½À´Ï´Ù.", -"Å×À̺í '%-.64s'´Â LOCK TABLES ¸í·ÉÀ¸·Î Àá±âÁö ¾Ê¾Ò½À´Ï´Ù.", -"BLOB Ä®·³ '%-.64s' ´Â µðÆúÆ® °ªÀ» °¡Áú ¼ö ¾ø½À´Ï´Ù.", -"'%-.100s' µ¥ÀÌŸº£À̽ºÀÇ À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù.", -"'%-.100s' Å×À̺í À̸§ÀÌ ºÎÁ¤È®ÇÕ´Ï´Ù.", -"SELECT ¸í·É¿¡¼ ³Ê¹« ¸¹Àº ·¹Äڵ带 ã±â ¶§¹®¿¡ ¸¹Àº ½Ã°£ÀÌ ¼Ò¿äµË´Ï´Ù. µû¶ó¼ WHERE ¹®À» Á¡°ËÇϰųª, ¸¸¾à SELECT°¡ okµÇ¸é SET SQL_BIG_SELECTS=1 ¿É¼ÇÀ» »ç¿ëÇϼ¼¿ä.", -"¾Ë¼ö ¾ø´Â ¿¡·¯ÀÔ´Ï´Ù.", -"¾Ë¼ö ¾ø´Â ¼öÇ๮ : '%-.64s'", -"'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆÄ¶ó¸ÞÅÍ", -"'%-.64s' ¼öÇ๮¿¡ ´ëÇÑ ºÎÁ¤È®ÇÑ ÆÄ¶ó¸ÞÅÍ", -"¾Ë¼ö ¾ø´Â Å×À̺í '%-.64s' (µ¥ÀÌŸº£À̽º %s)", -"Ä®·³ '%-.64s'´Â µÎ¹ø Á¤ÀǵǾî ÀÖÀ¾´Ï´Ù.", -"À߸øµÈ ±×·ì ÇÔ¼ö¸¦ »ç¿ëÇÏ¿´½À´Ï´Ù.", -"Å×À̺í '%-.64s'´Â È®Àå¸í·ÉÀ» ÀÌ¿ëÇÏÁö¸¸ ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù.", -"ÇϳªÀÇ Å×ÀÌºí¿¡¼´Â Àû¾îµµ ÇϳªÀÇ Ä®·³ÀÌ Á¸ÀçÇÏ¿©¾ß ÇÕ´Ï´Ù.", -"Å×À̺í '%-.64s'°¡ full³µ½À´Ï´Ù. ", -"¾Ë¼ö¾ø´Â ¾ð¾î Set: '%-.64s'", -"³Ê¹« ¸¹Àº Å×À̺íÀÌ JoinµÇ¾ú½À´Ï´Ù. MySQL¿¡¼´Â JOIN½Ã %d°³ÀÇ Å×ÀÌºí¸¸ »ç¿ëÇÒ ¼ö ÀÖ½À´Ï´Ù.", -"Ä®·³ÀÌ ³Ê¹« ¸¹½À´Ï´Ù.", -"³Ê¹« Å« row »çÀÌÁîÀÔ´Ï´Ù. BLOB¸¦ °è»êÇÏÁö ¾Ê°í ÃÖ´ë row »çÀÌÁî´Â %dÀÔ´Ï´Ù. ¾ó¸¶°£ÀÇ ÇʵåµéÀ» BLOB·Î ¹Ù²Ù¼Å¾ß °Ú±º¿ä..", -"¾²·¹µå ½ºÅÃÀÌ ³ÑÃÆ½À´Ï´Ù. »ç¿ë: %ld°³ ½ºÅÃ: %ld°³. ¸¸¾à ÇÊ¿ä½Ã ´õÅ« ½ºÅÃÀ» ¿øÇÒ¶§¿¡´Â 'mysqld -O thread_stack=#' ¸¦ Á¤ÀÇÇϼ¼¿ä", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"'%-.64s' Ä®·³ÀÌ UNIQUE³ª INDEX¸¦ »ç¿ëÇÏ¿´Áö¸¸ NOT NULLÀÌ Á¤ÀǵÇÁö ¾Ê¾Ò±º¿ä...", -"'%-.64s' ÇÔ¼ö¸¦ ·ÎµåÇÏÁö ¸øÇß½À´Ï´Ù.", -"'%-.64s' ÇÔ¼ö¸¦ ÃʱâÈ ÇÏÁö ¸øÇß½À´Ï´Ù.; %-.80s", -"°øÀ¯ ¶óÀ̹ö·¯¸®¸¦ À§ÇÑ ÆÐ½º°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù.", -"'%-.64s' ÇÔ¼ö´Â ÀÌ¹Ì Á¸ÀçÇÕ´Ï´Ù.", -"'%-.64s' °øÀ¯ ¶óÀ̹ö·¯¸®¸¦ ¿¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£: %d %s)", -"¶óÀ̹ö·¯¸®¿¡¼ '%-.64s' ÇÔ¼ö¸¦ ãÀ» ¼ö ¾ø½À´Ï´Ù.", -"'%-.64s' ÇÔ¼ö°¡ Á¤ÀǵǾî ÀÖÁö ¾Ê½À´Ï´Ù.", -"³Ê¹« ¸¹Àº ¿¬°á¿À·ù·Î ÀÎÇÏ¿© È£½ºÆ® '%-.64s'´Â ºí¶ôµÇ¾ú½À´Ï´Ù. 'mysqladmin flush-hosts'¸¦ ÀÌ¿ëÇÏ¿© ºí¶ôÀ» ÇØÁ¦Çϼ¼¿ä", -"'%-.64s' È£½ºÆ®´Â ÀÌ MySQL¼¹ö¿¡ Á¢¼ÓÇÒ Çã°¡¸¦ ¹ÞÁö ¸øÇß½À´Ï´Ù.", -"´ç½ÅÀº MySQL¼¹ö¿¡ À͸íÀÇ »ç¿ëÀÚ·Î Á¢¼ÓÀ» Çϼ̽À´Ï´Ù.À͸íÀÇ »ç¿ëÀÚ´Â ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ¾ø½À´Ï´Ù.", -"´ç½ÅÀº ´Ù¸¥»ç¿ëÀÚµéÀÇ ¾ÏÈ£¸¦ º¯°æÇÒ ¼ö ÀÖµµ·Ï µ¥ÀÌŸº£À̽º º¯°æ±ÇÇÑÀ» °¡Á®¾ß ÇÕ´Ï´Ù.", -"»ç¿ëÀÚ Å×ÀÌºí¿¡¼ ÀÏÄ¡ÇÏ´Â °ÍÀ» ãÀ» ¼ö ¾øÀ¾´Ï´Ù.", -"ÀÏÄ¡ÇÏ´Â Rows : %ld°³ º¯°æµÊ: %ld°³ °æ°í: %ld°³", -"»õ·Î¿î ¾²·¹µå¸¦ ¸¸µé ¼ö ¾ø½À´Ï´Ù.(¿¡·¯¹øÈ£ %d). ¸¸¾à ¿©À¯¸Þ¸ð¸®°¡ ÀÖ´Ù¸é OS-dependent¹ö±× ÀÇ ¸Þ´º¾ó ºÎºÐÀ» ã¾Æº¸½Ã¿À.", -"Row %ld¿¡¼ Ä®·³ Ä«¿îÆ®¿Í value Ä«¿îÅÍ¿Í ÀÏÄ¡ÇÏÁö ¾Ê½À´Ï´Ù.", -"Å×À̺íÀ» ´Ù½Ã ¿¼ö ¾ø±º¿ä: '%-.64s", -"NULL °ªÀ» À߸ø »ç¿ëÇϼ̱º¿ä...", -"regexp¿¡¼ '%-.64s'°¡ ³µ½À´Ï´Ù.", -"Mixing of GROUP Ä®·³s (MIN(),MAX(),COUNT(),...) with no GROUP Ä®·³s is illegal if there is no GROUP BY clause", -"»ç¿ëÀÚ '%-.32s' (È£½ºÆ® '%-.64s')¸¦ À§ÇÏ¿© Á¤ÀÇµÈ ±×·± ½ÂÀÎÀº ¾ø½À´Ï´Ù.", -"'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Å×À̺í '%-.64s'", -"'%-.16s' ¸í·ÉÀº ´ÙÀ½ »ç¿ëÀÚ¿¡°Ô °ÅºÎµÇ¾ú½À´Ï´Ù. : '%-.32s'@'%-.64s' for Ä®·³ '%-.64s' in Å×À̺í '%-.64s'", -"À߸øµÈ GRANT/REVOKE ¸í·É. 
¾î¶² ±Ç¸®¿Í ½ÂÀÎÀÌ »ç¿ëµÇ¾î Áú ¼ö ÀÖ´ÂÁö ¸Þ´º¾óÀ» º¸½Ã¿À.", -"½ÂÀÎ(GRANT)À» À§ÇÏ¿© »ç¿ëÇÑ »ç¿ëÀÚ³ª È£½ºÆ®ÀÇ °ªµéÀÌ ³Ê¹« ±é´Ï´Ù.", -"Å×À̺í '%-.64s.%s' ´Â Á¸ÀçÇÏÁö ¾Ê½À´Ï´Ù.", -"»ç¿ëÀÚ '%-.32s'(È£½ºÆ® '%-.64s')´Â Å×À̺í '%-.64s'¸¦ »ç¿ëÇϱâ À§ÇÏ¿© Á¤ÀÇµÈ ½ÂÀÎÀº ¾ø½À´Ï´Ù. ", -"»ç¿ëµÈ ¸í·ÉÀº ÇöÀçÀÇ MySQL ¹öÁ¯¿¡¼´Â ÀÌ¿ëµÇÁö ¾Ê½À´Ï´Ù.", -"SQL ±¸¹®¿¡ ¿À·ù°¡ ÀÖ½À´Ï´Ù.", -"Áö¿¬µÈ insert ¾²·¹µå°¡ Å×À̺í %-.64sÀÇ ¿ä±¸µÈ ¶ôÅ·À» ó¸®ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù.", -"³Ê¹« ¸¹Àº Áö¿¬ ¾²·¹µå¸¦ »ç¿ëÇϰí ÀÖ½À´Ï´Ù.", -"µ¥ÀÌŸº£À̽º Á¢¼ÓÀ» À§ÇÑ ¿¬°á %ld°¡ Áß´ÜµÊ : '%-.64s' »ç¿ëÀÚ: '%-.64s' (%s)", -"'max_allowed_packet'º¸´Ù ´õÅ« ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù.", -"¿¬°á ÆÄÀÌÇÁ·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"fcntl() ÇÔ¼ö·ÎºÎÅÍ ¿¡·¯°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"¼ø¼°¡ ¸ÂÁö¾Ê´Â ÆÐŶÀ» ¹Þ¾Ò½À´Ï´Ù.", -"Åë½Å ÆÐŶÀÇ ¾ÐÃàÇØÁ¦¸¦ ÇÒ ¼ö ¾ø¾ú½À´Ï´Ù.", -"Åë½Å ÆÐŶÀ» Àд Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Åë½Å ÆÐŶÀ» Àд Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Åë½Å ÆÐŶÀ» ±â·ÏÇÏ´Â Áß ¿À·ù°¡ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Åë½Å ÆÐÆÂÀ» ±â·ÏÇÏ´Â Áß timeoutÀÌ ¹ß»ýÇÏ¿´½À´Ï´Ù.", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has 
already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/norwegian-ny/errmsg.txt b/sql/share/norwegian-ny/errmsg.txt deleted file mode 100644 index 27f7a18f029..00000000000 --- a/sql/share/norwegian-ny/errmsg.txt +++ /dev/null @@ -1,323 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* Roy-Magne Mo rmo@www.hivolda.no 97 */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NEI", -"JA", -"Kan ikkje opprette fila '%-.64s' (Feilkode: %d)", -"Kan ikkje opprette tabellen '%-.64s' (Feilkode: %d)", -"Kan ikkje opprette databasen '%-.64s' (Feilkode: %d)", -"Kan ikkje opprette databasen '%-.64s'; databasen eksisterer", -"Kan ikkje fjerne (drop) '%-.64s'; databasen eksisterer ikkje", -"Feil ved fjerning (drop) av databasen (kan ikkje slette '%-.64s', feil %d)", -"Feil ved sletting av database (kan ikkje slette katalogen '%-.64s', feil %d)", -"Feil ved sletting av '%-.64s' (Feilkode: %d)", -"Kan ikkje lese posten i systemkatalogen", -"Kan ikkje lese statusen til '%-.64s' (Feilkode: %d)", -"Kan ikkje lese aktiv katalog(Feilkode: %d)", -"Kan ikkje låse fila (Feilkode: %d)", -"Kan ikkje åpne fila: '%-.64s' (Feilkode: %d)", -"Kan ikkje finne fila: '%-.64s' (Feilkode: %d)", -"Kan ikkje lese katalogen '%-.64s' (Feilkode: %d)", -"Kan ikkje skifte katalog til '%-.64s' (Feilkode: %d)", -"Posten har vorte endra sidan den sist vart lesen '%-.64s'", -"Ikkje meir diskplass (%s). 
Ventar på å få frigjort plass...", -"Kan ikkje skrive, flere like nyklar i tabellen '%-.64s'", -"Feil ved lukking av '%-.64s' (Feilkode: %d)", -"Feil ved lesing av '%-.64s' (Feilkode: %d)", -"Feil ved omdøyping av '%-.64s' til '%-.64s' (Feilkode: %d)", -"Feil ved skriving av fila '%-.64s' (Feilkode: %d)", -"'%-.64s' er låst mot oppdateringar", -"Sortering avbrote", -"View '%-.64s' eksisterar ikkje for '%-.64s'", -"Mottok feil %d fra tabell handterar", -"Tabell håndteraren for '%-.64s' har ikkje denne moglegheita", -"Kan ikkje finne posten i '%-.64s'", -"Feil informasjon i fila: '%-.64s'", -"Tabellen '%-.64s' har feil i nykkelfila; prøv å reparere den", -"Gammel nykkelfil for tabellen '%-.64s'; reparer den!", -"'%-.64s' er skrivetryggja", -"Ikkje meir minne. Start på nytt tenesten og prøv igjen (trengte %d bytar)", -"Ikkje meir sorteringsminne. Auk sorteringsminnet (sorteringsbffer storleik) for tenesten", -"Uventa slutt på fil (eof) ved lesing av fila '%-.64s' (Feilkode: %d)", -"For mange tilkoplingar (connections)", -"Tomt for tråd plass/minne", -"Kan ikkje få tak i vertsnavn for di adresse", -"Feil handtrykk (handshake)", -"Tilgang ikkje tillate for brukar: '%-.32s'@'%-.64s' til databasen '%-.64s' nekta", -"Tilgang ikke tillate for brukar: '%-.32s'@'%-.64s' (Brukar passord: %s)", -"Ingen database vald", -"Ukjent kommando", -"Kolonne '%-.64s' kan ikkje vere null", -"Ukjent database '%-.64s'", -"Tabellen '%-.64s' eksisterar allereide", -"Ukjent tabell '%-.64s'", -"Kolonne: '%-.64s' i tabell %s er ikkje eintydig", -"Tenar nedkopling er i gang", -"Ukjent felt '%-.64s' i tabell %s", -"Brukte '%-.64s' som ikkje var i group by", -"Kan ikkje gruppere på '%-.64s'", -"Uttrykket har summer (sum) funksjoner og kolonner i same uttrykk", -"Kolonne telling stemmer verdi telling", -"Identifikator '%-.64s' er for lang", -"Feltnamnet '%-.64s' eksisterte frå før", -"Nøkkelnamnet '%-.64s' eksisterte frå før", -"Like verdiar '%-.64s' for nykkel %d", -"Feil kolonne spesifikator for kolonne '%-.64s'", -"%s attmed '%-.64s' på line %d", -"Førespurnad var tom", -"Ikkje unikt tabell/alias: '%-.64s'", -"Ugyldig standardverdi for '%-.64s'", -"Fleire primærnyklar spesifisert", -"For mange nykler spesifisert. Maks %d nyklar tillatt", -"For mange nykkeldelar spesifisert. Maks %d delar tillatt", -"Spesifisert nykkel var for lang. Maks nykkellengde er %d", -"Nykkel kolonne '%-.64s' eksiterar ikkje i tabellen", -"Blob kolonne '%-.64s' kan ikkje brukast ved spesifikasjon av nyklar", -"For stor nykkellengde for felt '%-.64s' (maks = %d). Bruk BLOB istadenfor", -"Bare eitt auto felt kan være definert som nøkkel.", -"%s: klar for tilkoblingar", -"%s: Normal nedkopling\n", -"%s: Oppdaga signal %d. Avsluttar!\n", -"%s: Nedkopling komplett\n", -"%s: Påtvinga avslutning av tråd %ld brukar: '%-.64s'\n", -"Kan ikkje opprette IP socket", -"Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Oprett tabellen på nytt", -"Felt skiljer argumenta er ikkje som venta, sjå dokumentasjonen", -"Ein kan ikkje bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'.", -"Filen '%-.64s' må være i database-katalogen for å være lesbar for alle", -"Filen '%-.64s' eksisterte allereide", -"Poster: %ld Fjerna: %ld Hoppa over: %ld Åtvaringar: %ld", -"Poster: %ld Like: %ld", -"Feil delnykkel. Den brukte delnykkelen er ikkje ein streng eller den oppgitte lengda er lengre enn nykkellengden", -"Ein kan ikkje slette alle felt med ALTER TABLE. Bruk DROP TABLE istadenfor.", -"Kan ikkje DROP '%-.64s'. 
Undersøk om felt/nøkkel eksisterar.", -"Postar: %ld Like: %ld Åtvaringar: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Ukjent tråd id: %lu", -"Du er ikkje eigar av tråd %lu", -"Ingen tabellar i bruk", -"For mange tekststrengar felt %s og SET", -"Kan ikkje lage unikt loggfilnavn %s.(1-999)\n", -"Tabellen '%-.64s' var låst med READ lås og kan ikkje oppdaterast", -"Tabellen '%-.64s' var ikkje låst med LOCK TABLES", -"Blob feltet '%-.64s' kan ikkje ha ein standard verdi", -"Ugyldig database namn '%-.64s'", -"Ugyldig tabell namn '%-.64s'", -"SELECT ville undersøkje for mange postar og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt", -"Ukjend feil", -"Ukjend prosedyre %s", -"Feil parameter tal til prosedyra %s", -"Feil parameter til prosedyra %s", -"Ukjend tabell '%-.64s' i %s", -"Feltet '%-.64s' er spesifisert to gangar", -"Invalid use of group function", -"Table '%-.64s' uses a extension that doesn't exist in this MySQL version", -"A table must have at least 1 column", -"The table '%-.64s' is full", -"Unknown character set: '%-.64s'", -"Too many tables; MySQL can only use %d tables in a join", -"Too many columns", -"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", -"Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", -"Can't load function '%-.64s'", -"Can't initialize function '%-.64s'; %-.80s", -"No paths allowed for shared library", -"Function '%-.64s' already exists", -"Can't open shared library '%-.64s' (errno: %d %s)", -"Can't find function '%-.64s' in library'", -"Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has 
already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Mottok feil %d '%-.100s' fra %s", -"Mottok temporary feil %d '%-.100s' fra %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/norwegian/errmsg.txt b/sql/share/norwegian/errmsg.txt deleted file mode 100644 index 772f30e5d94..00000000000 --- a/sql/share/norwegian/errmsg.txt +++ /dev/null @@ -1,323 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* Roy-Magne Mo rmo@www.hivolda.no 97 */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NEI", -"JA", -"Kan ikke opprette fila '%-.64s' (Feilkode: %d)", -"Kan ikke opprette tabellen '%-.64s' (Feilkode: %d)", -"Kan ikke opprette databasen '%-.64s' (Feilkode: %d)", -"Kan ikke opprette databasen '%-.64s'; databasen eksisterer", -"Kan ikke fjerne (drop) '%-.64s'; databasen eksisterer ikke", -"Feil ved fjerning (drop) av databasen (kan ikke slette '%-.64s', feil %d)", -"Feil ved sletting av database (kan ikke slette katalogen '%-.64s', feil %d)", -"Feil ved sletting av '%-.64s' (Feilkode: %d)", -"Kan ikke lese posten i systemkatalogen", -"Kan ikke lese statusen til '%-.64s' (Feilkode: %d)", -"Kan ikke lese aktiv katalog(Feilkode: %d)", -"Kan ikke låse fila (Feilkode: %d)", -"Kan ikke åpne fila: '%-.64s' (Feilkode: %d)", -"Kan ikke finne fila: '%-.64s' (Feilkode: %d)", -"Kan ikke lese katalogen '%-.64s' (Feilkode: %d)", -"Kan ikke skifte katalog til '%-.64s' (Feilkode: %d)", -"Posten har blitt endret siden den ble lest '%-.64s'", -"Ikke mer diskplass (%s). 
Venter på å få frigjort plass...", -"Kan ikke skrive, flere like nøkler i tabellen '%-.64s'", -"Feil ved lukking av '%-.64s' (Feilkode: %d)", -"Feil ved lesing av '%-.64s' (Feilkode: %d)", -"Feil ved omdøping av '%-.64s' til '%-.64s' (Feilkode: %d)", -"Feil ved skriving av fila '%-.64s' (Feilkode: %d)", -"'%-.64s' er låst mot oppdateringer", -"Sortering avbrutt", -"View '%-.64s' eksisterer ikke for '%-.64s'", -"Mottok feil %d fra tabell håndterer", -"Tabell håndtereren for '%-.64s' har ikke denne muligheten", -"Kan ikke finne posten i '%-.64s'", -"Feil informasjon i filen: '%-.64s'", -"Tabellen '%-.64s' har feil i nøkkelfilen; forsøk å reparer den", -"Gammel nøkkelfil for tabellen '%-.64s'; reparer den!", -"'%-.64s' er skrivebeskyttet", -"Ikke mer minne. Star på nytt tjenesten og prøv igjen (trengte %d byter)", -"Ikke mer sorteringsminne. Øk sorteringsminnet (sort buffer size) for tjenesten", -"Uventet slutt på fil (eof) ved lesing av filen '%-.64s' (Feilkode: %d)", -"For mange tilkoblinger (connections)", -"Tomt for tråd plass/minne", -"Kan ikke få tak i vertsnavn for din adresse", -"Feil håndtrykk (handshake)", -"Tilgang nektet for bruker: '%-.32s'@'%-.64s' til databasen '%-.64s' nektet", -"Tilgang nektet for bruker: '%-.32s'@'%-.64s' (Bruker passord: %s)", -"Ingen database valgt", -"Ukjent kommando", -"Kolonne '%-.64s' kan ikke vere null", -"Ukjent database '%-.64s'", -"Tabellen '%-.64s' eksisterer allerede", -"Ukjent tabell '%-.64s'", -"Felt: '%-.64s' i tabell %s er ikke entydig", -"Database nedkobling er i gang", -"Ukjent kolonne '%-.64s' i tabell %s", -"Brukte '%-.64s' som ikke var i group by", -"Kan ikke gruppere på '%-.64s'", -"Uttrykket har summer (sum) funksjoner og kolonner i samme uttrykk", -"Felt telling stemmer verdi telling", -"Identifikator '%-.64s' er for lang", -"Feltnavnet '%-.64s' eksisterte fra før", -"Nøkkelnavnet '%-.64s' eksisterte fra før", -"Like verdier '%-.64s' for nøkkel %d", -"Feil kolonne spesifikator for felt '%-.64s'", -"%s nær '%-.64s' på linje %d", -"Forespørsel var tom", -"Ikke unikt tabell/alias: '%-.64s'", -"Ugyldig standardverdi for '%-.64s'", -"Fleire primærnøkle spesifisert", -"For mange nøkler spesifisert. Maks %d nøkler tillatt", -"For mange nøkkeldeler spesifisert. Maks %d deler tillatt", -"Spesifisert nøkkel var for lang. Maks nøkkellengde er is %d", -"Nøkkel felt '%-.64s' eksiterer ikke i tabellen", -"Blob felt '%-.64s' kan ikke brukes ved spesifikasjon av nøkler", -"For stor nøkkellengde for kolonne '%-.64s' (maks = %d). Bruk BLOB istedenfor", -"Bare ett auto felt kan være definert som nøkkel.", -"%s: klar for tilkoblinger", -"%s: Normal avslutning\n", -"%s: Oppdaget signal %d. Avslutter!\n", -"%s: Avslutning komplett\n", -"%s: Påtvinget avslutning av tråd %ld bruker: '%-.64s'\n", -"Kan ikke opprette IP socket", -"Tabellen '%-.64s' har ingen index som den som er brukt i CREATE INDEX. Gjenopprett tabellen", -"Felt skiller argumentene er ikke som forventet, se dokumentasjonen", -"En kan ikke bruke faste feltlengder med BLOB. Vennlisgt bruk 'fields terminated by'.", -"Filen '%-.64s' må være i database-katalogen for å være lesbar for alle", -"Filen '%-.64s' eksisterte allerede", -"Poster: %ld Fjernet: %ld Hoppet over: %ld Advarsler: %ld", -"Poster: %ld Like: %ld", -"Feil delnøkkel. Den brukte delnøkkelen er ikke en streng eller den oppgitte lengde er lengre enn nøkkel lengden", -"En kan ikke slette alle felt med ALTER TABLE. Bruk DROP TABLE isteden.", -"Kan ikke DROP '%-.64s'. 
Undersøk om felt/nøkkel eksisterer.", -"Poster: %ld Like: %ld Advarsler: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Ukjent tråd id: %lu", -"Du er ikke eier av tråden %lu", -"Ingen tabeller i bruk", -"For mange tekststrenger kolonne %s og SET", -"Kan ikke lage unikt loggfilnavn %s.(1-999)\n", -"Tabellen '%-.64s' var låst med READ lås og kan ikke oppdateres", -"Tabellen '%-.64s' var ikke låst med LOCK TABLES", -"Blob feltet '%-.64s' kan ikke ha en standard verdi", -"Ugyldig database navn '%-.64s'", -"Ugyldig tabell navn '%-.64s'", -"SELECT ville undersøke for mange poster og ville sannsynligvis ta veldig lang tid. Undersøk WHERE klausulen og bruk SET SQL_BIG_SELECTS=1 om SELECTen er korrekt", -"Ukjent feil", -"Ukjent prosedyre %s", -"Feil parameter antall til prosedyren %s", -"Feil parametre til prosedyren %s", -"Ukjent tabell '%-.64s' i %s", -"Feltet '%-.64s' er spesifisert to ganger", -"Invalid use of group function", -"Table '%-.64s' uses a extension that doesn't exist in this MySQL version", -"A table must have at least 1 column", -"The table '%-.64s' is full", -"Unknown character set: '%-.64s'", -"Too many tables; MySQL can only use %d tables in a join", -"Too many columns", -"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", -"Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", -"Can't load function '%-.64s'", -"Can't initialize function '%-.64s'; %-.80s", -"No paths allowed for shared library", -"Function '%-.64s' already exists", -"Can't open shared library '%-.64s' (errno: %d %s)", -"Can't find function '%-.64s' in library'", -"Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has 
already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Mottok feil %d '%-.100s' fa %s", -"Mottok temporary feil %d '%-.100s' fra %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/polish/errmsg.txt b/sql/share/polish/errmsg.txt deleted file mode 100644 index 634a4d93f42..00000000000 --- a/sql/share/polish/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Changed by Jaroslaw Lewandowski <jotel@itnet.com.pl> - Charset ISO-8859-2 -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NIE", -"TAK", -"Nie mo¿na stworzyæ pliku '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na stworzyæ tabeli '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na stworzyæ bazy danych '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na stworzyæ bazy danych '%-.64s'; baza danych ju¿ istnieje", -"Nie mo¿na usun?æ bazy danych '%-.64s'; baza danych nie istnieje", -"B³?d podczas usuwania bazy danych (nie mo¿na usun?æ '%-.64s', b³?d %d)", -"B³?d podczas usuwania bazy danych (nie mo¿na wykonaæ rmdir '%-.64s', b³?d %d)", -"B³?d podczas usuwania '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na odczytaæ rekordu z tabeli systemowej", -"Nie mo¿na otrzymaæ statusu '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na rozpoznaæ aktualnego katalogu (Kod b³êdu: %d)", -"Nie mo¿na zablokowaæ pliku (Kod b³êdu: %d)", -"Nie mo¿na otworzyæ pliku: '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na znale¥æ pliku: '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na odczytaæ katalogu '%-.64s' (Kod b³êdu: %d)", -"Nie mo¿na zmieniæ katalogu na '%-.64s' (Kod b³êdu: %d)", -"Rekord zosta³ zmieniony od ostaniego odczytania z tabeli '%-.64s'", -"Dysk pe³ny (%s). 
Oczekiwanie na zwolnienie miejsca...", -"Nie mo¿na zapisaæ, powtórzone klucze w tabeli '%-.64s'", -"B³?d podczas zamykania '%-.64s' (Kod b³êdu: %d)", -"B³?d podczas odczytu pliku '%-.64s' (Kod b³êdu: %d)", -"B³?d podczas zmieniania nazwy '%-.64s' na '%-.64s' (Kod b³êdu: %d)", -"B³?d podczas zapisywania pliku '%-.64s' (Kod b³êdu: %d)", -"'%-.64s' jest zablokowany na wypadek zmian", -"Sortowanie przerwane", -"Widok '%-.64s' nie istnieje dla '%-.64s'", -"Otrzymano b³?d %d z obs³ugi tabeli", -"Obs³uga tabeli '%-.64s' nie posiada tej opcji", -"Nie mo¿na znale¥æ rekordu w '%-.64s'", -"Niew³a?ciwa informacja w pliku: '%-.64s'", -"Niew³a?ciwy plik kluczy dla tabeli: '%-.64s'; spróbuj go naprawiæ", -"Plik kluczy dla tabeli '%-.64s' jest starego typu; napraw go!", -"'%-.64s' jest tylko do odczytu", -"Zbyt ma³o pamiêci. Uruchom ponownie demona i spróbuj ponownie (potrzeba %d bajtów)", -"Zbyt ma³o pamiêci dla sortowania. Zwiêksz wielko?æ bufora demona dla sortowania", -"Nieoczekiwany 'eof' napotkany podczas czytania z pliku '%-.64s' (Kod b³êdu: %d)", -"Zbyt wiele po³?czeñ", -"Zbyt ma³o miejsca/pamiêci dla w?tku", -"Nie mo¿na otrzymaæ nazwy hosta dla twojego adresu", -"Z³y uchwyt(handshake)", -"Access denied for user '%-.32s'@'%-.64s' to database '%-.64s'", -"Access denied for user '%-.32s'@'%-.64s' (using password: %s)", -"Nie wybrano ¿adnej bazy danych", -"Nieznana komenda", -"Kolumna '%-.64s' nie mo¿e byæ null", -"Nieznana baza danych '%-.64s'", -"Tabela '%-.64s' ju¿ istnieje", -"Nieznana tabela '%-.64s'", -"Kolumna: '%-.64s' w %s jest dwuznaczna", -"Trwa koñczenie dzia³ania serwera", -"Nieznana kolumna '%-.64s' w %s", -"U¿yto '%-.64s' bez umieszczenia w group by", -"Nie mo¿na grupowaæ po '%-.64s'", -"Zapytanie ma funkcje sumuj?ce i kolumny w tym samym zapytaniu", -"Liczba kolumn nie odpowiada liczbie warto?ci", -"Nazwa identyfikatora '%-.64s' jest zbyt d³uga", -"Powtórzona nazwa kolumny '%-.64s'", -"Powtórzony nazwa klucza '%-.64s'", -"Powtórzone wyst?pienie '%-.64s' dla klucza %d", -"B³êdna specyfikacja kolumny dla kolumny '%-.64s'", -"%s obok '%-.64s' w linii %d", -"Zapytanie by³o puste", -"Tabela/alias nie s? unikalne: '%-.64s'", -"Niew³a?ciwa warto?æ domy?lna dla '%-.64s'", -"Zdefiniowano wiele kluczy podstawowych", -"Okre?lono zbyt wiele kluczy. Dostêpnych jest maksymalnie %d kluczy", -"Okre?lono zbyt wiele czê?ci klucza. Dostêpnych jest maksymalnie %d czê?ci", -"Zdefinowany klucz jest zbyt d³ugi. Maksymaln? d³ugo?ci? klucza jest %d", -"Kolumna '%-.64s' zdefiniowana w kluczu nie istnieje w tabeli", -"Kolumna typu Blob '%-.64s' nie mo¿e byæ u¿yta w specyfikacji klucza", -"Zbyt du¿a d³ugo?æ kolumny '%-.64s' (maks. = %d). W zamian u¿yj typu BLOB", -"W tabeli mo¿e byæ tylko jedno pole auto i musi ono byæ zdefiniowane jako klucz", -"%s: gotowe do po³?czenia", -"%s: Standardowe zakoñczenie dzia³ania\n", -"%s: Otrzymano sygna³ %d. Koñczenie dzia³ania!\n", -"%s: Zakoñczenie dzia³ania wykonane\n", -"%s: Wymuszenie zamkniêcia w?tku %ld u¿ytkownik: '%-.64s'\n", -"Nie mo¿na stworzyæ socket'u IP", -"Tabela '%-.64s' nie ma indeksu takiego jak w CREATE INDEX. Stwórz tabelê", -"Nie oczekiwano separatora. Sprawd¥ podrêcznik", -"Nie mo¿na u¿yæ sta³ej d³ugo?ci wiersza z polami typu BLOB. U¿yj 'fields terminated by'.", -"Plik '%-.64s' musi znajdowaæ sie w katalogu bazy danych lub mieæ prawa czytania przez wszystkich", -"Plik '%-.64s' ju¿ istnieje", -"Recordów: %ld Usuniêtych: %ld Pominiêtych: %ld Ostrze¿eñ: %ld", -"Rekordów: %ld Duplikatów: %ld", -"B³êdna podczê?æ klucza. 
U¿yta czê?æ klucza nie jest ³añcuchem lub u¿yta d³ugo?æ jest wiêksza ni¿ czê?æ klucza", -"Nie mo¿na usun?æ wszystkich pól wykorzystuj?c ALTER TABLE. W zamian u¿yj DROP TABLE", -"Nie mo¿na wykonaæ operacji DROP '%-.64s'. Sprawd¥, czy to pole/klucz istnieje", -"Rekordów: %ld Duplikatów: %ld Ostrze¿eñ: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Nieznany identyfikator w?tku: %lu", -"Nie jeste? w³a?cicielem w?tku %lu", -"Nie ma ¿adej u¿ytej tabeli", -"Zbyt wiele ³añcuchów dla kolumny %s i polecenia SET", -"Nie mo¿na stworzyæ unikalnej nazwy pliku z logiem %s.(1-999)\n", -"Tabela '%-.64s' zosta³a zablokowana przez READ i nie mo¿e zostaæ zaktualizowana", -"Tabela '%-.64s' nie zosta³a zablokowana poleceniem LOCK TABLES", -"Pole typu blob '%-.64s' nie mo¿e mieæ domy?lnej warto?ci", -"Niedozwolona nazwa bazy danych '%-.64s'", -"Niedozwolona nazwa tabeli '%-.64s'...", -"Operacja SELECT bêdzie dotyczy³a zbyt wielu rekordów i prawdopodobnie zajmie bardzo du¿o czasu. Sprawd¥ warunek WHERE i u¿yj SQL_OPTION BIG_SELECTS=1 je?li operacja SELECT jest poprawna", -"Unknown error", -"Unkown procedure %s", -"Incorrect parameter count to procedure %s", -"Incorrect parameters to procedure %s", -"Unknown table '%-.64s' in %s", -"Field '%-.64s' specified twice", -"Invalid use of group function", -"Table '%-.64s' uses a extension that doesn't exist in this MySQL version", -"A table must have at least 1 column", -"The table '%-.64s' is full", -"Unknown character set: '%-.64s'", -"Too many tables; MySQL can only use %d tables in a join", -"Too many columns", -"Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. You have to change some columns to TEXT or BLOBs", -"Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld -O thread_stack=#' to specify a bigger stack if needed", -"Cross dependency found in OUTER JOIN; examine your ON conditions", -"Column '%-.32s' is used with UNIQUE or INDEX but is not defined as NOT NULL", -"Can't load function '%-.64s'", -"Can't initialize function '%-.64s'; %-.80s", -"No paths allowed for shared library", -"Function '%-.64s' already exists", -"Can't open shared library '%-.64s' (errno: %d %s)", -"Can't find function '%-.64s' in library'", -"Function '%-.64s' is not defined", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory you can consult the manual for any possible OS dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) 
with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) 
repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has 
already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/portuguese/errmsg.txt b/sql/share/portuguese/errmsg.txt deleted file mode 100644 index 65d80918072..00000000000 --- a/sql/share/portuguese/errmsg.txt +++ /dev/null @@ -1,323 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* Updated by Thiago Delgado Pinto - thiagodp@ieg.com.br - 06.07.2002 */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NÃO", -"SIM", -"Não pode criar o arquivo '%-.64s' (erro no. %d)", -"Não pode criar a tabela '%-.64s' (erro no. %d)", -"Não pode criar o banco de dados '%-.64s' (erro no. %d)", -"Não pode criar o banco de dados '%-.64s'; este banco de dados já existe", -"Não pode eliminar o banco de dados '%-.64s'; este banco de dados não existe", -"Erro ao eliminar banco de dados (não pode eliminar '%-.64s' - erro no. %d)", -"Erro ao eliminar banco de dados (não pode remover diretório '%-.64s' - erro no. %d)", -"Erro na remoção de '%-.64s' (erro no. %d)", -"Não pode ler um registro numa tabela do sistema", -"Não pode obter o status de '%-.64s' (erro no. %d)", -"Não pode obter o diretório corrente (erro no. %d)", -"Não pode travar o arquivo (erro no. %d)", -"Não pode abrir o arquivo '%-.64s' (erro no. %d)", -"Não pode encontrar o arquivo '%-.64s' (erro no. %d)", -"Não pode ler o diretório de '%-.64s' (erro no. %d)", -"Não pode mudar para o diretório '%-.64s' (erro no. %d)", -"Registro alterado desde a última leitura da tabela '%-.64s'", -"Disco cheio (%s). Aguardando alguém liberar algum espaço...", -"Não pode gravar. Chave duplicada na tabela '%-.64s'", -"Erro ao fechar '%-.64s' (erro no. %d)", -"Erro ao ler arquivo '%-.64s' (erro no. %d)", -"Erro ao renomear '%-.64s' para '%-.64s' (erro no. %d)", -"Erro ao gravar arquivo '%-.64s' (erro no. 
%d)", -"'%-.64s' está com travamento contra alterações", -"Ordenação abortada", -"Visão '%-.64s' não existe para '%-.64s'", -"Obteve erro %d no manipulador de tabelas", -"Manipulador de tabela para '%-.64s' não tem esta opção", -"Não pode encontrar registro em '%-.64s'", -"Informação incorreta no arquivo '%-.64s'", -"Arquivo de índice incorreto para tabela '%-.64s'; tente repará-lo", -"Arquivo de índice desatualizado para tabela '%-.64s'; repare-o!", -"Tabela '%-.64s' é somente para leitura", -"Sem memória. Reinicie o programa e tente novamente (necessita de %d bytes)", -"Sem memória para ordenação. Aumente tamanho do 'buffer' de ordenação", -"Encontrado fim de arquivo inesperado ao ler arquivo '%-.64s' (erro no. %d)", -"Excesso de conexões", -"Sem memória. Verifique se o mysqld ou algum outro processo está usando toda memória disponível. Se não, você pode ter que usar 'ulimit' para permitir ao mysqld usar mais memória ou você pode adicionar mais área de 'swap'", -"Não pode obter nome do 'host' para seu endereço", -"Negociação de acesso falhou", -"Acesso negado para o usuário '%-.32s'@'%-.64s' ao banco de dados '%-.64s'", -"Acesso negado para o usuário '%-.32s'@'%-.64s' (senha usada: %s)", -"Nenhum banco de dados foi selecionado", -"Comando desconhecido", -"Coluna '%-.64s' não pode ser vazia", -"Banco de dados '%-.64s' desconhecido", -"Tabela '%-.64s' já existe", -"Tabela '%-.64s' desconhecida", -"Coluna '%-.64s' em '%-.64s' é ambígua", -"'Shutdown' do servidor em andamento", -"Coluna '%-.64s' desconhecida em '%-.64s'", -"'%-.64s' não está em 'GROUP BY'", -"Não pode agrupar em '%-.64s'", -"Cláusula contém funções de soma e colunas juntas", -"Contagem de colunas não confere com a contagem de valores", -"Nome identificador '%-.100s' é longo demais", -"Nome da coluna '%-.64s' duplicado", -"Nome da chave '%-.64s' duplicado", -"Entrada '%-.64s' duplicada para a chave %d", -"Especificador de coluna incorreto para a coluna '%-.64s'", -"%s próximo a '%-.80s' na linha %d", -"Consulta (query) estava vazia", -"Tabela/alias '%-.64s' não única", -"Valor padrão (default) inválido para '%-.64s'", -"Definida mais de uma chave primária", -"Especificadas chaves demais. O máximo permitido são %d chaves", -"Especificadas partes de chave demais. O máximo permitido são %d partes", -"Chave especificada longa demais. O comprimento de chave máximo permitido é %d", -"Coluna chave '%-.64s' não existe na tabela", -"Coluna BLOB '%-.64s' não pode ser utilizada na especificação de chave para o tipo de tabela usado", -"Comprimento da coluna '%-.64s' grande demais (max = %d); use BLOB em seu lugar", -"Definição incorreta de tabela. Somente é permitido um único campo auto-incrementado e ele tem que ser definido como chave", -"%s: Pronto para conexões", -"%s: 'Shutdown' normal\n", -"%s: Obteve sinal %d. Abortando!\n", -"%s: 'Shutdown' completo\n", -"%s: Forçando finalização da 'thread' %ld - usuário '%-.32s'\n", -"Não pode criar o soquete IP", -"Tabela '%-.64s' não possui um índice como o usado em CREATE INDEX. Recrie a tabela", -"Argumento separador de campos não é o esperado. Cheque o manual", -"Você não pode usar comprimento de linha fixo com BLOBs. Por favor, use campos com comprimento limitado.", -"Arquivo '%-.64s' tem que estar no diretório do banco de dados ou ter leitura possível para todos", -"Arquivo '%-.80s' já existe", -"Registros: %ld - Deletados: %ld - Ignorados: %ld - Avisos: %ld", -"Registros: %ld - Duplicados: %ld", -"Sub parte da chave incorreta. 
A parte da chave usada não é uma 'string' ou o comprimento usado é maior que parte da chave ou o manipulador de tabelas não suporta sub chaves únicas", -"Você não pode deletar todas as colunas com ALTER TABLE; use DROP TABLE em seu lugar", -"Não se pode fazer DROP '%-.64s'. Confira se esta coluna/chave existe", -"Registros: %ld - Duplicados: %ld - Avisos: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"'Id' de 'thread' %lu desconhecido", -"Você não é proprietário da 'thread' %lu", -"Nenhuma tabela usada", -"'Strings' demais para coluna '%-.64s' e SET", -"Não pode gerar um nome de arquivo de 'log' único '%-.64s'.(1-999)\n", -"Tabela '%-.64s' foi travada com trava de leitura e não pode ser atualizada", -"Tabela '%-.64s' não foi travada com LOCK TABLES", -"Coluna BLOB '%-.64s' não pode ter um valor padrão (default)", -"Nome de banco de dados '%-.100s' incorreto", -"Nome de tabela '%-.100s' incorreto", -"O SELECT examinaria registros demais e provavelmente levaria muito tempo. Cheque sua cláusula WHERE e use SET SQL_BIG_SELECTS=1, se o SELECT estiver correto", -"Erro desconhecido", -"'Procedure' '%-.64s' desconhecida", -"Número de parâmetros incorreto para a 'procedure' '%-.64s'", -"Parâmetros incorretos para a 'procedure' '%-.64s'", -"Tabela '%-.64s' desconhecida em '%-.32s'", -"Coluna '%-.64s' especificada duas vezes", -"Uso inválido de função de agrupamento (GROUP)", -"Tabela '%-.64s' usa uma extensão que não existe nesta versão do MySQL", -"Uma tabela tem que ter pelo menos uma (1) coluna", -"Tabela '%-.64s' está cheia", -"Conjunto de caracteres '%-.64s' desconhecido", -"Tabelas demais. O MySQL pode usar somente %d tabelas em uma junção (JOIN)", -"Colunas demais", -"Tamanho de linha grande demais. O máximo tamanho de linha, não contando BLOBs, é %d. Você tem que mudar alguns campos para BLOBs", -"Estouro da pilha do 'thread'. Usados %ld de uma pilha de %ld. Use 'mysqld -O thread_stack=#' para especificar uma pilha maior, se necessário", -"Dependência cruzada encontrada em junção externa (OUTER JOIN); examine as condições utilizadas nas cláusulas 'ON'", -"Coluna '%-.64s' é usada com única (UNIQUE) ou índice (INDEX), mas não está definida como não-nula (NOT NULL)", -"Não pode carregar a função '%-.64s'", -"Não pode inicializar a função '%-.64s' - '%-.80s'", -"Não há caminhos (paths) permitidos para biblioteca compartilhada", -"Função '%-.64s' já existe", -"Não pode abrir biblioteca compartilhada '%-.64s' (erro no. '%d' - '%-.64s')", -"Não pode encontrar a função '%-.64s' na biblioteca", -"Função '%-.64s' não está definida", -"'Host' '%-.64s' está bloqueado devido a muitos erros de conexão. Desbloqueie com 'mysqladmin flush-hosts'", -"'Host' '%-.64s' não tem permissão para se conectar com este servidor MySQL", -"Você está usando o MySQL como usuário anônimo e usuários anônimos não têm permissão para mudar senhas", -"Você deve ter privilégios para atualizar tabelas no banco de dados mysql para ser capaz de mudar a senha de outros", -"Não pode encontrar nenhuma linha que combine na tabela usuário (user table)", -"Linhas que combinaram: %ld - Alteradas: %ld - Avisos: %ld", -"Não pode criar uma nova 'thread' (erro no. %d). 
Se você não estiver sem memória disponível, você pode consultar o manual sobre um possível 'bug' dependente do sistema operacional", -"Contagem de colunas não confere com a contagem de valores na linha %ld", -"Não pode reabrir a tabela '%-.64s", -"Uso inválido do valor NULL", -"Obteve erro '%-.64s' em regexp", -"Mistura de colunas agrupadas (com MIN(), MAX(), COUNT(), ...) com colunas não agrupadas é ilegal, se não existir uma cláusula de agrupamento (cláusula GROUP BY)", -"Não existe tal permissão (grant) definida para o usuário '%-.32s' no 'host' '%-.64s'", -"Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na tabela '%-.64s'", -"Comando '%-.16s' negado para o usuário '%-.32s'@'%-.64s' na coluna '%-.64s', na tabela '%-.64s'", -"Comando GRANT/REVOKE ilegal. Por favor consulte no manual quais privilégios podem ser usados.", -"Argumento de 'host' ou de usuário para o GRANT é longo demais", -"Tabela '%-.64s.%-.64s' não existe", -"Não existe tal permissão (grant) definido para o usuário '%-.32s' no 'host' '%-.64s', na tabela '%-.64s'", -"Comando usado não é permitido para esta versão do MySQL", -"Você tem um erro de sintaxe no seu SQL", -"'Thread' de inserção retardada (atrasada) pois não conseguiu obter a trava solicitada para tabela '%-.64s'", -"Excesso de 'threads' retardadas (atrasadas) em uso", -"Conexão %ld abortou para o banco de dados '%-.64s' - usuário '%-.32s' (%-.64s)", -"Obteve um pacote maior do que a taxa máxima de pacotes definida (max_allowed_packet)", -"Obteve um erro de leitura no 'pipe' da conexão", -"Obteve um erro em fcntl()", -"Obteve pacotes fora de ordem", -"Não conseguiu descomprimir pacote de comunicação", -"Obteve um erro na leitura de pacotes de comunicação", -"Obteve expiração de tempo (timeout) na leitura de pacotes de comunicação", -"Obteve um erro na escrita de pacotes de comunicação", -"Obteve expiração de tempo ('timeout') na escrita de pacotes de comunicação", -"'String' resultante é mais longa do que 'max_allowed_packet'", -"Tipo de tabela usado não permite colunas BLOB/TEXT", -"Tipo de tabela usado não permite colunas AUTO_INCREMENT", -"INSERT DELAYED não pode ser usado com a tabela '%-.64s', porque ela está travada com LOCK TABLES", -"Nome de coluna '%-.100s' incorreto", -"O manipulador de tabela usado não pode indexar a coluna '%-.64s'", -"Todas as tabelas contidas na tabela fundida (MERGE) não estão definidas identicamente", -"Não pode gravar, devido à restrição UNIQUE, na tabela '%-.64s'", -"Coluna BLOB '%-.64s' usada na especificação de chave sem o comprimento da chave", -"Todas as partes de uma chave primária devem ser não-nulas. Se você precisou usar um valor nulo (NULL) em uma chave, use a cláusula UNIQUE em seu lugar", -"O resultado consistiu em mais do que uma linha", -"Este tipo de tabela requer uma chave primária", -"Esta versão do MySQL não foi compilada com suporte a RAID", -"Você está usando modo de atualização seguro e tentou atualizar uma tabela sem uma cláusula WHERE que use uma coluna chave", -"Chave '%-.64s' não existe na tabela '%-.64s'", -"Não pode abrir a tabela", -"O manipulador de tabela não suporta %s", -"Não lhe é permitido executar este comando em uma transação", -"Obteve erro %d durante COMMIT", -"Obteve erro %d durante ROLLBACK", -"Obteve erro %d durante FLUSH_LOGS", -"Obteve erro %d durante CHECKPOINT", -"Conexão %ld abortada para banco de dados '%-.64s' - usuário '%-.32s' - 'host' `%-.64s' ('%-.64s')", -"O manipulador de tabela não suporta 'dump' binário de tabela", -"Binlog fechado. 
Não pode fazer RESET MASTER", -"Falhou na reconstrução do índice da tabela 'dumped' '%-.64s'", -"Erro no 'master' '%-.64s'", -"Erro de rede lendo do 'master'", -"Erro de rede gravando no 'master'", -"Não pode encontrar um índice para o texto todo que combine com a lista de colunas", -"Não pode executar o comando dado porque você tem tabelas ativas travadas ou uma transação ativa", -"Variável de sistema '%-.64s' desconhecida", -"Tabela '%-.64s' está marcada como danificada e deve ser reparada", -"Tabela '%-.64s' está marcada como danificada e a última reparação (automática?) falhou", -"Aviso: Algumas tabelas não-transacionais alteradas não puderam ser reconstituídas (rolled back)", -"Transações multi-declaradas (multi-statement transactions) requeriram mais do que o valor limite (max_binlog_cache_size) de bytes para armazenagem. Aumente o valor desta variável do mysqld e tente novamente", -"Esta operação não pode ser realizada com um 'slave' em execução. Execute STOP SLAVE primeiro", -"Esta operação requer um 'slave' em execução. Configure o 'slave' e execute START SLAVE", -"O servidor não está configurado como 'slave'. Acerte o arquivo de configuração ou use CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Não conseguiu criar 'thread' de 'slave'. Verifique os recursos do sistema", -"Usuário '%-.64s' já possui mais que o valor máximo de conexões (max_user_connections) ativas", -"Você pode usar apenas expressões constantes com SET", -"Tempo de espera (timeout) de travamento excedido. Tente reiniciar a transação.", -"O número total de travamentos excede o tamanho da tabela de travamentos", -"Travamentos de atualização não podem ser obtidos durante uma transação de tipo READ UNCOMMITTED", -"DROP DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura", -"CREATE DATABASE não permitido enquanto uma 'thread' está mantendo um travamento global de leitura", -"Argumentos errados para %s", -"Não é permitido a '%-.32s'@'%-.64s' criar novos usuários", -"Definição incorreta da tabela. Todas as tabelas contidas na junção devem estar no mesmo banco de dados.", -"Encontrado um travamento fatal (deadlock) quando tentava obter uma trava. Tente reiniciar a transação.", -"O tipo de tabela utilizado não suporta índices de texto completo (fulltext indexes)", -"Não pode acrescentar uma restrição de chave estrangeira", -"Não pode acrescentar uma linha filha: uma restrição de chave estrangeira falhou", -"Não pode apagar uma linha pai: uma restrição de chave estrangeira falhou", -"Erro conectando com o master: %-.128s", -"Erro rodando consulta no master: %-.128s", -"Erro quando executando comando %s: %-.128s", -"Uso errado de %s e %s", -"Os comandos SELECT usados têm diferente número de colunas", -"Não posso executar a consulta porque você tem um conflito de travamento de leitura", -"Mistura de tabelas transacional e não-transacional está desabilitada", -"Opção '%s' usada duas vezes no comando", -"Usuário '%-.64s' tem excedido o '%s' recurso (atual valor: %ld)", -"Acesso negado. 
Você precisa o privilégio %-.128s para essa operação", -"Variável '%-.64s' é uma SESSION variável e não pode ser usada com SET GLOBAL", -"Variável '%-.64s' é uma GLOBAL variável e deve ser configurada com SET GLOBAL", -"Variável '%-.64s' não tem um valor padrão", -"Variável '%-.64s' não pode ser configurada para o valor de '%-.64s'", -"Tipo errado de argumento para variável '%-.64s'", -"Variável '%-.64s' somente pode ser configurada, não lida", -"Errado uso/colocação de '%s'", -"Esta versão de MySQL não suporta ainda '%s'", -"Obteve fatal erro %d: '%-.128s' do master quando lendo dados do binary log", -"Slave SQL thread ignorado a consulta devido às normas de replicação-*-tabela", -"Variable '%-.64s' is a %s variable", -"Definição errada da chave estrangeira para '%-.64s': %s", -"Referência da chave e referência da tabela não coincidem", -"Operand should contain %d column(s)", -"Subconsulta retorna mais que 1 registro", -"Desconhecido manipulador de declaração preparado (%.*s) determinado para %s", -"Banco de dado de ajuda corrupto ou não existente", -"Referência cíclica em subconsultas", -"Convertendo coluna '%s' de %s para %s", -"Referência '%-.64s' não suportada (%s)", -"Cada tabela derivada deve ter seu próprio alias", -"Select %u foi reduzido durante otimização", -"Tabela '%-.64s' de um dos SELECTs não pode ser usada em %-.32s", -"Cliente não suporta o protocolo de autenticação exigido pelo servidor; considere a atualização do cliente MySQL", -"Todas as partes de uma SPATIAL KEY devem ser NOT NULL", -"COLLATION '%s' não é válida para CHARACTER SET '%s'", -"O slave já está rodando", -"O slave já está parado", -"Tamanho muito grande dos dados des comprimidos. O máximo tamanho é %d. (provavelmente, o comprimento dos dados descomprimidos está corrupto)", -"ZLIB: Não suficiente memória disponível", -"ZLIB: Não suficiente espaço no buffer emissor (provavelmente, o comprimento dos dados descomprimidos está corrupto)", -"ZLIB: Dados de entrada está corrupto", -"%d linha(s) foram cortada(s) por GROUP_CONCAT()", -"Conta de registro é menor que a conta de coluna na linha %ld", -"Conta de registro é maior que a conta de coluna na linha %ld", -"Dado truncado, NULL fornecido para NOT NULL coluna '%s' na linha %ld", -"Dado truncado, fora de alcance para coluna '%s' na linha %ld", -"Dado truncado para coluna '%s' na linha %ld", -"Usando engine de armazenamento %s para tabela '%s'", -"Combinação ilegal de collations (%s,%s) e (%s,%s) para operação '%s'", -"Não pode remover um ou mais dos usuários pedidos", -"Não pode revocar todos os privilégios, grant para um ou mais dos usuários pedidos", -"Ilegal combinação de collations (%s,%s), (%s,%s), (%s,%s) para operação '%s'", -"Ilegal combinação de collations para operação '%s'", -"Variável '%-.64s' não é uma variável componente (Não pode ser usada como XXXX.variável_nome)", -"Collation desconhecida: '%-.64s'", -"SSL parâmetros em CHANGE MASTER são ignorados porque este escravo MySQL foi compilado sem o SSL suporte. 
Os mesmos podem ser usados mais tarde quando o escravo MySQL com SSL seja iniciado.", -"Servidor está rodando em --secure-auth modo, porêm '%s'@'%s' tem senha no formato antigo; por favor troque a senha para o novo formato", -"Campo ou referência '%-.64s%s%-.64s%s%-.64s' de SELECT #%d foi resolvido em SELECT #%d", -"Parâmetro ou combinação de parâmetros errado para START SLAVE UNTIL", -"É recomendado para rodar com --skip-slave-start quando fazendo replicação passo-por-passo com START SLAVE UNTIL, de outra forma você não está seguro em caso de inesperada reinicialição do mysqld escravo", -"Thread SQL não pode ser inicializado tal que opções UNTIL são ignoradas", -"Incorreto nome de índice '%-.100s'", -"Incorreto nome de catálogo '%-.100s'", -"Falha em Query cache para configurar tamanho %lu, novo tamanho de query cache é %lu", -"Coluna '%-.64s' não pode ser parte de índice FULLTEXT", -"Key cache desconhecida '%-.100s'", -"MySQL foi inicializado em modo --skip-name-resolve. Você necesita reincializá-lo sem esta opção para este grant funcionar", -"Motor de tabela desconhecido '%s'", -"'%s' é desatualizado. Use '%s' em seu lugar", -"A tabela destino %-.100s do %s não é atualizável", -"O recurso '%s' foi desativado; você necessita MySQL construído com '%s' para ter isto funcionando", -"O servidor MySQL está rodando com a opção %s razão pela qual não pode executar esse commando", -"Coluna '%-.100s' tem valor duplicado '%-.64s' em %s" -"Truncado errado %-.32s valor: '%-.128s'" -"Incorreta definição de tabela; Pode ter somente uma coluna TIMESTAMP com CURRENT_TIMESTAMP em DEFAULT ou ON UPDATE cláusula" -"Inválida cláusula ON UPDATE para campo '%-.64s'", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/romanian/errmsg.txt b/sql/share/romanian/errmsg.txt deleted file mode 100644 index 01c22f00119..00000000000 --- a/sql/share/romanian/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Translated into Romanian by Stefan Saroiu - e-mail: tzoompy@cs.washington.edu -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NU", -"DA", -"Nu pot sa creez fisierul '%-.64s' (Eroare: %d)", -"Nu pot sa creez tabla '%-.64s' (Eroare: %d)", -"Nu pot sa creez baza de date '%-.64s' (Eroare: %d)", -"Nu pot sa creez baza de date '%-.64s'; baza de date exista deja", -"Nu pot sa drop baza de date '%-.64s'; baza da date este inexistenta", -"Eroare dropuind baza de date (nu pot sa sterg '%-.64s', Eroare: %d)", -"Eroare dropuind baza de date (nu pot sa rmdir '%-.64s', Eroare: %d)", -"Eroare incercind sa delete '%-.64s' (Eroare: %d)", -"Nu pot sa citesc cimpurile in tabla de system (system table)", -"Nu pot sa obtin statusul lui '%-.64s' (Eroare: %d)", -"Nu pot sa obtin directorul current (working directory) (Eroare: %d)", -"Nu pot sa lock fisierul (Eroare: %d)", -"Nu pot sa deschid fisierul: '%-.64s' (Eroare: %d)", -"Nu pot sa gasesc fisierul: '%-.64s' (Eroare: %d)", -"Nu pot sa citesc directorul '%-.64s' (Eroare: %d)", -"Nu pot sa schimb directorul '%-.64s' (Eroare: %d)", -"Cimpul a fost schimbat de la ultima citire a tabelei '%-.64s'", -"Hard-disk-ul este plin (%s). Astept sa se elibereze ceva spatiu...", -"Nu pot sa scriu (can't write), cheie duplicata in tabela '%-.64s'", -"Eroare inchizind '%-.64s' (errno: %d)", -"Eroare citind fisierul '%-.64s' (errno: %d)", -"Eroare incercind sa renumesc '%-.64s' in '%-.64s' (errno: %d)", -"Eroare scriind fisierul '%-.64s' (errno: %d)", -"'%-.64s' este blocat pentry schimbari (loccked against change)", -"Sortare intrerupta", -"View '%-.64s' nu exista pentru '%-.64s'", -"Eroarea %d obtinuta din handlerul tabelei", -"Handlerul tabelei pentru '%-.64s' nu are aceasta optiune", -"Nu pot sa gasesc recordul in '%-.64s'", -"Informatie incorecta in fisierul: '%-.64s'", -"Cheia fisierului incorecta pentru tabela: '%-.64s'; incearca s-o repari", -"Cheia fisierului e veche pentru tabela '%-.64s'; repar-o!", -"Tabela '%-.64s' e read-only", -"Out of memory. Porneste daemon-ul din nou si incearca inca o data (e nevoie de %d bytes)", -"Out of memory pentru sortare. Largeste marimea buffer-ului pentru sortare in daemon (sort buffer size)", -"Sfirsit de fisier neasteptat in citirea fisierului '%-.64s' (errno: %d)", -"Prea multe conectiuni", -"Out of memory; Verifica daca mysqld sau vreun alt proces foloseste toate memoria disponbila. Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui memoria disponbila. 
Altfel, trebuie sa folosesi 'ulimit' ca sa permiti lui mysqld sa foloseasca mai multa memorie ori adauga mai mult spatiu pentru swap (swap space)", -"Nu pot sa obtin hostname-ul adresei tale", -"Prost inceput de conectie (bad handshake)", -"Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' la baza de date '%-.64s'", -"Acces interzis pentru utilizatorul: '%-.32s'@'%-.64s' (Folosind parola: %s)", -"Nici o baza de data nu a fost selectata inca", -"Comanda invalida", -"Coloana '%-.64s' nu poate sa fie null", -"Baza de data invalida '%-.64s'", -"Tabela '%-.64s' exista deja", -"Tabela '%-.64s' este invalida", -"Coloana: '%-.64s' in %-.64s este ambigua", -"Terminarea serverului este in desfasurare", -"Coloana invalida '%-.64s' in '%-.64s'", -"'%-.64s' nu exista in clauza GROUP BY", -"Nu pot sa grupez pe (group on) '%-.64s'", -"Comanda are functii suma si coloane in aceeasi comanda", -"Numarul de coloane nu este acelasi cu numarul valoarei", -"Numele indentificatorului '%-.100s' este prea lung", -"Numele coloanei '%-.64s' e duplicat", -"Numele cheiei '%-.64s' e duplicat", -"Cimpul '%-.64s' e duplicat pentru cheia %d", -"Specificandul coloanei '%-.64s' este incorect", -"%s linga '%-.80s' pe linia %d", -"Query-ul a fost gol", -"Tabela/alias: '%-.64s' nu este unic", -"Valoarea de default este invalida pentru '%-.64s'", -"Chei primare definite de mai multe ori", -"Prea multe chei. Numarul de chei maxim este %d", -"Prea multe chei. Numarul de chei maxim este %d", -"Cheia specificata este prea lunga. Marimea maxima a unei chei este de %d", -"Coloana cheie '%-.64s' nu exista in tabela", -"Coloana de tip BLOB '%-.64s' nu poate fi folosita in specificarea cheii cu tipul de tabla folosit", -"Lungimea coloanei '%-.64s' este prea lunga (maximum = %d). Foloseste BLOB mai bine", -"Definitia tabelei este incorecta; Nu pot fi mai mult de o singura coloana de tip auto si aceasta trebuie definita ca cheie", -"%s: sint gata pentru conectii", -"%s: Terminare normala\n", -"%s: Semnal %d obtinut. Aborting!\n", -"%s: Terminare completa\n", -"%s: Terminare fortata a thread-ului %ld utilizatorului: '%-.32s'\n", -"Nu pot crea IP socket", -"Tabela '%-.64s' nu are un index ca acela folosit in CREATE INDEX. Re-creeaza tabela", -"Argumentul pentru separatorul de cimpuri este diferit de ce ma asteptam. Verifica manualul", -"Nu poti folosi lungime de cimp fix pentru BLOB-uri. Foloseste 'fields terminated by'.", -"Fisierul '%-.64s' trebuie sa fie in directorul bazei de data sau trebuie sa poata sa fie citit de catre toata lumea (verifica permisiile)", -"Fisierul '%-.80s' exista deja", -"Recorduri: %ld Sterse: %ld Sarite (skipped): %ld Atentionari (warnings): %ld", -"Recorduri: %ld Duplicate: %ld", -"Componentul cheii este incorrect. Componentul folosit al cheii nu este un sir sau lungimea folosita este mai lunga decit lungimea cheii", -"Nu poti sterge toate coloanele cu ALTER TABLE. Foloseste DROP TABLE in schimb", -"Nu pot sa DROP '%-.64s'. 
Verifica daca coloana/cheia exista", -"Recorduri: %ld Duplicate: %ld Atentionari (warnings): %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Id-ul: %lu thread-ului este necunoscut", -"Nu sinteti proprietarul threadului %lu", -"Nici o tabela folosita", -"Prea multe siruri pentru coloana %-.64s si SET", -"Nu pot sa generez un nume de log unic %-.64s.(1-999)\n", -"Tabela '%-.64s' a fost locked cu un READ lock si nu poate fi actualizata", -"Tabela '%-.64s' nu a fost locked cu LOCK TABLES", -"Coloana BLOB '%-.64s' nu poate avea o valoare default", -"Numele bazei de date este incorect '%-.100s'", -"Numele tabelei este incorect '%-.100s'", -"SELECT-ul ar examina prea multe cimpuri si probabil ar lua prea mult timp; verifica clauza WHERE si foloseste SET SQL_BIG_SELECTS=1 daca SELECT-ul e okay", -"Eroare unknown", -"Procedura unknown '%-.64s'", -"Procedura '%-.64s' are un numar incorect de parametri", -"Procedura '%-.64s' are parametrii incorecti", -"Tabla '%-.64s' invalida in %-.32s", -"Coloana '%-.64s' specificata de doua ori", -"Folosire incorecta a functiei group", -"Tabela '%-.64s' foloseste o extensire inexistenta in versiunea curenta de MySQL", -"O tabela trebuie sa aiba cel putin o coloana", -"Tabela '%-.64s' e plina", -"Set de caractere invalid: '%-.64s'", -"Prea multe tabele. MySQL nu poate folosi mai mult de %d tabele intr-un join", -"Prea multe coloane", -"Marimea liniei (row) prea mare. Marimea maxima a liniei, excluzind BLOB-urile este de %d. Trebuie sa schimbati unele cimpuri in BLOB-uri", -"Stack-ul thread-ului a fost depasit (prea mic): Folositi: %ld intr-un stack de %ld. Folositi 'mysqld -O thread_stack=#' ca sa specifici un stack mai mare", -"Dependinta incrucisata (cross dependency) gasita in OUTER JOIN. Examinati conditiile ON", -"Coloana '%-.64s' e folosita cu UNIQUE sau INDEX dar fara sa fie definita ca NOT NULL", -"Nu pot incarca functia '%-.64s'", -"Nu pot initializa functia '%-.64s'; %-.80s", -"Nici un paths nu e permis pentru o librarie shared", -"Functia '%-.64s' exista deja", -"Nu pot deschide libraria shared '%-.64s' (Eroare: %d %-.64s)", -"Nu pot gasi functia '%-.64s' in libraria", -"Functia '%-.64s' nu e definita", -"Host-ul '%-.64s' e blocat din cauza multelor erori de conectie. Poti deploca folosind 'mysqladmin flush-hosts'", -"Host-ul '%-.64s' nu este permis a se conecta la aceste server MySQL", -"Dumneavoastra folositi MySQL ca un utilizator anonim si utilizatorii anonimi nu au voie sa schime parolele", -"Trebuie sa aveti privilegii sa actualizati tabelele in bazele de date mysql ca sa puteti sa schimati parolele altora", -"Nu pot gasi nici o linie corespunzatoare in tabela utilizatorului", -"Linii identificate (matched): %ld Schimbate: %ld Atentionari (warnings): %ld", -"Nu pot crea un thread nou (Eroare %d). Daca mai aveti memorie disponibila in sistem, puteti consulta manualul - ar putea exista un potential bug in legatura cu sistemul de operare", -"Numarul de coloane nu corespunde cu numarul de valori la linia %ld", -"Nu pot redeschide tabela: '%-.64s'", -"Folosirea unei value NULL e invalida", -"Eroarea '%-.64s' obtinuta din expresia regulara (regexp)", -"Amestecarea de coloane GROUP (MIN(),MAX(),COUNT()...) 
fara coloane GROUP este ilegala daca nu exista o clauza GROUP BY", -"Nu exista un astfel de grant definit pentru utilzatorul '%-.32s' de pe host-ul '%-.64s'", -"Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru tabela '%-.64s'", -"Comanda %-.16s interzisa utilizatorului: '%-.32s'@'%-.64s' pentru coloana '%-.64s' in tabela '%-.64s'", -"Comanda GRANT/REVOKE ilegala. Consultati manualul in privinta privilegiilor ce pot fi folosite.", -"Argumentul host-ului sau utilizatorului pentru GRANT e prea lung", -"Tabela '%-.64s.%-.64s' nu exista", -"Nu exista un astfel de privilegiu (grant) definit pentru utilizatorul '%-.32s' de pe host-ul '%-.64s' pentru tabela '%-.64s'", -"Comanda folosita nu este permisa pentru aceasta versiune de MySQL", -"Aveti o eroare in sintaxa RSQL", -"Thread-ul pentru inserarea aminata nu a putut obtine lacatul (lock) pentru tabela %-.64s", -"Prea multe threaduri aminate care sint in uz", -"Conectie terminata %ld la baza de date: '%-.64s' utilizator: '%-.32s' (%-.64s)", -"Un packet mai mare decit 'max_allowed_packet' a fost primit", -"Eroare la citire din cauza lui 'connection pipe'", -"Eroare obtinuta de la fcntl()", -"Packets care nu sint ordonati au fost gasiti", -"Nu s-a putut decompresa pachetul de comunicatie (communication packet)", -"Eroare obtinuta citind pachetele de comunicatie (communication packets)", -"Timeout obtinut citind pachetele de comunicatie (communication packets)", -"Eroare in scrierea pachetelor de comunicatie (communication packets)", -"Timeout obtinut scriind pachetele de comunicatie (communication packets)", -"Sirul rezultat este mai lung decit 'max_allowed_packet'", -"Tipul de tabela folosit nu suporta coloane de tip BLOB/TEXT", -"Tipul de tabela folosit nu suporta coloane de tip AUTO_INCREMENT", -"INSERT DELAYED nu poate fi folosit cu tabela '%-.64s', deoarece este locked folosing LOCK TABLES", -"Nume increct de coloana '%-.100s'", -"Handler-ul tabelei folosite nu poate indexa coloana '%-.64s'", -"Toate tabelele din tabela MERGE nu sint definite identic", -"Nu pot scrie pe hard-drive, din cauza constraintului unic (unique constraint) pentru tabela '%-.64s'", -"Coloana BLOB '%-.64s' este folosita in specificarea unei chei fara ca o lungime de cheie sa fie folosita", -"Toate partile unei chei primare (PRIMARY KEY) trebuie sa fie NOT NULL; Daca aveti nevoie de NULL in vreo cheie, folositi UNIQUE in schimb", -"Resultatul constista din mai multe linii", -"Aceast tip de tabela are nevoie de o cheie primara", -"Aceasta versiune de MySQL, nu a fost compilata cu suport pentru RAID", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table 
'%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave; run STOP SLAVE first", -"This operation requires a running slave; configure slave and do START SLAVE", -"The server is not configured as slave; fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure; more error messages can be found in the MySQL error log", -"Could not create slave thread; check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index 
must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/russian/errmsg.txt b/sql/share/russian/errmsg.txt deleted file mode 100644 index df354d5797f..00000000000 --- a/sql/share/russian/errmsg.txt +++ /dev/null @@ -1,326 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Translation done in 2003 by Egor Egorov; Ensita.NET, http://www.ensita.net/ -*/ -/* charset: KOI8-R */ - -character-set=koi8r - -"hashchk", -"isamchk", -"îåô", -"äá", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÔÁÂÌÉÃÕ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. âÁÚÁ ÄÁÎÎÙÈ ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", -"îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ '%-.64s'. ôÁËÏÊ ÂÁÚÙ ÄÁÎÎÙÈ ÎÅÔ", -"ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ ÂÁÚÙ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ '%-.64s', ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÂÁÚÕ ÄÁÎÎÙÈ (ÎÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ËÁÔÁÌÏÇ '%-.64s', ÏÛÉÂËÁ: %d)", -"ïÛÉÂËÁ ÐÒÉ ÕÄÁÌÅÎÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ÚÁÐÉÓØ × ÓÉÓÔÅÍÎÏÊ ÔÁÂÌÉÃÅ", -"îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÓÔÁÔÕÓÎÕÀ ÉÎÆÏÒÍÁÃÉÀ Ï '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÏÐÒÅÄÅÌÉÔØ ÒÁÂÏÞÉÊ ËÁÔÁÌÏÇ (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÐÏÓÔÁ×ÉÔØ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÆÁÊÌÅ (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÐÒÏÞÉÔÁÔØ ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)", -"îÅ×ÏÚÍÏÖÎÏ ÐÅÒÅÊÔÉ × ËÁÔÁÌÏÇ '%-.64s' (ÏÛÉÂËÁ: %d)", -"úÁÐÉÓØ ÉÚÍÅÎÉÌÁÓØ Ó ÍÏÍÅÎÔÁ ÐÏÓÌÅÄÎÅÊ ×ÙÂÏÒËÉ × ÔÁÂÌÉÃÅ '%-.64s'", -"äÉÓË ÚÁÐÏÌÎÅÎ. (%s). 
ïÖÉÄÁÅÍ, ÐÏËÁ ËÔÏ-ÔÏ ÎÅ ÕÂÅÒÅÔ ÐÏÓÌÅ ÓÅÂÑ ÍÕÓÏÒ...", -"îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÚÁÐÉÓØ, ÄÕÂÌÉÒÕÀÝÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉÃÅ '%-.64s'", -"ïÛÉÂËÁ ÐÒÉ ÚÁËÒÙÔÉÉ '%-.64s' (ÏÛÉÂËÁ: %d)", -"ïÛÉÂËÁ ÞÔÅÎÉÑ ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)", -"ïÛÉÂËÁ ÐÒÉ ÐÅÒÅÉÍÅÎÏ×ÁÎÉÉ '%-.64s' × '%-.64s' (ÏÛÉÂËÁ: %d)", -"ïÛÉÂËÁ ÚÁÐÉÓÉ × ÆÁÊÌ '%-.64s' (ÏÛÉÂËÁ: %d)", -"'%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÄÌÑ ÉÚÍÅÎÅÎÉÊ", -"óÏÒÔÉÒÏ×ËÁ ÐÒÅÒ×ÁÎÁ", -"ðÒÅÄÓÔÁ×ÌÅÎÉÅ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ ÄÌÑ '%-.64s'", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d ÏÔ ÏÂÒÁÂÏÔÞÉËÁ ÔÁÂÌÉÃ", -"ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÕ ×ÏÚÍÏÖÎÏÓÔØ", -"îÅ×ÏÚÍÏÖÎÏ ÎÁÊÔÉ ÚÁÐÉÓØ × '%-.64s'", -"îÅËÏÒÒÅËÔÎÁÑ ÉÎÆÏÒÍÁÃÉÑ × ÆÁÊÌÅ '%-.64s'", -"îÅËÏÒÒÅËÔÎÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ: '%-.64s'. ðÏÐÒÏÂÕÊÔÅ ×ÏÓÓÔÁÎÏ×ÉÔØ ÅÇÏ", -"óÔÁÒÙÊ ÉÎÄÅËÓÎÙÊ ÆÁÊÌ ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'; ÏÔÒÅÍÏÎÔÉÒÕÊÔÅ ÅÇÏ!", -"ôÁÂÌÉÃÁ '%-.64s' ÐÒÅÄÎÁÚÎÁÞÅÎÁ ÔÏÌØËÏ ÄÌÑ ÞÔÅÎÉÑ", -"îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ. ðÅÒÅÚÁÐÕÓÔÉÔÅ ÓÅÒ×ÅÒ É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ (ÎÕÖÎÏ %d ÂÁÊÔ)", -"îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ ÄÌÑ ÓÏÒÔÉÒÏ×ËÉ. õ×ÅÌÉÞØÔÅ ÒÁÚÍÅÒ ÂÕÆÅÒÁ ÓÏÒÔÉÒÏ×ËÉ ÎÁ ÓÅÒ×ÅÒÅ", -"îÅÏÖÉÄÁÎÎÙÊ ËÏÎÅà ÆÁÊÌÁ '%-.64s' (ÏÛÉÂËÁ: %d)", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÓÏÅÄÉÎÅÎÉÊ", -"îÅÄÏÓÔÁÔÏÞÎÏ ÐÁÍÑÔÉ; ÕÄÏÓÔÏ×ÅÒØÔÅÓØ, ÞÔÏ mysqld ÉÌÉ ËÁËÏÊ-ÌÉÂÏ ÄÒÕÇÏÊ ÐÒÏÃÅÓÓ ÎÅ ÚÁÎÉÍÁÅÔ ×ÓÀ ÄÏÓÔÕÐÎÕÀ ÐÁÍÑÔØ. åÓÌÉ ÎÅÔ, ÔÏ ×Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ ulimit, ÞÔÏÂÙ ×ÙÄÅÌÉÔØ ÄÌÑ mysqld ÂÏÌØÛÅ ÐÁÍÑÔÉ, ÉÌÉ Õ×ÅÌÉÞÉÔØ ÏÂßÅÍ ÆÁÊÌÁ ÐÏÄËÁÞËÉ", -"îÅ×ÏÚÍÏÖÎÏ ÐÏÌÕÞÉÔØ ÉÍÑ ÈÏÓÔÁ ÄÌÑ ×ÁÛÅÇÏ ÁÄÒÅÓÁ", -"îÅËÏÒÒÅËÔÎÏÅ ÐÒÉ×ÅÔÓÔ×ÉÅ", -"äÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' ÄÏÓÔÕÐ Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÚÁËÒÙÔ", -"äÏÓÔÕÐ ÚÁËÒÙÔ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s'@'%-.64s' (ÂÙÌ ÉÓÐÏÌØÚÏ×ÁÎ ÐÁÒÏÌØ: %s)", -"âÁÚÁ ÄÁÎÎÙÈ ÎÅ ×ÙÂÒÁÎÁ", -"îÅÉÚ×ÅÓÔÎÁÑ ËÏÍÁÎÄÁ ËÏÍÍÕÎÉËÁÃÉÏÎÎÏÇÏ ÐÒÏÔÏËÏÌÁ", -"óÔÏÌÂÅà '%-.64s' ÎÅ ÍÏÖÅÔ ÐÒÉÎÉÍÁÔØ ×ÅÌÉÞÉÎÕ NULL", -"îÅÉÚ×ÅÓÔÎÁÑ ÂÁÚÁ ÄÁÎÎÙÈ '%-.64s'", -"ôÁÂÌÉÃÁ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", -"îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s'", -"óÔÏÌÂÅà '%-.64s' × %-.64s ÚÁÄÁÎ ÎÅÏÄÎÏÚÎÁÞÎÏ", -"óÅÒ×ÅÒ ÎÁÈÏÄÉÔÓÑ × ÐÒÏÃÅÓÓÅ ÏÓÔÁÎÏ×ËÉ", -"îÅÉÚ×ÅÓÔÎÙÊ ÓÔÏÌÂÅà '%-.64s' × '%-.64s'", -"'%-.64s' ÎÅ ÐÒÉÓÕÔÓÔ×ÕÅÔ × GROUP BY", -"îÅ×ÏÚÍÏÖÎÏ ÐÒÏÉÚ×ÅÓÔÉ ÇÒÕÐÐÉÒÏ×ËÕ ÐÏ '%-.64s'", -"÷ÙÒÁÖÅÎÉÅ ÓÏÄÅÒÖÉÔ ÇÒÕÐÐÏ×ÙÅ ÆÕÎËÃÉÉ É ÓÔÏÌÂÃÙ, ÎÏ ÎÅ ×ËÌÀÞÁÅÔ GROUP BY. á ËÁË ×Ù ÕÍÕÄÒÉÌÉÓØ ÐÏÌÕÞÉÔØ ÜÔÏ ÓÏÏÂÝÅÎÉÅ Ï ÏÛÉÂËÅ?", -"ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ", -"óÌÉÛËÏÍ ÄÌÉÎÎÙÊ ÉÄÅÎÔÉÆÉËÁÔÏÒ '%-.100s'", -"äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ÓÔÏÌÂÃÁ '%-.64s'", -"äÕÂÌÉÒÕÀÝÅÅÓÑ ÉÍÑ ËÌÀÞÁ '%-.64s'", -"äÕÂÌÉÒÕÀÝÁÑÓÑ ÚÁÐÉÓØ '%-.64s' ÐÏ ËÌÀÞÕ %d", -"îÅËÏÒÒÅËÔÎÙÊ ÏÐÒÅÄÅÌÉÔÅÌØ ÓÔÏÌÂÃÁ ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s'", -"%s ÏËÏÌÏ '%-.80s' ÎÁ ÓÔÒÏËÅ %d", -"úÁÐÒÏÓ ÏËÁÚÁÌÓÑ ÐÕÓÔÙÍ", -"ðÏ×ÔÏÒÑÀÝÁÑÓÑ ÔÁÂÌÉÃÁ/ÐÓÅ×ÄÏÎÉÍ '%-.64s'", -"îÅËÏÒÒÅËÔÎÏÅ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ '%-.64s'", -"õËÁÚÁÎÏ ÎÅÓËÏÌØËÏ ÐÅÒ×ÉÞÎÙÈ ËÌÀÞÅÊ", -"õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ËÌÀÞÅÊ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ËÌÀÞÅÊ", -"õËÁÚÁÎÏ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÞÁÓÔÅÊ ÓÏÓÔÁ×ÎÏÇÏ ËÌÀÞÁ. òÁÚÒÅÛÁÅÔÓÑ ÕËÁÚÙ×ÁÔØ ÎÅ ÂÏÌÅÅ %d ÞÁÓÔÅÊ", -"õËÁÚÁÎ ÓÌÉÛËÏÍ ÄÌÉÎÎÙÊ ËÌÀÞ. íÁËÓÉÍÁÌØÎÁÑ ÄÌÉÎÁ ËÌÀÞÁ ÓÏÓÔÁ×ÌÑÅÔ %d ÂÁÊÔ", -"ëÌÀÞÅ×ÏÊ ÓÔÏÌÂÅà '%-.64s' × ÔÁÂÌÉÃÅ ÎÅ ÓÕÝÅÓÔ×ÕÅÔ", -"óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÓÐÏÌØÚÏ×ÁÎ ËÁË ÚÎÁÞÅÎÉÅ ËÌÀÞÁ × ÔÁÂÌÉÃÅ ÔÁËÏÇÏ ÔÉÐÁ", -"óÌÉÛËÏÍ ÂÏÌØÛÁÑ ÄÌÉÎÁ ÓÔÏÌÂÃÁ '%-.64s' (ÍÁËÓÉÍÕÍ = %d). éÓÐÏÌØÚÕÊÔÅ ÔÉÐ BLOB ×ÍÅÓÔÏ ÔÅËÕÝÅÇÏ", -"îÅËÏÒÒÅËÔÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ: ÍÏÖÅÔ ÓÕÝÅÓÔ×Ï×ÁÔØ ÔÏÌØËÏ ÏÄÉÎ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÊ ÓÔÏÌÂÅÃ, É ÏÎ ÄÏÌÖÅÎ ÂÙÔØ ÏÐÒÅÄÅÌÅÎ ËÁË ËÌÀÞ", -"%s: çÏÔÏ× ÐÒÉÎÉÍÁÔØ ÓÏÅÄÉÎÅÎÉÑ.\n÷ÅÒÓÉÑ: '%s' ÓÏËÅÔ: '%s' ÐÏÒÔ: %d", -"%s: ëÏÒÒÅËÔÎÁÑ ÏÓÔÁÎÏ×ËÁ\n", -"%s: ðÏÌÕÞÅÎ ÓÉÇÎÁÌ %d. 
ðÒÅËÒÁÝÁÅÍ!\n", -"%s: ïÓÔÁÎÏ×ËÁ ÚÁ×ÅÒÛÅÎÁ\n", -"%s: ðÒÉÎÕÄÉÔÅÌØÎÏ ÚÁËÒÙ×ÁÅÍ ÐÏÔÏË %ld ÐÏÌØÚÏ×ÁÔÅÌÑ: '%-.32s'\n", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ IP-ÓÏËÅÔ", -"÷ ÔÁÂÌÉÃÅ '%-.64s' ÎÅÔ ÔÁËÏÇÏ ÉÎÄÅËÓÁ, ËÁË × CREATE INDEX. óÏÚÄÁÊÔÅ ÔÁÂÌÉÃÕ ÚÁÎÏ×Ï", -"áÒÇÕÍÅÎÔ ÒÁÚÄÅÌÉÔÅÌÑ ÐÏÌÅÊ - ÎÅ ÔÏÔ, ËÏÔÏÒÙÊ ÏÖÉÄÁÌÓÑ. ïÂÒÁÝÁÊÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ", -"æÉËÓÉÒÏ×ÁÎÎÙÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ Ó ÐÏÌÑÍÉ ÔÉÐÁ BLOB ÉÓÐÏÌØÚÏ×ÁÔØ ÎÅÌØÚÑ, ÐÒÉÍÅÎÑÊÔÅ 'fields terminated by'", -"æÁÊÌ '%-.64s' ÄÏÌÖÅÎ ÎÁÈÏÄÉÔØÓÑ × ÔÏÍ ÖÅ ËÁÔÁÌÏÇÅ, ÞÔÏ É ÂÁÚÁ ÄÁÎÎÙÈ, ÉÌÉ ÂÙÔØ ÏÂÝÅÄÏÓÔÕÐÎÙÍ ÄÌÑ ÞÔÅÎÉÑ", -"æÁÊÌ '%-.80s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", -"úÁÐÉÓÅÊ: %ld õÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld", -"úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld", -"îÅËÏÒÒÅËÔÎÁÑ ÞÁÓÔØ ËÌÀÞÁ. éÓÐÏÌØÚÕÅÍÁÑ ÞÁÓÔØ ËÌÀÞÁ ÎÅ Ñ×ÌÑÅÔÓÑ ÓÔÒÏËÏÊ, ÕËÁÚÁÎÎÁÑ ÄÌÉÎÁ ÂÏÌØÛÅ, ÞÅÍ ÄÌÉÎÁ ÞÁÓÔÉ ËÌÀÞÁ, ÉÌÉ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÕÎÉËÁÌØÎÙÅ ÞÁÓÔÉ ËÌÀÞÁ", -"îÅÌØÚÑ ÕÄÁÌÉÔØ ×ÓÅ ÓÔÏÌÂÃÙ Ó ÐÏÍÏÝØÀ ALTER TABLE. éÓÐÏÌØÚÕÊÔÅ DROP TABLE", -"îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ (DROP) '%-.64s'. õÂÅÄÉÔÅÓØ ÞÔÏ ÓÔÏÌÂÅÃ/ËÌÀÞ ÄÅÊÓÔ×ÉÔÅÌØÎÏ ÓÕÝÅÓÔ×ÕÅÔ", -"úÁÐÉÓÅÊ: %ld äÕÂÌÉËÁÔÏ×: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld", -"îÅ ÄÏÐÕÓËÁÅÔÓÑ ÕËÁÚÁÎÉÅ ÔÁÂÌÉÃÙ '%-.64s' × ÓÐÉÓËÅ ÔÁÂÌÉà FROM ÄÌÑ ×ÎÅÓÅÎÉÑ × ÎÅÅ ÉÚÍÅÎÅÎÉÊ", -"îÅÉÚ×ÅÓÔÎÙÊ ÎÏÍÅÒ ÐÏÔÏËÁ: %lu", -"÷Ù ÎÅ Ñ×ÌÑÅÔÅÓØ ×ÌÁÄÅÌØÃÅÍ ÐÏÔÏËÁ %lu", -"îÉËÁËÉÅ ÔÁÂÌÉÃÙ ÎÅ ÉÓÐÏÌØÚÏ×ÁÎÙ", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÚÎÁÞÅÎÉÊ ÄÌÑ ÓÔÏÌÂÃÁ %-.64s × SET", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÕÎÉËÁÌØÎÏÅ ÉÍÑ ÆÁÊÌÁ ÖÕÒÎÁÌÁ %-.64s.(1-999)\n", -"ôÁÂÌÉÃÁ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎÁ ÕÒÏ×ÎÅÍ READ lock É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ", -"ôÁÂÌÉÃÁ '%-.64s' ÎÅ ÂÙÌÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES", -"îÅ×ÏÚÍÏÖÎÏ ÕËÁÚÙ×ÁÔØ ÚÎÁÞÅÎÉÅ ÐÏ ÕÍÏÌÞÁÎÉÀ ÄÌÑ ÓÔÏÌÂÃÁ BLOB '%-.64s'", -"îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÂÁÚÙ ÄÁÎÎÙÈ '%-.100s'", -"îÅËÏÒÒÅËÔÎÏÅ ÉÍÑ ÔÁÂÌÉÃÙ '%-.100s'", -"äÌÑ ÔÁËÏÊ ×ÙÂÏÒËÉ SELECT ÄÏÌÖÅÎ ÂÕÄÅÔ ÐÒÏÓÍÏÔÒÅÔØ ÓÌÉÛËÏÍ ÍÎÏÇÏ ÚÁÐÉÓÅÊ É, ×ÉÄÉÍÏ, ÜÔÏ ÚÁÊÍÅÔ ÏÞÅÎØ ÍÎÏÇÏ ×ÒÅÍÅÎÉ. ðÒÏ×ÅÒØÔÅ ×ÁÛÅ ÕËÁÚÁÎÉÅ WHERE, É, ÅÓÌÉ × ÎÅÍ ×ÓÅ × ÐÏÒÑÄËÅ, ÕËÁÖÉÔÅ SET SQL_BIG_SELECTS=1", -"îÅÉÚ×ÅÓÔÎÁÑ ÏÛÉÂËÁ", -"îÅÉÚ×ÅÓÔÎÁÑ ÐÒÏÃÅÄÕÒÁ '%-.64s'", -"îÅËÏÒÒÅËÔÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÐÁÒÁÍÅÔÒÏ× ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'", -"îÅËÏÒÒÅËÔÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ ÐÒÏÃÅÄÕÒÙ '%-.64s'", -"îÅÉÚ×ÅÓÔÎÁÑ ÔÁÂÌÉÃÁ '%-.64s' × %-.32s", -"óÔÏÌÂÅà '%-.64s' ÕËÁÚÁÎ Ä×ÁÖÄÙ", -"îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÇÒÕÐÐÏ×ÙÈ ÆÕÎËÃÉÊ", -"÷ ÔÁÂÌÉÃÅ '%-.64s' ÉÓÐÏÌØÚÕÀÔÓÑ ×ÏÚÍÏÖÎÏÓÔÉ, ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÍÙÅ × ÜÔÏÊ ×ÅÒÓÉÉ MySQL", -"÷ ÔÁÂÌÉÃÅ ÄÏÌÖÅÎ ÂÙÔØ ËÁË ÍÉÎÉÍÕÍ ÏÄÉÎ ÓÔÏÌÂÅÃ", -"ôÁÂÌÉÃÁ '%-.64s' ÐÅÒÅÐÏÌÎÅÎÁ", -"îÅÉÚ×ÅÓÔÎÁÑ ËÏÄÉÒÏ×ËÁ '%-.64s'", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÔÁÂÌÉÃ. MySQL ÍÏÖÅÔ ÉÓÐÏÌØÚÏ×ÁÔØ ÔÏÌØËÏ %d ÔÁÂÌÉÃ × ÓÏÅÄÉÎÅÎÉÉ", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÓÔÏÌÂÃÏ×", -"óÌÉÛËÏÍ ÂÏÌØÛÏÊ ÒÁÚÍÅÒ ÚÁÐÉÓÉ. íÁËÓÉÍÁÌØÎÙÊ ÒÁÚÍÅÒ ÓÔÒÏËÉ, ÉÓËÌÀÞÁÑ ÐÏÌÑ BLOB, - %d. ÷ÏÚÍÏÖÎÏ, ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÍÅÎÉÔØ ÔÉÐ ÎÅËÏÔÏÒÙÈ ÐÏÌÅÊ ÎÁ BLOB", -"óÔÅË ÐÏÔÏËÏ× ÐÅÒÅÐÏÌÎÅÎ: ÉÓÐÏÌØÚÏ×ÁÎÏ: %ld ÉÚ %ld ÓÔÅËÁ. ðÒÉÍÅÎÑÊÔÅ 'mysqld -O thread_stack=#' ÄÌÑ ÕËÁÚÁÎÉÑ ÂÏÌØÛÅÇÏ ÒÁÚÍÅÒÁ ÓÔÅËÁ, ÅÓÌÉ ÎÅÏÂÈÏÄÉÍÏ", -"÷ OUTER JOIN ÏÂÎÁÒÕÖÅÎÁ ÐÅÒÅËÒÅÓÔÎÁÑ ÚÁ×ÉÓÉÍÏÓÔØ. ÷ÎÉÍÁÔÅÌØÎÏ ÐÒÏÁÎÁÌÉÚÉÒÕÊÔÅ Ó×ÏÉ ÕÓÌÏ×ÉÑ ON", -"óÔÏÌÂÅà '%-.64s' ÉÓÐÏÌØÚÕÅÔÓÑ × UNIQUE ÉÌÉ × INDEX, ÎÏ ÎÅ ÏÐÒÅÄÅÌÅÎ ËÁË NOT NULL", -"îÅ×ÏÚÍÏÖÎÏ ÚÁÇÒÕÚÉÔØ ÆÕÎËÃÉÀ '%-.64s'", -"îÅ×ÏÚÍÏÖÎÏ ÉÎÉÃÉÁÌÉÚÉÒÏ×ÁÔØ ÆÕÎËÃÉÀ '%-.64s'; %-.80s", -"îÅÄÏÐÕÓÔÉÍÏ ÕËÁÚÙ×ÁÔØ ÐÕÔÉ ÄÌÑ ÄÉÎÁÍÉÞÅÓËÉÈ ÂÉÂÌÉÏÔÅË", -"æÕÎËÃÉÑ '%-.64s' ÕÖÅ ÓÕÝÅÓÔ×ÕÅÔ", -"îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÄÉÎÁÍÉÞÅÓËÕÀ ÂÉÂÌÉÏÔÅËÕ '%-.64s' (ÏÛÉÂËÁ: %d %-.64s)", -"îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÆÕÎËÃÉÀ '%-.64s' × ÂÉÂÌÉÏÔÅËÅ", -"æÕÎËÃÉÑ '%-.64s' ÎÅ ÏÐÒÅÄÅÌÅÎÁ", -"èÏÓÔ '%-.64s' ÚÁÂÌÏËÉÒÏ×ÁÎ ÉÚ-ÚÁ ÓÌÉÛËÏÍ ÂÏÌØÛÏÇÏ ËÏÌÉÞÅÓÔ×Á ÏÛÉÂÏË ÓÏÅÄÉÎÅÎÉÑ. 
òÁÚÂÌÏËÉÒÏ×ÁÔØ ÅÇÏ ÍÏÖÎÏ Ó ÐÏÍÏÝØÀ 'mysqladmin flush-hosts'", -"èÏÓÔÕ '%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÐÏÄËÌÀÞÁÔØÓÑ Ë ÜÔÏÍÕ ÓÅÒ×ÅÒÕ MySQL", -"÷Ù ÉÓÐÏÌØÚÕÅÔÅ MySQL ÏÔ ÉÍÅÎÉ ÁÎÏÎÉÍÎÏÇÏ ÐÏÌØÚÏ×ÁÔÅÌÑ, Á ÁÎÏÎÉÍÎÙÍ ÐÏÌØÚÏ×ÁÔÅÌÑÍ ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÍÅÎÑÔØ ÐÁÒÏÌÉ", -"äÌÑ ÔÏÇÏ ÞÔÏÂÙ ÉÚÍÅÎÑÔØ ÐÁÒÏÌÉ ÄÒÕÇÉÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ, Õ ×ÁÓ ÄÏÌÖÎÙ ÂÙÔØ ÐÒÉ×ÉÌÅÇÉÉ ÎÁ ÉÚÍÅÎÅÎÉÅ ÔÁÂÌÉÃ × ÂÁÚÅ ÄÁÎÎÙÈ mysql", -"îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÄÈÏÄÑÝÕÀ ÚÁÐÉÓØ × ÔÁÂÌÉÃÅ ÐÏÌØÚÏ×ÁÔÅÌÅÊ", -"óÏ×ÐÁÌÏ ÚÁÐÉÓÅÊ: %ld éÚÍÅÎÅÎÏ: %ld ðÒÅÄÕÐÒÅÖÄÅÎÉÊ: %ld", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÎÏ×ÙÊ ÐÏÔÏË (ÏÛÉÂËÁ %d). åÓÌÉ ÜÔÏ ÎÅ ÓÉÔÕÁÃÉÑ, Ó×ÑÚÁÎÎÁÑ Ó ÎÅÈ×ÁÔËÏÊ ÐÁÍÑÔÉ, ÔÏ ×ÁÍ ÓÌÅÄÕÅÔ ÉÚÕÞÉÔØ ÄÏËÕÍÅÎÔÁÃÉÀ ÎÁ ÐÒÅÄÍÅÔ ÏÐÉÓÁÎÉÑ ×ÏÚÍÏÖÎÏÊ ÏÛÉÂËÉ ÒÁÂÏÔÙ × ËÏÎËÒÅÔÎÏÊ ïó", -"ëÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ× ÎÅ ÓÏ×ÐÁÄÁÅÔ Ó ËÏÌÉÞÅÓÔ×ÏÍ ÚÎÁÞÅÎÉÊ × ÚÁÐÉÓÉ %ld", -"îÅ×ÏÚÍÏÖÎÏ ÚÁÎÏ×Ï ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ '%-.64s'", -"îÅÐÒÁ×ÉÌØÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ×ÅÌÉÞÉÎÙ NULL", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ '%-.64s' ÏÔ ÒÅÇÕÌÑÒÎÏÇÏ ×ÙÒÁÖÅÎÉÑ", -"ïÄÎÏ×ÒÅÍÅÎÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÈ (GROUP) ÓÔÏÌÂÃÏ× (MIN(),MAX(),COUNT(),...) Ó ÎÅÓÇÒÕÐÐÉÒÏ×ÁÎÎÙÍÉ ÓÔÏÌÂÃÁÍÉ Ñ×ÌÑÅÔÓÑ ÎÅËÏÒÒÅËÔÎÙÍ, ÅÓÌÉ × ×ÙÒÁÖÅÎÉÉ ÅÓÔØ GROUP BY", -"ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ÈÏÓÔÅ '%-.64s'", -"ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'", -"ëÏÍÁÎÄÁ %-.16s ÚÁÐÒÅÝÅÎÁ ÐÏÌØÚÏ×ÁÔÅÌÀ '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏÌÂÃÁ '%-.64s' × ÔÁÂÌÉÃÅ '%-.64s'", -"îÅ×ÅÒÎÁÑ ËÏÍÁÎÄÁ GRANT ÉÌÉ REVOKE. ïÂÒÁÔÉÔÅÓØ Ë ÄÏËÕÍÅÎÔÁÃÉÉ, ÞÔÏÂÙ ×ÙÑÓÎÉÔØ, ËÁËÉÅ ÐÒÉ×ÉÌÅÇÉÉ ÍÏÖÎÏ ÉÓÐÏÌØÚÏ×ÁÔØ", -"óÌÉÛËÏÍ ÄÌÉÎÎÏÅ ÉÍÑ ÐÏÌØÚÏ×ÁÔÅÌÑ/ÈÏÓÔÁ ÄÌÑ GRANT", -"ôÁÂÌÉÃÁ '%-.64s.%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ", -"ôÁËÉÅ ÐÒÁ×Á ÎÅ ÏÐÒÅÄÅÌÅÎÙ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' ÎÁ ËÏÍÐØÀÔÅÒÅ '%-.64s' ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s'", -"üÔÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÐÕÓËÁÅÔÓÑ × ÄÁÎÎÏÊ ×ÅÒÓÉÉ MySQL", -"õ ×ÁÓ ÏÛÉÂËÁ × ÚÁÐÒÏÓÅ. éÚÕÞÉÔÅ ÄÏËÕÍÅÎÔÁÃÉÀ ÐÏ ÉÓÐÏÌØÚÕÅÍÏÊ ×ÅÒÓÉÉ MySQL ÎÁ ÐÒÅÄÍÅÔ ËÏÒÒÅËÔÎÏÇÏ ÓÉÎÔÁËÓÉÓÁ", -"ðÏÔÏË, ÏÂÓÌÕÖÉ×ÁÀÝÉÊ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert), ÎÅ ÓÍÏÇ ÐÏÌÕÞÉÔØ ÚÁÐÒÁÛÉ×ÁÅÍÕÀ ÂÌÏËÉÒÏ×ËÕ ÎÁ ÔÁÂÌÉÃÕ %-.64s", -"óÌÉÛËÏÍ ÍÎÏÇÏ ÐÏÔÏËÏ×, ÏÂÓÌÕÖÉ×ÁÀÝÉÈ ÏÔÌÏÖÅÎÎÕÀ ×ÓÔÁ×ËÕ (delayed insert)", -"ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' (%-.64s)", -"ðÏÌÕÞÅÎÎÙÊ ÐÁËÅÔ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ ÏÔ ÐÏÔÏËÁ ÓÏÅÄÉÎÅÎÉÑ (connection pipe)", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÏÔ fcntl()", -"ðÁËÅÔÙ ÐÏÌÕÞÅÎÙ × ÎÅ×ÅÒÎÏÍ ÐÏÒÑÄËÅ", -"îÅ×ÏÚÍÏÖÎÏ ÒÁÓÐÁËÏ×ÁÔØ ÐÁËÅÔ, ÐÏÌÕÞÅÎÎÙÊ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ ", -"ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ ", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ ÐÒÉ ÐÅÒÅÄÁÞÅ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ ", -"ðÏÌÕÞÅÎ ÔÁÊÍÁÕÔ × ÐÒÏÃÅÓÓÅ ÐÅÒÅÄÁÞÉ ÐÁËÅÔÁ ÞÅÒÅÚ ËÏÍÍÕÎÉËÁÃÉÏÎÎÙÊ ÐÒÏÔÏËÏÌ ", -"òÅÚÕÌØÔÉÒÕÀÝÁÑ ÓÔÒÏËÁ ÂÏÌØÛÅ, ÞÅÍ 'max_allowed_packet'", -"éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÔÉÐÙ BLOB/TEXT", -"éÓÐÏÌØÚÕÅÍÁÑ ÔÁÂÌÉÃÁ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Á×ÔÏÉÎËÒÅÍÅÎÔÎÙÅ ÓÔÏÌÂÃÙ", -"îÅÌØÚÑ ÉÓÐÏÌØÚÏ×ÁÔØ INSERT DELAYED ÄÌÑ ÔÁÂÌÉÃÙ '%-.64s', ÐÏÔÏÍÕ ÞÔÏ ÏÎÁ ÚÁÂÌÏËÉÒÏ×ÁÎÁ Ó ÐÏÍÏÝØÀ LOCK TABLES", -"îÅ×ÅÒÎÏÅ ÉÍÑ ÓÔÏÌÂÃÁ '%-.100s'", -"éÓÐÏÌØÚÏ×ÁÎÎÙÊ ÏÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÍÏÖÅÔ ÐÒÏÉÎÄÅËÓÉÒÏ×ÁÔØ ÓÔÏÌÂÅà '%-.64s'", -"îÅ ×ÓÅ ÔÁÂÌÉÃÙ × MERGE ÏÐÒÅÄÅÌÅÎÙ ÏÄÉÎÁËÏ×Ï", -"îÅ×ÏÚÍÏÖÎÏ ÚÁÐÉÓÁÔØ × ÔÁÂÌÉÃÕ '%-.64s' ÉÚ-ÚÁ ÏÇÒÁÎÉÞÅÎÉÊ ÕÎÉËÁÌØÎÏÇÏ ËÌÀÞÁ", -"óÔÏÌÂÅà ÔÉÐÁ BLOB '%-.64s' ÂÙÌ ÕËÁÚÁÎ × ÏÐÒÅÄÅÌÅÎÉÉ ËÌÀÞÁ ÂÅÚ ÕËÁÚÁÎÉÑ ÄÌÉÎÙ ËÌÀÞÁ", -"÷ÓÅ ÞÁÓÔÉ ÐÅÒ×ÉÞÎÏÇÏ ËÌÀÞÁ (PRIMARY KEY) ÄÏÌÖÎÙ ÂÙÔØ ÏÐÒÅÄÅÌÅÎÙ ËÁË NOT NULL; åÓÌÉ ×ÁÍ ÎÕÖÎÁ ÐÏÄÄÅÒÖËÁ ×ÅÌÉÞÉÎ NULL × ËÌÀÞÅ, ×ÏÓÐÏÌØÚÕÊÔÅÓØ ÉÎÄÅËÓÏÍ UNIQUE", -"÷ ÒÅÚÕÌØÔÁÔÅ ×ÏÚ×ÒÁÝÅÎÁ ÂÏÌÅÅ ÞÅÍ ÏÄÎÁ ÓÔÒÏËÁ", -"üÔÏÔ ÔÉÐ ÔÁÂÌÉÃÙ ÔÒÅÂÕÅÔ ÏÐÒÅÄÅÌÅÎÉÑ ÐÅÒ×ÉÞÎÏÇÏ 
ËÌÀÞÁ", -"üÔÁ ×ÅÒÓÉÑ MySQL ÓËÏÍÐÉÌÉÒÏ×ÁÎÁ ÂÅÚ ÐÏÄÄÅÒÖËÉ RAID", -"÷Ù ÒÁÂÏÔÁÅÔÅ × ÒÅÖÉÍÅ ÂÅÚÏÐÁÓÎÙÈ ÏÂÎÏ×ÌÅÎÉÊ (safe update mode) É ÐÏÐÒÏÂÏ×ÁÌÉ ÉÚÍÅÎÉÔØ ÔÁÂÌÉÃÕ ÂÅÚ ÉÓÐÏÌØÚÏ×ÁÎÉÑ ËÌÀÞÅ×ÏÇÏ ÓÔÏÌÂÃÁ × ÞÁÓÔÉ WHERE", -"ëÌÀÞ '%-.64s' ÎÅ ÓÕÝÅÓÔ×ÕÅÔ × ÔÁÂÌÉÃÅ '%-.64s'", -"îÅ×ÏÚÍÏÖÎÏ ÏÔËÒÙÔØ ÔÁÂÌÉÃÕ", -"ïÂÒÁÂÏÔÞÉË ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÜÔÏÇÏ: %s", -"÷ÁÍ ÎÅ ÒÁÚÒÅÛÅÎÏ ×ÙÐÏÌÎÑÔØ ÜÔÕ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËÃÉÉ", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ COMMIT", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ ROLLBACK", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ FLUSH_LOGS", -"ðÏÌÕÞÅÎÁ ÏÛÉÂËÁ %d × ÐÒÏÃÅÓÓÅ CHECKPOINT", -"ðÒÅÒ×ÁÎÏ ÓÏÅÄÉÎÅÎÉÅ %ld Ë ÂÁÚÅ ÄÁÎÎÙÈ '%-.64s' ÐÏÌØÚÏ×ÁÔÅÌÑ '%-.32s' Ó ÈÏÓÔÁ `%-.64s' (%-.64s)", -"ïÂÒÁÂÏÔÞÉË ÜÔÏÊ ÔÁÂÌÉÃÙ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ Ä×ÏÉÞÎÏÇÏ ÓÏÈÒÁÎÅÎÉÑ ÏÂÒÁÚÁ ÔÁÂÌÉÃÙ (dump)", -"ä×ÏÉÞÎÙÊ ÖÕÒÎÁÌ ÏÂÎÏ×ÌÅÎÉÑ ÚÁËÒÙÔ, ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ RESET MASTER", -"ïÛÉÂËÁ ÐÅÒÅÓÔÒÏÊËÉ ÉÎÄÅËÓÁ ÓÏÈÒÁÎÅÎÎÏÊ ÔÁÂÌÉÃÙ '%-.64s'", -"ïÛÉÂËÁ ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ: '%-.64s'", -"÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÞÔÅÎÉÑ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ", -"÷ÏÚÎÉËÌÁ ÏÛÉÂËÁ ÚÁÐÉÓÉ × ÐÒÏÃÅÓÓÅ ËÏÍÍÕÎÉËÁÃÉÉ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ", -"îÅ×ÏÚÍÏÖÎÏ ÏÔÙÓËÁÔØ ÐÏÌÎÏÔÅËÓÔÏ×ÙÊ (FULLTEXT) ÉÎÄÅËÓ, ÓÏÏÔ×ÅÔÓÔ×ÕÀÝÉÊ ÓÐÉÓËÕ ÓÔÏÌÂÃÏ×", -"îÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÕËÁÚÁÎÎÕÀ ËÏÍÁÎÄÕ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÐÒÉÓÕÔÓÔ×ÕÀÔ ÁËÔÉ×ÎÏ ÚÁÂÌÏËÉÒÏ×ÁÎÎÙÅ ÔÁÂÌÉÃÁ ÉÌÉ ÏÔËÒÙÔÁÑ ÔÒÁÎÚÁËÃÉÑ", -"îÅÉÚ×ÅÓÔÎÁÑ ÓÉÓÔÅÍÎÁÑ ÐÅÒÅÍÅÎÎÁÑ '%-.64s'", -"ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÄÏÌÖÎÁ ÐÒÏÊÔÉ ÐÒÏ×ÅÒËÕ É ÒÅÍÏÎÔ", -"ôÁÂÌÉÃÁ '%-.64s' ÐÏÍÅÞÅÎÁ ËÁË ÉÓÐÏÒÞÅÎÎÁÑ É ÐÏÓÌÅÄÎÉÊ (Á×ÔÏÍÁÔÉÞÅÓËÉÊ?) ÒÅÍÏÎÔ ÎÅ ÂÙÌ ÕÓÐÅÛÎÙÍ", -"÷ÎÉÍÁÎÉÅ: ÐÏ ÎÅËÏÔÏÒÙÍ ÉÚÍÅÎÅÎÎÙÍ ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍ ÔÁÂÌÉÃÁÍ ÎÅ×ÏÚÍÏÖÎÏ ÂÕÄÅÔ ÐÒÏÉÚ×ÅÓÔÉ ÏÔËÁÔ ÔÒÁÎÚÁËÃÉÉ", -"ôÒÁÎÚÁËÃÉÉ, ×ËÌÀÞÁÀÝÅÊ ÂÏÌØÛÏÅ ËÏÌÉÞÅÓÔ×Ï ËÏÍÁÎÄ, ÐÏÔÒÅÂÏ×ÁÌÏÓØ ÂÏÌÅÅ ÞÅÍ 'max_binlog_cache_size' ÂÁÊÔ. õ×ÅÌÉÞØÔÅ ÜÔÕ ÐÅÒÅÍÅÎÎÕÀ ÓÅÒ×ÅÒÁ mysqld É ÐÏÐÒÏÂÕÊÔÅ ÅÝÅ ÒÁÚ", -"üÔÕ ÏÐÅÒÁÃÉÀ ÎÅ×ÏÚÍÏÖÎÏ ×ÙÐÏÌÎÉÔØ ÐÒÉ ÒÁÂÏÔÁÀÝÅÍ ÐÏÔÏËÅ ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ STOP SLAVE", -"äÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ ÔÒÅÂÕÅÔÓÑ ÒÁÂÏÔÁÀÝÉÊ ÐÏÄÞÉÎÅÎÎÙÊ ÓÅÒ×ÅÒ. óÎÁÞÁÌÁ ×ÙÐÏÌÎÉÔÅ START SLAVE", -"üÔÏÔ ÓÅÒ×ÅÒ ÎÅ ÎÁÓÔÒÏÅÎ ËÁË ÐÏÄÞÉÎÅÎÎÙÊ. ÷ÎÅÓÉÔÅ ÉÓÐÒÁ×ÌÅÎÉÑ × ËÏÎÆÉÇÕÒÁÃÉÏÎÎÏÍ ÆÁÊÌÅ ÉÌÉ Ó ÐÏÍÏÝØÀ CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"îÅ×ÏÚÍÏÖÎÏ ÓÏÚÄÁÔØ ÐÏÔÏË ÐÏÄÞÉÎÅÎÎÏÇÏ ÓÅÒ×ÅÒÁ. 
ðÒÏ×ÅÒØÔÅ ÓÉÓÔÅÍÎÙÅ ÒÅÓÕÒÓÙ", -"õ ÐÏÌØÚÏ×ÁÔÅÌÑ %-.64s ÕÖÅ ÂÏÌØÛÅ ÞÅÍ 'max_user_connections' ÁËÔÉ×ÎÙÈ ÓÏÅÄÉÎÅÎÉÊ", -"÷Ù ÍÏÖÅÔÅ ÉÓÐÏÌØÚÏ×ÁÔØ × SET ÔÏÌØËÏ ËÏÎÓÔÁÎÔÎÙÅ ×ÙÒÁÖÅÎÉÑ", -"ôÁÊÍÁÕÔ ÏÖÉÄÁÎÉÑ ÂÌÏËÉÒÏ×ËÉ ÉÓÔÅË; ÐÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ", -"ïÂÝÅÅ ËÏÌÉÞÅÓÔ×Ï ÂÌÏËÉÒÏ×ÏË ÐÒÅ×ÙÓÉÌÏ ÒÁÚÍÅÒÙ ÔÁÂÌÉÃÙ ÂÌÏËÉÒÏ×ÏË", -"âÌÏËÉÒÏ×ËÉ ÏÂÎÏ×ÌÅÎÉÊ ÎÅÌØÚÑ ÐÏÌÕÞÉÔØ × ÐÒÏÃÅÓÓÅ ÞÔÅÎÉÑ ÎÅ ÐÒÉÎÑÔÏÊ (× ÒÅÖÉÍÅ READ UNCOMMITTED) ÔÒÁÎÚÁËÃÉÉ", -"îÅ ÄÏÐÕÓËÁÅÔÓÑ DROP DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ", -"îÅ ÄÏÐÕÓËÁÅÔÓÑ CREATE DATABASE, ÐÏËÁ ÐÏÔÏË ÄÅÒÖÉÔ ÇÌÏÂÁÌØÎÕÀ ÂÌÏËÉÒÏ×ËÕ ÞÔÅÎÉÑ", -"îÅ×ÅÒÎÙÅ ÐÁÒÁÍÅÔÒÙ ÄÌÑ %s", -"'%-.32s'@'%-.64s' ÎÅ ÒÁÚÒÅÛÁÅÔÓÑ ÓÏÚÄÁ×ÁÔØ ÎÏ×ÙÈ ÐÏÌØÚÏ×ÁÔÅÌÅÊ", -"îÅ×ÅÒÎÏÅ ÏÐÒÅÄÅÌÅÎÉÅ ÔÁÂÌÉÃÙ; ÷ÓÅ ÔÁÂÌÉÃÙ × MERGE ÄÏÌÖÎÙ ÐÒÉÎÁÄÌÅÖÁÔØ ÏÄÎÏÊ É ÔÏÊ ÖÅ ÂÁÚÅ ÄÁÎÎÙÈ", -"÷ÏÚÎÉËÌÁ ÔÕÐÉËÏ×ÁÑ ÓÉÔÕÁÃÉÑ × ÐÒÏÃÅÓÓÅ ÐÏÌÕÞÅÎÉÑ ÂÌÏËÉÒÏ×ËÉ; ðÏÐÒÏÂÕÊÔÅ ÐÅÒÅÚÁÐÕÓÔÉÔØ ÔÒÁÎÚÁËÃÉÀ", -"éÓÐÏÌØÚÕÅÍÙÊ ÔÉÐ ÔÁÂÌÉà ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ ÐÏÌÎÏÔÅËÓÔÏ×ÙÈ ÉÎÄÅËÓÏ×", -"îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÏÇÒÁÎÉÞÅÎÉÑ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ", -"îÅ×ÏÚÍÏÖÎÏ ÄÏÂÁ×ÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÄÏÞÅÒÎÀÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ", -"îÅ×ÏÚÍÏÖÎÏ ÕÄÁÌÉÔØ ÉÌÉ ÏÂÎÏ×ÉÔØ ÒÏÄÉÔÅÌØÓËÕÀ ÓÔÒÏËÕ: ÐÒÏ×ÅÒËÁ ÏÇÒÁÎÉÞÅÎÉÊ ×ÎÅÛÎÅÇÏ ËÌÀÞÁ ÎÅ ×ÙÐÏÌÎÑÅÔÓÑ", -"ïÛÉÂËÁ ÓÏÅÄÉÎÅÎÉÑ Ó ÇÏÌÏ×ÎÙÍ ÓÅÒ×ÅÒÏÍ: %-.128s", -"ïÛÉÂËÁ ×ÙÐÏÌÎÅÎÉÑ ÚÁÐÒÏÓÁ ÎÁ ÇÏÌÏ×ÎÏÍ ÓÅÒ×ÅÒÅ: %-.128s", -"ïÛÉÂËÁ ÐÒÉ ×ÙÐÏÌÎÅÎÉÉ ËÏÍÁÎÄÙ %s: %-.128s", -"îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ %s É %s", -"éÓÐÏÌØÚÏ×ÁÎÎÙÅ ÏÐÅÒÁÔÏÒÙ ×ÙÂÏÒËÉ (SELECT) ÄÁÀÔ ÒÁÚÎÏÅ ËÏÌÉÞÅÓÔ×Ï ÓÔÏÌÂÃÏ×", -"îÅ×ÏÚÍÏÖÎÏ ÉÓÐÏÌÎÉÔØ ÚÁÐÒÏÓ, ÐÏÓËÏÌØËÕ Õ ×ÁÓ ÕÓÔÁÎÏ×ÌÅÎÙ ËÏÎÆÌÉËÔÕÀÝÉÅ ÂÌÏËÉÒÏ×ËÉ ÞÔÅÎÉÑ", -"éÓÐÏÌØÚÏ×ÁÎÉÅ ÔÒÁÎÚÁËÃÉÏÎÎÙÈ ÔÁÂÌÉà ÎÁÒÑÄÕ Ó ÎÅÔÒÁÎÚÁËÃÉÏÎÎÙÍÉ ÚÁÐÒÅÝÅÎÏ", -"ïÐÃÉÑ '%s' Ä×ÁÖÄÙ ÉÓÐÏÌØÚÏ×ÁÎÁ × ×ÙÒÁÖÅÎÉÉ", -"ðÏÌØÚÏ×ÁÔÅÌØ '%-.64s' ÐÒÅ×ÙÓÉÌ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÒÅÓÕÒÓÁ '%s' (ÔÅËÕÝÅÅ ÚÎÁÞÅÎÉÅ: %ld)", -"÷ ÄÏÓÔÕÐÅ ÏÔËÁÚÁÎÏ. 
÷ÁÍ ÎÕÖÎÙ ÐÒÉ×ÉÌÅÇÉÉ %-.128s ÄÌÑ ÜÔÏÊ ÏÐÅÒÁÃÉÉ", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÐÏÔÏËÏ×ÏÊ (SESSION) ÐÅÒÅÍÅÎÎÏÊ É ÎÅ ÍÏÖÅÔ ÂÙÔØ ÉÚÍÅÎÅÎÁ Ó ÐÏÍÏÝØÀ SET GLOBAL", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' Ñ×ÌÑÅÔÓÑ ÇÌÏÂÁÌØÎÏÊ (GLOBAL) ÐÅÒÅÍÅÎÎÏÊ, É ÅÅ ÓÌÅÄÕÅÔ ÉÚÍÅÎÑÔØ Ó ÐÏÍÏÝØÀ SET GLOBAL", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÉÍÅÅÔ ÚÎÁÞÅÎÉÑ ÐÏ ÕÍÏÌÞÁÎÉÀ", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÎÅ ÍÏÖÅÔ ÂÙÔØ ÕÓÔÁÎÏ×ÌÅÎÁ × ÚÎÁÞÅÎÉÅ '%-.64s'", -"îÅ×ÅÒÎÙÊ ÔÉÐ ÁÒÇÕÍÅÎÔÁ ÄÌÑ ÐÅÒÅÍÅÎÎÏÊ '%-.64s'", -"ðÅÒÅÍÅÎÎÁÑ '%-.64s' ÍÏÖÅÔ ÂÙÔØ ÔÏÌØËÏ ÕÓÔÁÎÏ×ÌÅÎÁ, ÎÏ ÎÅ ÓÞÉÔÁÎÁ", -"îÅ×ÅÒÎÏÅ ÉÓÐÏÌØÚÏ×ÁÎÉÅ ÉÌÉ × ÎÅ×ÅÒÎÏÍ ÍÅÓÔÅ ÕËÁÚÁÎ '%s'", -"üÔÁ ×ÅÒÓÉÑ MySQL ÐÏËÁ ÅÝÅ ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔ '%s'", -"ðÏÌÕÞÅÎÁ ÎÅÉÓÐÒÁ×ÉÍÁÑ ÏÛÉÂËÁ %d: '%-.128s' ÏÔ ÇÏÌÏ×ÎÏÇÏ ÓÅÒ×ÅÒÁ × ÐÒÏÃÅÓÓÅ ×ÙÂÏÒËÉ ÄÁÎÎÙÈ ÉÚ Ä×ÏÉÞÎÏÇÏ ÖÕÒÎÁÌÁ", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"ïÐÅÒÁÎÄ ÄÏÌÖÅÎ ÓÏÄÅÒÖÁÔØ %d ËÏÌÏÎÏË", -"ðÏÄÚÁÐÒÏÓ ×ÏÚ×ÒÁÝÁÅÔ ÂÏÌÅÅ ÏÄÎÏÊ ÚÁÐÉÓÉ", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"ãÉËÌÉÞÅÓËÁÑ ÓÓÙÌËÁ ÎÁ ÐÏÄÚÁÐÒÏÓ", -"ðÒÅÏÂÒÁÚÏ×ÁÎÉÅ ÐÏÌÑ '%s' ÉÚ %s × %s", -"óÓÙÌËÁ '%-.64s' ÎÅ ÐÏÄÄÅÒÖÉ×ÁÅÔÓÑ (%s)", -"Every derived table must have its own alias", -"Select %u ÂÙÌ ÕÐÒÁÚÄÎÅÎ × ÐÒÏÃÅÓÓÅ ÏÐÔÉÍÉÚÁÃÉÉ", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"óÅÒ×ÅÒ ÚÁÐÕÝÅÎ × ÒÅÖÉÍÅ --secure-auth (ÂÅÚÏÐÁÓÎÏÊ Á×ÔÏÒÉÚÁÃÉÉ), ÎÏ ÄÌÑ ÐÏÌØÚÏ×ÁÔÅÌÑ '%s'@'%s' ÐÁÒÏÌØ ÓÏÈÒÁÎ£Î × ÓÔÁÒÏÍ ÆÏÒÍÁÔÅ; ÎÅÏÂÈÏÄÉÍÏ ÏÂÎÏ×ÉÔØ ÆÏÒÍÁÔ ÐÁÒÏÌÑ", -"ðÏÌÅ ÉÌÉ ÓÓÙÌËÁ '%-.64s%s%-.64s%s%-.64s' ÉÚ SELECTÁ #%d ÂÙÌÁ ÎÁÊÄÅÎÁ × SELECTÅ #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", 
-"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"ëÅÛ ÚÁÐÒÏÓÏ× ÎÅ ÍÏÖÅÔ ÕÓÔÁÎÏ×ÉÔØ ÒÁÚÍÅÒ %lu, ÎÏ×ÙÊ ÒÁÚÍÅÒ ËÅÛÁ ÚÐÒÏÓÏ× - %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"ôÁÂÌÉÃÁ %-.100s × %s ÎÅ ÍÏÖÅÔ ÉÚÍÅÎÑÔÓÑ", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/serbian/errmsg.txt b/sql/share/serbian/errmsg.txt deleted file mode 100644 index 06270f621e4..00000000000 --- a/sql/share/serbian/errmsg.txt +++ /dev/null @@ -1,314 +0,0 @@ -/* Copyright Abandoned 1997 TCX DataKonsult AB & Monty Program KB & Detron HB - This file is public domain and comes with NO WARRANTY of any kind */ - -/* Serbian Translation, version 1.0: - Copyright 2002 Vladimir Kraljevic, vladimir_kraljevic@yahoo.com - This file is public domain and comes with NO WARRANTY of any kind. - Charset: cp1250 -*/ - -character-set=cp1250 - -"hashchk", -"isamchk", -"NE", -"DA", -"Ne mogu da kreiram file '%-.64s' (errno: %d)", -"Ne mogu da kreiram tabelu '%-.64s' (errno: %d)", -"Ne mogu da kreiram bazu '%-.64s' (errno: %d)", -"Ne mogu da kreiram bazu '%-.64s'; baza veæ postoji.", -"Ne mogu da izbrišem bazu '%-.64s'; baza ne postoji.", -"Ne mogu da izbrišem bazu (ne mogu da izbrišem '%-.64s', errno: %d)", -"Ne mogu da izbrišem bazu (ne mogu da izbrišem direktorijum '%-.64s', errno: %d)", -"Greška pri brisanju '%-.64s' (errno: %d)", -"Ne mogu da proèitam slog iz sistemske tabele", -"Ne mogu da dobijem stanje file-a '%-.64s' (errno: %d)", -"Ne mogu da dobijem trenutni direktorijum (errno: %d)", -"Ne mogu da zakljuèam file (errno: %d)", -"Ne mogu da otvorim file: '%-.64s' (errno: %d)", -"Ne mogu da pronaðem file: '%-.64s' (errno: %d)", -"Ne mogu da proèitam direktorijum '%-.64s' (errno: %d)", -"Ne mogu da promenim direktorijum na '%-.64s' (errno: %d)", -"Slog je promenjen od zadnjeg èitanja tabele '%-.64s'", -"Disk je pun (%s). 
Èekam nekoga da doðe i oslobodi nešto mesta...", -"Ne mogu da pišem pošto postoji duplirani kljuè u tabeli '%-.64s'", -"Greška pri zatvaranju '%-.64s' (errno: %d)", -"Greška pri èitanju file-a '%-.64s' (errno: %d)", -"Greška pri promeni imena '%-.64s' na '%-.64s' (errno: %d)", -"Greška pri upisu '%-.64s' (errno: %d)", -"'%-.64s' je zakljuèan za upis", -"Sortiranje je prekinuto", -"View '%-.64s' ne postoji za '%-.64s'", -"Handler tabela je vratio grešku %d", -"Handler tabela za '%-.64s' nema ovu opciju", -"Ne mogu da pronaðem slog u '%-.64s'", -"Pogrešna informacija u file-u: '%-.64s'", -"Pogrešan key file za tabelu: '%-.64s'; probajte da ga ispravite", -"Zastareo key file za tabelu '%-.64s'; ispravite ga", -"Tabelu '%-.64s' je dozvoljeno samo èitati", -"Nema memorije. Restartujte MySQL server i probajte ponovo (potrebno je %d byte-ova)", -"Nema memorije za sortiranje. Poveæajte velièinu sort buffer-a MySQL server-u", -"Neoèekivani kraj pri èitanju file-a '%-.64s' (errno: %d)", -"Previše konekcija", -"Nema memorije; Proverite da li MySQL server ili neki drugi proces koristi svu slobodnu memoriju. (UNIX: Ako ne, probajte da upotrebite 'ulimit' komandu da biste dozvolili daemon-u da koristi više memorije ili probajte da dodate više swap memorije)", -"Ne mogu da dobijem ime host-a za vašu IP adresu", -"Loš poèetak komunikacije (handshake)", -"Pristup je zabranjen korisniku '%-.32s'@'%-.64s' za bazu '%-.64s'", -"Pristup je zabranjen korisniku '%-.32s'@'%-.64s' (koristi lozinku: '%s')", -"Ni jedna baza nije selektovana", -"Nepoznata komanda", -"Kolona '%-.64s' ne može biti NULL", -"Nepoznata baza '%-.64s'", -"Tabela '%-.64s' veæ postoji", -"Nepoznata tabela '%-.64s'", -"Kolona '%-.64s' u %-.64s nije jedinstvena u kontekstu", -"Gašenje servera je u toku", -"Nepoznata kolona '%-.64s' u '%-.64s'", -"Entitet '%-.64s' nije naveden u komandi 'GROUP BY'", -"Ne mogu da grupišem po '%-.64s'", -"Izraz ima 'SUM' agregatnu funkciju i kolone u isto vreme", -"Broj kolona ne odgovara broju vrednosti", -"Ime '%-.100s' je predugaèko", -"Duplirano ime kolone '%-.64s'", -"Duplirano ime kljuèa '%-.64s'", -"Dupliran unos '%-.64s' za kljuè '%d'", -"Pogrešan naziv kolone za kolonu '%-.64s'", -"'%s' u iskazu '%-.80s' na liniji %d", -"Upit je bio prazan", -"Tabela ili alias nisu bili jedinstveni: '%-.64s'", -"Loša default vrednost za '%-.64s'", -"Definisani višestruki primarni kljuèevi", -"Navedeno je previše kljuèeva. Maksimum %d kljuèeva je dozvoljeno", -"Navedeno je previše delova kljuèa. Maksimum %d delova je dozvoljeno", -"Navedeni kljuè je predug. Maksimalna dužina kljuèa je %d", -"Kljuèna kolona '%-.64s' ne postoji u tabeli", -"BLOB kolona '%-.64s' ne može biti upotrebljena za navoðenje kljuèa sa tipom tabele koji se trenutno koristi", -"Previše podataka za kolonu '%-.64s' (maksimum je %d). Upotrebite BLOB polje", -"Pogrešna definicija tabele; U tabeli može postojati samo jedna 'AUTO' kolona i ona mora biti istovremeno definisana kao kolona kljuèa", -"%s: Spreman za konekcije\n", -"%s: Normalno gašenje\n", -"%s: Dobio signal %d. Prekidam!\n", -"%s: Gašenje završeno\n", -"%s: Usiljeno gašenje thread-a %ld koji pripada korisniku: '%-.32s'\n", -"Ne mogu da kreiram IP socket", -"Tabela '%-.64s' nema isti indeks kao onaj upotrebljen pri komandi 'CREATE INDEX'. Napravite tabelu ponovo", -"Argument separatora polja nije ono što se oèekivalo. Proverite uputstvo MySQL server-a", -"Ne možete koristiti fiksnu velièinu sloga kada imate BLOB polja. 
Molim koristite 'fields terminated by' opciju.", -"File '%-.64s' mora biti u direktorijumu gde su file-ovi baze i mora imati odgovarajuća prava pristupa", -"File '%-.80s' već postoji", -"Slogova: %ld Izbrisano: %ld Preskočeno: %ld Upozorenja: %ld", -"Slogova: %ld Duplikata: %ld", -"Pogrešan pod-ključ dela ključa. Upotrebljeni deo ključa nije string, upotrebljena dužina je veća od dela ključa ili handler tabela ne podržava jedinstvene pod-ključeve", -"Ne možete da izbrišete sve kolone pomoću komande 'ALTER TABLE'. Upotrebite komandu 'DROP TABLE' ako želite to da uradite", -"Ne mogu da izvršim komandu drop 'DROP' na '%-.64s'. Proverite da li ta kolona (odnosno ključ) postoji", -"Slogova: %ld Duplikata: %ld Upozorenja: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Nepoznat thread identifikator: %lu", -"Vi niste vlasnik thread-a %lu", -"Nema upotrebljenih tabela", -"Previše string-ova za kolonu '%-.64s' i komandu 'SET'", -"Ne mogu da generišem jedinstveno ime log-file-a: '%-.64s.(1-999)'\n", -"Tabela '%-.64s' je zaključana READ lock-om; iz nje se može samo čitati ali u nju se ne može pisati", -"Tabela '%-.64s' nije bila zaključana komandom 'LOCK TABLES'", -"BLOB kolona '%-.64s' ne može imati default vrednost", -"Pogrešno ime baze '%-.100s'", -"Pogrešno ime tabele '%-.100s'", -"Komanda 'SELECT' će ispitati previše slogova i potrošiti previše vremena. Proverite vaš 'WHERE' filter i upotrebite 'SET OPTION SQL_BIG_SELECTS=1' ako želite baš ovakvu komandu", -"Nepoznata greška", -"Nepoznata procedura '%-.64s'", -"Pogrešan broj parametara za proceduru '%-.64s'", -"Pogrešni parametri prosleđeni proceduri '%-.64s'", -"Nepoznata tabela '%-.64s' u '%-.32s'", -"Kolona '%-.64s' je navedena dva puta", -"Pogrešna upotreba 'GROUP' funkcije", -"Tabela '%-.64s' koristi ekstenziju koje ne postoji u ovoj verziji MySQL-a", -"Tabela mora imati najmanje jednu kolonu", -"Tabela '%-.64s' je popunjena do kraja", -"Nepoznati karakter-set: '%-.64s'", -"Previše tabela. MySQL može upotrebiti maksimum %d tabela pri 'JOIN' operaciji", -"Previše kolona", -"Prevelik slog. Maksimalna veličina sloga, ne računajući BLOB polja, je %d. Trebali bi da promenite tip nekih polja u BLOB", -"Prepisivanje thread stack-a: Upotrebljeno: %ld od %ld stack memorije. Upotrebite 'mysqld -O thread_stack=#' da navedete veći stack ako je potrebno", -"Unakrsna zavisnost pronađena u komandi 'OUTER JOIN'. Istražite vaše 'ON' uslove", -"Kolona '%-.64s' je upotrebljena kao 'UNIQUE' ili 'INDEX' ali nije definisana kao 'NOT NULL'", -"Ne mogu da učitam funkciju '%-.64s'", -"Ne mogu da inicijalizujem funkciju '%-.64s'; %-.80s", -"Ne postoje dozvoljene putanje do share-ovane biblioteke", -"Funkcija '%-.64s' već postoji", -"Ne mogu da otvorim share-ovanu biblioteku '%-.64s' (errno: %d %-.64s)", -"Ne mogu da pronadjem funkciju '%-.64s' u biblioteci", -"Funkcija '%-.64s' nije definisana", -"Host '%-.64s' je blokiran zbog previše grešaka u konekciji. Možete ga odblokirati pomoću komande 'mysqladmin flush-hosts'", -"Host-u '%-.64s' nije dozvoljeno da se konektuje na ovaj MySQL server", -"Vi koristite MySQL kao anonimni korisnik a anonimnim korisnicima nije dozvoljeno da menjaju lozinke", -"Morate imati privilegije da možete da update-ujete određene tabele ako želite da menjate lozinke za druge korisnike", -"Ne mogu da pronađem odgovarajući slog u 'user' tabeli", -"Odgovarajućih slogova: %ld Promenjeno: %ld Upozorenja: %ld", -"Ne mogu da kreiram novi thread (errno %d).
Ako imate još slobodne memorije, trebali biste da pogledate u priručniku da li je ovo specifična greška vašeg operativnog sistema", -"Broj kolona ne odgovara broju vrednosti u slogu %ld", -"Ne mogu da ponovo otvorim tabelu '%-.64s'", -"Pogrešna upotreba vrednosti NULL", -"Funkcija regexp je vratila grešku '%-.64s'", -"Upotreba agregatnih funkcija (MIN(),MAX(),COUNT()...) bez 'GROUP' kolona je pogrešna ako ne postoji 'GROUP BY' iskaz", -"Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s'", -"%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za tabelu '%-.64s'", -"%-.16s komanda zabranjena za korisnika '%-.32s'@'%-.64s' za kolonu '%-.64s' iz tabele '%-.64s'", -"Pogrešna 'GRANT' odnosno 'REVOKE' komanda. Molim Vas pogledajte u priručniku koje vrednosti mogu biti upotrebljene.", -"Argument 'host' ili 'korisnik' prosleđen komandi 'GRANT' je predugačak", -"Tabela '%-.64s.%-.64s' ne postoji", -"Ne postoji odobrenje za pristup korisniku '%-.32s' na host-u '%-.64s' tabeli '%-.64s'", -"Upotrebljena komanda nije dozvoljena sa ovom verzijom MySQL servera", -"Imate grešku u vašoj SQL sintaksi", -"Prolongirani 'INSERT' thread nije mogao da dobije traženo zaključavanje tabele '%-.64s'", -"Previše prolongiranih thread-ova je u upotrebi", -"Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' (%-.64s)", -"Primio sam mrežni paket veći od definisane vrednosti 'max_allowed_packet'", -"Greška pri čitanju podataka sa pipe-a", -"Greška pri izvršavanju funkcije fcntl()", -"Primio sam mrežne pakete van reda", -"Ne mogu da dekompresujem mrežne pakete", -"Greška pri primanju mrežnih paketa", -"Vremenski limit za čitanje mrežnih paketa je istekao", -"Greška pri slanju mrežnih paketa", -"Vremenski limit za slanje mrežnih paketa je istekao", -"Rezultujuči string je duži nego što to dozvoljava parametar servera 'max_allowed_packet'", -"Iskorišteni tip tabele ne podržava kolone tipa 'BLOB' odnosno 'TEXT'", -"Iskorišteni tip tabele ne podržava kolone tipa 'AUTO_INCREMENT'", -"Komanda 'INSERT DELAYED' ne može biti iskorištena u tabeli '%-.64s', zbog toga što je zaključana komandom 'LOCK TABLES'", -"Pogrešno ime kolone '%-.100s'", -"Handler tabele ne može da indeksira kolonu '%-.64s'", -"Tabele iskorištene u 'MERGE' tabeli nisu definisane na isti način", -"Zbog provere jedinstvenosti ne mogu da upišem podatke u tabelu '%-.64s'", -"BLOB kolona '%-.64s' je upotrebljena u specifikaciji ključa bez navođenja dužine ključa", -"Svi delovi primarnog ključa moraju biti različiti od NULL; Ako Vam ipak treba NULL vrednost u ključu, upotrebite 'UNIQUE'", -"Rezultat je sačinjen od više slogova", -"Ovaj tip tabele zahteva da imate definisan primarni ključ", -"Ova verzija MySQL servera nije kompajlirana sa podrškom za RAID uređaje", -"Vi koristite safe update mod servera, a probali ste da promenite podatke bez 'WHERE' komande koja koristi kolonu ključa", -"Ključ '%-.64s' ne postoji u tabeli '%-.64s'", -"Ne mogu da otvorim tabelu", -"Handler za ovu tabelu ne dozvoljava 'check' odnosno 'repair' komande", -"Nije Vam dozvoljeno da izvršite ovu komandu u transakciji", -"Greška %d za vreme izvršavanja komande 'COMMIT'", -"Greška %d za vreme izvršavanja komande 'ROLLBACK'", -"Greška %d za vreme izvršavanja komande 'FLUSH_LOGS'", -"Greška %d za vreme izvršavanja komande 'CHECKPOINT'", -"Prekinuta konekcija broj %ld ka bazi: '%-.64s' korisnik je bio: '%-.32s' a host: `%-.64s' (%-.64s)", -"Handler tabele ne podržava binarni dump tabele", -"Binarni log file zatvoren, ne mogu da izvršim komandu 'RESET
MASTER'", -"Izgradnja indeksa dump-ovane tabele '%-.64s' nije uspela", -"Greška iz glavnog servera '%-.64s' u klasteru", -"Greška u primanju mrežnih paketa sa glavnog servera u klasteru", -"Greška u slanju mrežnih paketa na glavni server u klasteru", -"Ne mogu da pronađem 'FULLTEXT' indeks koli odgovara listi kolona", -"Ne mogu da izvršim datu komandu zbog toga što su tabele zaključane ili je transakcija u toku", -"Nepoznata sistemska promenljiva '%-.64s'", -"Tabela '%-.64s' je markirana kao oštećena i trebala bi biti popravljena", -"Tabela '%-.64s' je markirana kao oštećena, a zadnja (automatska?) popravka je bila neuspela", -"Upozorenje: Neke izmenjene tabele ne podržavaju komandu 'ROLLBACK'", -"Transakcija sa više stavki zahtevala je više od 'max_binlog_cache_size' bajtova skladišnog prostora. Uvećajte ovu promenljivu servera i pokušajte ponovo', -"Ova operacija ne može biti izvršena dok je aktivan podređeni server. Zadajte prvo komandu 'STOP SLAVE' da zaustavite podređeni server.", -"Ova operacija zahteva da je aktivan podređeni server. Konfigurišite prvo podređeni server i onda izvršite komandu 'START SLAVE'", -"Server nije konfigurisan kao podređeni server, ispravite konfiguracioni file ili na njemu izvršite komandu 'CHANGE MASTER TO'", -"Nisam mogao da inicijalizujem informacionu strukturu glavnog servera, proverite da li imam privilegije potrebne za pristup file-u 'master.info'", -"Nisam mogao da startujem thread za podređeni server, proverite sistemske resurse", -"Korisnik %-.64s već ima više aktivnih konekcija nego što je to određeno 'max_user_connections' promenljivom", -"Možete upotrebiti samo konstantan iskaz sa komandom 'SET'", -"Vremenski limit za zaključavanje tabele je istekao; Probajte da ponovo startujete transakciju", -"Broj totalnih zaključavanja tabele premašuje veličinu tabele zaključavanja", -"Zaključavanja izmena ne mogu biti realizovana sve dok traje 'READ UNCOMMITTED' transakcija", -"Komanda 'DROP DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka", -"Komanda 'CREATE DATABASE' nije dozvoljena dok thread globalno zaključava čitanje podataka", -"Pogrešni argumenti prosleđeni na %s", -"Korisniku '%-.32s'@'%-.64s' nije dozvoljeno da kreira nove korisnike", -"Pogrešna definicija tabele; sve 'MERGE' tabele moraju biti u istoj bazi podataka", -"Unakrsno zaključavanje pronađeno kada sam pokušao da dobijem pravo na zaključavanje; Probajte da restartujete transakciju", -"Upotrebljeni tip tabele ne podržava 'FULLTEXT' indekse", -"Ne mogu da dodam proveru spoljnog ključa", -"Ne mogu da dodam slog: provera spoljnog ključa je neuspela", -"Ne mogu da izbrišem roditeljski slog: provera spoljnog ključa je neuspela", -"Greška pri povezivanju sa glavnim serverom u klasteru: %-.128s", -"Greška pri izvršavanju upita na glavnom serveru u klasteru: %-.128s", -"Greška pri izvršavanju komande %s: %-.128s", -"Pogrešna upotreba %s i %s", -"Upotrebljene 'SELECT' komande adresiraju različit broj kolona", -"Ne mogu da izvršim upit zbog toga što imate zaključavanja čitanja podataka u konfliktu", -"Mešanje tabela koje podržavaju transakcije i onih koje ne podržavaju transakcije je isključeno", -"Opcija '%s' je upotrebljena dva puta u istom iskazu", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a
default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%ld) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updatable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working" -"The MySQL server is running with the %s option so it cannot execute this statement" -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/slovak/errmsg.txt b/sql/share/slovak/errmsg.txt deleted file mode 100644 index 12c3eb2b6af..00000000000 --- a/sql/share/slovak/errmsg.txt +++ /dev/null @@ -1,329 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Translated from both E n g l i s h & C z e c h error messages - by steve: (billik@sun.uniag.sk). - Encoding: ISO LATIN-8852-2 - Server version: 3.21.25-gamma - Date: Streda 11. 
November 1998 20:58:15 -*/ - -character-set=latin2 - -"hashchk", -"isamchk", -"NIE", -"Áno", -"Nemôžem vytvoriť súbor '%-.64s' (chybový kód: %d)", -"Nemôžem vytvoriť tabuľku '%-.64s' (chybový kód: %d)", -"Nemôžem vytvoriť databázu '%-.64s' (chybový kód: %d)", -"Nemôžem vytvoriť databázu '%-.64s'; databáza existuje", -"Nemôžem zmazať databázu '%-.64s'; databáza neexistuje", -"Chyba pri mazaní databázy (nemôžem zmazať '%-.64s', chybový kód: %d)", -"Chyba pri mazaní databázy (nemôžem vymazať adresár '%-.64s', chybový kód: %d)", -"Chyba pri mazaní '%-.64s' (chybový kód: %d)", -"Nemôžem čítať záznam v systémovej tabuľke", -"Nemôžem zistiť stav '%-.64s' (chybový kód: %d)", -"Nemôžem zistiť pracovný adresár (chybový kód: %d)", -"Nemôžem zamknúť súbor (chybový kód: %d)", -"Nemôžem otvoriť súbor: '%-.64s' (chybový kód: %d)", -"Nemôžem nájsť súbor: '%-.64s' (chybový kód: %d)", -"Nemôžem čítať adresár '%-.64s' (chybový kód: %d)", -"Nemôžem vojsť do adresára '%-.64s' (chybový kód: %d)", -"Záznam bol zmenený od posledného čítania v tabuľke '%-.64s'", -"Disk je plný (%s), čakám na uvoľnenie miesta...", -"Nemôžem zapísať, duplikát kľúča v tabuľke '%-.64s'", -"Chyba pri zatváraní '%-.64s' (chybový kód: %d)", -"Chyba pri čítaní súboru '%-.64s' (chybový kód: %d)", -"Chyba pri premenovávaní '%-.64s' na '%-.64s' (chybový kód: %d)", -"Chyba pri zápise do súboru '%-.64s' (chybový kód: %d)", -"'%-.64s' je zamknutý proti zmenám", -"Triedenie prerušené", -"Pohľad '%-.64s' neexistuje pre '%-.64s'", -"Obsluha tabuľky vrátila chybu %d", -"Obsluha tabuľky '%-.64s' nemá tento parameter", -"Nemôžem nájsť záznam v '%-.64s'", -"Nesprávna informácia v súbore: '%-.64s'", -"Nesprávny kľúč pre tabuľku '%-.64s'; pokúste sa ho opraviť", -"Starý kľúčový súbor pre '%-.64s'; opravte ho!", -"'%-.64s' is čítať only", -"Málo pamäti. Reštartujte daemona a skúste znova (je potrebných %d bytov)", -"Málo pamäti pre triedenie, zvýšte veľkosť triediaceho bufferu", -"Neočakávaný koniec súboru pri čítaní '%-.64s' (chybový kód: %d)", -"Príliš mnoho spojení", -"Málo miesta-pamäti pre vlákno", -"Nemôžem zistiť meno hostiteľa pre vašu adresu", -"Chyba pri nadväzovaní spojenia", -"Zakázaný prístup pre užívateľa: '%-.32s'@'%-.64s' k databázi '%-.64s'", -"Zakázaný prístup pre užívateľa: '%-.32s'@'%-.64s' (použitie hesla: %s)", -"Nebola vybraná databáza", -"Neznámy príkaz", -"Pole '%-.64s' nemôže byť null", -"Neznáma databáza '%-.64s'", -"Tabuľka '%-.64s' už existuje", -"Neznáma tabuľka '%-.64s'", -"Pole: '%-.64s' v %-.64s je nejasné", -"Prebieha ukončovanie práce servera", -"Neznáme pole '%-.64s' v '%-.64s'", -"Použité '%-.64s' nebolo v 'group by'", -"Nemôžem použiť 'group' na '%-.64s'", -"Príkaz obsahuje zároveň funkciu 'sum' a poľa", -"Počet polí nezodpovedá zadanej hodnote", -"Meno identifikátora '%-.100s' je príliš dlhé", -"Opakované meno poľa '%-.64s'", -"Opakované meno kľúča '%-.64s'", -"Opakovaný kľúč '%-.64s' (číslo kľúča %d)", -"Chyba v špecifikácii poľa '%-.64s'", -"%s blízko '%-.80s' na riadku %d", -"Výsledok požiadavky bol prázdny", -"Nie jednoznačná tabuľka/alias: '%-.64s'", -"Chybná implicitná hodnota pre '%-.64s'", -"Zadefinovaných viac primárnych kľúčov", -"Zadaných ríliš veľa kľúčov. Najviac %d kľúčov je povolených", -"Zadaných ríliš veľa častí kľúčov. Je povolených najviac %d častí", -"Zadaný kľúč je príliš dlhý, najväčšia dĺžka kľúča je %d", -"Kľúčový stĺpec '%-.64s' v tabuľke neexistuje", -"Blob pole '%-.64s' nemôže byť použité ako kľúč", -"Príliš veľká dĺžka pre pole '%-.64s' (maximum = %d).
Použite BLOB", -"Môžete mať iba jedno AUTO pole a to musí byť definované ako kľúč", -"%s: pripravený na spojenie", -"%s: normálne ukončenie\n", -"%s: prijatý signál %d, ukončenie (Abort)!\n", -"%s: práca ukončená\n", -"%s: násilné ukončenie vlákna %ld užívateľa '%-.64s'\n", -"Nemôžem vytvoriť IP socket", -"Tabuľka '%-.64s' nemá index zodpovedajúci CREATE INDEX. Vytvorte tabulku znova", -"Argument oddeľovač polí nezodpovedá požiadavkám. Skontrolujte v manuáli", -"Nie je možné použiť fixnú dĺžku s BLOBom. Použite 'fields terminated by'.", -"Súbor '%-.64s' musí byť v adresári databázy, alebo čitateľný pre všetkých", -"Súbor '%-.64s' už existuje", -"Záznamov: %ld Zmazaných: %ld Preskočených: %ld Varovania: %ld", -"Záznamov: %ld Opakovaných: %ld", -"Incorrect sub part key; the used key part isn't a string or the used length is longer than the key part", -"One nemôžem zmazať all fields with ALTER TABLE; use DROP TABLE instead", -"Nemôžem zrušiť (DROP) '%-.64s'. Skontrolujte, či neexistujú záznamy/kľúče", -"Záznamov: %ld Opakovaných: %ld Varovania: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Neznáma identifikácia vlákna: %lu", -"Nie ste vlastníkom vlákna %lu", -"Nie je použitá žiadna tabuľka", -"Príliš mnoho reťazcov pre pole %-.64s a SET", -"Nemôžem vytvoriť unikátne meno log-súboru %-.64s.(1-999)\n", -"Tabuľka '%-.64s' bola zamknutá s READ a nemôže byť zmenená", -"Tabuľka '%-.64s' nebola zamknutá s LOCK TABLES", -"Pole BLOB '%-.64s' nemôže mať implicitnú hodnotu", -"Neprípustné meno databázy '%-.100s'", -"Neprípustné meno tabuľky '%-.100s'", -"Zadaná požiadavka SELECT by prechádzala príliš mnoho záznamov a trvala by príliš dlho. Skontrolujte tvar WHERE a ak je v poriadku, použite SET SQL_BIG_SELECTS=1", -"Neznámá chyba", -"Neznámá procedúra '%-.64s'", -"Chybný počet parametrov procedúry '%-.64s'", -"Chybné parametre procedúry '%-.64s'", -"Neznáma tabuľka '%-.64s' v %s", -"Pole '%-.64s' je zadané dvakrát", -"Nesprávne použitie funkcie GROUP", -"Tabuľka '%-.64s' používa rozšírenie, ktoré v tejto verzii MySQL nie je", -"Tabuľka musí mať aspoň 1 pole", -"Tabuľka '%-.64s' je plná", -"Neznáma znaková sada: '%-.64s'", -"Príliš mnoho tabuliek. MySQL môže použiť len %d v JOIN-e", -"Príliš mnoho polí", -"Riadok je príliš veľký. Maximálna veľkosť riadku, okrem 'BLOB', je %d. Musíte zmeniť niektoré položky na BLOB", -"Pretečenie zásobníku vlákna: použité: %ld z %ld. Použite 'mysqld -O thread_stack=#' k zadaniu väčšieho zásobníka", -"V OUTER JOIN bol nájdený krížový odkaz.
Skontrolujte podmienky ON", -"Pole '%-.64s' je použité s UNIQUE alebo INDEX, ale nie je zadefinované ako NOT NULL", -"Nemôžem načítať funkciu '%-.64s'", -"Nemôžem inicializovať funkciu '%-.64s'; %-.80s", -"Neprípustné žiadne cesty k zdieľanej knižnici", -"Funkcia '%-.64s' už existuje", -"Nemôžem otvoriť zdieľanú knižnicu '%-.64s' (chybový kód: %d %s)", -"Nemôžem nájsť funkciu '%-.64s' v knižnici'", -"Funkcia '%-.64s' nie je definovaná", -"Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'", -"Host '%-.64s' is not allowed to connect to this MySQL server", -"You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords", -"You must have privileges to update tables in the mysql database to be able to change passwords for others", -"Can't find any matching row in the user table", -"Rows matched: %ld Changed: %ld Warnings: %ld", -"Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug", -"Column count doesn't match value count at row %ld", -"Can't reopen table: '%-.64s", -"Invalid use of NULL value", -"Got error '%-.64s' from regexp", -"Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause", -"There is no such grant defined for user '%-.32s' on host '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for table '%-.64s'", -"%-.16s command denied to user '%-.32s'@'%-.64s' for column '%-.64s' in table '%-.64s'", -"Illegal GRANT/REVOKE command; please consult the manual to see which privleges can be used.", -"The host or user argument to GRANT is too long", -"Table '%-.64s.%s' doesn't exist", -"There is no such grant defined for user '%-.32s' on host '%-.64s' on table '%-.64s'", -"The used command is not allowed with this MySQL version", -"Something is wrong in your syntax", -"Delayed insert thread couldn't get requested lock for table %-.64s", -"Too many delayed threads in use", -"Aborted connection %ld to db: '%-.64s' user: '%-.64s' (%s)", -"Got a packet bigger than 'max_allowed_packet' bytes", -"Got a read error from the connection pipe", -"Got an error from fcntl()", -"Got packets out of order", -"Couldn't uncompress communication packet", -"Got an error reading communication packets", -"Got timeout reading communication packets", -"Got an error writing communication packets", -"Got timeout writing communication packets", -"Result string is longer than 'max_allowed_packet' bytes", -"The used table type doesn't support BLOB/TEXT columns", -"The used table type doesn't support AUTO_INCREMENT columns", -"INSERT DELAYED can't be used with table '%-.64s', because it is locked with LOCK TABLES", -"Incorrect column name '%-.100s'", -"The used table handler can't index column '%-.64s'", -"All tables in the MERGE table are not defined identically", -"Can't write, because of unique constraint, to table '%-.64s'", -"BLOB column '%-.64s' used in key specification without a key length", -"All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead", -"Result consisted of more than one row", -"This table type requires a primary key", -"This version of MySQL is not compiled with RAID support", -"You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column", -"Key '%-.64s' doesn't exist in table '%-.64s'", -"Can't open table", -"The handler for the table doesn't support %s", -"You are not allowed to execute
this command in a transaction", -"Got error %d during COMMIT", -"Got error %d during ROLLBACK", -"Got error %d during FLUSH_LOGS", -"Got error %d during CHECKPOINT", -"Aborted connection %ld to db: '%-.64s' user: '%-.32s' host: `%-.64s' (%-.64s)", -"The handler for the table does not support binary table dump", -"Binlog closed while trying to FLUSH MASTER", -"Failed rebuilding the index of dumped table '%-.64s'", -"Error from master: '%-.64s'", -"Net error reading from master", -"Net error writing to master", -"Can't find FULLTEXT index matching the column list", -"Can't execute the given command because you have active locked tables or an active transaction", -"Unknown system variable '%-.64s'", -"Table '%-.64s' is marked as crashed and should be repaired", -"Table '%-.64s' is marked as crashed and last (automatic?) repair failed", -"Some non-transactional changed tables couldn't be rolled back", -"Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again", -"This operation cannot be performed with a running slave, run STOP SLAVE first", -"This operation requires a running slave, configure slave and do START SLAVE", -"The server is not configured as slave, fix in config file or with CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"Could not create slave thread, check system resources", -"User %-.64s has already more than 'max_user_connections' active connections", -"You may only use constant expressions with SET", -"Lock wait timeout exceeded; try restarting transaction", -"The total number of locks exceeds the lock table size", -"Update locks cannot be acquired during a READ UNCOMMITTED transaction", -"DROP DATABASE not allowed while thread is holding global read lock", -"CREATE DATABASE not allowed while thread is holding global read lock", -"Incorrect arguments to %s", -"'%-.32s'@'%-.64s' is not allowed to create new users", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"The used table type doesn't support FULLTEXT indexes", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a 
%s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"Operand should contain %d column(s)", -"Subquery returns more than 1 row", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"Cyclic reference on subqueries", -"Converting column '%s' from %s to %s", -"Reference '%-.64s' not supported (%s)", -"Every derived table must have its own alias", -"Select %u was reduced during optimization", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"Query cache failed to set size %lu, new query cache size is %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"The target table %-.100s of the %s is not updateable", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/spanish/errmsg.txt b/sql/share/spanish/errmsg.txt deleted file mode 100644 index c5b4fd34eca..00000000000 --- a/sql/share/spanish/errmsg.txt +++ /dev/null @@ -1,325 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - Traduccion por Miguel Angel Fernandez Roiz -- LoboCom Sistemas, s.l. - From June 28, 2001 translated by Miguel Solorzano miguel@mysql.com */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NO", -"SI", -"No puedo crear archivo '%-.64s' (Error: %d)", -"No puedo crear tabla '%-.64s' (Error: %d)", -"No puedo crear base de datos '%-.64s' (Error: %d)", -"No puedo crear base de datos '%-.64s'; la base de datos ya existe", -"No puedo eliminar base de datos '%-.64s'; la base de datos no existe", -"Error eliminando la base de datos(no puedo borrar '%-.64s', error %d)", -"Error eliminando la base de datos (No puedo borrar directorio '%-.64s', error %d)", -"Error en el borrado de '%-.64s' (Error: %d)", -"No puedo leer el registro en la tabla del sistema", -"No puedo obtener el estado de '%-.64s' (Error: %d)", -"No puedo acceder al directorio (Error: %d)", -"No puedo bloquear archivo: (Error: %d)", -"No puedo abrir archivo: '%-.64s' (Error: %d)", -"No puedo encontrar archivo: '%-.64s' (Error: %d)", -"No puedo leer el directorio de '%-.64s' (Error: %d)", -"No puedo cambiar al directorio de '%-.64s' (Error: %d)", -"El registro ha cambiado desde la ultima lectura de la tabla '%-.64s'", -"Disco lleno (%s). 
Esperando para que se libere algo de espacio...", -"No puedo escribir, clave duplicada en la tabla '%-.64s'", -"Error en el cierre de '%-.64s' (Error: %d)", -"Error leyendo el fichero '%-.64s' (Error: %d)", -"Error en el renombrado de '%-.64s' a '%-.64s' (Error: %d)", -"Error escribiendo el archivo '%-.64s' (Error: %d)", -"'%-.64s' esta bloqueado contra cambios", -"Ordeancion cancelada", -"La vista '%-.64s' no existe para '%-.64s'", -"Error %d desde el manejador de la tabla", -"El manejador de la tabla de '%-.64s' no tiene esta opcion", -"No puedo encontrar el registro en '%-.64s'", -"Informacion erronea en el archivo: '%-.64s'", -"Clave de archivo erronea para la tabla: '%-.64s'; intente repararlo", -"Clave de archivo antigua para la tabla '%-.64s'; reparelo!", -"'%-.64s' es de solo lectura", -"Memoria insuficiente. Reinicie el demonio e intentelo otra vez (necesita %d bytes)", -"Memoria de ordenacion insuficiente. Incremente el tamano del buffer de ordenacion", -"Inesperado fin de ficheroU mientras leiamos el archivo '%-.64s' (Error: %d)", -"Demasiadas conexiones", -"Memoria/espacio de tranpaso insuficiente", -"No puedo obtener el nombre de maquina de tu direccion", -"Protocolo erroneo", -"Acceso negado para usuario: '%-.32s'@'%-.64s' para la base de datos '%-.64s'", -"Acceso negado para usuario: '%-.32s'@'%-.64s' (Usando clave: %s)", -"Base de datos no seleccionada", -"Comando desconocido", -"La columna '%-.64s' no puede ser nula", -"Base de datos desconocida '%-.64s'", -"La tabla '%-.64s' ya existe", -"Tabla '%-.64s' desconocida", -"La columna: '%-.64s' en %s es ambigua", -"Desconexion de servidor en proceso", -"La columna '%-.64s' en %s es desconocida", -"Usado '%-.64s' el cual no esta group by", -"No puedo agrupar por '%-.64s'", -"El estamento tiene funciones de suma y columnas en el mismo estamento", -"La columna con count no tiene valores para contar", -"El nombre del identificador '%-.64s' es demasiado grande", -"Nombre de columna duplicado '%-.64s'", -"Nombre de clave duplicado '%-.64s'", -"Entrada duplicada '%-.64s' para la clave %d", -"Especificador de columna erroneo para la columna '%-.64s'", -"%s cerca '%-.64s' en la linea %d", -"La query estaba vacia", -"Tabla/alias: '%-.64s' es no unica", -"Valor por defecto invalido para '%-.64s'", -"Multiples claves primarias definidas", -"Demasiadas claves primarias declaradas. Un maximo de %d claves son permitidas", -"Demasiadas partes de clave declaradas. Un maximo de %d partes son permitidas", -"Declaracion de clave demasiado larga. La maxima longitud de clave es %d", -"La columna clave '%-.64s' no existe en la tabla", -"La columna Blob '%-.64s' no puede ser usada en una declaracion de clave", -"Longitud de columna demasiado grande para la columna '%-.64s' (maximo = %d).Usar BLOB en su lugar", -"Puede ser solamente un campo automatico y este debe ser definido como una clave", -"%s: preparado para conexiones", -"%s: Apagado normal\n", -"%s: Recibiendo signal %d. Abortando!\n", -"%s: Apagado completado\n", -"%s: Forzando a cerrar el thread %ld usuario: '%-.64s'\n", -"No puedo crear IP socket", -"La tabla '%-.64s' no tiene indice como el usado en CREATE INDEX. Crea de nuevo la tabla", -"Los separadores de argumentos del campo no son los especificados. Comprueba el manual", -"No puedes usar longitudes de filas fijos con BLOBs. 
Por favor usa 'campos terminados por '.", -"El archivo '%-.64s' debe estar en el directorio de la base de datos o ser de lectura por todos", -"El archivo '%-.64s' ya existe", -"Registros: %ld Borrados: %ld Saltados: %ld Peligros: %ld", -"Registros: %ld Duplicados: %ld", -"Parte de la clave es erronea. Una parte de la clave no es una cadena o la longitud usada es tan grande como la parte de la clave", -"No puede borrar todos los campos con ALTER TABLE. Usa DROP TABLE para hacerlo", -"No puedo ELIMINAR '%-.64s'. compuebe que el campo/clave existe", -"Registros: %ld Duplicados: %ld Peligros: %ld", -"You can't specify target table '%-.64s' for update in FROM clause", -"Identificador del thread: %lu desconocido", -"Tu no eres el propietario del thread%lu", -"No ha tablas usadas", -"Muchas strings para columna %s y SET", -"No puede crear un unico archivo log %s.(1-999)\n", -"Tabla '%-.64s' fue trabada con un READ lock y no puede ser actualizada", -"Tabla '%-.64s' no fue trabada con LOCK TABLES", -"Campo Blob '%-.64s' no puede tener valores patron", -"Nombre de base de datos ilegal '%-.64s'", -"Nombre de tabla ilegal '%-.64s'", -"El SELECT puede examinar muchos registros y probablemente con mucho tiempo. Verifique tu WHERE y usa SET SQL_BIG_SELECTS=1 si el SELECT esta correcto", -"Error desconocido", -"Procedimiento desconocido %s", -"Equivocado parametro count para procedimiento %s", -"Equivocados parametros para procedimiento %s", -"Tabla desconocida '%-.64s' in %s", -"Campo '%-.64s' especificado dos veces", -"Invalido uso de función en grupo", -"Tabla '%-.64s' usa una extensión que no existe en esta MySQL versión", -"Una tabla debe tener al menos 1 columna", -"La tabla '%-.64s' está llena", -"Juego de caracteres desconocido: '%-.64s'", -"Muchas tablas. MySQL solamente puede usar %d tablas en un join", -"Muchos campos", -"Tamaño de línea muy grande. Máximo tamaño de línea, no contando blob, es %d. Tu tienes que cambiar algunos campos para blob", -"Sobrecarga de la pila de thread: Usada: %ld de una %ld pila. Use 'mysqld -O thread_stack=#' para especificar una mayor pila si necesario", -"Dependencia cruzada encontrada en OUTER JOIN; examine su condición ON", -"Columna '%-.32s' es usada con UNIQUE o INDEX pero no está definida como NOT NULL", -"No puedo cargar función '%-.64s'", -"No puedo inicializar función '%-.64s'; %-.80s", -"No pasos permitidos para librarias conjugadas", -"Función '%-.64s' ya existe", -"No puedo abrir libraria conjugada '%-.64s' (errno: %d %s)", -"No puedo encontrar función '%-.64s' en libraria'", -"Función '%-.64s' no está definida", -"Servidor '%-.64s' está bloqueado por muchos errores de conexión. Desbloquear con 'mysqladmin flush-hosts'", -"Servidor '%-.64s' no está permitido para conectar con este servidor MySQL", -"Tu estás usando MySQL como un usuario anonimo y usuarios anonimos no tienen permiso para cambiar las claves", -"Tu debes de tener permiso para actualizar tablas en la base de datos mysql para cambiar las claves para otros", -"No puedo encontrar una línea correponsdiente en la tabla user", -"Líneas correspondientes: %ld Cambiadas: %ld Avisos: %ld", -"No puedo crear un nuevo thread (errno %d). Si tu está con falta de memoria disponible, tu puedes consultar el Manual para posibles problemas con SO", -"El número de columnas no corresponde al número en la línea %ld", -"No puedo reabrir tabla: '%-.64s", -"Invalido uso de valor NULL", -"Obtenido error '%-.64s' de regexp", -"Mezcla de columnas GROUP (MIN(),MAX(),COUNT()...) 
con no GROUP columnas es ilegal si no hat la clausula GROUP BY", -"No existe permiso definido para usuario '%-.32s' en el servidor '%-.64s'", -"%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para tabla '%-.64s'", -"%-.16s comando negado para usuario: '%-.32s'@'%-.64s' para columna '%-.64s' en la tabla '%-.64s'", -"Ilegal comando GRANT/REVOKE. Por favor consulte el manual para cuales permisos pueden ser usados.", -"El argumento para servidor o usuario para GRANT es demasiado grande", -"Tabla '%-.64s.%s' no existe", -"No existe tal permiso definido para usuario '%-.32s' en el servidor '%-.64s' en la tabla '%-.64s'", -"El comando usado no es permitido con esta versión de MySQL", -"Algo está equivocado en su sintax", -"Thread de inserción retarda no pudiendo bloquear para la tabla %-.64s", -"Muchos threads retardados en uso", -"Conexión abortada %ld para db: '%-.64s' usuario: '%-.64s' (%s)", -"Obtenido un paquete mayor que 'max_allowed_packet'", -"Obtenido un error de lectura de la conexión pipe", -"Obtenido un error de fcntl()", -"Obtenido paquetes desordenados", -"No puedo descomprimir paquetes de comunicación", -"Obtenido un error leyendo paquetes de comunicación", -"Obtenido timeout leyendo paquetes de comunicación", -"Obtenido un error de escribiendo paquetes de comunicación", -"Obtenido timeout escribiendo paquetes de comunicación", -"La string resultante es mayor que 'max_allowed_packet'", -"El tipo de tabla usada no permite soporte para columnas BLOB/TEXT", -"El tipo de tabla usada no permite soporte para columnas AUTO_INCREMENT", -"INSERT DELAYED no puede ser usado con tablas '%-.64s', porque esta bloqueada con LOCK TABLES", -"Incorrecto nombre de columna '%-.100s'", -"El manipulador de tabla usado no puede indexar columna '%-.64s'", -"Todas las tablas en la MERGE tabla no estan definidas identicamente", -"No puedo escribir, debido al único constraint, para tabla '%-.64s'", -"Columna BLOB column '%-.64s' usada en especificación de clave sin tamaño de la clave", -"Todas las partes de un PRIMARY KEY deben ser NOT NULL; Si necesitas NULL en una clave, use UNIQUE", -"Resultado compuesto de mas que una línea", -"Este tipo de tabla necesita de una primary key", -"Esta versión de MySQL no es compilada con soporte RAID", -"Tu estás usando modo de actualización segura y tentado actualizar una tabla sin un WHERE que usa una KEY columna", -"Clave '%-.64s' no existe en la tabla '%-.64s'", -"No puedo abrir tabla", -"El manipulador de la tabla no permite soporte para %s", -"No tienes el permiso para ejecutar este comando en una transición", -"Obtenido error %d durante COMMIT", -"Obtenido error %d durante ROLLBACK", -"Obtenido error %d durante FLUSH_LOGS", -"Obtenido error %d durante CHECKPOINT", -"Abortada conexión %ld para db: '%-.64s' usuario: '%-.32s' servidor: `%-.64s' (%-.64s)", -"El manipulador de tabla no soporta dump para tabla binaria", -"Binlog cerrado mientras tentaba el FLUSH MASTER", -"Falla reconstruyendo el indice de la tabla dumped '%-.64s'", -"Error del master: '%-.64s'", -"Error de red leyendo del master", -"Error de red escribiendo para el master", -"No puedo encontrar índice FULLTEXT correspondiendo a la lista de columnas", -"No puedo ejecutar el comando dado porque tienes tablas bloqueadas o una transición activa", -"Desconocida variable de sistema '%-.64s'", -"Tabla '%-.64s' está marcada como crashed y debe ser reparada", -"Tabla '%-.64s' está marcada como crashed y la última reparación (automactica?) 
falló", -"Aviso: Algunas tablas no transancionales no pueden tener rolled back", -"Multipla transición necesita mas que 'max_binlog_cache_size' bytes de almacenamiento. Aumente esta variable mysqld y tente de nuevo", -"Esta operación no puede ser hecha con el esclavo funcionando, primero use STOP SLAVE", -"Esta operación necesita el esclavo funcionando, configure esclavo y haga el START SLAVE", -"El servidor no está configurado como esclavo, edite el archivo config file o con CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"No puedo crear el thread esclavo, verifique recursos del sistema", -"Usario %-.64s ya tiene mas que 'max_user_connections' conexiones activas", -"Tu solo debes usar expresiones constantes con SET", -"Tiempo de bloqueo de espera excedido", -"El número total de bloqueos excede el tamaño de bloqueo de la tabla", -"Bloqueos de actualización no pueden ser adqueridos durante una transición READ UNCOMMITTED", -"DROP DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global", -"CREATE DATABASE no permitido mientras un thread está ejerciendo un bloqueo de lectura global", -"Argumentos errados para %s", -"'%-.32s`@`%-.64s` no es permitido para crear nuevos usuarios", -"Incorrecta definición de la tabla; Todas las tablas MERGE deben estar en el mismo banco de datos", -"Encontrado deadlock cuando tentando obtener el bloqueo; Tente recomenzar la transición", -"El tipo de tabla usada no soporta índices FULLTEXT", -"No puede adicionar clave extranjera constraint", -"No puede adicionar una línea hijo: falla de clave extranjera constraint", -"No puede deletar una línea padre: falla de clave extranjera constraint", -"Error de coneccion a master: %-.128s", -"Error executando el query en master: %-.128s", -"Error de %s: %-.128s", -"Equivocado uso de %s y %s", -"El comando SELECT usado tiene diferente número de columnas", -"No puedo ejecutar el query porque usted tiene conflicto de traba de lectura", -"Mezla de transancional y no-transancional tablas está deshabilitada", -"Opción '%s' usada dos veces en el comando", -"Usuario '%-.64s' ha excedido el recurso '%s' (actual valor: %ld)", -"Acceso negado. 
Usted necesita el privilegio %-.128s para esta operación", -"Variable '%-.64s' es una SESSION variable y no puede ser usada con SET GLOBAL", -"Variable '%-.64s' es una GLOBAL variable y no puede ser configurada con SET GLOBAL", -"Variable '%-.64s' no tiene un valor patrón", -"Variable '%-.64s' no puede ser configurada para el valor de '%-.64s'", -"Tipo de argumento equivocado para variable '%-.64s'", -"Variable '%-.64s' solamente puede ser configurada, no leída", -"Equivocado uso/colocación de '%s'", -"Esta versión de MySQL no soporta todavia '%s'", -"Recibió fatal error %d: '%-.128s' del master cuando leyendo datos del binary log", -"Slave SQL thread ignorado el query debido a las reglas de replicación-*-tabla", -"Variable '%-.64s' es una %s variable", -"Equivocada definición de llave extranjera para '%-.64s': %s", -"Referencia de llave y referencia de tabla no coinciden", -"Operando debe tener %d columna(s)", -"Subconsulta retorna mas que 1 línea", -"Desconocido preparado comando handler (%.*s) dado para %s", -"Base de datos Help está corrupto o no existe", -"Cíclica referencia en subconsultas", -"Convirtiendo columna '%s' de %s para %s", -"Referencia '%-.64s' no soportada (%s)", -"Cada tabla derivada debe tener su propio alias", -"Select %u fué reducido durante optimización", -"Tabla '%-.64s' de uno de los SELECT no puede ser usada en %-.32s", -"Cliente no soporta protocolo de autenticación solicitado por el servidor; considere actualizar el cliente MySQL", -"Todas las partes de una SPATIAL index deben ser NOT NULL", -"COLLATION '%s' no es válido para CHARACTER SET '%s'", -"Slave ya está funcionando", -"Slave ya fué parado", -"Tamaño demasiado grande para datos descomprimidos. El máximo tamaño es %d. (probablemente, extensión de datos descomprimidos fué corrompida)", -"ZLIB: No suficiente memoria", -"ZLIB: No suficiente espacio en el búfer de salida (probablemente, extensión de datos descomprimidos fué corrompida)", -"ZLIB: Dato de entrada fué corrompido", -"%d línea(s) fueron cortadas por GROUP_CONCAT()", -"Línea %ld no contiene datos para todas las columnas", -"Línea %ld fué truncada; La misma contine mas datos que las que existen en las columnas de entrada", -"Datos truncado, NULL suministrado para NOT NULL columna '%s' en la línea %ld", -"Datos truncados, fuera de gama para columna '%s' en la línea %ld", -"Datos truncados para columna '%s' en la línea %ld", -"Usando motor de almacenamiento %s para tabla '%s'", -"Ilegal mezcla de collations (%s,%s) y (%s,%s) para operación '%s'", -"No puede remover uno o mas de los usuarios solicitados", -"No puede revocar todos los privilegios, derecho para uno o mas de los usuarios solicitados", -"Ilegal mezcla de collations (%s,%s), (%s,%s), (%s,%s) para operación '%s'", -"Ilegal mezcla de collations para operación '%s'", -"Variable '%-.64s' no es una variable componente (No puede ser usada como XXXX.variable_name)", -"Collation desconocida: '%-.64s'", -"Parametros SSL en CHANGE MASTER son ignorados porque este slave MySQL fue compilado sin soporte SSL; pueden ser usados despues cuando el slave MySQL con SSL sea inicializado", -"Servidor está rodando en modo --secure-auth, pero '%s'@'%s' tiene clave en el antiguo formato; por favor cambie la clave para el nuevo formato", -"Campo o referencia '%-.64s%s%-.64s%s%-.64s' de SELECT #%d fue resolvido en SELECT #%d", -"Parametro equivocado o combinación de parametros para START SLAVE UNTIL", -"Es recomendado rodar con --skip-slave-start cuando haciendo replicación step-by-step con START SLAVE UNTIL, a 
menos que usted no esté seguro en caso de inesperada reinicialización del mysqld slave", -"SQL thread no es inicializado tal que opciones UNTIL son ignoradas", -"Nombre de índice incorrecto '%-.100s'", -"Nombre de catalog incorrecto '%-.100s'", -"Query cache fallada para configurar tamaño %lu, nuevo tamaño de query cache es %lu", -"Columna '%-.64s' no puede ser parte de FULLTEXT index", -"Desconocida key cache '%-.100s'", -"MySQL esta inicializado en modo --skip-name-resolve. Usted necesita reinicializarlo sin esta opción para este derecho funcionar", -"Desconocido motor de tabla '%s'", -"'%s' está desaprobado, use '%s' en su lugar", -"La tabla destino %-.100s del %s no es actualizable", -"El recurso '%s' fue deshabilitado; usted necesita construir MySQL con '%s' para tener eso funcionando", -"El servidor MySQL está rodando con la opción %s tal que no puede ejecutar este comando", -"Columna '%-.100s' tiene valor doblado '%-.64s' en %s" -"Equivocado truncado %-.32s valor: '%-.128s'" -"Incorrecta definición de tabla; Solamente debe haber una columna TIMESTAMP con CURRENT_TIMESTAMP en DEFAULT o ON UPDATE cláusula" -"Inválido ON UPDATE cláusula para campo '%-.64s'", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/swedish/errmsg.txt b/sql/share/swedish/errmsg.txt deleted file mode 100644 index 4dc42389d89..00000000000 --- a/sql/share/swedish/errmsg.txt +++ /dev/null @@ -1,321 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -character-set=latin1 - -"hashchk", -"isamchk", -"NO", -"YES", -"Kan inte skapa filen '%-.64s' (Felkod: %d)", -"Kan inte skapa tabellen '%-.64s' (Felkod: %d)", -"Kan inte skapa databasen '%-.64s' (Felkod: %d)", -"Databasen '%-.64s' existerar redan", -"Kan inte radera databasen '%-.64s'; databasen finns inte", -"Fel vid radering av databasen (Kan inte radera '%-.64s'. Felkod: %d)", -"Fel vid radering av databasen (Kan inte radera biblioteket '%-.64s'. Felkod: %d)", -"Kan inte radera filen '%-.64s' (Felkod: %d)", -"Hittar inte posten i systemregistret", -"Kan inte läsa filinformationen (stat) från '%-.64s' (Felkod: %d)", -"Kan inte inte läsa aktivt bibliotek. (Felkod: %d)", -"Kan inte låsa filen. (Felkod: %d)", -"Kan inte använda '%-.64s' (Felkod: %d)", -"Hittar inte filen '%-.64s' (Felkod: %d)", -"Kan inte läsa från bibliotek '%-.64s' (Felkod: %d)", -"Kan inte byta till '%-.64s' (Felkod: %d)", -"Posten har förändrats sedan den lästes i register '%-.64s'", -"Disken är full (%s). 
Väntar tills det finns ledigt utrymme...", -"Kan inte skriva, dubbel söknyckel i register '%-.64s'", -"Fick fel vid stängning av '%-.64s' (Felkod: %d)", -"Fick fel vid läsning av '%-.64s' (Felkod %d)", -"Kan inte byta namn från '%-.64s' till '%-.64s' (Felkod: %d)", -"Fick fel vid skrivning till '%-.64s' (Felkod %d)", -"'%-.64s' är låst mot användning", -"Sorteringen avbruten", -"Formulär '%-.64s' finns inte i '%-.64s'", -"Fick felkod %d från databashanteraren", -"Registrets databas har inte denna facilitet", -"Hittar inte posten", -"Felaktig fil: '%-.64s'", -"Fatalt fel vid hantering av register '%-.64s'; kör en reparation", -"Gammal nyckelfil '%-.64s'; reparera registret", -"'%-.64s' är skyddad mot förändring", -"Oväntat slut på minnet, starta om programmet och försök på nytt (Behövde %d bytes)", -"Sorteringsbufferten räcker inte till. Kontrollera startparametrarna", -"Oväntat filslut vid läsning från '%-.64s' (Felkod: %d)", -"För många anslutningar", -"Fick slut på minnet. Kontrollera om mysqld eller någon annan process använder allt tillgängligt minne. Om inte, försök använda 'ulimit' eller allokera mera swap", -"Kan inte hitta 'hostname' för din adress", -"Fel vid initiering av kommunikationen med klienten", -"Användare '%-.32s'@'%-.64s' är ej berättigad att använda databasen %-.64s", -"Användare '%-.32s'@'%-.64s' är ej berättigad att logga in (Använder lösen: %s)", -"Ingen databas i användning", -"Okänt commando", -"Kolumn '%-.64s' får inte vara NULL", -"Okänd databas: '%-.64s'", -"Tabellen '%-.64s' finns redan", -"Okänd tabell '%-.64s'", -"Kolumn '%-.64s' i %s är inte unik", -"Servern går nu ned", -"Okänd kolumn '%-.64s' i %s", -"'%-.64s' finns inte i GROUP BY", -"Kan inte använda GROUP BY med '%-.64s'", -"Kommandot har både sum functions och enkla funktioner", -"Antalet kolumner motsvarar inte antalet värden", -"Kolumnnamn '%-.64s' är för långt", -"Kolumnnamn '%-.64s finns flera gånger", -"Nyckelnamn '%-.64s' finns flera gånger", -"Dubbel nyckel '%-.64s' för nyckel %d", -"Felaktigt kolumntyp för kolumn '%-.64s'", -"%s nära '%-.64s' på rad %d", -"Frågan var tom", -"Icke unikt tabell/alias: '%-.64s'", -"Ogiltigt DEFAULT värde för '%-.64s'", -"Flera PRIMARY KEY använda", -"För många nycklar använda. Man får ha högst %d nycklar", -"För många nyckeldelar använda. Man får ha högst %d nyckeldelar", -"För lång nyckel. Högsta tillåtna nyckellängd är %d", -"Nyckelkolumn '%-.64s' finns inte", -"En BLOB '%-.64s' kan inte vara nyckel med den använda tabelltypen", -"För stor kolumnlängd angiven för '%-.64s' (max= %d). Använd en BLOB instället", -"Det får finnas endast ett AUTO_INCREMENT-fält och detta måste vara en nyckel", -"%s: klar att ta emot klienter", -"%s: Normal avslutning\n", -"%s: Fick signal %d. Avslutar!\n", -"%s: Avslutning klar\n", -"%s: Stänger av tråd %ld; användare: '%-.64s'\n", -"Kan inte skapa IP-socket", -"Tabellen '%-.64s' har inget index som motsvarar det angivna i CREATE INDEX. Skapa om tabellen", -"Fältseparatorerna är vad som förväntades. Kontrollera mot manualen", -"Man kan inte använda fast radlängd med blobs. Använd 'fields terminated by'", -"Textfilen '%.64s' måste finnas i databasbiblioteket eller vara läsbar för alla", -"Filen '%-.64s' existerar redan", -"Rader: %ld Bortagna: %ld Dubletter: %ld Varningar: %ld", -"Rader: %ld Dubletter: %ld", -"Felaktig delnyckel. Nyckeldelen är inte en sträng eller den angivna längden är längre än kolumnlängden", -"Man kan inte radera alla fält med ALTER TABLE. Använd DROP TABLE istället", -"Kan inte ta bort '%-.64s'. 
Kontrollera att fältet/nyckel finns", -"Rader: %ld Dubletter: %ld Varningar: %ld", -"INSERT-table '%-.64s' får inte finnas i FROM tabell-listan", -"Finns ingen tråd med id %lu", -"Du är inte ägare till tråd %lu", -"Inga tabeller angivna", -"För många alternativ till kolumn %s för SET", -"Kan inte generera ett unikt filnamn %s.(1-999)\n", -"Tabell '%-.64s' kan inte uppdateras emedan den är låst för läsning", -"Tabell '%-.64s' är inte låst med LOCK TABLES", -"BLOB fält '%-.64s' kan inte ha ett DEFAULT-värde", -"Felaktigt databasnamn '%-.64s'", -"Felaktigt tabellnamn '%-.64s'", -"Den angivna frågan skulle läsa mer än MAX_JOIN_SIZE rader. Kontrollera din WHERE och använd SET SQL_BIG_SELECTS=1 eller SET MAX_JOIN_SIZE=# ifall du vill hantera stora joins", -"Oidentifierat fel", -"Okänd procedur: %s", -"Felaktigt antal parametrar till procedur %s", -"Felaktiga parametrar till procedur %s", -"Okänd tabell '%-.64s' i '%-.64s'", -"Fält '%-.64s' är redan använt", -"Felaktig användning av SQL grupp function", -"Tabell '%-.64s' har en extension som inte finns i denna version av MySQL", -"Tabeller måste ha minst 1 kolumn", -"Tabellen '%-.64s' är full", -"Okänd teckenuppsättning: '%-.64s'", -"För många tabeller. MySQL can ha högst %d tabeller i en och samma join", -"För många fält", -"För stor total radlängd. Den högst tillåtna radlängden, förutom BLOBs, är %d. Ändra några av dina fält till BLOB", -"Trådstacken tog slut: Har använt %ld av %ld bytes. Använd 'mysqld -O thread_stack=#' ifall du behöver en större stack", -"Felaktigt referens i OUTER JOIN. Kontrollera ON-uttrycket", -"Kolumn '%-.32s' är använd med UNIQUE eller INDEX men är inte definerad med NOT NULL", -"Kan inte ladda funktionen '%-.64s'", -"Kan inte initialisera funktionen '%-.64s'; '%-.80s'", -"Man får inte ange sökväg för dynamiska bibliotek", -"Funktionen '%-.64s' finns redan", -"Kan inte öppna det dynamiska biblioteket '%-.64s' (Felkod: %d %s)", -"Hittar inte funktionen '%-.64s' in det dynamiska biblioteket", -"Funktionen '%-.64s' är inte definierad", -"Denna dator, '%-.64s', är blockerad pga många felaktig paket. Gör 'mysqladmin flush-hosts' för att ta bort alla blockeringarna", -"Denna dator, '%-.64s', har inte privileger att använda denna MySQL server", -"Du använder MySQL som en anonym användare och som sådan får du inte ändra ditt lösenord", -"För att ändra lösenord för andra måste du ha rättigheter att uppdatera mysql-databasen", -"Hittade inte användaren i 'user'-tabellen", -"Rader: %ld Uppdaterade: %ld Varningar: %ld", -"Kan inte skapa en ny tråd (errno %d)", -"Antalet kolumner motsvarar inte antalet värden på rad: %ld", -"Kunde inte stänga och öppna tabell '%-.64s", -"Felaktig använding av NULL", -"Fick fel '%-.64s' från REGEXP", -"Man får ha både GROUP-kolumner (MIN(),MAX(),COUNT()...) 
och fält i en fråga om man inte har en GROUP BY-del", -"Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s'", -"%-.16s ej tillåtet för '%-.32s'@'%-.64s' för tabell '%-.64s'", -"%-.16s ej tillåtet för '%-.32s'@'%-.64s' för kolumn '%-.64s' i tabell '%-.64s'", -"Felaktigt GRANT-privilegium använt", -"Felaktigt maskinnamn eller användarnamn använt med GRANT", -"Det finns ingen tabell som heter '%-.64s.%s'", -"Det finns inget privilegium definierat för användare '%-.32s' på '%-.64s' för tabell '%-.64s'", -"Du kan inte använda detta kommando med denna MySQL version", -"Du har något fel i din syntax", -"DELAYED INSERT-tråden kunde inte låsa tabell '%-.64s'", -"Det finns redan 'max_delayed_threads' trådar i använding", -"Avbröt länken för tråd %ld till db '%-.64s', användare '%-.64s' (%s)", -"Kommunkationspaketet är större än 'max_allowed_packet'", -"Fick läsfel från klienten vid läsning från 'PIPE'", -"Fick fatalt fel från 'fcntl()'", -"Kommunikationspaketen kom i fel ordning", -"Kunde inte packa up kommunikationspaketet", -"Fick ett fel vid läsning från klienten", -"Fick 'timeout' vid läsning från klienten", -"Fick ett fel vid skrivning till klienten", -"Fick 'timeout' vid skrivning till klienten", -"Resultatsträngen är längre än 'max_allowed_packet'", -"Den använda tabelltypen kan inte hantera BLOB/TEXT-kolumner", -"Den använda tabelltypen kan inte hantera AUTO_INCREMENT-kolumner", -"INSERT DELAYED kan inte användas med tabell '%-.64s', emedan den är låst med LOCK TABLES", -"Felaktigt kolumnnamn '%-.100s'", -"Den använda tabelltypen kan inte indexera kolumn '%-.64s'", -"Tabellerna i MERGE-tabellen är inte identiskt definierade", -"Kan inte skriva till tabell '%-.64s'; UNIQUE-test", -"Du har inte angett någon nyckellängd för BLOB '%-.64s'", -"Alla delar av en PRIMARY KEY måste vara NOT NULL; Om du vill ha en nyckel med NULL, använd UNIQUE istället", -"Resultet bestod av mera än en rad", -"Denna tabelltyp kräver en PRIMARY KEY", -"Denna version av MySQL är inte kompilerad med RAID", -"Du använder 'säker uppdateringsmod' och försökte uppdatera en tabell utan en WHERE-sats som använder sig av en nyckel", -"Nyckel '%-.64s' finns inte in tabell '%-.64s'", -"Kan inte öppna tabellen", -"Tabellhanteraren för denna tabell kan inte göra %s", -"Du får inte utföra detta kommando i en transaktion", -"Fick fel %d vid COMMIT", -"Fick fel %d vid ROLLBACK", -"Fick fel %d vid FLUSH_LOGS", -"Fick fel %d vid CHECKPOINT", -"Avbröt länken för tråd %ld till db '%-.64s', användare '%-.32s', host '%-.64s' (%-.64s)", -"Tabellhanteraren klarar inte en binär kopiering av tabellen", -"Binärloggen stängdes medan FLUSH MASTER utfördes", -"Failed rebuilding the index of dumped table '%-.64s'", -"Fick en master: '%-.64s'", -"Fick nätverksfel vid läsning från master", -"Fick nätverksfel vid skrivning till master", -"Hittar inte ett FULLTEXT-index i kolumnlistan", -"Kan inte utföra kommandot emedan du har en låst tabell eller an aktiv transaktion", -"Okänd systemvariabel: '%-.64s'", -"Tabell '%-.64s' är trasig och bör repareras med REPAIR TABLE", -"Tabell '%-.64s' är trasig och senast (automatiska?) reparation misslyckades", -"Warning: Några icke transaktionella tabeller kunde inte återställas vid ROLLBACK", -"Transaktionen krävde mera än 'max_binlog_cache_size' minne. 
Öka denna mysqld-variabel och försök på nytt", -"Denna operation kan inte göras under replikering; Gör STOP SLAVE först", -"Denna operation kan endast göras under replikering; Konfigurera slaven och gör START SLAVE", -"Servern är inte konfigurerade som en replikationsslav. Ändra konfigurationsfilen eller gör CHANGE MASTER TO", -"Kunde inte initialisera replikationsstrukturerna. See MySQL fel fil för mera information", -"Kunde inte starta en tråd för replikering", -"Användare '%-.64s' har redan 'max_user_connections' aktiva inloggningar", -"Man kan endast använda konstantuttryck med SET", -"Fick inte ett lås i tid ; Försök att starta om transaktionen", -"Antal lås överskrider antalet reserverade lås", -"Updateringslås kan inte göras när man använder READ UNCOMMITTED", -"DROP DATABASE är inte tillåtet när man har ett globalt läslås", -"CREATE DATABASE är inte tillåtet när man har ett globalt läslås", -"Felaktiga argument till %s", -"'%-.32s'@'%-.64s' har inte rättighet att skapa nya användare", -"Felaktig tabelldefinition; alla tabeller i en MERGE-tabell måste vara i samma databas", -"Fick 'DEADLOCK' vid låsförsök av block/rad. Försök att starta om transaktionen", -"Tabelltypen har inte hantering av FULLTEXT-index", -"Kan inte lägga till 'FOREIGN KEY constraint'", -"FOREIGN KEY-konflikt: Kan inte skriva barn", -"FOREIGN KEY-konflikt: Kan inte radera fader", -"Fick fel vid anslutning till master: %-.128s", -"Fick fel vid utförande av command på mastern: %-.128s", -"Fick fel vid utförande av %s: %-.128s", -"Felaktig använding av %s and %s", -"SELECT-kommandona har olika antal kolumner", -"Kan inte utföra kommandot emedan du har ett READ-lås", -"Blandning av transaktionella och icke-transaktionella tabeller är inaktiverat", -"Option '%s' användes två gånger", -"Användare '%-.64s' har överskridit '%s' (nuvarande värde: %ld)", -"Du har inte privlegiet '%-.128s' som behövs för denna operation", -"Variabel '%-.64s' är en SESSION variabel och kan inte ändrad med SET GLOBAL", -"Variabel '%-.64s' är en GLOBAL variabel och bör sättas med SET GLOBAL", -"Variabel '%-.64s' har inte ett DEFAULT-värde", -"Variabel '%-.64s' kan inte sättas till '%-.64s'", -"Fel typ av argument till variabel '%-.64s'", -"Variabeln '%-.64s' kan endast sättas, inte läsas", -"Fel använding/placering av '%s'", -"Denna version av MySQL kan ännu inte utföra '%s'", -"Fick fatalt fel %d: '%-.128s' från master vid läsning av binärloggen", -"Slav SQL tråden ignorerade frågan pga en replicate-*-table regel", -"Variabel '%-.64s' är av typ %s", -"Felaktig FOREIGN KEY-definition för '%-.64s': %s", -"Nyckelreferensen och tabellreferensen stämmer inte överens", -"Operand should contain %d column(s)", -"Subquery returnerade mer än 1 rad", -"Okänd PREPARED STATEMENT id (%.*s) var given till %s", -"Hjälpdatabasen finns inte eller är skadad", -"Cyklisk referens i subqueries", -"Konvertar kolumn '%s' från %s till %s", -"Referens '%-.64s' stöds inte (%s)", -"Varje 'derived table' måste ha sitt eget alias", -"Select %u reducerades vid optimiering", -"Tabell '%-.64s' från en SELECT kan inte användas i %-.32s", -"Klienten stöder inte autentiseringsprotokollet som begärts av servern; överväg uppgradering av klientprogrammet.", -"Alla delar av en SPATIAL index måste vara NOT NULL", -"COLLATION '%s' är inte tillåtet för CHARACTER SET '%s'", -"Slaven har redan startat", -"Slaven har redan stoppat", -"Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not 
enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d rad(er) kapades av GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Använder handler %s för tabell '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"Field or reference '%-.64s%s%-.64s%s%-.64s' of SELECT #%d was resolved in SELECT #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Felaktigt index namn '%-.100s'", -"Felaktigt katalog namn '%-.100s'", -"Storleken av "Query cache" kunde inte sättas till %lu, ny storlek är %lu", -"Kolumn '%-.64s' kan inte vara del av ett FULLTEXT index", -"Okänd nyckel cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"Tabel %-.100s använd med '%s' är inte uppdateringsbar", -"'%s' är inte aktiverad; För att aktivera detta måste du bygga om MySQL med '%s' definerad", -"MySQL är startad med --skip-grant-tables. Pga av detta kan du inte använda detta kommando", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Fick felkod %d '%-.100s' från %s", -"Fick tilfällig felkod %d '%-.100s' från %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/share/ukrainian/errmsg.txt b/sql/share/ukrainian/errmsg.txt deleted file mode 100644 index a19cf946cb1..00000000000 --- a/sql/share/ukrainian/errmsg.txt +++ /dev/null @@ -1,327 +0,0 @@ -/* Copyright (C) 2003 MySQL AB - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ - -/* - * Ukrainian translation by Roman Festchook <roma@orta.zt.ua> - * Encoding: KOI8-U - * Version: 13/09/2001 mysql-3.23.41 - */ - -character-set=koi8u - -"hashchk", -"isamchk", -"î¶", -"ôáë", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÔÁÂÌÉÃÀ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ¦ÓÎÕ¤", -"îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ '%-.64s'. âÁÚÁ ÄÁÎÎÉÈ ÎÅ ¦ÓÎÕ¤", -"îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s', ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÂÁÚÕ ÄÁÎÎÉÈ (îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ ÔÅËÕ '%-.64s', ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ×ÉÄÁÌÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÚÞÉÔÁÔÉ ÚÁÐÉÓ Ú ÓÉÓÔÅÍÎϧ ÔÁÂÌÉæ", -"îÅ ÍÏÖÕ ÏÔÒÉÍÁÔÉ ÓÔÁÔÕÓ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ÒÏÂÏÞÕ ÔÅËÕ (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÚÁÂÌÏËÕ×ÁÔÉ ÆÁÊÌ (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÁÊÌ: '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÐÅÒÅÊÔÉ Õ ÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"úÁÐÉÓ ÂÕÌÏ ÚͦÎÅÎÏ Ú ÞÁÓÕ ÏÓÔÁÎÎØÏÇÏ ÞÉÔÁÎÎÑ Ú ÔÁÂÌÉæ '%-.64s'", -"äÉÓË ÚÁÐÏ×ÎÅÎÉÊ (%s). ÷ÉÞÉËÕÀ, ÄÏËÉ Ú×¦ÌØÎÉÔØÓÑ ÔÒÏÈÉ Í¦ÓÃÑ...", -"îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ, ÄÕÂÌÀÀÞÉÊÓÑ ËÌÀÞ × ÔÁÂÌÉæ '%-.64s'", -"îÅ ÍÏÖÕ ÚÁËÒÉÔÉ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÐÒÏÞÉÔÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÐÅÒÅÊÍÅÎÕ×ÁÔÉ '%-.64s' Õ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÆÁÊÌ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"'%-.64s' ÚÁÂÌÏËÏ×ÁÎÉÊ ÎÁ ×ÎÅÓÅÎÎÑ ÚͦÎ", -"óÏÒÔÕ×ÁÎÎÑ ÐÅÒÅÒ×ÁÎÏ", -"÷ÉÇÌÑÄ '%-.64s' ÎÅ ¦ÓÎÕ¤ ÄÌÑ '%-.64s'", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d ×¦Ä ÄÅÓËÒÉÐÔÏÒÁ ÔÁÂÌÉæ", -"äÅÓËÒÉÐÔÏÒ ÔÁÂÌÉæ '%-.64s' ÎÅ ÍÁ¤ 椧 ×ÌÁÓÔÉ×ÏÓÔ¦", -"îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ Õ '%-.64s'", -"èÉÂÎÁ ¦ÎÆÏÒÍÁÃ¦Ñ Õ ÆÁÊ̦: '%-.64s'", -"èÉÂÎÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ: '%-.64s'; óÐÒÏÂÕÊÔÅ ÊÏÇÏ ×¦ÄÎÏ×ÉÔÉ", -"óÔÁÒÉÊ ÆÁÊÌ ËÌÀÞÅÊ ÄÌÑ ÔÁÂÌÉæ '%-.64s'; ÷¦ÄÎÏ×¦ÔØ ÊÏÇÏ!", -"ôÁÂÌÉÃÑ '%-.64s' Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ", -"âÒÁË ÐÁÍ'ÑÔ¦. òÅÓÔÁÒÔÕÊÔÅ ÓÅÒ×ÅÒ ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ (ÐÏÔÒ¦ÂÎÏ %d ÂÁÊÔ¦×)", -"âÒÁË ÐÁÍ'ÑÔ¦ ÄÌÑ ÓÏÒÔÕ×ÁÎÎÑ. ôÒÅÂÁ ÚÂ¦ÌØÛÉÔÉ ÒÏÚÍ¦Ò ÂÕÆÅÒÁ ÓÏÒÔÕ×ÁÎÎÑ Õ ÓÅÒ×ÅÒÁ", -"èÉÂÎÉÊ Ë¦ÎÅÃØ ÆÁÊÌÕ '%-.64s' (ÐÏÍÉÌËÁ: %d)", -"úÁÂÁÇÁÔÏ Ú'¤ÄÎÁÎØ", -"âÒÁË ÐÁÍ'ÑÔ¦; ðÅÒÅצÒÔÅ ÞÉ mysqld ÁÂÏ ÑË¦ÓØ ¦ÎÛ¦ ÐÒÏÃÅÓÉ ×ÉËÏÒÉÓÔÏ×ÕÀÔØ ÕÓÀ ÄÏÓÔÕÐÎÕ ÐÁÍ'ÑÔØ. 
ñË Î¦, ÔÏ ×É ÍÏÖÅÔÅ ÓËÏÒÉÓÔÁÔÉÓÑ 'ulimit', ÁÂÉ ÄÏÚ×ÏÌÉÔÉ mysqld ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ Â¦ÌØÛÅ ÐÁÍ'ÑÔ¦ ÁÂÏ ×É ÍÏÖÅÔÅ ÄÏÄÁÔÉ Â¦ÌØÛŠͦÓÃÑ Ð¦Ä Ó×ÁÐ", -"îÅ ÍÏÖÕ ×ÉÚÎÁÞÉÔÉ ¦Í'Ñ ÈÏÓÔÕ ÄÌÑ ×ÁÛϧ ÁÄÒÅÓÉ", -"îÅצÒÎÁ ÕÓÔÁÎÏ×ËÁ Ú×'ÑÚËÕ", -"äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ '%-.64s'", -"äÏÓÔÕÐ ÚÁÂÏÒÏÎÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'@'%-.64s' (÷ÉËÏÒÉÓÔÁÎÏ ÐÁÒÏÌØ: %s)", -"âÁÚÕ ÄÁÎÎÉÈ ÎÅ ×ÉÂÒÁÎÏ", -"îÅצÄÏÍÁ ËÏÍÁÎÄÁ", -"óÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ÎÕÌØÏ×ÉÍ", -"îÅצÄÏÍÁ ÂÁÚÁ ÄÁÎÎÉÈ '%-.64s'", -"ôÁÂÌÉÃÑ '%-.64s' ×ÖÅ ¦ÓÎÕ¤", -"îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s'", -"óÔÏ×ÂÅÃØ '%-.64s' Õ %-.64s ×ÉÚÎÁÞÅÎÉÊ ÎÅÏÄÎÏÚÎÁÞÎÏ", -"úÁ×ÅÒÛÕ¤ÔØÓÑ ÒÁÂÏÔÁ ÓÅÒ×ÅÒÁ", -"îÅצÄÏÍÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' Õ '%-.64s'", -"'%-.64s' ÎÅ ¤ Õ GROUP BY", -"îÅ ÍÏÖÕ ÇÒÕÐÕ×ÁÔÉ ÐÏ '%-.64s'", -"õ ×ÉÒÁÚ¦ ×ÉËÏÒÉÓÔÁÎÏ Ð¦ÄÓÕÍÏ×ÕÀÞ¦ ÆÕÎËæ§ ÐÏÒÑÄ Ú ¦ÍÅÎÁÍÉ ÓÔÏ×Âæ×", -"ë¦ÌØË¦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØË¦ÓÔÀ ÚÎÁÞÅÎØ", -"¶Í'Ñ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒÁ '%-.100s' ÚÁÄÏ×ÇÅ", -"äÕÂÌÀÀÞÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.64s'", -"äÕÂÌÀÀÞÅ ¦Í'Ñ ËÌÀÞÁ '%-.64s'", -"äÕÂÌÀÀÞÉÊ ÚÁÐÉÓ '%-.64s' ÄÌÑ ËÌÀÞÁ %d", -"îÅצÒÎÉÊ ÓÐÅÃÉÆ¦ËÁÔÏÒ ÓÔÏ×ÂÃÑ '%-.64s'", -"%s ¦ÌÑ '%-.80s' × ÓÔÒÏæ %d", -"ðÕÓÔÉÊ ÚÁÐÉÔ", -"îÅÕΦËÁÌØÎÁ ÔÁÂÌÉÃÑ/ÐÓÅ×ÄÏΦÍ: '%-.64s'", -"îÅצÒÎÅ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ ÄÌÑ '%-.64s'", -"ðÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ ×ÉÚÎÁÞÅÎÏ ÎÅÏÄÎÏÒÁÚÏ×Ï", -"úÁÂÁÇÁÔÏ ËÌÀÞ¦× ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ËÌÀÞ¦×", -"úÁÂÁÇÁÔÏ ÞÁÓÔÉÎ ËÌÀÞÁ ÚÁÚÎÁÞÅÎÏ. äÏÚ×ÏÌÅÎÏ ÎÅ Â¦ÌØÛÅ %d ÞÁÓÔÉÎ", -"úÁÚÎÁÞÅÎÉÊ ËÌÀÞ ÚÁÄÏ×ÇÉÊ. îÁÊÂ¦ÌØÛÁ ÄÏ×ÖÉÎÁ ËÌÀÞÁ %d ÂÁÊÔ¦×", -"ëÌÀÞÏ×ÉÊ ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ¦ÓÎÕ¤ Õ ÔÁÂÌÉæ", -"BLOB ÓÔÏ×ÂÅÃØ '%-.64s' ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÉÊ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ × ÃØÏÍÕ ÔÉЦ ÔÁÂÌÉæ", -"úÁÄÏ×ÇÁ ÄÏ×ÖÉÎÁ ÓÔÏ×ÂÃÑ '%-.64s' (max = %d). ÷ÉËÏÒÉÓÔÁÊÔÅ ÔÉÐ BLOB", -"îÅצÒÎÅ ×ÉÚÎÁÞÅÎÎÑ ÔÁÂÌÉæ; íÏÖÅ ÂÕÔÉ ÌÉÛÅ ÏÄÉÎ Á×ÔÏÍÁÔÉÞÎÉÊ ÓÔÏ×ÂÅÃØ, ÝÏ ÐÏ×ÉÎÅÎ ÂÕÔÉ ×ÉÚÎÁÞÅÎÉÊ ÑË ËÌÀÞ", -"%s: çÏÔÏ×ÉÊ ÄÌÑ Ú'¤ÄÎÁÎØ!", -"%s: îÏÒÍÁÌØÎÅ ÚÁ×ÅÒÛÅÎÎÑ\n", -"%s: ïÔÒÉÍÁÎÏ ÓÉÇÎÁÌ %d. ðÅÒÅÒÉ×ÁÀÓØ!\n", -"%s: òÏÂÏÔÕ ÚÁ×ÅÒÛÅÎÏ\n", -"%s: ðÒÉÓËÏÒÀÀ ÚÁËÒÉÔÔÑ Ç¦ÌËÉ %ld ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s'\n", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ IP ÒÏÚ'¤Í", -"ôÁÂÌÉÃÑ '%-.64s' ÍÁ¤ ¦ÎÄÅËÓ, ÝÏ ÎÅ ÓЦ×ÐÁÄÁ¤ Ú ×ËÁÚÁÎÎÉÍ Õ CREATE INDEX. óÔ×ÏÒ¦ÔØ ÔÁÂÌÉÃÀ ÚÎÏ×Õ", -"èÉÂÎÉÊ ÒÏÚĦÌÀ×ÁÞ ÐÏ̦×. ðÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ", -"îÅ ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÓÔÁÌÕ ÄÏ×ÖÉÎÕ ÓÔÒÏËÉ Ú BLOB. úËÏÒÉÓÔÁÊÔÅÓÑ 'fields terminated by'", -"æÁÊÌ '%-.64s' ÐÏ×ÉÎÅÎ ÂÕÔÉ Õ ÔÅæ ÂÁÚÉ ÄÁÎÎÉÈ ÁÂÏ ÍÁÔÉ ×ÓÔÁÎÏ×ÌÅÎÅ ÐÒÁ×Ï ÎÁ ÞÉÔÁÎÎÑ ÄÌÑ ÕÓ¦È", -"æÁÊÌ '%-.80s' ×ÖÅ ¦ÓÎÕ¤", -"úÁÐÉÓ¦×: %ld ÷ÉÄÁÌÅÎÏ: %ld ðÒÏÐÕÝÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld", -"úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld", -"îÅצÒÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ. ÷ÉËÏÒÉÓÔÁÎÁ ÞÁÓÔÉÎÁ ËÌÀÞÁ ÎÅ ¤ ÓÔÒÏËÏÀ, ÚÁÄÏ×ÇÁ ÁÂÏ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ÕΦËÁÌØÎÉÈ ÞÁÓÔÉÎ ËÌÀÞÅÊ", -"îÅ ÍÏÖÌÉ×Ï ×ÉÄÁÌÉÔÉ ×Ó¦ ÓÔÏ×Âæ ÚÁ ÄÏÐÏÍÏÇÏÀ ALTER TABLE. äÌÑ ÃØÏÇÏ ÓËÏÒÉÓÔÁÊÔÅÓÑ DROP TABLE", -"îÅ ÍÏÖÕ DROP '%-.64s'. ðÅÒÅצÒÔÅ, ÞÉ ÃÅÊ ÓÔÏ×ÂÅÃØ/ËÌÀÞ ¦ÓÎÕ¤", -"úÁÐÉÓ¦×: %ld äÕÂ̦ËÁÔ¦×: %ld úÁÓÔÅÒÅÖÅÎØ: %ld", -"ôÁÂÌÉÃÑ '%-.64s' ÝÏ ÚͦÎÀ¤ÔØÓÑ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ ÐÅÒÅ̦ËÕ ÔÁÂÌÉÃØ FROM", -"îÅצÄÏÍÉÊ ¦ÄÅÎÔÉÆ¦ËÁÔÏÒ Ç¦ÌËÉ: %lu", -"÷É ÎÅ ×ÏÌÏÄÁÒ Ç¦ÌËÉ %lu", -"îÅ ×ÉËÏÒÉÓÔÁÎÏ ÔÁÂÌÉÃØ", -"úÁÂÁÇÁÔÏ ÓÔÒÏË ÄÌÑ ÓÔÏ×ÂÃÑ %-.64s ÔÁ SET", -"îÅ ÍÏÖÕ ÚÇÅÎÅÒÕ×ÁÔÉ ÕΦËÁÌØÎÅ ¦Í'Ñ log-ÆÁÊÌÕ %-.64s.(1-999)\n", -"ôÁÂÌÉÃÀ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ô¦ÌØËÉ ÄÌÑ ÞÉÔÁÎÎÑ, ÔÏÍÕ §§ ÎÅ ÍÏÖÎÁ ÏÎÏ×ÉÔÉ", -"ôÁÂÌÉÃÀ '%-.64s' ÎÅ ÂÕÌÏ ÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES", -"óÔÏ×ÂÅÃØ BLOB '%-.64s' ÎÅ ÍÏÖÅ ÍÁÔÉ ÚÎÁÞÅÎÎÑ ÐÏ ÚÁÍÏ×ÞÕ×ÁÎÎÀ", -"îÅצÒÎÅ ¦Í'Ñ ÂÁÚÉ ÄÁÎÎÉÈ '%-.100s'", -"îÅצÒÎÅ ¦Í'Ñ ÔÁÂÌÉæ '%-.100s'", -"úÁÐÉÔÕ SELECT ÐÏÔÒ¦ÂÎÏ ÏÂÒÏÂÉÔÉ ÂÁÇÁÔÏ ÚÁÐÉÓ¦×, ÝÏ, ÐÅ×ÎÅ, ÚÁÊÍÅ ÄÕÖÅ ÂÁÇÁÔÏ ÞÁÓÕ. 
ðÅÒÅצÒÔÅ ×ÁÛÅ WHERE ÔÁ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ SET SQL_BIG_SELECTS=1, ÑËÝÏ ÃÅÊ ÚÁÐÉÔ SELECT ¤ צÒÎÉÍ", -"îÅצÄÏÍÁ ÐÏÍÉÌËÁ", -"îÅצÄÏÍÁ ÐÒÏÃÅÄÕÒÁ '%-.64s'", -"èÉÂÎÁ Ë¦ÌØË¦ÓÔØ ÐÁÒÁÍÅÔÒ¦× ÐÒÏÃÅÄÕÒÉ '%-.64s'", -"èÉÂÎÉÊ ÐÁÒÁÍÅÔÅÒ ÐÒÏÃÅÄÕÒÉ '%-.64s'", -"îÅצÄÏÍÁ ÔÁÂÌÉÃÑ '%-.64s' Õ %-.32s", -"óÔÏ×ÂÅÃØ '%-.64s' ÚÁÚÎÁÞÅÎÏ Äצަ", -"èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÆÕÎËæ§ ÇÒÕÐÕ×ÁÎÎÑ", -"ôÁÂÌÉÃÑ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ ÒÏÚÛÉÒÅÎÎÑ, ÝÏ ÎÅ ¦ÓÎÕ¤ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL", -"ôÁÂÌÉÃÑ ÐÏ×ÉÎÎÁ ÍÁÔÉ ÈÏÞÁ ÏÄÉÎ ÓÔÏ×ÂÅÃØ", -"ôÁÂÌÉÃÑ '%-.64s' ÚÁÐÏ×ÎÅÎÁ", -"îÅצÄÏÍÁ ËÏÄÏ×Á ÔÁÂÌÉÃÑ: '%-.64s'", -"úÁÂÁÇÁÔÏ ÔÁÂÌÉÃØ. MySQL ÍÏÖÅ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ %d ÔÁÂÌÉÃØ Õ ÏÂ'¤ÄÎÁÎΦ", -"úÁÂÁÇÁÔÏ ÓÔÏ×Âæ×", -"úÁÄÏ×ÇÁ ÓÔÒÏËÁ. îÁÊÂ¦ÌØÛÏÀ ÄÏ×ÖÉÎÏÀ ÓÔÒÏËÉ, ÎÅ ÒÁÈÕÀÞÉ BLOB, ¤ %d. ÷ÁÍ ÐÏÔÒ¦ÂÎÏ ÐÒÉ×ÅÓÔÉ ÄÅÑ˦ ÓÔÏ×Âæ ÄÏ ÔÉÐÕ BLOB", -"óÔÅË Ç¦ÌÏË ÐÅÒÅÐÏ×ÎÅÎÏ: ÷ÉËÏÒÉÓÔÁÎÏ: %ld Ú %ld. ÷ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqld -O thread_stack=#' ÁÂÉ ÚÁÚÎÁÞÉÔÉ Â¦ÌØÛÉÊ ÓÔÅË, ÑËÝÏ ÎÅÏÂȦÄÎÏ", -"ðÅÒÅÈÒÅÓÎÁ ÚÁÌÅÖΦÓÔØ Õ OUTER JOIN. ðÅÒÅצÒÔÅ ÕÍÏ×Õ ON", -"óÔÏ×ÂÅÃØ '%-.64s' ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ Ú UNIQUE ÁÂÏ INDEX, ÁÌÅ ÎÅ ×ÉÚÎÁÞÅÎÉÊ ÑË NOT NULL", -"îÅ ÍÏÖÕ ÚÁ×ÁÎÔÁÖÉÔÉ ÆÕÎËæÀ '%-.64s'", -"îÅ ÍÏÖÕ ¦Î¦Ã¦Á̦ÚÕ×ÁÔÉ ÆÕÎËæÀ '%-.64s'; %-.80s", -"îÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÐÕÔ¦ ÄÌÑ ÒÏÚĦÌÀ×ÁÎÉÈ Â¦Â̦ÏÔÅË", -"æÕÎËÃ¦Ñ '%-.64s' ×ÖÅ ¦ÓÎÕ¤", -"îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÒÏÚĦÌÀ×ÁÎÕ Â¦Â̦ÏÔÅËÕ '%-.64s' (ÐÏÍÉÌËÁ: %d %-.64s)", -"îÅ ÍÏÖÕ ÚÎÁÊÔÉ ÆÕÎËæÀ '%-.64s' Õ Â¦Â̦ÏÔÅæ'", -"æÕÎËæÀ '%-.64s' ÎÅ ×ÉÚÎÁÞÅÎÏ", -"èÏÓÔ '%-.64s' ÚÁÂÌÏËÏ×ÁÎÏ Ú ÐÒÉÞÉÎÉ ×ÅÌÉËϧ Ë¦ÌØËÏÓÔ¦ ÐÏÍÉÌÏË Ú'¤ÄÎÁÎÎÑ. äÌÑ ÒÏÚÂÌÏËÕ×ÁÎÎÑ ×ÉËÏÒÉÓÔÏ×ÕÊÔÅ 'mysqladmin flush-hosts'", -"èÏÓÔÕ '%-.64s' ÎÅ ÄÏ×ÏÌÅÎÏ Ú×'ÑÚÕ×ÁÔÉÓØ Ú ÃÉÍ ÓÅÒ×ÅÒÏÍ MySQL", -"÷É ×ÉËÏÒÉÓÔÏ×Õ¤ÔÅ MySQL ÑË ÁÎÏΦÍÎÉÊ ËÏÒÉÓÔÕ×ÁÞ, ÔÏÍÕ ×ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ÚͦÎÀ×ÁÔÉ ÐÁÒÏ̦", -"÷É ÐÏ×ÉΦ ÍÁÔÉ ÐÒÁ×Ï ÎÁ ÏÎÏ×ÌÅÎÎÑ ÔÁÂÌÉÃØ Õ ÂÁÚ¦ ÄÁÎÎÉÈ mysql, ÁÂÉ ÍÁÔÉ ÍÏÖÌÉצÓÔØ ÚͦÎÀ×ÁÔÉ ÐÁÒÏÌØ ¦ÎÛÉÍ", -"îÅ ÍÏÖÕ ÚÎÁÊÔÉ ×¦ÄÐÏצÄÎÉÈ ÚÁÐÉÓ¦× Õ ÔÁÂÌÉæ ËÏÒÉÓÔÕ×ÁÞÁ", -"úÁÐÉÓ¦× ×¦ÄÐÏצÄÁ¤: %ld úͦÎÅÎÏ: %ld úÁÓÔÅÒÅÖÅÎØ: %ld", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ ÎÏ×Õ Ç¦ÌËÕ (ÐÏÍÉÌËÁ %d). ñËÝÏ ×É ÎÅ ×ÉËÏÒÉÓÔÁÌÉ ÕÓÀ ÐÁÍ'ÑÔØ, ÔÏ ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÄÏ ×ÁÛϧ ïó - ÍÏÖÌÉ×Ï ÃÅ ÐÏÍÉÌËÁ ïó", -"ë¦ÌØË¦ÓÔØ ÓÔÏ×ÂÃ¦× ÎÅ ÓЦ×ÐÁÄÁ¤ Ú Ë¦ÌØË¦ÓÔÀ ÚÎÁÞÅÎØ Õ ÓÔÒÏæ %ld", -"îÅ ÍÏÖÕ ÐÅÒÅצÄËÒÉÔÉ ÔÁÂÌÉÃÀ: '%-.64s'", -"èÉÂÎÅ ×ÉËÏÒÉÓÔÁÎÎÑ ÚÎÁÞÅÎÎÑ NULL", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ '%-.64s' ×¦Ä ÒÅÇÕÌÑÒÎÏÇÏ ×ÉÒÁÚÕ", -"úͦÛÕ×ÁÎÎÑ GROUP ÓÔÏ×ÂÃ¦× (MIN(),MAX(),COUNT()...) 
Ú ÎÅ GROUP ÓÔÏ×ÂÃÑÍÉ ¤ ÚÁÂÏÒÏÎÅÎÉÍ, ÑËÝÏ ÎÅ ÍÁ¤ GROUP BY", -"ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s'", -"%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' Õ ÔÁÂÌÉæ '%-.64s'", -"%-.16s ËÏÍÁÎÄÁ ÚÁÂÏÒÏÎÅÎÁ ËÏÒÉÓÔÕ×ÁÞÕ: '%-.32s'@'%-.64s' ÄÌÑ ÓÔÏ×ÂÃÑ '%-.64s' Õ ÔÁÂÌÉæ '%-.64s'", -"èÉÂÎÁ GRANT/REVOKE ËÏÍÁÎÄÁ; ÐÒÏÞÉÔÁÊÔÅ ÄÏËÕÍÅÎÔÁæÀ ÓÔÏÓÏ×ÎÏ ÔÏÇÏ, Ñ˦ ÐÒÁ×Á ÍÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ", -"áÒÇÕÍÅÎÔ host ÁÂÏ user ÄÌÑ GRANT ÚÁÄÏ×ÇÉÊ", -"ôÁÂÌÉÃÑ '%-.64s.%-.64s' ÎÅ ¦ÓÎÕ¤", -"ðÏ×ÎÏ×ÁÖÅÎØ ÎÅ ×ÉÚÎÁÞÅÎÏ ÄÌÑ ËÏÒÉÓÔÕ×ÁÞÁ '%-.32s' Ú ÈÏÓÔÕ '%-.64s' ÄÌÑ ÔÁÂÌÉæ '%-.64s'", -"÷ÉËÏÒÉÓÔÏ×Õ×ÁÎÁ ËÏÍÁÎÄÁ ÎÅ ÄÏÚ×ÏÌÅÎÁ Õ Ã¦Ê ×ÅÒÓ¦§ MySQL", -"õ ×ÁÓ ÐÏÍÉÌËÁ Õ ÓÉÎÔÁËÓÉÓ¦ SQL", -"ç¦ÌËÁ ÄÌÑ INSERT DELAYED ÎÅ ÍÏÖÅ ÏÔÒÉÍÁÔÉ ÂÌÏËÕ×ÁÎÎÑ ÄÌÑ ÔÁÂÌÉæ %-.64s", -"úÁÂÁÇÁÔÏ ÚÁÔÒÉÍÁÎÉÈ Ç¦ÌÏË ×ÉËÏÒÉÓÔÏ×Õ¤ÔØÓÑ", -"ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞÁ: '%-.32s' (%-.64s)", -"ïÔÒÉÍÁÎÏ ÐÁËÅÔ Â¦ÌØÛÉÊ Î¦Ö 'max_allowed_packet'", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ Ú ËÏÍÕΦËÁæÊÎÏÇÏ ËÁÎÁÌÕ", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËËÕ ×¦Ä fcntl()", -"ïÔÒÉÍÁÎÏ ÐÁËÅÔÉ Õ ÎÅÎÁÌÅÖÎÏÍÕ ÐÏÒÑÄËÕ", -"îÅ ÍÏÖÕ ÄÅËÏÍÐÒÅÓÕ×ÁÔÉ ËÏÍÕΦËÁæÊÎÉÊ ÐÁËÅÔ", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÞÉÔÁÎÎÑ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"ïÔÒÉÍÁÎÏ ÚÁÔÒÉÍËÕ ÚÁÐÉÓÕ ËÏÍÕΦËÁæÊÎÉÈ ÐÁËÅÔ¦×", -"óÔÒÏËÁ ÒÅÚÕÌØÔÁÔÕ ÄÏ×ÛÁ Î¦Ö 'max_allowed_packet'", -"÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ BLOB/TEXT ÓÔÏ×Âæ", -"÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ AUTO_INCREMENT ÓÔÏ×Âæ", -"INSERT DELAYED ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÒÉÓÔÁÎÏ Ú ÔÁÂÌÉÃÅÀ '%-.64s', ÔÏÍÕ ÝÏ §§ ÚÁÂÌÏËÏ×ÁÎÏ Ú LOCK TABLES", -"îÅצÒÎÅ ¦Í'Ñ ÓÔÏ×ÂÃÑ '%-.100s'", -"÷ÉËÏÒÉÓÔÁÎÉÊ ×ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ÎÅ ÍÏÖÅ ¦ÎÄÅËÓÕ×ÁÔÉ ÓÔÏ×ÂÅÃØ '%-.64s'", -"ôÁÂÌÉæ Õ MERGE TABLE ÍÁÀÔØ Ò¦ÚÎÕ ÓÔÒÕËÔÕÒÕ", -"îÅ ÍÏÖÕ ÚÁÐÉÓÁÔÉ ÄÏ ÔÁÂÌÉæ '%-.64s', Ú ÐÒÉÞÉÎÉ ×ÉÍÏÇ ÕΦËÁÌØÎÏÓÔ¦", -"óÔÏ×ÂÅÃØ BLOB '%-.64s' ×ÉËÏÒÉÓÔÁÎÏ Õ ×ÉÚÎÁÞÅÎΦ ËÌÀÞÁ ÂÅÚ ×ËÁÚÁÎÎÑ ÄÏ×ÖÉÎÉ ËÌÀÞÁ", -"õÓ¦ ÞÁÓÔÉÎÉ PRIMARY KEY ÐÏ×ÉÎΦ ÂÕÔÉ NOT NULL; ñËÝÏ ×É ÐÏÔÒÅÂÕ¤ÔÅ NULL Õ ËÌÀÞ¦, ÓËÏÒÉÓÔÁÊÔÅÓÑ UNIQUE", -"òÅÚÕÌØÔÁÔ ÚÎÁÈÏÄÉÔØÓÑ Õ Â¦ÌØÛÅ Î¦Ö ÏÄÎ¦Ê ÓÔÒÏæ", -"ãÅÊ ÔÉÐ ÔÁÂÌÉæ ÐÏÔÒÅÂÕ¤ ÐÅÒ×ÉÎÎÏÇÏ ËÌÀÞÁ", -"ãÑ ×ÅÒÓ¦Ñ MySQL ÎÅ ÚËÏÍÐ¦ÌØÏ×ÁÎÁ Ú Ð¦ÄÔÒÉÍËÏÀ RAID", -"÷É Õ ÒÅÖÉͦ ÂÅÚÐÅÞÎÏÇÏ ÏÎÏ×ÌÅÎÎÑ ÔÁ ÎÁÍÁÇÁ¤ÔÅÓØ ÏÎÏ×ÉÔÉ ÔÁÂÌÉÃÀ ÂÅÚ ÏÐÅÒÁÔÏÒÁ WHERE, ÝÏ ×ÉËÏÒÉÓÔÏ×Õ¤ KEY ÓÔÏ×ÂÅÃØ", -"ëÌÀÞ '%-.64s' ÎÅ ¦ÓÎÕ¤ × ÔÁÂÌÉæ '%-.64s'", -"îÅ ÍÏÖÕ ×¦ÄËÒÉÔÉ ÔÁÂÌÉÃÀ", -"÷ËÁÚ¦×ÎÉË ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕÅ %s", -"÷ÁÍ ÎÅ ÄÏÚ×ÏÌÅÎÏ ×ÉËÏÎÕ×ÁÔÉ ÃÀ ËÏÍÁÎÄÕ × ÔÒÁÎÚÁËæ§", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ COMMIT", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ ROLLBACK", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ FLUSH_LOGS", -"ïÔÒÉÍÁÎÏ ÐÏÍÉÌËÕ %d Ð¦Ä ÞÁÓ CHECKPOINT", -"ðÅÒÅÒ×ÁÎÏ Ú'¤ÄÎÁÎÎÑ %ld ÄÏ ÂÁÚÉ ÄÁÎÎÉÈ: '%-.64s' ËÏÒÉÓÔÕ×ÁÞ: '%-.32s' ÈÏÓÔ: `%-.64s' (%-.64s)", -"ãÅÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ ¦ÎÁÒÎÕ ÐÅÒÅÄÁÞÕ ÔÁÂÌÉæ", -"òÅÐ̦ËÁæÊÎÉÊ ÌÏÇ ÚÁËÒÉÔÏ, ÎÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ RESET MASTER", -"îÅ×ÄÁÌŠצÄÎÏ×ÌÅÎÎÑ ¦ÎÄÅËÓÁ ÐÅÒÅÄÁÎϧ ÔÁÂÌÉæ '%-.64s'", -"ðÏÍÉÌËÁ ×¦Ä ÇÏÌÏ×ÎÏÇÏ: '%-.64s'", -"íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÞÉÔÁÎÎÑ ×¦Ä ÇÏÌÏ×ÎÏÇÏ", -"íÅÒÅÖÅ×Á ÐÏÍÉÌËÁ ÚÁÐÉÓÕ ÄÏ ÇÏÌÏ×ÎÏÇÏ", -"îÅ ÍÏÖÕ ÚÎÁÊÔÉ FULLTEXT ¦ÎÄÅËÓ, ÝÏ ×¦ÄÐÏצÄÁ¤ ÐÅÒÅ̦ËÕ ÓÔÏ×Âæ×", -"îÅ ÍÏÖÕ ×ÉËÏÎÁÔÉ ÐÏÄÁÎÕ ËÏÍÁÎÄÕ ÔÏÍÕ, ÝÏ ÔÁÂÌÉÃÑ ÚÁÂÌÏËÏ×ÁÎÁ ÁÂÏ ×ÉËÏÎÕ¤ÔØÓÑ ÔÒÁÎÚÁËæÑ", -"îÅצÄÏÍÁ ÓÉÓÔÅÍÎÁ ÚͦÎÎÁ '%-.64s'", -"ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ §§ ÐÏÔÒ¦ÂÎÏ ×¦ÄÎÏ×ÉÔÉ", -"ôÁÂÌÉÃÀ '%-.64s' ÍÁÒËÏ×ÁÎÏ ÑË Ú¦ÐÓÏ×ÁÎÕ ÔÁ ÏÓÔÁÎΤ (Á×ÔÏÍÁÔÉÞÎÅ?) צÄÎÏ×ÌÅÎÎÑ ÎÅ ×ÄÁÌÏÓÑ", -"úÁÓÔÅÒÅÖÅÎÎÑ: äÅÑ˦ ÎÅÔÒÁÎÚÁËæÊΦ ÚͦÎÉ ÔÁÂÌÉÃØ ÎÅ ÍÏÖÎÁ ÂÕÄÅ ÐÏ×ÅÒÎÕÔÉ", -"ôÒÁÎÚÁËÃ¦Ñ Ú ÂÁÇÁÔØÍÁ ×ÉÒÁÚÁÍÉ ×ÉÍÁÇÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_binlog_cache_size' ÂÁÊÔ¦× ÄÌÑ ÚÂÅÒ¦ÇÁÎÎÑ. 
úÂ¦ÌØÛÔÅ ÃÀ ÚͦÎÎÕ mysqld ÔÁ ÓÐÒÏÂÕÊÔÅ ÚÎÏ×Õ", -"ïÐÅÒÁÃ¦Ñ ÎÅ ÍÏÖÅ ÂÕÔÉ ×ÉËÏÎÁÎÁ Ú ÚÁÐÕÝÅÎÉÍ Ð¦ÄÌÅÇÌÉÍ, ÓÐÏÞÁÔËÕ ×ÉËÏÎÁÊÔÅ STOP SLAVE", -"ïÐÅÒÁÃ¦Ñ ×ÉÍÁÇÁ¤ ÚÁÐÕÝÅÎÏÇÏ Ð¦ÄÌÅÇÌÏÇÏ, ÚËÏÎÆ¦ÇÕÒÕÊÔŠЦÄÌÅÇÌÏÇÏ ÔÁ ×ÉËÏÎÁÊÔÅ START SLAVE", -"óÅÒ×ÅÒ ÎÅ ÚËÏÎÆ¦ÇÕÒÏ×ÁÎÏ ÑË Ð¦ÄÌÅÇÌÉÊ, ×ÉÐÒÁ×ÔÅ ÃÅ Õ ÆÁÊ̦ ËÏÎÆ¦ÇÕÒÁæ§ ÁÂÏ Ú CHANGE MASTER TO", -"Could not initialize master info structure, more error messages can be found in the MySQL error log", -"îÅ ÍÏÖÕ ÓÔ×ÏÒÉÔÉ Ð¦ÄÌÅÇÌÕ Ç¦ÌËÕ, ÐÅÒÅצÒÔÅ ÓÉÓÔÅÍΦ ÒÅÓÕÒÓÉ", -"ëÏÒÉÓÔÕ×ÁÞ %-.64s ×ÖÅ ÍÁ¤ Â¦ÌØÛÅ Î¦Ö 'max_user_connections' ÁËÔÉ×ÎÉÈ Ú'¤ÄÎÁÎØ", -"íÏÖÎÁ ×ÉËÏÒÉÓÔÏ×Õ×ÁÔÉ ÌÉÛÅ ×ÉÒÁÚÉ Ú¦ ÓÔÁÌÉÍÉ Õ SET", -"úÁÔÒÉÍËÕ ÏÞ¦ËÕ×ÁÎÎÑ ÂÌÏËÕ×ÁÎÎÑ ×ÉÞÅÒÐÁÎÏ", -"úÁÇÁÌØÎÁ Ë¦ÌØË¦ÓÔØ ÂÌÏËÕ×ÁÎØ ÐÅÒÅ×ÉÝÉÌÁ ÒÏÚÍ¦Ò ÂÌÏËÕ×ÁÎØ ÄÌÑ ÔÁÂÌÉæ", -"ïÎÏ×ÉÔÉ ÂÌÏËÕ×ÁÎÎÑ ÎÅ ÍÏÖÌÉ×Ï ÎÁ ÐÒÏÔÑÚ¦ ÔÒÁÎÚÁËæ§ READ UNCOMMITTED", -"DROP DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ", -"CREATE DATABASE ÎÅ ÄÏÚ×ÏÌÅÎÏ ÄÏËÉ Ç¦ÌËÁ ÐÅÒÅÂÕ×Á¤ Ð¦Ä ÚÁÇÁÌØÎÉÍ ÂÌÏËÕ×ÁÎÎÑÍ ÞÉÔÁÎÎÑ", -"èÉÂÎÉÊ ÁÒÇÕÍÅÎÔ ÄÌÑ %s", -"ëÏÒÉÓÔÕ×ÁÞÕ '%-.32s'@'%-.64s' ÎÅ ÄÏÚ×ÏÌÅÎÏ ÓÔ×ÏÒÀ×ÁÔÉ ÎÏ×ÉÈ ËÏÒÉÓÔÕ×ÁÞ¦×", -"Incorrect table definition; all MERGE tables must be in the same database", -"Deadlock found when trying to get lock; try restarting transaction", -"÷ÉËÏÒÉÓÔÁÎÉÊ ÔÉÐ ÔÁÂÌÉæ ΊЦÄÔÒÉÍÕ¤ FULLTEXT ¦ÎÄÅËÓ¦×", -"Cannot add foreign key constraint", -"Cannot add a child row: a foreign key constraint fails", -"Cannot delete a parent row: a foreign key constraint fails", -"Error connecting to master: %-.128s", -"Error running query on master: %-.128s", -"Error when executing command %s: %-.128s", -"Incorrect usage of %s and %s", -"The used SELECT statements have a different number of columns", -"Can't execute the query because you have a conflicting read lock", -"Mixing of transactional and non-transactional tables is disabled", -"Option '%s' used twice in statement", -"User '%-.64s' has exceeded the '%s' resource (current value: %ld)", -"Access denied; you need the %-.128s privilege for this operation", -"Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL", -"Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL", -"Variable '%-.64s' doesn't have a default value", -"Variable '%-.64s' can't be set to the value of '%-.64s'", -"Incorrect argument type to variable '%-.64s'", -"Variable '%-.64s' can only be set, not read", -"Incorrect usage/placement of '%s'", -"This version of MySQL doesn't yet support '%s'", -"Got fatal error %d: '%-.128s' from master when reading data from binary log", -"Slave SQL thread ignored the query because of replicate-*-table rules", -"Variable '%-.64s' is a %s variable", -"Incorrect foreign key definition for '%-.64s': %s", -"Key reference and table reference don't match", -"ïÐÅÒÁÎÄ ÍÁ¤ ÓËÌÁÄÁÔÉÓÑ Ú %d ÓÔÏ×Âæ×", -"ð¦ÄÚÁÐÉÔ ÐÏ×ÅÒÔÁ¤ Â¦ÌØÛ ÎiÖ 1 ÚÁÐÉÓ", -"Unknown prepared statement handler (%.*s) given to %s", -"Help database is corrupt or does not exist", -"ãÉË̦ÞÎÅ ÐÏÓÉÌÁÎÎÑ ÎÁ ЦÄÚÁÐÉÔ", -"ðÅÒÅÔ×ÏÒÅÎÎÑ ÓÔÏ×ÂÃÁ '%s' Ú %s Õ %s", -"ðÏÓÉÌÁÎÎÑ '%-.64s' ÎÅ ÐiÄÔÒÉÍÕÅÔÓÑ (%s)", -"Every derived table must have its own alias", -"Select %u was ÓËÁÓÏ×ÁÎÏ ÐÒÉ ÏÐÔÉÍiÚÁÃii", -"Table '%-.64s' from one of the SELECTs cannot be used in %-.32s", -"Client does not support authentication protocol requested by server; consider upgrading MySQL client", -"All parts of a SPATIAL index must be NOT NULL", -"COLLATION '%s' is not valid for CHARACTER SET '%s'", -"Slave is already running", -"Slave has already been stopped", -"Uncompressed data size too large; the maximum size is %d 
(probably, length of uncompressed data was corrupted)", -"ZLIB: Not enough memory", -"ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)", -"ZLIB: Input data corrupted", -"%d line(s) were cut by GROUP_CONCAT()", -"Row %ld doesn't contain data for all columns", -"Row %ld was truncated; it contained more data than there were input columns", -"Data truncated; NULL supplied to NOT NULL column '%s' at row %ld", -"Data truncated; out of range for column '%s' at row %ld", -"Data truncated for column '%s' at row %ld", -"Using storage engine %s for table '%s'", -"Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'", -"Can't drop one or more of the requested users", -"Can't revoke all privileges, grant for one or more of the requested users", -"Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'", -"Illegal mix of collations for operation '%s'", -"Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)", -"Unknown collation: '%-.64s'", -"SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started", -"Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format", -"óÔÏ×ÂÅÃØ ÁÂÏ ÐÏÓÉÌÁÎÎÑ '%-.64s%s%-.64s%s%-.64s' ¦Ú SELECTÕ #%d ÂÕÌÏ ÚÎÁÊÄÅÎÅ Õ SELECT¦ #%d", -"Incorrect parameter or combination of parameters for START SLAVE UNTIL", -"It is recommended to run with --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you are not safe in case of unexpected slave's mysqld restart", -"SQL thread is not to be started so UNTIL options are ignored", -"Incorrect index name '%-.100s'", -"Incorrect catalog name '%-.100s'", -"ëÅÛ ÚÁÐÉÔ¦× ÎÅÓÐÒÏÍÏÖÅÎ ×ÓÔÁÎÏ×ÉÔÉ ÒÏÚÍ¦Ò %lu, ÎÏ×ÉÊ ÒÏÚÍ¦Ò ËÅÛÁ ÚÁÐÉÔ¦× - %lu", -"Column '%-.64s' cannot be part of FULLTEXT index", -"Unknown key cache '%-.100s'", -"MySQL is started in --skip-name-resolve mode. 
You need to restart it without this switch for this grant to work", -"Unknown table engine '%s'", -"'%s' is deprecated, use '%s' instead", -"ôÁÂÌÉÃÑ %-.100s Õ %s ÎÅ ÍÏÖÅ ÏÎÏ×ÌÀ×ÁÔÉÓØ", -"The '%s' feature was disabled; you need MySQL built with '%s' to have it working", -"The MySQL server is running with the %s option so it cannot execute this statement", -"Column '%-.100s' has duplicated value '%-.64s' in %s" -"Truncated wrong %-.32s value: '%-.128s'" -"Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause" -"Invalid ON UPDATE clause for '%-.64s' column", -"This command is not supported in the prepared statement protocol yet", -"Got error %d '%-.100s' from %s", -"Got temporary error %d '%-.100s' from %s", -"Unknown or incorrect time zone: '%-.64s'", -"Invalid TIMESTAMP value in column '%s' at row %ld", -"Invalid %s character string: '%.64s'", -"Result of %s() was larger than max_allowed_packet (%ld) - truncated" -"Conflicting declarations: '%s%s' and '%s%s'" diff --git a/sql/slave.cc b/sql/slave.cc index ef9caa5f5b5..6b8559859fc 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -74,7 +74,6 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, const char* table_name, bool overwrite); static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi); - /* Find out which replications threads are running @@ -218,6 +217,13 @@ static byte* get_table_key(TABLE_RULE_ENT* e, uint* len, pos Position in relay log file need_data_lock Set to 1 if this functions should do mutex locks errmsg Store pointer to error message here + look_for_description_event + 1 if we should look for such an event. We only need + this when the SQL thread starts and opens an existing + relay log and has to execute it (possibly from an + offset >4); then we need to read the first event of + the relay log to be able to parse the events we have + to execute. DESCRIPTION - Close old open relay log files. @@ -235,15 +241,35 @@ static byte* get_table_key(TABLE_RULE_ENT* e, uint* len, int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, ulonglong pos, bool need_data_lock, - const char** errmsg) + const char** errmsg, + bool look_for_description_event) { DBUG_ENTER("init_relay_log_pos"); + DBUG_PRINT("info", ("pos=%lu", pos)); *errmsg=0; pthread_mutex_t *log_lock=rli->relay_log.get_log_lock(); if (need_data_lock) pthread_mutex_lock(&rli->data_lock); + + /* + Slave threads are not the only users of init_relay_log_pos(). CHANGE MASTER + is, too, and init_slave() too; these 2 functions allocate a description + event in init_relay_log_pos, which is not freed by the terminating SQL slave + thread as that thread is not started by these functions. So we have to free + the description_event here, in case, so that there is no memory leak in + running, say, CHANGE MASTER. + */ + delete rli->relay_log.description_event_for_exec; + /* + By default the relay log is in binlog format 3 (4.0). + Even if format is 4, this will work enough to read the first event + (Format_desc) (remember that format 4 is just lenghtened compared to format + 3; format 3 is a prefix of format 4). + */ + rli->relay_log.description_event_for_exec= new + Format_description_log_event(3); pthread_mutex_lock(log_lock); @@ -283,9 +309,8 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, In this case, we will use the same IO_CACHE pointer to read data as the IO thread is using to write data. 
*/ - rli->cur_log= rli->relay_log.get_log_file(); - if (my_b_tell(rli->cur_log) == 0 && - check_binlog_magic(rli->cur_log, errmsg)) + my_b_seek((rli->cur_log=rli->relay_log.get_log_file()), (off_t)0); + if (check_binlog_magic(rli->cur_log,errmsg)) goto err; rli->cur_log_old_open_count=rli->relay_log.get_open_count(); } @@ -299,8 +324,85 @@ int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log, goto err; rli->cur_log = &rli->cache_buf; } - if (pos >= BIN_LOG_HEADER_SIZE) + /* + In all cases, check_binlog_magic() has been called so we're at offset 4 for + sure. + */ + if (pos > BIN_LOG_HEADER_SIZE) /* If pos<=4, we stay at 4 */ + { + Log_event* ev; + while (look_for_description_event) + { + /* + Read the possible Format_description_log_event; if position + was 4, no need, it will be read naturally. + */ + DBUG_PRINT("info",("looking for a Format_description_log_event")); + + if (my_b_tell(rli->cur_log) >= pos) + break; + + /* + Because of we have rli->data_lock and log_lock, we can safely read an + event + */ + if (!(ev=Log_event::read_log_event(rli->cur_log,0, + rli->relay_log.description_event_for_exec))) + { + DBUG_PRINT("info",("could not read event, rli->cur_log->error=%d", + rli->cur_log->error)); + if (rli->cur_log->error) /* not EOF */ + { + *errmsg= "I/O error reading event at position 4"; + goto err; + } + break; + } + else if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT) + { + DBUG_PRINT("info",("found Format_description_log_event")); + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= (Format_description_log_event*) ev; + /* + As ev was returned by read_log_event, it has passed is_valid(), so + my_malloc() in ctor worked, no need to check again. + */ + /* + Ok, we found a Format_description event. But it is not sure that this + describes the whole relay log; indeed, one can have this sequence + (starting from position 4): + Format_desc (of slave) + Rotate (of master) + Format_desc (of master) + So the Format_desc which really describes the rest of the relay log + is the 3rd event (it can't be further than that, because we rotate + the relay log when we queue a Rotate event from the master). + But what describes the Rotate is the first Format_desc. + So what we do is: + go on searching for Format_description events, until you exceed the + position (argument 'pos') or until you find another event than Rotate + or Format_desc. + */ + } + else + { + DBUG_PRINT("info",("found event of another type=%d", + ev->get_type_code())); + look_for_description_event= (ev->get_type_code() == ROTATE_EVENT); + delete ev; + } + } my_b_seek(rli->cur_log,(off_t)pos); +#ifndef DBUG_OFF + { + char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); + } +#endif + + } err: /* @@ -315,13 +417,15 @@ err: if (need_data_lock) pthread_mutex_unlock(&rli->data_lock); + if (!rli->relay_log.description_event_for_exec->is_valid() && !*errmsg) + *errmsg= "Invalid Format_description log event; could be out of memory"; DBUG_RETURN ((*errmsg) ? 
1 : 0); } /* - Init functio to set up array for errors that should be skipped for slave + Init function to set up array for errors that should be skipped for slave SYNOPSIS init_slave_skip_errors() @@ -360,16 +464,15 @@ void init_slave_skip_errors(const char* arg) } -void st_relay_log_info::inc_group_relay_log_pos(ulonglong val, - ulonglong log_pos, - bool skip_lock) +void st_relay_log_info::inc_group_relay_log_pos(ulonglong log_pos, + bool skip_lock) { if (!skip_lock) pthread_mutex_lock(&data_lock); - inc_event_relay_log_pos(val); + inc_event_relay_log_pos(); group_relay_log_pos= event_relay_log_pos; strmake(group_relay_log_name,event_relay_log_name, - sizeof(group_relay_log_name)-1); + sizeof(group_relay_log_name)-1); notify_group_relay_log_name_update(); @@ -383,24 +486,31 @@ void st_relay_log_info::inc_group_relay_log_pos(ulonglong val, not advance as it should on the non-transactional slave (it advances by big leaps, whereas it should advance by small leaps). */ + /* + In 4.x we used the event's len to compute the positions here. This is + wrong if the event was 3.23/4.0 and has been converted to 5.0, because + then the event's len is not what is was in the master's binlog, so this + will make a wrong group_master_log_pos (yes it's a bug in 3.23->4.0 + replication: Exec_master_log_pos is wrong). Only way to solve this is to + have the original offset of the end of the event the relay log. This is + what we do in 5.0: log_pos has become "end_log_pos" (because the real use + of log_pos in 4.0 was to compute the end_log_pos; so better to store + end_log_pos instead of begin_log_pos. + If we had not done this fix here, the problem would also have appeared + when the slave and master are 5.0 but with different event length (for + example the slave is more recent than the master and features the event + UID). It would give false MASTER_POS_WAIT, false Exec_master_log_pos in + SHOW SLAVE STATUS, and so the user would do some CHANGE MASTER using this + value which would lead to badly broken replication. + Even the relay_log_pos will be corrupted in this case, because the len is + the relay log is not "val". + With the end_log_pos solution, we avoid computations involving lengthes. + */ + DBUG_PRINT("info", ("log_pos: %lu group_master_log_pos: %lu", + (long) log_pos, (long) group_master_log_pos)); if (log_pos) // 3.23 binlogs don't have log_posx { -#if MYSQL_VERSION_ID < 50000 - /* - If the event was converted from a 3.23 format, get_event_len() has - grown by 6 bytes (at least for most events, except LOAD DATA INFILE - which is already a big problem for 3.23->4.0 replication); 6 bytes is - the difference between the header's size in 4.0 (LOG_EVENT_HEADER_LEN) - and the header's size in 3.23 (OLD_HEADER_LEN). Note that using - mi->old_format will not help if the I/O thread has not started yet. - Yes this is a hack but it's just to make 3.23->4.x replication work; - 3.23->5.0 replication is working much better. - */ - group_master_log_pos= log_pos + val - - (mi->old_format ? 
(LOG_EVENT_HEADER_LEN - OLD_HEADER_LEN) : 0); -#else - group_master_log_pos= log_pos+ val; -#endif /* MYSQL_VERSION_ID < 5000 */ + group_master_log_pos= log_pos; } pthread_cond_broadcast(&data_cond); if (!skip_lock) @@ -481,13 +591,16 @@ int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, sizeof(rli->group_relay_log_name)-1); strmake(rli->event_relay_log_name, rli->relay_log.get_log_fname(), sizeof(rli->event_relay_log_name)-1); - // Just first log with magic number and nothing else - rli->log_space_total= BIN_LOG_HEADER_SIZE; rli->group_relay_log_pos= rli->event_relay_log_pos= BIN_LOG_HEADER_SIZE; - rli->relay_log.reset_bytes_written(); + if (count_relay_log_space(rli)) + { + *errmsg= "Error counting relay log space"; + goto err; + } if (!just_reset) - error= init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, - 0 /* do not need data lock */, errmsg); + error= init_relay_log_pos(rli, rli->group_relay_log_name, + rli->group_relay_log_pos, + 0 /* do not need data lock */, errmsg, 0); err: #ifndef DBUG_OFF @@ -635,7 +748,7 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t *start_lock, thd->exit_cond(old_msg); pthread_mutex_lock(cond_lock); // re-acquire it as exit_cond() released if (thd->killed) - DBUG_RETURN(ER_SERVER_SHUTDOWN); + DBUG_RETURN(thd->killed_errno()); } } if (start_lock) @@ -754,6 +867,11 @@ static TABLE_RULE_ENT* find_wild(DYNAMIC_ARRAY *a, const char* key, int len) second call will make the decision (because all_tables_not_ok() = !tables_ok(1st_list) && !tables_ok(2nd_list)). + Thought which arose from a question of a big customer "I want to include + all tables like "abc.%" except the "%.EFG"". This can't be done now. If we + supported Perl regexps we could do it with this pattern: /^abc\.(?!EFG)/ + (I could not find an equivalent in the regex library MySQL uses). + RETURN VALUES 0 should not be logged/replicated 1 should be logged/replicated @@ -764,7 +882,7 @@ bool tables_ok(THD* thd, TABLE_LIST* tables) bool some_tables_updating= 0; DBUG_ENTER("tables_ok"); - for (; tables; tables = tables->next) + for (; tables; tables= tables->next_global) { char hash_key[2*NAME_LEN+2]; char *end; @@ -1163,29 +1281,86 @@ static int init_intvar_from_file(int* var, IO_CACHE* f, int default_val) return 1; } +/* + Note that we rely on the master's version (3.23, 4.0.14 etc) instead of + relying on the binlog's version. This is not perfect: imagine an upgrade + of the master without waiting that all slaves are in sync with the master; + then a slave could be fooled about the binlog's format. This is what happens + when people upgrade a 3.23 master to 4.0 without doing RESET MASTER: 4.0 + slaves are fooled. So we do this only to distinguish between 3.23 and more + recent masters (it's too late to change things for 3.23). + + RETURNS + 0 ok + 1 error +*/ static int get_master_version_and_clock(MYSQL* mysql, MASTER_INFO* mi) { const char* errmsg= 0; - + /* - Note the following switch will bug when we have MySQL branch 30 ;) + Free old description_event_for_queue (that is needed if we are in + a reconnection). */ - switch (*mysql->server_version) { - case '3': - mi->old_format = - (strncmp(mysql->server_version, "3.23.57", 7) < 0) /* < .57 */ ? - BINLOG_FORMAT_323_LESS_57 : - BINLOG_FORMAT_323_GEQ_57 ; - break; - case '4': - mi->old_format = BINLOG_FORMAT_CURRENT; - break; - default: - /* 5.0 is not supported */ - errmsg = "Master reported an unrecognized MySQL version. 
Note that 4.1 \ -slaves can't replicate a 5.0 or newer master."; - break; + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= 0; + + if (!my_isdigit(&my_charset_bin,*mysql->server_version)) + errmsg = "Master reported unrecognized MySQL version"; + else + { + /* + Note the following switch will bug when we have MySQL branch 30 ;) + */ + switch (*mysql->server_version) + { + case '0': + case '1': + case '2': + errmsg = "Master reported unrecognized MySQL version"; + break; + case '3': + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(1, mysql->server_version); + break; + case '4': + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(3, mysql->server_version); + break; + default: + /* + Master is MySQL >=5.0. Give a default Format_desc event, so that we can + take the early steps (like tests for "is this a 3.23 master") which we + have to take before we receive the real master's Format_desc which will + override this one. Note that the Format_desc we create below is garbage + (it has the format of the *slave*); it's only good to help know if the + master is 3.23, 4.0, etc. + */ + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(4, mysql->server_version); + break; + } + } + + /* + This does not mean that a 5.0 slave will be able to read a 6.0 master; but + as we don't know yet, we don't want to forbid this for now. If a 5.0 slave + can't read a 6.0 master, this will show up when the slave can't read some + events sent by the master, and there will be error messages. + */ + + if (errmsg) + { + sql_print_error(errmsg); + return 1; + } + + /* as we are here, we tried to allocate the event */ + if (!mi->rli.relay_log.description_event_for_queue) + { + sql_print_error("Slave I/O thread failed to create a default Format_description_log_event"); + return 1; } /* @@ -1318,7 +1493,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, packet_len= my_net_read(net); // read create table statement if (packet_len == packet_error) { - send_error(thd, ER_MASTER_NET_READ); + my_message(ER_MASTER_NET_READ, ER(ER_MASTER_NET_READ), MYF(0)); DBUG_RETURN(1); } if (net->read_pos[0] == 255) // error from master @@ -1327,7 +1502,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, err_msg= (char*) net->read_pos + ((mysql->server_capabilities & CLIENT_PROTOCOL_41) ? 
3+SQLSTATE_LENGTH+1 : 3); - net_printf(thd, ER_MASTER, err_msg); + my_error(ER_MASTER, MYF(0), err_msg); DBUG_RETURN(1); } thd->command = COM_TABLE_DUMP; @@ -1336,7 +1511,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, if (!(query = thd->strmake((char*) net->read_pos, packet_len))) { sql_print_error("create_table_from_dump: out of memory"); - net_printf(thd, ER_GET_ERRNO, "Out of memory"); + my_message(ER_GET_ERRNO, "Out of memory", MYF(0)); DBUG_RETURN(1); } thd->query= query; @@ -1350,7 +1525,6 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, /* Drop the table if 'overwrite' is true */ if (overwrite && mysql_rm_table(thd,&tables,1,0)) /* drop if exists */ { - send_error(thd); sql_print_error("create_table_from_dump: failed to drop the table"); goto err; } @@ -1377,7 +1551,6 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, tables.lock_type = TL_WRITE; if (!open_ltable(thd, &tables, TL_WRITE)) { - send_error(thd,0,0); // Send error from open_ltable sql_print_error("create_table_from_dump: could not open created table"); goto err; } @@ -1387,7 +1560,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, /* Copy the data file */ if (file->net_read_dump(net)) { - net_printf(thd, ER_MASTER_NET_READ); + my_message(ER_MASTER_NET_READ, ER(ER_MASTER_NET_READ), MYF(0)); sql_print_error("create_table_from_dump: failed in\ handler::net_read_dump()"); goto err; @@ -1407,7 +1580,7 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, error=file->repair(thd,&check_opt) != 0; thd->net.vio = save_vio; if (error) - net_printf(thd, ER_INDEX_REBUILD,tables.table->real_name); + my_error(ER_INDEX_REBUILD, MYF(0), tables.table->real_name); err: close_thread_tables(thd); @@ -1430,12 +1603,11 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, { if (!(mysql = mysql_init(NULL))) { - send_error(thd); // EOM DBUG_RETURN(1); } if (connect_to_master(thd, mysql, mi)) { - net_printf(thd, ER_CONNECT_TO_MASTER, mysql_error(mysql)); + my_error(ER_CONNECT_TO_MASTER, MYF(0), mysql_error(mysql)); mysql_close(mysql); DBUG_RETURN(1); } @@ -1459,7 +1631,7 @@ int fetch_master_table(THD *thd, const char *db_name, const char *table_name, if (!called_connected) mysql_close(mysql); if (errmsg && thd->vio_ok()) - send_error(thd, error, errmsg); + my_message(error, errmsg, MYF(0)); DBUG_RETURN(test(error)); // Return 1 on error } @@ -1534,7 +1706,7 @@ int init_relay_log_info(RELAY_LOG_INFO* rli, const char* info_fname) if (open_log(&rli->relay_log, glob_hostname, opt_relay_logname, "-relay-bin", opt_relaylog_index_name, LOG_BIN, 1 /* read_append cache */, - 1 /* no auto events */, + 0 /* starting from 5.0 we want relay logs to have auto events */, max_relay_log_size ? 
max_relay_log_size : max_binlog_size)) { pthread_mutex_unlock(&rli->data_lock); @@ -1569,7 +1741,7 @@ file '%s', errno %d)", fname, my_errno); /* Init relay log with first entry in the relay index file */ if (init_relay_log_pos(rli,NullS,BIN_LOG_HEADER_SIZE,0 /* no data lock */, - &msg)) + &msg, 0)) { sql_print_error("Failed to open the relay log 'FIRST' (relay_log_pos 4)"); goto err; @@ -1634,7 +1806,7 @@ Failed to open the existing relay log info file '%s' (errno %d)", rli->group_relay_log_name, rli->group_relay_log_pos, 0 /* no data lock*/, - &msg)) + &msg, 0)) { char llbuf[22]; sql_print_error("Failed to open the relay log '%s' (relay_log_pos %s)", @@ -1643,8 +1815,18 @@ Failed to open the existing relay log info file '%s' (errno %d)", goto err; } } - DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); - DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); + +#ifndef DBUG_OFF + { + char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); + DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); + DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); + } +#endif + /* Now change the cache from READ to WRITE - must do this before flush_relay_log_info @@ -1964,7 +2146,7 @@ file '%s')", fname); goto errwithmsg; #ifndef HAVE_OPENSSL if (ssl) - sql_print_error("SSL information in the master info file " + sql_print_warning("SSL information in the master info file " "('%s') are ignored because this MySQL slave was compiled " "without SSL support.", fname); #endif /* HAVE_OPENSSL */ @@ -2089,7 +2271,7 @@ void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a) } } -int show_master_info(THD* thd, MASTER_INFO* mi) +bool show_master_info(THD* thd, MASTER_INFO* mi) { // TODO: fix this for multi-master List<Item> field_list; @@ -2151,8 +2333,9 @@ int show_master_info(THD* thd, MASTER_INFO* mi) field_list.push_back(new Item_return_int("Seconds_Behind_Master", 10, MYSQL_TYPE_LONGLONG)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); if (mi->host[0]) { @@ -2265,10 +2448,10 @@ int show_master_info(THD* thd, MASTER_INFO* mi) pthread_mutex_unlock(&mi->data_lock); if (my_net_write(&thd->net, (char*)thd->packet.ptr(), packet->length())) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -2395,17 +2578,16 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, ulong init_abort_pos_wait; int error=0; struct timespec abstime; // for timeout checking - set_timespec(abstime,timeout); - + const char *msg; DBUG_ENTER("wait_for_pos"); - DBUG_PRINT("enter",("group_master_log_name: '%s' pos: %lu timeout: %ld", - group_master_log_name, (ulong) group_master_log_pos, - (long) timeout)); + DBUG_PRINT("enter",("log_name: '%s' log_pos: %lu timeout: %lu", + log_name->c_ptr(), (ulong) log_pos, (ulong) timeout)); + set_timespec(abstime,timeout); pthread_mutex_lock(&data_lock); - const char *msg= thd->enter_cond(&data_cond, &data_lock, - "Waiting for the slave SQL thread to " - "advance position"); + msg= thd->enter_cond(&data_cond, &data_lock, + "Waiting for the slave SQL thread to " + "advance position"); /* This function will abort when it notices that some CHANGE MASTER or RESET MASTER has changed the master info. 
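The reworked wait_for_pos() above is, underneath the SHOW PROCESSLIST bookkeeping, a standard timed condition wait: sleep on data_cond under data_lock until the applied position reaches the target, the deadline set by set_timespec() expires, or a CHANGE MASTER / RESET MASTER invalidates the wait. A minimal standalone sketch of that pattern follows; it is not server source, and the names Applier, applied_pos and advance() are invented for illustration.

  #include <pthread.h>
  #include <time.h>
  #include <errno.h>

  struct Applier {
    pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
    unsigned long long applied_pos = 0;          // advanced by the applier thread

    // Wait until applied_pos >= target; return 0 on success, -1 on timeout.
    int wait_for_pos(unsigned long long target, long timeout_sec) {
      struct timespec abstime;                   // absolute deadline, as with set_timespec()
      clock_gettime(CLOCK_REALTIME, &abstime);
      abstime.tv_sec += timeout_sec;

      pthread_mutex_lock(&lock);
      int rc = 0;
      while (applied_pos < target) {
        // Releases the mutex while sleeping, re-acquires it before returning.
        int err = pthread_cond_timedwait(&cond, &lock, &abstime);
        if (err == ETIMEDOUT) { rc = -1; break; }
      }
      pthread_mutex_unlock(&lock);
      return rc;
    }

    // Called by the thread that applies events, after advancing the position.
    void advance(unsigned long long new_pos) {
      pthread_mutex_lock(&lock);
      applied_pos = new_pos;
      pthread_cond_broadcast(&cond);             // same wake-up as data_cond in the patch
      pthread_mutex_unlock(&lock);
    }
  };

  int main() {
    Applier a;
    a.advance(42);                    // position already applied
    return a.wait_for_pos(42, 1);     // satisfied immediately, returns 0
  }

The real function additionally wraps the wait in enter_cond()/exit_cond() so the thread state is visible in SHOW PROCESSLIST, and it re-checks abort_pos_wait against init_abort_pos_wait on every wake-up.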
@@ -2461,6 +2643,12 @@ int st_relay_log_info::wait_for_pos(THD* thd, String* log_name, bool pos_reached; int cmp_result= 0; + DBUG_PRINT("info", + ("init_abort_pos_wait: %ld abort_pos_wait: %ld", + init_abort_pos_wait, abort_pos_wait)); + DBUG_PRINT("info",("group_master_log_name: '%s' pos: %lu", + group_master_log_name, (ulong) group_master_log_pos)); + /* group_master_log_name can be "", if we are just after a fresh replication start or after a CHANGE MASTER TO MASTER_HOST/PORT @@ -2543,7 +2731,7 @@ err: thd->exit_cond(msg); DBUG_PRINT("exit",("killed: %d abort: %d slave_running: %d \ improper_arguments: %d timed_out: %d", - (int) thd->killed, + thd->killed_errno(), (int) (init_abort_pos_wait != abort_pos_wait), (int) slave_running, (int) (error == -2), @@ -2556,6 +2744,11 @@ improper_arguments: %d timed_out: %d", DBUG_RETURN( error ? error : event_count ); } +void set_slave_thread_options(THD* thd) +{ + thd->options = ((opt_log_slave_updates) ? OPTION_BIN_LOG:0) | + OPTION_AUTO_IS_NULL; +} /* init_slave_thread() @@ -2573,6 +2766,7 @@ static int init_slave_thread(THD* thd, SLAVE_THD_TYPE thd_type) thd->master_access= ~0; thd->priv_user = 0; thd->slave_thread = 1; + set_slave_thread_options(thd); /* It's nonsense to constrain the slave threads with max_join_size; if a query succeeded on master, we HAVE to execute it. So set @@ -2761,8 +2955,8 @@ server_errno=%d)", /* Check if eof packet */ if (len < 8 && mysql->net.read_pos[0] == 254) { - sql_print_error("Slave: received end packet from server, apparent\ - master shutdown: %s", + sql_print_information("Slave: received end packet from server, apparent " + "master shutdown: %s", mysql_error(mysql)); return packet_error; } @@ -2834,13 +3028,14 @@ bool st_relay_log_info::is_until_satisfied() if (until_log_names_cmp_result == UNTIL_LOG_NAMES_CMP_UNKNOWN) { - /* - We have no cached comaprison results so we should compare log names - and cache result + /* + We have no cached comparison results so we should compare log names + and cache result. + If we are after RESET SLAVE, and the SQL slave thread has not processed + any event yet, it could be that group_master_log_name is "". In that case, + just wait for more events (as there is no sensible comparison to do). */ - DBUG_ASSERT(*log_name || log_pos == 0); - if (*log_name) { const char *basename= log_name + dirname_length(log_name); @@ -2915,28 +3110,63 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) int exec_res; /* - Skip queries originating from this server or number of - queries specified by the user in slave_skip_counter - We can't however skip event's that has something to do with the + Queries originating from this server must be skipped. + Low-level events (Format_desc, Rotate, Stop) from this server + must also be skipped. But for those we don't want to modify + group_master_log_pos, because these events did not exist on the master. + Format_desc is not completely skipped. + Skip queries specified by the user in slave_skip_counter. + We can't however skip events that has something to do with the log files themselves. + Filtering on own server id is extremely important, to ignore execution of + events created by the creation/rotation of the relay log (remember that + now the relay log starts with its Format_desc, has a Rotate etc). 
*/ - - if ((ev->server_id == (uint32) ::server_id && !replicate_same_server_id) || - (rli->slave_skip_counter && type_code != ROTATE_EVENT)) + + DBUG_PRINT("info",("type_code=%d, server_id=%d",type_code,ev->server_id)); + + if ((ev->server_id == (uint32) ::server_id && + !replicate_same_server_id && + type_code != FORMAT_DESCRIPTION_EVENT) || + (rli->slave_skip_counter && + type_code != ROTATE_EVENT && type_code != STOP_EVENT && + type_code != START_EVENT_V3 && type_code!= FORMAT_DESCRIPTION_EVENT)) { - /* TODO: I/O thread should not even log events with the same server id */ - rli->inc_group_relay_log_pos(ev->get_event_len(), - type_code != STOP_EVENT ? ev->log_pos : LL(0), - 1/* skip lock*/); - flush_relay_log_info(rli); - + DBUG_PRINT("info", ("event skipped")); + if (thd->options & OPTION_BEGIN) + rli->inc_event_relay_log_pos(); + else + { + rli->inc_group_relay_log_pos((type_code == ROTATE_EVENT || + type_code == STOP_EVENT || + type_code == FORMAT_DESCRIPTION_EVENT) ? + LL(0) : ev->log_pos, + 1/* skip lock*/); + flush_relay_log_info(rli); + } + /* - Protect against common user error of setting the counter to 1 - instead of 2 while recovering from an failed auto-increment insert + Protect against common user error of setting the counter to 1 + instead of 2 while recovering from an insert which used auto_increment, + rand or user var. */ if (rli->slave_skip_counter && - !((type_code == INTVAR_EVENT || type_code == STOP_EVENT) && - rli->slave_skip_counter == 1)) + !((type_code == INTVAR_EVENT || + type_code == RAND_EVENT || + type_code == USER_VAR_EVENT) && + rli->slave_skip_counter == 1) && + /* + The events from ourselves which have something to do with the relay + log itself must be skipped, true, but they mustn't decrement + rli->slave_skip_counter, because the user is supposed to not see + these events (they are not in the master's binlog) and if we + decremented, START SLAVE would for example decrement when it sees + the Rotate, so the event which the user probably wanted to skip + would not be skipped. + */ + !(ev->server_id == (uint32) ::server_id && + (type_code == ROTATE_EVENT || type_code == STOP_EVENT || + type_code == START_EVENT_V3 || type_code == FORMAT_DESCRIPTION_EVENT))) --rli->slave_skip_counter; pthread_mutex_unlock(&rli->data_lock); delete ev; @@ -2952,7 +3182,16 @@ static int exec_relay_log_event(THD* thd, RELAY_LOG_INFO* rli) ev->thd = thd; exec_res = ev->exec_event(rli); DBUG_ASSERT(rli->sql_thd==thd); - delete ev; + /* + Format_description_log_event should not be deleted because it will be + used to read info about the relay log's format; it will be deleted when + the SQL thread does not need it, i.e. when this thread terminates. + */ + if (ev->get_type_code() != FORMAT_DESCRIPTION_EVENT) + { + DBUG_PRINT("info", ("Deleting the event after it has been executed")); + delete ev; + } return exec_res; } else @@ -3043,7 +3282,7 @@ slave_begin: llstr(mi->master_log_pos,llbuff)); else { - sql_print_error("Slave I/O thread killed while connecting to master"); + sql_print_information("Slave I/O thread killed while connecting to master"); goto err; } @@ -3055,7 +3294,8 @@ connected: thd->proc_info = "Checking master version"; if (get_master_version_and_clock(mysql, mi)) goto err; - if (!mi->old_format) + + if (mi->rli.relay_log.description_event_for_queue->binlog_version > 1) { /* Register ourselves with the master. 
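The new filtering at the top of exec_relay_log_event() reduces to two independent tests: drop the slave's own events (except Format_description, which is still needed to parse the rest of the relay log), and honour slave_skip_counter only for events that are not relay-log housekeeping. A compilable restatement of that decision, with simplified stand-ins for the real event-type enum and server globals, is sketched below; it is illustrative only.

  #include <cstdio>

  enum EvType { QUERY, ROTATE, STOP, START_V3, FORMAT_DESC, INTVAR, RAND, USER_VAR };

  // Events the relay log machinery itself produces or needs (Rotate, Stop,
  // Start/Format_description); user skip requests must not consume these.
  static bool is_relay_log_housekeeping(EvType t) {
    return t == ROTATE || t == STOP || t == START_V3 || t == FORMAT_DESC;
  }

  // True if the event should be skipped instead of executed.
  static bool should_skip(EvType t, unsigned ev_server_id, unsigned own_server_id,
                          bool replicate_same_server_id, unsigned long skip_counter) {
    // 1) Filter out our own events, keeping Format_description.
    if (ev_server_id == own_server_id && !replicate_same_server_id &&
        t != FORMAT_DESC)
      return true;
    // 2) slave_skip_counter applies only to "real" events.
    return skip_counter != 0 && !is_relay_log_housekeeping(t);
  }

  int main() {
    bool skip = should_skip(ROTATE, /*ev_server_id=*/1, /*own_server_id=*/1,
                            /*replicate_same_server_id=*/false, /*skip_counter=*/0);
    printf("skip own Rotate: %s\n", skip ? "yes" : "no");   // prints "yes"
    return 0;
  }

The decrement guard that follows in the patch is the subtler part: the counter is left untouched when an INTVAR, RAND or USER_VAR event is the last one to skip, and also for the slave's own housekeeping events, so that START SLAVE does not silently consume the skip the user asked for.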
@@ -3077,7 +3317,7 @@ connected: sql_print_error("Failed on request_dump()"); if (io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed while requesting master \ + sql_print_information("Slave I/O thread killed while requesting master \ dump"); goto err; } @@ -3102,7 +3342,7 @@ dump"); } if (io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed while retrying master \ + sql_print_information("Slave I/O thread killed while retrying master \ dump"); goto err; } @@ -3115,7 +3355,7 @@ reconnecting to try again, log '%s' at postion %s", IO_RPL_LOG_NAME, if (safe_reconnect(thd, mysql, mi, suppress_warnings) || io_slave_killed(thd,mi)) { - sql_print_error("Slave I/O thread killed during or \ + sql_print_information("Slave I/O thread killed during or \ after reconnect"); goto err; } @@ -3137,7 +3377,7 @@ after reconnect"); if (io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed while reading event"); + sql_print_information("Slave I/O thread killed while reading event"); goto err; } @@ -3175,20 +3415,20 @@ max_allowed_packet", if (io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed while waiting to \ + sql_print_information("Slave I/O thread killed while waiting to \ reconnect after a failed read"); goto err; } thd->proc_info = "Reconnecting after a failed master event read"; if (!suppress_warnings) - sql_print_error("Slave I/O thread: Failed reading log event, \ + sql_print_information("Slave I/O thread: Failed reading log event, \ reconnecting to retry, log '%s' position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos, llbuff)); if (safe_reconnect(thd, mysql, mi, suppress_warnings) || io_slave_killed(thd,mi)) { if (global_system_variables.log_warnings) - sql_print_error("Slave I/O thread killed during or after a \ + sql_print_information("Slave I/O thread killed during or after a \ reconnect done to recover from failed read"); goto err; } @@ -3250,7 +3490,7 @@ log space"); // error = 0; err: // print the current replication position - sql_print_error("Slave I/O thread exiting, read up to log '%s', position %s", + sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query = thd->db = 0; // extra safety @@ -3265,6 +3505,9 @@ err: pthread_mutex_lock(&mi->run_lock); mi->slave_running = 0; mi->io_thd = 0; + /* Forget the relay log's format */ + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= 0; // TODO: make rpl_status part of MASTER_INFO change_rpl_status(RPL_ACTIVE_SLAVE,RPL_IDLE_SLAVE); mi->abort_slave = 0; // TODO: check if this is needed @@ -3368,15 +3611,38 @@ slave_begin: if (init_relay_log_pos(rli, rli->group_relay_log_name, rli->group_relay_log_pos, - 1 /*need data lock*/, &errmsg)) + 1 /*need data lock*/, &errmsg, + 1 /*look for a description_event*/)) { sql_print_error("Error initializing relay log position: %s", errmsg); goto err; } THD_CHECK_SENTRY(thd); - DBUG_ASSERT(rli->event_relay_log_pos >= BIN_LOG_HEADER_SIZE); - DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); +#ifndef DBUG_OFF + { + char llbuf1[22], llbuf2[22]; + DBUG_PRINT("info", ("my_b_tell(rli->cur_log)=%s rli->event_relay_log_pos=%s", + llstr(my_b_tell(rli->cur_log),llbuf1), + llstr(rli->event_relay_log_pos,llbuf2))); + DBUG_ASSERT(rli->event_relay_log_pos >= 
BIN_LOG_HEADER_SIZE); + /* + Wonder if this is correct. I (Guilhem) wonder if my_b_tell() returns the + correct position when it's called just after my_b_seek() (the questionable + stuff is those "seek is done on next read" comments in the my_b_seek() + source code). + The crude reality is that this assertion randomly fails whereas + replication seems to work fine. And there is no easy explanation why it + fails (as we my_b_seek(rli->event_relay_log_pos) at the very end of + init_relay_log_pos() called above). Maybe the assertion would be + meaningful if we held rli->data_lock between the my_b_seek() and the + DBUG_ASSERT(). + */ +#ifdef SHOULD_BE_CHECKED + DBUG_ASSERT(my_b_tell(rli->cur_log) == rli->event_relay_log_pos); +#endif + } +#endif DBUG_ASSERT(rli->sql_thd == thd); DBUG_PRINT("master_info",("log_file_name: %s position: %s", @@ -3420,12 +3686,18 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ } /* Thread stopped. Print the current replication position to the log */ - sql_print_information("Slave SQL thread exiting, replication stopped in log \ - '%s' at position %s", RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); + sql_print_information("Slave SQL thread exiting, replication stopped in log " + "'%s' at position %s", + RPL_LOG_NAME, llstr(rli->group_master_log_pos,llbuff)); err: VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query = thd->db = 0; // extra safety + /* + Some extra safety, which should not been needed (normally, event deletion + should already have done these assignments (each event which sets these + variables is supposed to set them to 0 before terminating)). + */ + thd->query= thd->db= thd->catalog= 0; thd->query_length= thd->db_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->proc_info = "Waiting for slave mutex on exit"; @@ -3435,11 +3707,9 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ DBUG_ASSERT(rli->slave_running == 1); // tracking buffer overrun /* When master_pos_wait() wakes up it will check this and terminate */ rli->slave_running= 0; - /* - Going out of the transaction. Necessary to mark it, in case the user - restarts replication from a non-transactional statement (with CHANGE - MASTER). - */ + /* Forget the relay log's format */ + delete rli->relay_log.description_event_for_exec; + rli->relay_log.description_event_for_exec= 0; /* Wake up master_pos_wait() */ pthread_mutex_unlock(&rli->data_lock); DBUG_PRINT("info",("Signaling possibly waiting master_pos_wait() functions")); @@ -3537,7 +3807,7 @@ static int process_io_create_file(MASTER_INFO* mi, Create_file_log_event* cev) if (unlikely(cev_not_written)) break; Execute_load_log_event xev(thd,0,0); - xev.log_pos = mi->master_log_pos; + xev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&xev))) { sql_print_error("Slave I/O: error writing Exec_load event to \ @@ -3551,7 +3821,6 @@ relay log"); { cev->block = (char*)net->read_pos; cev->block_len = num_bytes; - cev->log_pos = mi->master_log_pos; if (unlikely(mi->rli.relay_log.append(cev))) { sql_print_error("Slave I/O: error writing Create_file event to \ @@ -3565,7 +3834,7 @@ relay log"); { aev.block = (char*)net->read_pos; aev.block_len = num_bytes; - aev.log_pos = mi->master_log_pos; + aev.log_pos = cev->log_pos; if (unlikely(mi->rli.relay_log.append(&aev))) { sql_print_error("Slave I/O: error writing Append_block event to \ @@ -3593,6 +3862,7 @@ err: DESCRIPTION Updates the master info with the place in the next binary log where we should start reading. 
+ Rotate the relay log to avoid mixed-format relay logs. NOTES We assume we already locked mi->data_lock @@ -3623,21 +3893,33 @@ static int process_io_rotate(MASTER_INFO *mi, Rotate_log_event *rev) if (disconnect_slave_event_count) events_till_disconnect++; #endif + /* + If description_event_for_queue is format <4, there is conversion in the + relay log to the slave's format (4). And Rotate can mean upgrade or + nothing. If upgrade, it's to 5.0 or newer, so we will get a Format_desc, so + no need to reset description_event_for_queue now. And if it's nothing (same + master version as before), no need (still using the slave's format). + */ + if (mi->rli.relay_log.description_event_for_queue->binlog_version >= 4) + { + delete mi->rli.relay_log.description_event_for_queue; + /* start from format 3 (MySQL 4.0) again */ + mi->rli.relay_log.description_event_for_queue= new + Format_description_log_event(3); + } + /* + Rotate the relay log makes binlog format detection easier (at next slave + start or mysqlbinlog) + */ + rotate_relay_log(mi); /* will take the right mutexes */ DBUG_RETURN(0); } - /* - queue_old_event() - - Writes a 3.23 event to the relay log. - - TODO: - Test this code before release - it has to be tested on a separate - setup with 3.23 master + Reads a 3.23 event and converts it to the slave's format. This code was copied + from MySQL 4.0. */ - -static int queue_old_event(MASTER_INFO *mi, const char *buf, +static int queue_binlog_ver_1_event(MASTER_INFO *mi, const char *buf, ulong event_len) { const char *errmsg = 0; @@ -3645,7 +3927,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, bool ignore_event= 0; char *tmp_buf = 0; RELAY_LOG_INFO *rli= &mi->rli; - DBUG_ENTER("queue_old_event"); + DBUG_ENTER("queue_binlog_ver_1_event"); /* If we get Load event, we need to pass a non-reusable buffer @@ -3677,7 +3959,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, connected to the master). */ Log_event *ev = Log_event::read_log_event(buf,event_len, &errmsg, - 1 /*old format*/ ); + mi->rli.relay_log.description_event_for_queue); if (unlikely(!ev)) { sql_print_error("Read invalid event from master: '%s',\ @@ -3687,7 +3969,7 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, DBUG_RETURN(1); } pthread_mutex_lock(&mi->data_lock); - ev->log_pos = mi->master_log_pos; + ev->log_pos= mi->master_log_pos; /* 3.23 events don't contain log_pos */ switch (ev->get_type_code()) { case STOP_EVENT: ignore_event= 1; @@ -3712,13 +3994,11 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, { /* We come here when and only when tmp_buf != 0 */ DBUG_ASSERT(tmp_buf); + inc_pos=event_len; + ev->log_pos+= inc_pos; int error = process_io_create_file(mi,(Create_file_log_event*)ev); delete ev; - /* - We had incremented event_len, but now when it is used to calculate the - position in the master's log, we must use the original value. - */ - mi->master_log_pos += --event_len; + mi->master_log_pos += inc_pos; DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); pthread_mutex_unlock(&mi->data_lock); my_free((char*)tmp_buf, MYF(0)); @@ -3730,6 +4010,12 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, } if (likely(!ignore_event)) { + if (ev->log_pos) + /* + Don't do it for fake Rotate events (see comment in + Log_event::Log_event(const char* buf...) in log_event.cc). 
+ */ + ev->log_pos+= event_len; /* make log_pos be the pos of the end of the event */ if (unlikely(rli->relay_log.append(ev))) { delete ev; @@ -3745,10 +4031,98 @@ static int queue_old_event(MASTER_INFO *mi, const char *buf, DBUG_RETURN(0); } +/* + Reads a 4.0 event and converts it to the slave's format. This code was copied + from queue_binlog_ver_1_event(), with some affordable simplifications. +*/ +static int queue_binlog_ver_3_event(MASTER_INFO *mi, const char *buf, + ulong event_len) +{ + const char *errmsg = 0; + ulong inc_pos; + char *tmp_buf = 0; + RELAY_LOG_INFO *rli= &mi->rli; + DBUG_ENTER("queue_binlog_ver_3_event"); + + /* read_log_event() will adjust log_pos to be end_log_pos */ + Log_event *ev = Log_event::read_log_event(buf,event_len, &errmsg, + mi->rli.relay_log.description_event_for_queue); + if (unlikely(!ev)) + { + sql_print_error("Read invalid event from master: '%s',\ + master could be corrupt but a more likely cause of this is a bug", + errmsg); + my_free((char*) tmp_buf, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_RETURN(1); + } + pthread_mutex_lock(&mi->data_lock); + switch (ev->get_type_code()) { + case STOP_EVENT: + goto err; + case ROTATE_EVENT: + if (unlikely(process_io_rotate(mi,(Rotate_log_event*)ev))) + { + delete ev; + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(1); + } + inc_pos= 0; + break; + default: + inc_pos= event_len; + break; + } + if (unlikely(rli->relay_log.append(ev))) + { + delete ev; + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(1); + } + rli->relay_log.harvest_bytes_written(&rli->log_space_total); + delete ev; + mi->master_log_pos+= inc_pos; +err: + DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); + pthread_mutex_unlock(&mi->data_lock); + DBUG_RETURN(0); +} + +/* + queue_old_event() + + Writes a 3.23 or 4.0 event to the relay log, after converting it to the 5.0 + (exactly, slave's) format. To do the conversion, we create a 5.0 event from + the 3.23/4.0 bytes, then write this event to the relay log. + + TODO: + Test this code before release - it has to be tested on a separate + setup with 3.23 master or 4.0 master +*/ + +static int queue_old_event(MASTER_INFO *mi, const char *buf, + ulong event_len) +{ + switch (mi->rli.relay_log.description_event_for_queue->binlog_version) + { + case 1: + return queue_binlog_ver_1_event(mi,buf,event_len); + case 3: + return queue_binlog_ver_3_event(mi,buf,event_len); + default: /* unsupported format; eg version 2 */ + DBUG_PRINT("info",("unsupported binlog format %d in queue_old_event()", + mi->rli.relay_log.description_event_for_queue->binlog_version)); + return 1; + } +} /* queue_event() + If the event is 3.23/4.0, passes it to queue_old_event() which will convert + it. Otherwise, writes a 5.0 (or newer) event to the relay log. Then there is + no format conversion, it's pure read/write of bytes. + So a 5.0.0 slave's relay log can contain events in the slave's format or in + any >=5.0.0 format. 
*/ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) @@ -3758,7 +4132,8 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) RELAY_LOG_INFO *rli= &mi->rli; DBUG_ENTER("queue_event"); - if (mi->old_format) + if (mi->rli.relay_log.description_event_for_queue->binlog_version<4 && + buf[EVENT_TYPE_OFFSET] != FORMAT_DESCRIPTION_EVENT /* a way to escape */) DBUG_RETURN(queue_old_event(mi,buf,event_len)); pthread_mutex_lock(&mi->data_lock); @@ -3785,7 +4160,7 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) goto err; case ROTATE_EVENT: { - Rotate_log_event rev(buf,event_len,0); + Rotate_log_event rev(buf,event_len,mi->rli.relay_log.description_event_for_queue); if (unlikely(process_io_rotate(mi,&rev))) { error= 1; @@ -3798,6 +4173,42 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) inc_pos= 0; break; } + case FORMAT_DESCRIPTION_EVENT: + { + /* + Create an event, and save it (when we rotate the relay log, we will have + to write this event again). + */ + /* + We are the only thread which reads/writes description_event_for_queue. The + relay_log struct does not move (though some members of it can change), so + we needn't any lock (no rli->data_lock, no log lock). + */ + Format_description_log_event* tmp; + const char* errmsg; + if (!(tmp= (Format_description_log_event*) + Log_event::read_log_event(buf, event_len, &errmsg, + mi->rli.relay_log.description_event_for_queue))) + { + error= 2; + goto err; + } + delete mi->rli.relay_log.description_event_for_queue; + mi->rli.relay_log.description_event_for_queue= tmp; + /* + Though this does some conversion to the slave's format, this will + preserve the master's binlog format version, and number of event types. + */ + /* + If the event was not requested by the slave (the slave did not ask for + it), i.e. has end_log_pos=0, we do not increment mi->master_log_pos + */ + inc_pos= uint4korr(buf+LOG_POS_OFFSET) ? event_len : 0; + DBUG_PRINT("info",("binlog format is now %d", + mi->rli.relay_log.description_event_for_queue->binlog_version)); + + } + break; default: inc_pos= event_len; break; @@ -3824,23 +4235,32 @@ int queue_event(MASTER_INFO* mi,const char* buf, ulong event_len) We still want to increment, so that we won't re-read this event from the master if the slave IO thread is now stopped/restarted (more efficient if the events we are ignoring are big LOAD DATA INFILE). + But events which were generated by this slave and which do not exist in + the master's binlog (i.e. Format_desc, Rotate & Stop) should not increment + mi->master_log_pos. 
*/ - mi->master_log_pos+= inc_pos; + if (buf[EVENT_TYPE_OFFSET]!=FORMAT_DESCRIPTION_EVENT && + buf[EVENT_TYPE_OFFSET]!=ROTATE_EVENT && + buf[EVENT_TYPE_OFFSET]!=STOP_EVENT) + mi->master_log_pos+= inc_pos; DBUG_PRINT("info", ("master_log_pos: %d, event originating from the same server, ignored", (ulong) mi->master_log_pos)); } else { /* write the event to the relay log */ - if (likely(!(error= rli->relay_log.appendv(buf,event_len,0)))) + if (likely(!(rli->relay_log.appendv(buf,event_len,0)))) { mi->master_log_pos+= inc_pos; DBUG_PRINT("info", ("master_log_pos: %d", (ulong) mi->master_log_pos)); rli->relay_log.harvest_bytes_written(&rli->log_space_total); } + else + error=3; } err: pthread_mutex_unlock(&mi->data_lock); + DBUG_PRINT("info", ("error=%d", error)); DBUG_RETURN(error); } @@ -3865,6 +4285,7 @@ void end_relay_log_info(RELAY_LOG_INFO* rli) } rli->inited = 0; rli->relay_log.close(LOG_CLOSE_INDEX | LOG_CLOSE_STOP_EVENT); + rli->relay_log.harvest_bytes_written(&rli->log_space_total); /* Delete the slave's temporary tables from memory. In the future there will be other actions than this, to ensure persistance @@ -3993,6 +4414,7 @@ replication resumed in log '%s' at position %s", mi->user, thd->set_active_vio(mysql->net.vio); #endif } + mysql->reconnect= 1; DBUG_PRINT("exit",("slave_was_killed: %d", slave_was_killed)); DBUG_RETURN(slave_was_killed); } @@ -4086,6 +4508,7 @@ static IO_CACHE *reopen_relay_log(RELAY_LOG_INFO *rli, const char **errmsg) relay_log_pos Current log pos pending Number of bytes already processed from the event */ + rli->event_relay_log_pos= max(rli->event_relay_log_pos, BIN_LOG_HEADER_SIZE); my_b_seek(cur_log,rli->event_relay_log_pos); DBUG_RETURN(cur_log); } @@ -4144,28 +4567,40 @@ Log_event* next_event(RELAY_LOG_INFO* rli) hot_log=0; // Using old binary log } } + #ifndef DBUG_OFF { + /* This is an assertion which sometimes fails, let's try to track it */ char llbuf1[22], llbuf2[22]; - DBUG_ASSERT(my_b_tell(cur_log) >= BIN_LOG_HEADER_SIZE); - /* - The next assertion sometimes (very rarely) fails, let's try to track - it - */ - DBUG_PRINT("info", ("\ -Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", + DBUG_PRINT("info", ("my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", llstr(my_b_tell(cur_log),llbuf1), - llstr(rli->group_relay_log_pos,llbuf2))); - DBUG_ASSERT(my_b_tell(cur_log) == rli->event_relay_log_pos); + llstr(rli->event_relay_log_pos,llbuf2))); + DBUG_ASSERT(my_b_tell(cur_log) >= BIN_LOG_HEADER_SIZE); + DBUG_ASSERT(my_b_tell(cur_log) == rli->event_relay_log_pos); } #endif /* Relay log is always in new format - if the master is 3.23, the - I/O thread will convert the format for us + I/O thread will convert the format for us. + A problem: the description event may be in a previous relay log. So if + the slave has been shutdown meanwhile, we would have to look in old relay + logs, which may even have been deleted. So we need to write this + description event at the beginning of the relay log. + When the relay log is created when the I/O thread starts, easy: the + master will send the description event and we will queue it. + But if the relay log is created by new_file(): then the solution is: + MYSQL_LOG::open() will write the buffered description event. 
*/ - if ((ev=Log_event::read_log_event(cur_log,0,(bool)0 /* new format */))) + if ((ev=Log_event::read_log_event(cur_log,0, + rli->relay_log.description_event_for_exec))) + { DBUG_ASSERT(thd==rli->sql_thd); + /* + read it while we have a lock, to avoid a mutex lock in + inc_event_relay_log_pos() + */ + rli->future_event_relay_log_pos= my_b_tell(cur_log); if (hot_log) pthread_mutex_unlock(log_lock); DBUG_RETURN(ev); @@ -4322,8 +4757,8 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", { #ifdef EXTRA_DEBUG if (global_system_variables.log_warnings) - sql_print_error("next log '%s' is currently active", - rli->linfo.log_file_name); + sql_print_information("next log '%s' is currently active", + rli->linfo.log_file_name); #endif rli->cur_log= cur_log= rli->relay_log.get_log_file(); rli->cur_log_old_open_count= rli->relay_log.get_open_count(); @@ -4352,8 +4787,8 @@ Before assert, my_b_tell(cur_log)=%s rli->event_relay_log_pos=%s", */ #ifdef EXTRA_DEBUG if (global_system_variables.log_warnings) - sql_print_error("next log '%s' is not active", - rli->linfo.log_file_name); + sql_print_information("next log '%s' is not active", + rli->linfo.log_file_name); #endif // open_binlog() will check the magic header if ((rli->cur_log_fd=open_binlog(cur_log,rli->linfo.log_file_name, @@ -4379,7 +4814,11 @@ event(errno: %d cur_log->error: %d)", } } if (!errmsg && global_system_variables.log_warnings) - errmsg = "slave SQL thread was killed"; + { + sql_print_information("Error reading relay log event: %s", + "slave SQL thread was killed"); + DBUG_RETURN(0); + } err: if (errmsg) @@ -4399,8 +4838,9 @@ void rotate_relay_log(MASTER_INFO* mi) DBUG_ENTER("rotate_relay_log"); RELAY_LOG_INFO* rli= &mi->rli; - lock_slave_threads(mi); - pthread_mutex_lock(&rli->data_lock); + /* We don't lock rli->run_lock. This would lead to deadlocks. */ + pthread_mutex_lock(&mi->run_lock); + /* We need to test inited because otherwise, new_file() will attempt to lock LOCK_log, which may not be inited (if we're not a slave). @@ -4429,8 +4869,7 @@ void rotate_relay_log(MASTER_INFO* mi) */ rli->relay_log.harvest_bytes_written(&rli->log_space_total); end: - pthread_mutex_unlock(&rli->data_lock); - unlock_slave_threads(mi); + pthread_mutex_unlock(&mi->run_lock); DBUG_VOID_RETURN; } diff --git a/sql/slave.h b/sql/slave.h index bcd79dd4a39..e0816fd45a7 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -93,11 +93,6 @@ extern my_bool opt_log_slave_updates; extern ulonglong relay_log_space_limit; struct st_master_info; -enum enum_binlog_formats { - BINLOG_FORMAT_CURRENT=0, /* 0 is important for easy 'if (mi->old_format)' */ - BINLOG_FORMAT_323_LESS_57, - BINLOG_FORMAT_323_GEQ_57 }; - /* 3 possible values for MASTER_INFO::slave_running and RELAY_LOG_INFO::slave_running. 
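
The hunks below add RELAY_LOG_INFO::future_event_relay_log_pos and simplify inc_event_relay_log_pos(); together with the next_event() change above ("read it while we have a lock, to avoid a mutex lock in inc_event_relay_log_pos()"), they implement a capture-then-publish handoff of the relay-log read position. A minimal standalone sketch of that pattern follows; the names and types are invented for illustration and are not MySQL's actual structures.

/*
  Sketch only: invented names, not MySQL's actual structures.
  capture() runs while the caller already holds the relay-log mutex
  (right after reading an event); commit() runs later in the same
  thread and needs no lock, since only this thread writes event_pos.
*/
struct relay_position_sketch
{
  unsigned long long event_pos;         /* next event to execute */
  unsigned long long future_event_pos;  /* end of the event just read */

  void capture(unsigned long long end_of_event)
  {
    future_event_pos= end_of_event;     /* caller holds the log mutex */
  }

  void commit()
  {
    event_pos= future_event_pos;        /* no extra locking needed */
  }
};

The point of the handoff is that the position is sampled (my_b_tell()) only while the relay-log mutex is already held, so advancing the executed position afterwards requires no additional mutex acquisition.
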
@@ -214,6 +209,8 @@ typedef struct st_relay_log_info ulonglong group_relay_log_pos; char event_relay_log_name[FN_REFLEN]; ulonglong event_relay_log_pos; + ulonglong future_event_relay_log_pos; + /* Original log name and position of the group we're currently executing (whose coordinates are group_relay_log_name/pos in the relay log) @@ -318,12 +315,14 @@ typedef struct st_relay_log_info until_log_names_cmp_result= UNTIL_LOG_NAMES_CMP_UNKNOWN; } - inline void inc_event_relay_log_pos(ulonglong val) + inline void inc_event_relay_log_pos() { - event_relay_log_pos+= val; + event_relay_log_pos= future_event_relay_log_pos; } - void inc_group_relay_log_pos(ulonglong val, ulonglong log_pos, bool skip_lock=0); + void inc_group_relay_log_pos(ulonglong log_pos, + bool skip_lock=0); + int wait_for_pos(THD* thd, String* log_name, longlong log_pos, longlong timeout); void close_temporary_tables(); @@ -400,7 +399,6 @@ typedef struct st_master_info int events_till_abort; #endif bool inited; - enum enum_binlog_formats old_format; volatile bool abort_slave; volatile uint slave_running; volatile ulong slave_run_id; @@ -416,7 +414,7 @@ typedef struct st_master_info long clock_diff_with_master; st_master_info() - :ssl(0), fd(-1), io_thd(0), inited(0), old_format(BINLOG_FORMAT_CURRENT), + :ssl(0), fd(-1), io_thd(0), inited(0), abort_slave(0),slave_running(0), slave_run_id(0) { host[0] = 0; user[0] = 0; password[0] = 0; @@ -509,8 +507,8 @@ int fetch_master_table(THD* thd, const char* db_name, const char* table_name, void table_rule_ent_hash_to_str(String* s, HASH* h); void table_rule_ent_dynamic_array_to_str(String* s, DYNAMIC_ARRAY* a); -int show_master_info(THD* thd, MASTER_INFO* mi); -int show_binlog_info(THD* thd); +bool show_master_info(THD* thd, MASTER_INFO* mi); +bool show_binlog_info(THD* thd); /* See if the query uses any tables that should not be replicated */ bool tables_ok(THD* thd, TABLE_LIST* tables); @@ -548,10 +546,12 @@ void lock_slave_threads(MASTER_INFO* mi); void unlock_slave_threads(MASTER_INFO* mi); void init_thread_mask(int* mask,MASTER_INFO* mi,bool inverse); int init_relay_log_pos(RELAY_LOG_INFO* rli,const char* log,ulonglong pos, - bool need_data_lock, const char** errmsg); + bool need_data_lock, const char** errmsg, + bool look_for_description_event); int purge_relay_logs(RELAY_LOG_INFO* rli, THD *thd, bool just_reset, const char** errmsg); +void set_slave_thread_options(THD* thd); void rotate_relay_log(MASTER_INFO* mi); extern "C" pthread_handler_decl(handle_slave_io,arg); diff --git a/sql/sp.cc b/sql/sp.cc new file mode 100644 index 00000000000..65dad60cda7 --- /dev/null +++ b/sql/sp.cc @@ -0,0 +1,1265 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + + +#include "mysql_priv.h" +#include "sp.h" +#include "sp_head.h" +#include "sp_cache.h" + +static bool +create_string(THD *thd, String *buf, + int sp_type, + sp_name *name, + const char *params, ulong paramslen, + const char *returns, ulong returnslen, + const char *body, ulong bodylen, + st_sp_chistics *chistics); + +/* + * + * DB storage of Stored PROCEDUREs and FUNCTIONs + * + */ + +enum +{ + MYSQL_PROC_FIELD_DB = 0, + MYSQL_PROC_FIELD_NAME, + MYSQL_PROC_FIELD_TYPE, + MYSQL_PROC_FIELD_SPECIFIC_NAME, + MYSQL_PROC_FIELD_LANGUAGE, + MYSQL_PROC_FIELD_ACCESS, + MYSQL_PROC_FIELD_DETERMINISTIC, + MYSQL_PROC_FIELD_SECURITY_TYPE, + MYSQL_PROC_FIELD_PARAM_LIST, + MYSQL_PROC_FIELD_RETURNS, + MYSQL_PROC_FIELD_BODY, + MYSQL_PROC_FIELD_DEFINER, + MYSQL_PROC_FIELD_CREATED, + MYSQL_PROC_FIELD_MODIFIED, + MYSQL_PROC_FIELD_SQL_MODE, + MYSQL_PROC_FIELD_COMMENT, + MYSQL_PROC_FIELD_COUNT +}; + +bool mysql_proc_table_exists= 1; + +/* *opened=true means we opened ourselves */ +static int +db_find_routine_aux(THD *thd, int type, sp_name *name, + enum thr_lock_type ltype, TABLE **tablep, bool *opened) +{ + TABLE *table; + byte key[64+64+1]; // db, name, type + uint keylen; + DBUG_ENTER("db_find_routine_aux"); + DBUG_PRINT("enter", ("type: %d name: %*s", + type, name->m_name.length, name->m_name.str)); + + /* + Speed up things if mysql.proc doesn't exists + mysql_proc_table_exists is set when on creates a stored procedure + or on flush privileges + */ + if (!mysql_proc_table_exists && ltype == TL_READ) + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + + // Put the key used to read the row together + keylen= name->m_db.length; + if (keylen > 64) + keylen= 64; + memcpy(key, name->m_db.str, keylen); + memset(key+keylen, (int)' ', 64-keylen); // Pad with space + keylen= name->m_name.length; + if (keylen > 64) + keylen= 64; + memcpy(key+64, name->m_name.str, keylen); + memset(key+64+keylen, (int)' ', 64-keylen); // Pad with space + key[128]= type; + keylen= sizeof(key); + + if (thd->lex->proc_table) + table= thd->lex->proc_table->table; + else + { + for (table= thd->open_tables ; table ; table= table->next) + if (strcmp(table->table_cache_key, "mysql") == 0 && + strcmp(table->real_name, "proc") == 0) + break; + } + if (table) + *opened= FALSE; + else + { + TABLE_LIST tables; + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.real_name= tables.alias= (char*)"proc"; + if (! 
(table= open_ltable(thd, &tables, ltype))) + { + *tablep= NULL; + mysql_proc_table_exists= 0; + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + } + *opened= TRUE; + } + mysql_proc_table_exists= 1; + + if (table->file->index_read_idx(table->record[0], 0, + key, keylen, + HA_READ_KEY_EXACT)) + { + *tablep= NULL; + DBUG_RETURN(SP_KEY_NOT_FOUND); + } + *tablep= table; + + DBUG_RETURN(SP_OK); +} + + +static int +db_find_routine(THD *thd, int type, sp_name *name, sp_head **sphp) +{ + extern int yyparse(void *thd); + TABLE *table; + const char *params, *returns, *body; + int ret; + bool opened; + const char *definer; + longlong created; + longlong modified; + st_sp_chistics chistics; + char *ptr; + uint length; + char buff[65]; + String str(buff, sizeof(buff), &my_charset_bin); + ulong sql_mode; + DBUG_ENTER("db_find_routine"); + DBUG_PRINT("enter", ("type: %d name: %*s", + type, name->m_name.length, name->m_name.str)); + + ret= db_find_routine_aux(thd, type, name, TL_READ, &table, &opened); + if (ret != SP_OK) + goto done; + + if (table->fields != MYSQL_PROC_FIELD_COUNT) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + bzero((char *)&chistics, sizeof(chistics)); + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_ACCESS])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + switch (ptr[0]) { + case 'N': + chistics.daccess= SP_NO_SQL; + break; + case 'C': + chistics.daccess= SP_CONTAINS_SQL; + break; + case 'R': + chistics.daccess= SP_READS_SQL_DATA; + break; + case 'M': + chistics.daccess= SP_MODIFIES_SQL_DATA; + break; + default: + chistics.daccess= SP_CONTAINS_SQL; + } + + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_DETERMINISTIC])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + chistics.detistic= (ptr[0] == 'N' ? FALSE : TRUE); + + if ((ptr= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + chistics.suid= (ptr[0] == 'I' ? 
SP_IS_NOT_SUID : SP_IS_SUID); + + if ((params= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_PARAM_LIST])) == NULL) + { + params= ""; + } + + if (type == TYPE_ENUM_PROCEDURE) + returns= ""; + else if ((returns= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_RETURNS])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + if ((body= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_BODY])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + // Get additional information + if ((definer= get_field(thd->mem_root, + table->field[MYSQL_PROC_FIELD_DEFINER])) == NULL) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + + modified= table->field[MYSQL_PROC_FIELD_MODIFIED]->val_int(); + created= table->field[MYSQL_PROC_FIELD_CREATED]->val_int(); + + sql_mode= (ulong) table->field[MYSQL_PROC_FIELD_SQL_MODE]->val_int(); + + table->field[MYSQL_PROC_FIELD_COMMENT]->val_str(&str, &str); + + ptr= 0; + if ((length= str.length())) + ptr= thd->strmake(str.ptr(), length); + chistics.comment.str= ptr; + chistics.comment.length= length; + + if (opened) + { + opened= FALSE; + close_thread_tables(thd, 0, 1); + } + + { + String defstr; + LEX *oldlex= thd->lex; + char olddb[128]; + bool dbchanged; + enum enum_sql_command oldcmd= thd->lex->sql_command; + ulong old_sql_mode= thd->variables.sql_mode; + ha_rows select_limit= thd->variables.select_limit; + + thd->variables.sql_mode= sql_mode; + thd->variables.select_limit= HA_POS_ERROR; + + defstr.set_charset(system_charset_info); + if (!create_string(thd, &defstr, + type, + name, + params, strlen(params), + returns, strlen(returns), + body, strlen(body), + &chistics)) + { + ret= SP_INTERNAL_ERROR; + goto done; + } + + dbchanged= FALSE; + if ((ret= sp_use_new_db(thd, name->m_db.str, olddb, sizeof(olddb), + 1, &dbchanged))) + goto done; + + { + /* This is something of a kludge. We need to initialize some fields + * in thd->lex (the unit and master stuff), and the easiest way to + * do it is, is to call mysql_init_query(), but this unfortunately + * resets teh value_list where we keep the CALL parameters. So we + * copy the list and then restore it. 
+ */ + List<Item> vals= thd->lex->value_list; + + lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length()); + thd->lex->value_list= vals; + } + + if (yyparse(thd) || thd->is_fatal_error || thd->lex->sphead == NULL) + { + LEX *newlex= thd->lex; + sp_head *sp= newlex->sphead; + + if (dbchanged && (ret= sp_change_db(thd, olddb, 1))) + goto done; + if (sp) + { + if (oldlex != newlex) + sp->restore_lex(thd); + delete sp; + newlex->sphead= NULL; + } + ret= SP_PARSE_ERROR; + } + else + { + if (dbchanged && (ret= sp_change_db(thd, olddb, 1))) + goto done; + *sphp= thd->lex->sphead; + (*sphp)->set_info((char *)definer, (uint)strlen(definer), + created, modified, &chistics, sql_mode); + (*sphp)->optimize(); + } + thd->lex->sql_command= oldcmd; + thd->variables.sql_mode= old_sql_mode; + thd->variables.select_limit= select_limit; + } + + done: + + if (opened) + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +static int +db_create_routine(THD *thd, int type, sp_head *sp) +{ + int ret; + TABLE *table; + TABLE_LIST tables; + char definer[HOSTNAME_LENGTH+USERNAME_LENGTH+2]; + char olddb[128]; + bool dbchanged; + DBUG_ENTER("db_create_routine"); + DBUG_PRINT("enter", ("type: %d name: %*s",type,sp->m_name.length,sp->m_name.str)); + + dbchanged= FALSE; + if ((ret= sp_use_new_db(thd, sp->m_db.str, olddb, sizeof(olddb), + 0, &dbchanged))) + { + ret= SP_NO_DB_ERROR; + goto done; + } + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.real_name= tables.alias= (char*)"proc"; + + if (! (table= open_ltable(thd, &tables, TL_WRITE))) + ret= SP_OPEN_TABLE_FAILED; + else + { + restore_record(table, default_values); // Get default values for fields + strxmov(definer, thd->priv_user, "@", thd->priv_host, NullS); + + if (table->fields != MYSQL_PROC_FIELD_COUNT) + { + ret= SP_GET_FIELD_FAILED; + goto done; + } + table->field[MYSQL_PROC_FIELD_DB]-> + store(sp->m_db.str, sp->m_db.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_NAME]-> + store(sp->m_name.str, sp->m_name.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_TYPE]-> + store((longlong)type); + table->field[MYSQL_PROC_FIELD_SPECIFIC_NAME]-> + store(sp->m_name.str, sp->m_name.length, system_charset_info); + if (sp->m_chistics->daccess != SP_DEFAULT_ACCESS) + table->field[MYSQL_PROC_FIELD_ACCESS]-> + store((longlong)sp->m_chistics->daccess); + table->field[MYSQL_PROC_FIELD_DETERMINISTIC]-> + store((longlong)(sp->m_chistics->detistic ? 
1 : 2)); + if (sp->m_chistics->suid != SP_IS_DEFAULT_SUID) + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]-> + store((longlong)sp->m_chistics->suid); + table->field[MYSQL_PROC_FIELD_PARAM_LIST]-> + store(sp->m_params.str, sp->m_params.length, system_charset_info); + if (sp->m_retstr.str) + table->field[MYSQL_PROC_FIELD_RETURNS]-> + store(sp->m_retstr.str, sp->m_retstr.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_BODY]-> + store(sp->m_body.str, sp->m_body.length, system_charset_info); + table->field[MYSQL_PROC_FIELD_DEFINER]-> + store(definer, (uint)strlen(definer), system_charset_info); + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_CREATED])->set_time(); + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time(); + table->field[MYSQL_PROC_FIELD_SQL_MODE]-> + store((longlong)thd->variables.sql_mode); + if (sp->m_chistics->comment.str) + table->field[MYSQL_PROC_FIELD_COMMENT]-> + store(sp->m_chistics->comment.str, sp->m_chistics->comment.length, + system_charset_info); + + ret= SP_OK; + if (table->file->write_row(table->record[0])) + ret= SP_WRITE_ROW_FAILED; + } + +done: + close_thread_tables(thd); + if (dbchanged) + (void)sp_change_db(thd, olddb, 1); + DBUG_RETURN(ret); +} + + +static int +db_drop_routine(THD *thd, int type, sp_name *name) +{ + TABLE *table; + int ret; + bool opened; + DBUG_ENTER("db_drop_routine"); + DBUG_PRINT("enter", ("type: %d name: %*s", + type, name->m_name.length, name->m_name.str)); + + ret= db_find_routine_aux(thd, type, name, TL_WRITE, &table, &opened); + if (ret == SP_OK) + { + if (table->file->delete_row(table->record[0])) + ret= SP_DELETE_ROW_FAILED; + } + + if (opened) + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +static int +db_update_routine(THD *thd, int type, sp_name *name, st_sp_chistics *chistics) +{ + TABLE *table; + int ret; + bool opened; + DBUG_ENTER("db_update_routine"); + DBUG_PRINT("enter", ("type: %d name: %*s", + type, name->m_name.length, name->m_name.str)); + + ret= db_find_routine_aux(thd, type, name, TL_WRITE, &table, &opened); + if (ret == SP_OK) + { + store_record(table,record[1]); + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + ((Field_timestamp *)table->field[MYSQL_PROC_FIELD_MODIFIED])->set_time(); + if (chistics->suid != SP_IS_DEFAULT_SUID) + table->field[MYSQL_PROC_FIELD_SECURITY_TYPE]-> + store((longlong)chistics->suid); + if (chistics->daccess != SP_DEFAULT_ACCESS) + table->field[MYSQL_PROC_FIELD_ACCESS]-> + store((longlong)chistics->daccess); + if (chistics->comment.str) + table->field[MYSQL_PROC_FIELD_COMMENT]->store(chistics->comment.str, + chistics->comment.length, + system_charset_info); + if ((table->file->update_row(table->record[1],table->record[0]))) + ret= SP_WRITE_ROW_FAILED; + } + if (opened) + close_thread_tables(thd); + DBUG_RETURN(ret); +} + + +struct st_used_field +{ + const char *field_name; + uint field_length; + enum enum_field_types field_type; + Field *field; +}; + +static struct st_used_field init_fields[]= +{ + { "Db", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { "Name", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { "Type", 9, MYSQL_TYPE_STRING, 0}, + { "Definer", 77, MYSQL_TYPE_STRING, 0}, + { "Modified", 0, MYSQL_TYPE_TIMESTAMP, 0}, + { "Created", 0, MYSQL_TYPE_TIMESTAMP, 0}, + { "Security_type", 1, MYSQL_TYPE_STRING, 0}, + { "Comment", NAME_LEN, MYSQL_TYPE_STRING, 0}, + { 0, 0, MYSQL_TYPE_STRING, 0} +}; + + +static int +print_field_values(THD *thd, TABLE *table, + struct st_used_field *used_fields, + int type, const char *wild) +{ + Protocol *protocol= thd->protocol; 
+ + if (table->field[MYSQL_PROC_FIELD_TYPE]->val_int() == type) + { + String db_string; + String name_string; + struct st_used_field *used_field= used_fields; + + if (get_field(thd->mem_root, used_field->field, &db_string)) + db_string.set_ascii("", 0); + used_field+= 1; + get_field(thd->mem_root, used_field->field, &name_string); + + if (!wild || !wild[0] || !wild_compare(name_string.ptr(), wild, 0)) + { + protocol->prepare_for_resend(); + protocol->store(&db_string); + protocol->store(&name_string); + for (used_field++; + used_field->field_name; + used_field++) + { + switch (used_field->field_type) { + case MYSQL_TYPE_TIMESTAMP: + { + TIME tmp_time; + + bzero((char *)&tmp_time, sizeof(tmp_time)); + ((Field_timestamp *) used_field->field)->get_time(&tmp_time); + protocol->store(&tmp_time); + } + break; + default: + { + String tmp_string; + + get_field(thd->mem_root, used_field->field, &tmp_string); + protocol->store(&tmp_string); + } + break; + } + } + if (protocol->write()) + return SP_INTERNAL_ERROR; + } + } + return SP_OK; +} + + +static int +db_show_routine_status(THD *thd, int type, const char *wild) +{ + TABLE *table; + TABLE_LIST tables; + int res; + DBUG_ENTER("db_show_routine_status"); + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.real_name= tables.alias= (char*)"proc"; + + if (! (table= open_ltable(thd, &tables, TL_READ))) + { + res= SP_OPEN_TABLE_FAILED; + goto done; + } + else + { + Item *item; + List<Item> field_list; + struct st_used_field *used_field; + TABLE_LIST *leaves= 0; + st_used_field used_fields[array_elements(init_fields)]; + + memcpy((char*) used_fields, (char*) init_fields, sizeof(used_fields)); + /* Init header */ + for (used_field= &used_fields[0]; + used_field->field_name; + used_field++) + { + switch (used_field->field_type) { + case MYSQL_TYPE_TIMESTAMP: + field_list.push_back(item=new Item_datetime(used_field->field_name)); + break; + default: + field_list.push_back(item=new Item_empty_string(used_field->field_name, + used_field-> + field_length)); + break; + } + } + /* Print header */ + if (thd->protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) + { + res= SP_INTERNAL_ERROR; + goto err_case; + } + + /* + Init fields + + tables is not VIEW for sure => we can pass 0 as condition + */ + setup_tables(thd, &tables, 0, &leaves, FALSE, FALSE); + for (used_field= &used_fields[0]; + used_field->field_name; + used_field++) + { + Item_field *field= new Item_field("mysql", "proc", + used_field->field_name); + if (!(used_field->field= find_field_in_tables(thd, field, &tables, + 0, REPORT_ALL_ERRORS, 1))) + { + res= SP_INTERNAL_ERROR; + goto err_case1; + } + } + + table->file->ha_index_init(0); + if ((res= table->file->index_first(table->record[0]))) + { + res= (res == HA_ERR_END_OF_FILE) ? 
0 : SP_INTERNAL_ERROR; + goto err_case1; + } + if ((res= print_field_values(thd, table, used_fields, type, wild))) + goto err_case1; + while (!table->file->index_next(table->record[0])) + { + if ((res= print_field_values(thd, table, used_fields, type, wild))) + goto err_case1; + } + res= SP_OK; + } + +err_case1: + send_eof(thd); +err_case: + table->file->ha_index_end(); + close_thread_tables(thd); +done: + DBUG_RETURN(res); +} + + +/* Drop all routines in database 'db' */ +int +sp_drop_db_routines(THD *thd, char *db) +{ + TABLE *table; + byte key[64]; // db + uint keylen; + int ret; + DBUG_ENTER("sp_drop_db_routines"); + DBUG_PRINT("enter", ("db: %s", db)); + + // Put the key used to read the row together + keylen= strlen(db); + if (keylen > 64) + keylen= 64; + memcpy(key, db, keylen); + memset(key+keylen, (int)' ', 64-keylen); // Pad with space + keylen= sizeof(key); + + for (table= thd->open_tables ; table ; table= table->next) + if (strcmp(table->table_cache_key, "mysql") == 0 && + strcmp(table->real_name, "proc") == 0) + break; + if (! table) + { + TABLE_LIST tables; + + memset(&tables, 0, sizeof(tables)); + tables.db= (char*)"mysql"; + tables.real_name= tables.alias= (char*)"proc"; + if (! (table= open_ltable(thd, &tables, TL_WRITE))) + DBUG_RETURN(SP_OPEN_TABLE_FAILED); + } + + ret= SP_OK; + table->file->ha_index_init(0); + if (! table->file->index_read(table->record[0], + key, keylen, HA_READ_KEY_EXACT)) + { + int nxtres; + bool deleted= FALSE; + + do { + if (! table->file->delete_row(table->record[0])) + deleted= TRUE; /* We deleted something */ + else + { + ret= SP_DELETE_ROW_FAILED; + nxtres= 0; + break; + } + } while (! (nxtres= table->file->index_next_same(table->record[0], + key, keylen))); + if (nxtres != HA_ERR_END_OF_FILE) + ret= SP_KEY_NOT_FOUND; + if (deleted) + sp_cache_invalidate(); + } + table->file->ha_index_end(); + + close_thread_tables(thd); + + DBUG_RETURN(ret); +} + + +/***************************************************************************** + PROCEDURE +******************************************************************************/ + +sp_head * +sp_find_procedure(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_find_procedure"); + DBUG_PRINT("enter", ("name: %*s.%*s", + name->m_db.length, name->m_db.str, + name->m_name.length, name->m_name.str)); + + if (!(sp= sp_cache_lookup(&thd->sp_proc_cache, name))) + { + if (db_find_routine(thd, TYPE_ENUM_PROCEDURE, name, &sp) == SP_OK) + sp_cache_insert(&thd->sp_proc_cache, sp); + } + + DBUG_RETURN(sp); +} + + +int +sp_exists_routine(THD *thd, TABLE_LIST *tables, bool any, bool no_error) +{ + TABLE_LIST *table; + bool result= 0; + DBUG_ENTER("sp_exists_routine"); + for (table= tables; table; table= table->next_global) + { + sp_name *name; + LEX_STRING lex_db; + LEX_STRING lex_name; + lex_db.length= strlen(table->db); + lex_name.length= strlen(table->real_name); + lex_db.str= thd->strmake(table->db, lex_db.length); + lex_name.str= thd->strmake(table->real_name, lex_name.length); + name= new sp_name(lex_db, lex_name); + name->init_qname(thd); + if (sp_find_procedure(thd, name) != NULL || + sp_find_function(thd, name) != NULL) + { + if (any) + DBUG_RETURN(1); + result= 1; + } + else if (!any) + { + if (!no_error) + { + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION or PROCEDURE", + table->real_name); + DBUG_RETURN(-1); + } + DBUG_RETURN(0); + } + } + DBUG_RETURN(result); +} + + +int +sp_create_procedure(THD *thd, sp_head *sp) +{ + int ret; + DBUG_ENTER("sp_create_procedure"); + DBUG_PRINT("enter", ("name: 
%*s", sp->m_name.length, sp->m_name.str)); + + ret= db_create_routine(thd, TYPE_ENUM_PROCEDURE, sp); + DBUG_RETURN(ret); +} + + +int +sp_drop_procedure(THD *thd, sp_name *name) +{ + int ret; + bool found; + DBUG_ENTER("sp_drop_procedure"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + found= sp_cache_remove(&thd->sp_proc_cache, name); + ret= db_drop_routine(thd, TYPE_ENUM_PROCEDURE, name); + if (!found && !ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_update_procedure(THD *thd, sp_name *name, st_sp_chistics *chistics) +{ + int ret; + bool found; + DBUG_ENTER("sp_update_procedure"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + found= sp_cache_remove(&thd->sp_proc_cache, name); + ret= db_update_routine(thd, TYPE_ENUM_PROCEDURE, name, chistics); + if (!found && !ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_show_create_procedure(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_show_create_procedure"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + if ((sp= sp_find_procedure(thd, name))) + { + int ret= sp->show_create_procedure(thd); + + DBUG_RETURN(ret); + } + + DBUG_RETURN(SP_KEY_NOT_FOUND); +} + + +int +sp_show_status_procedure(THD *thd, const char *wild) +{ + int ret; + DBUG_ENTER("sp_show_status_procedure"); + + ret= db_show_routine_status(thd, TYPE_ENUM_PROCEDURE, wild); + DBUG_RETURN(ret); +} + + +/***************************************************************************** + FUNCTION +******************************************************************************/ + +sp_head * +sp_find_function(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_find_function"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + if (!(sp= sp_cache_lookup(&thd->sp_func_cache, name))) + { + if (db_find_routine(thd, TYPE_ENUM_FUNCTION, name, &sp) != SP_OK) + sp= NULL; + else + sp_cache_insert(&thd->sp_func_cache, sp); + } + DBUG_RETURN(sp); +} + + +int +sp_create_function(THD *thd, sp_head *sp) +{ + int ret; + DBUG_ENTER("sp_create_function"); + DBUG_PRINT("enter", ("name: %*s", sp->m_name.length, sp->m_name.str)); + + ret= db_create_routine(thd, TYPE_ENUM_FUNCTION, sp); + DBUG_RETURN(ret); +} + + +int +sp_drop_function(THD *thd, sp_name *name) +{ + int ret; + bool found; + DBUG_ENTER("sp_drop_function"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + found= sp_cache_remove(&thd->sp_func_cache, name); + ret= db_drop_routine(thd, TYPE_ENUM_FUNCTION, name); + if (!found && !ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_update_function(THD *thd, sp_name *name, st_sp_chistics *chistics) +{ + int ret; + bool found; + DBUG_ENTER("sp_update_procedure"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + found= sp_cache_remove(&thd->sp_func_cache, name); + ret= db_update_routine(thd, TYPE_ENUM_FUNCTION, name, chistics); + if (!found && !ret) + sp_cache_invalidate(); + DBUG_RETURN(ret); +} + + +int +sp_show_create_function(THD *thd, sp_name *name) +{ + sp_head *sp; + DBUG_ENTER("sp_show_create_function"); + DBUG_PRINT("enter", ("name: %*s", name->m_name.length, name->m_name.str)); + + if ((sp= sp_find_function(thd, name))) + { + int ret= sp->show_create_function(thd); + + DBUG_RETURN(ret); + } + DBUG_RETURN(SP_KEY_NOT_FOUND); +} + + +int +sp_show_status_function(THD *thd, const char *wild) +{ + int ret; + 
DBUG_ENTER("sp_show_status_function"); + ret= db_show_routine_status(thd, TYPE_ENUM_FUNCTION, wild); + DBUG_RETURN(ret); +} + + +bool +sp_function_exists(THD *thd, sp_name *name) +{ + TABLE *table; + bool ret= FALSE; + bool opened= FALSE; + DBUG_ENTER("sp_function_exists"); + + if (sp_cache_lookup(&thd->sp_func_cache, name) || + db_find_routine_aux(thd, TYPE_ENUM_FUNCTION, + name, TL_READ, + &table, &opened) == SP_OK) + ret= TRUE; + if (opened) + close_thread_tables(thd, 0, 1); + thd->clear_error(); + DBUG_RETURN(ret); +} + + +byte * +sp_lex_spfuns_key(const byte *ptr, uint *plen, my_bool first) +{ + LEX_STRING *lsp= (LEX_STRING *)ptr; + *plen= lsp->length; + return (byte *)lsp->str; +} + + +void +sp_add_fun_to_lex(LEX *lex, sp_name *fun) +{ + if (! hash_search(&lex->spfuns, + (byte *)fun->m_qname.str, fun->m_qname.length)) + { + LEX_STRING *ls= (LEX_STRING *)sql_alloc(sizeof(LEX_STRING)); + ls->str= sql_strmake(fun->m_qname.str, fun->m_qname.length); + ls->length= fun->m_qname.length; + + my_hash_insert(&lex->spfuns, (byte *)ls); + } +} + + +void +sp_merge_funs(LEX *dst, LEX *src) +{ + for (uint i=0 ; i < src->spfuns.records ; i++) + { + LEX_STRING *ls= (LEX_STRING *)hash_element(&src->spfuns, i); + + if (! hash_search(&dst->spfuns, (byte *)ls->str, ls->length)) + my_hash_insert(&dst->spfuns, (byte *)ls); + } +} + + +int +sp_cache_functions(THD *thd, LEX *lex) +{ + HASH *h= &lex->spfuns; + int ret= 0; + + for (uint i=0 ; i < h->records ; i++) + { + LEX_STRING *ls= (LEX_STRING *)hash_element(h, i); + sp_name name(*ls); + + name.m_qname= *ls; + if (! sp_cache_lookup(&thd->sp_func_cache, &name)) + { + sp_head *sp; + LEX *oldlex= thd->lex; + LEX *newlex= new st_lex; + + thd->lex= newlex; + newlex->proc_table= oldlex->proc_table; // hint if mysql.oper is opened + newlex->current_select= NULL; + name.m_name.str= strchr(name.m_qname.str, '.'); + name.m_db.length= name.m_name.str - name.m_qname.str; + name.m_db.str= strmake_root(thd->mem_root, + name.m_qname.str, name.m_db.length); + name.m_name.str+= 1; + name.m_name.length= name.m_qname.length - name.m_db.length - 1; + + if (db_find_routine(thd, TYPE_ENUM_FUNCTION, &name, &sp) + == SP_OK) + { + sp_cache_insert(&thd->sp_func_cache, sp); + ret= sp_cache_functions(thd, newlex); + delete newlex; + thd->lex= oldlex; + if (ret) + break; + } + else + { + delete newlex; + thd->lex= oldlex; + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "FUNCTION", ls->str); + ret= 1; + break; + } + } + } + return ret; +} + +/* + * Generates the CREATE... string from the table information. + * Returns TRUE on success, FALSE on (alloc) failure. 
+ */ +static bool +create_string(THD *thd, String *buf, + int type, + sp_name *name, + const char *params, ulong paramslen, + const char *returns, ulong returnslen, + const char *body, ulong bodylen, + st_sp_chistics *chistics) +{ + /* Make some room to begin with */ + if (buf->alloc(100 + name->m_qname.length + paramslen + returnslen + bodylen + + chistics->comment.length)) + return FALSE; + + buf->append("CREATE ", 7); + if (type == TYPE_ENUM_FUNCTION) + buf->append("FUNCTION ", 9); + else + buf->append("PROCEDURE ", 10); + append_identifier(thd, buf, name->m_db.str, name->m_db.length); + buf->append('.'); + append_identifier(thd, buf, name->m_name.str, name->m_name.length); + buf->append('('); + buf->append(params, paramslen); + buf->append(')'); + if (type == TYPE_ENUM_FUNCTION) + { + buf->append(" RETURNS ", 9); + buf->append(returns, returnslen); + } + buf->append('\n'); + switch (chistics->daccess) { + case SP_NO_SQL: + buf->append(" NO SQL\n"); + break; + case SP_READS_SQL_DATA: + buf->append(" READS SQL DATA\n"); + break; + case SP_MODIFIES_SQL_DATA: + buf->append(" MODIFIES SQL DATA\n"); + break; + case SP_DEFAULT_ACCESS: + case SP_CONTAINS_SQL: + /* Do nothing */ + break; + } + if (chistics->detistic) + buf->append(" DETERMINISTIC\n", 18); + if (chistics->suid == SP_IS_NOT_SUID) + buf->append(" SQL SECURITY INVOKER\n", 25); + if (chistics->comment.length) + { + buf->append(" COMMENT "); + append_unescaped(buf, chistics->comment.str, chistics->comment.length); + buf->append('\n'); + } + buf->append(body, bodylen); + return TRUE; +} + + +// +// Utilities... +// + +int +sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddblen, + bool no_access_check, bool *dbchangedp) +{ + bool changeit; + DBUG_ENTER("sp_use_new_db"); + DBUG_PRINT("enter", ("newdb: %s", newdb)); + + if (! newdb) + newdb= (char *)""; + if (thd->db && thd->db[0]) + { + if (my_strcasecmp(system_charset_info, thd->db, newdb) == 0) + changeit= 0; + else + { + changeit= 1; + strnmov(olddb, thd->db, olddblen); + } + } + else + { // thd->db empty + if (newdb[0]) + changeit= 1; + else + changeit= 0; + olddb[0] = '\0'; + } + if (!changeit) + { + *dbchangedp= FALSE; + DBUG_RETURN(0); + } + else + { + int ret= sp_change_db(thd, newdb, no_access_check); + + if (! ret) + *dbchangedp= TRUE; + DBUG_RETURN(ret); + } +} + +/* + Change database. + + SYNOPSIS + sp_change_db() + thd Thread handler + name Database name + empty_is_ok True= it's ok with "" as name + no_access_check True= don't do access check + + DESCRIPTION + This is the same as mysql_change_db(), but with some extra + arguments for Stored Procedure usage; doing implicit "use" + when executing an SP in a different database. + We also use different error routines, since this might be + invoked from a function when executing a query or statement. + Note: We would have prefered to reuse mysql_change_db(), but + the error handling in particular made that too awkward, so + we (reluctantly) have a "copy" here. + + RETURN VALUES + 0 ok + 1 error +*/ + +int +sp_change_db(THD *thd, char *name, bool no_access_check) +{ + int length, db_length; + char *dbname=my_strdup((char*) name,MYF(MY_WME)); + char path[FN_REFLEN]; + HA_CREATE_INFO create; + DBUG_ENTER("sp_change_db"); + DBUG_PRINT("enter", ("db: %s, no_access_check: %d", name, no_access_check)); + + db_length= (!dbname ? 
0 : strip_sp(dbname)); + if (dbname && db_length) + { + if ((db_length > NAME_LEN) || check_db_name(dbname)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), dbname); + x_free(dbname); + DBUG_RETURN(1); + } + } + + if (dbname && db_length) + { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (! no_access_check) + { + ulong db_access; + + if (test_all_bits(thd->master_access,DB_ACLS)) + db_access=DB_ACLS; + else + db_access= (acl_get(thd->host,thd->ip, thd->priv_user,dbname,0) | + thd->master_access); + if (!(db_access & DB_ACLS) && + (!grant_option || check_grant_db(thd,dbname))) + { + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + thd->priv_user, + thd->priv_host, + dbname); + mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), + thd->priv_user, + thd->priv_host, + dbname); + my_free(dbname,MYF(0)); + DBUG_RETURN(1); + } + } +#endif + (void) sprintf(path,"%s/%s",mysql_data_home,dbname); + length=unpack_dirname(path,path); // Convert if not unix + if (length && path[length-1] == FN_LIBCHAR) + path[length-1]=0; // remove ending '\' + if (access(path,F_OK)) + { + my_error(ER_BAD_DB_ERROR, MYF(0), dbname); + my_free(dbname,MYF(0)); + DBUG_RETURN(1); + } + } + + x_free(thd->db); + thd->db=dbname; // THD::~THD will free this + thd->db_length=db_length; + + if (dbname && db_length) + { + strmov(path+unpack_dirname(path,path), MY_DB_OPT_FILE); + load_db_opt(thd, path, &create); + thd->db_charset= create.default_table_charset ? + create.default_table_charset : + thd->variables.collation_server; + thd->variables.collation_database= thd->db_charset; + } + DBUG_RETURN(0); +} diff --git a/sql/sp.h b/sql/sp.h new file mode 100644 index 00000000000..152c59d0d02 --- /dev/null +++ b/sql/sp.h @@ -0,0 +1,105 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_H_ +#define _SP_H_ + +// Return codes from sp_create_*, sp_drop_*, and sp_show_*: +#define SP_OK 0 +#define SP_KEY_NOT_FOUND -1 +#define SP_OPEN_TABLE_FAILED -2 +#define SP_WRITE_ROW_FAILED -3 +#define SP_DELETE_ROW_FAILED -4 +#define SP_GET_FIELD_FAILED -5 +#define SP_PARSE_ERROR -6 +#define SP_INTERNAL_ERROR -7 +#define SP_NO_DB_ERROR -8 + +/* Drop all routines in database 'db' */ +int +sp_drop_db_routines(THD *thd, char *db); + +sp_head * +sp_find_procedure(THD *thd, sp_name *name); + +int +sp_exists_routine(THD *thd, TABLE_LIST *procs, bool any, bool no_error); + +int +sp_create_procedure(THD *thd, sp_head *sp); + +int +sp_drop_procedure(THD *thd, sp_name *name); + + +int +sp_update_procedure(THD *thd, sp_name *name, st_sp_chistics *chistics); + +int +sp_show_create_procedure(THD *thd, sp_name *name); + +int +sp_show_status_procedure(THD *thd, const char *wild); + +sp_head * +sp_find_function(THD *thd, sp_name *name); + +int +sp_create_function(THD *thd, sp_head *sp); + +int +sp_drop_function(THD *thd, sp_name *name); + +int +sp_update_function(THD *thd, sp_name *name, st_sp_chistics *chistics); + +int +sp_show_create_function(THD *thd, sp_name *name); + +int +sp_show_status_function(THD *thd, const char *wild); + +bool +sp_function_exists(THD *thd, sp_name *name); + + +// This is needed since we have to read the functions before we +// do anything else. +void +sp_add_fun_to_lex(LEX *lex, sp_name *fun); +void +sp_merge_funs(LEX *dst, LEX *src); +int +sp_cache_functions(THD *thd, LEX *lex); + + +// +// Utilities... +// + +// Do a "use newdb". The current db is stored at olddb. +// If newdb is the same as the current one, nothing is changed. +// dbchangedp is set to true if the db was actually changed. +int +sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddbmax, + bool no_access_check, bool *dbchangedp); + +// Like mysql_change_db() but handles empty db name and the send_ok() problem. +int +sp_change_db(THD *thd, char *db, bool no_access_check); + +#endif /* _SP_H_ */ diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc new file mode 100644 index 00000000000..056ac6d7e96 --- /dev/null +++ b/sql/sp_cache.cc @@ -0,0 +1,166 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef __GNUC__ +#pragma implementation +#endif + +#include "mysql_priv.h" +#include "sp_cache.h" +#include "sp_head.h" + +static pthread_mutex_t Cversion_lock; +static ulong Cversion = 0; + +void +sp_cache_init() +{ + pthread_mutex_init(&Cversion_lock, MY_MUTEX_INIT_FAST); +} + +void +sp_cache_clear(sp_cache **cp) +{ + sp_cache *c= *cp; + + if (c) + { + delete c; + *cp= NULL; + } +} + +void +sp_cache_insert(sp_cache **cp, sp_head *sp) +{ + sp_cache *c= *cp; + + if (! c) + c= new sp_cache(); + if (c) + { + ulong v; + + pthread_mutex_lock(&Cversion_lock); // LOCK + v= Cversion; + pthread_mutex_unlock(&Cversion_lock); // UNLOCK + + if (c->version < v) + { + if (*cp) + c->remove_all(); + c->version= v; + } + c->insert(sp); + if (*cp == NULL) + *cp= c; + } +} + +sp_head * +sp_cache_lookup(sp_cache **cp, sp_name *name) +{ + ulong v; + sp_cache *c= *cp; + + if (! c) + return NULL; + + pthread_mutex_lock(&Cversion_lock); // LOCK + v= Cversion; + pthread_mutex_unlock(&Cversion_lock); // UNLOCK + + if (c->version < v) + { + c->remove_all(); + c->version= v; + return NULL; + } + return c->lookup(name->m_qname.str, name->m_qname.length); +} + +bool +sp_cache_remove(sp_cache **cp, sp_name *name) +{ + sp_cache *c= *cp; + bool found= FALSE; + + if (c) + { + ulong v; + + pthread_mutex_lock(&Cversion_lock); // LOCK + v= Cversion++; + pthread_mutex_unlock(&Cversion_lock); // UNLOCK + + if (c->version < v) + c->remove_all(); + else + found= c->remove(name->m_qname.str, name->m_qname.length); + c->version= v+1; + } + return found; +} + +void +sp_cache_invalidate() +{ + pthread_mutex_lock(&Cversion_lock); // LOCK + Cversion++; + pthread_mutex_unlock(&Cversion_lock); // UNLOCK +} + +static byte * +hash_get_key_for_sp_head(const byte *ptr, uint *plen, + my_bool first) +{ + sp_head *sp= (sp_head *)ptr; + + *plen= sp->m_qname.length; + return (byte*) sp->m_qname.str; +} + +static void +hash_free_sp_head(void *p) +{ + sp_head *sp= (sp_head *)p; + + delete sp; +} + +sp_cache::sp_cache() +{ + init(); +} + +sp_cache::~sp_cache() +{ + hash_free(&m_hashtable); +} + +void +sp_cache::init() +{ + hash_init(&m_hashtable, system_charset_info, 0, 0, 0, + hash_get_key_for_sp_head, hash_free_sp_head, 0); + version= 0; +} + +void +sp_cache::cleanup() +{ + hash_free(&m_hashtable); +} diff --git a/sql/sp_cache.h b/sql/sp_cache.h new file mode 100644 index 00000000000..754a987090e --- /dev/null +++ b/sql/sp_cache.h @@ -0,0 +1,107 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_CACHE_H_ +#define _SP_CACHE_H_ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +class sp_head; +class sp_cache; + +/* Initialize the SP caching once at startup */ +void sp_cache_init(); + +/* Clear the cache *cp and set *cp to NULL */ +void sp_cache_clear(sp_cache **cp); + +/* Insert an SP to cache. If 'cp' points to NULL, it's set to a new cache */ +void sp_cache_insert(sp_cache **cp, sp_head *sp); + +/* Lookup an SP in cache */ +sp_head *sp_cache_lookup(sp_cache **cp, sp_name *name); + +/* Remove an SP from cache. Returns true if something was removed */ +bool sp_cache_remove(sp_cache **cp, sp_name *name); + +/* Invalidate a cache */ +void sp_cache_invalidate(); + + +/* + * + * The cache class. Don't use this directly, use the C API above + * + */ + +class sp_cache +{ +public: + + ulong version; + + sp_cache(); + + ~sp_cache(); + + void + init(); + + void + cleanup(); + + inline void + insert(sp_head *sp) + { + my_hash_insert(&m_hashtable, (const byte *)sp); + } + + inline sp_head * + lookup(char *name, uint namelen) + { + return (sp_head *)hash_search(&m_hashtable, (const byte *)name, namelen); + } + + inline bool + remove(char *name, uint namelen) + { + sp_head *sp= lookup(name, namelen); + + if (sp) + { + hash_delete(&m_hashtable, (byte *)sp); + return TRUE; + } + return FALSE; + } + + inline void + remove_all() + { + cleanup(); + init(); + } + +private: + + HASH m_hashtable; + +}; // class sp_cache + +#endif /* _SP_CACHE_H_ */ diff --git a/sql/sp_head.cc b/sql/sp_head.cc new file mode 100644 index 00000000000..9afc0c04631 --- /dev/null +++ b/sql/sp_head.cc @@ -0,0 +1,1866 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef __GNUC__ +#pragma implementation +#endif + +#include "mysql_priv.h" +#include "sp_head.h" +#include "sp.h" +#include "sp_pcontext.h" +#include "sp_rcontext.h" + +Item_result +sp_map_result_type(enum enum_field_types type) +{ + switch (type) + { + case MYSQL_TYPE_TINY: + case MYSQL_TYPE_SHORT: + case MYSQL_TYPE_LONG: + case MYSQL_TYPE_LONGLONG: + case MYSQL_TYPE_INT24: + return INT_RESULT; + case MYSQL_TYPE_DECIMAL: + case MYSQL_TYPE_FLOAT: + case MYSQL_TYPE_DOUBLE: + return REAL_RESULT; + default: + return STRING_RESULT; + } +} + +/* + * Returns TRUE if the 'cmd' is a command that might result in + * multiple result sets being sent back. + * Note: This does not include SQLCOM_SELECT which is treated + * separately in sql_yacc.yy. 
+ */ +bool +sp_multi_results_command(enum enum_sql_command cmd) +{ + switch (cmd) { + case SQLCOM_ANALYZE: + case SQLCOM_CHECKSUM: + case SQLCOM_HA_READ: + case SQLCOM_SHOW_BINLOGS: + case SQLCOM_SHOW_BINLOG_EVENTS: + case SQLCOM_SHOW_CHARSETS: + case SQLCOM_SHOW_COLLATIONS: + case SQLCOM_SHOW_COLUMN_TYPES: + case SQLCOM_SHOW_CREATE: + case SQLCOM_SHOW_CREATE_DB: + case SQLCOM_SHOW_CREATE_FUNC: + case SQLCOM_SHOW_CREATE_PROC: + case SQLCOM_SHOW_DATABASES: + case SQLCOM_SHOW_ERRORS: + case SQLCOM_SHOW_FIELDS: + case SQLCOM_SHOW_GRANTS: + case SQLCOM_SHOW_INNODB_STATUS: + case SQLCOM_SHOW_KEYS: + case SQLCOM_SHOW_LOGS: + case SQLCOM_SHOW_MASTER_STAT: + case SQLCOM_SHOW_NEW_MASTER: + case SQLCOM_SHOW_OPEN_TABLES: + case SQLCOM_SHOW_PRIVILEGES: + case SQLCOM_SHOW_PROCESSLIST: + case SQLCOM_SHOW_SLAVE_HOSTS: + case SQLCOM_SHOW_SLAVE_STAT: + case SQLCOM_SHOW_STATUS: + case SQLCOM_SHOW_STATUS_FUNC: + case SQLCOM_SHOW_STATUS_PROC: + case SQLCOM_SHOW_STORAGE_ENGINES: + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_VARIABLES: + case SQLCOM_SHOW_WARNS: + return TRUE; + default: + return FALSE; + } +} + +/* Evaluate a (presumed) func item. Always returns an item, the parameter +** if nothing else. +*/ +Item * +sp_eval_func_item(THD *thd, Item *it, enum enum_field_types type) +{ + DBUG_ENTER("sp_eval_func_item"); + it= it->this_item(); + DBUG_PRINT("info", ("type: %d", type)); + + if (!it->fixed && it->fix_fields(thd, 0, &it)) + { + DBUG_PRINT("info", ("fix_fields() failed")); + DBUG_RETURN(NULL); + } + + /* QQ How do we do this? Is there some better way? */ + if (type == MYSQL_TYPE_NULL) + it= new Item_null(); + else + { + switch (sp_map_result_type(type)) { + case INT_RESULT: + { + longlong i= it->val_int(); + + if (it->null_value) + { + DBUG_PRINT("info", ("INT_RESULT: null")); + it= new Item_null(); + } + else + { + DBUG_PRINT("info", ("INT_RESULT: %d", i)); + it= new Item_int(it->val_int()); + } + break; + } + case REAL_RESULT: + { + double d= it->val_real(); + + if (it->null_value) + { + DBUG_PRINT("info", ("REAL_RESULT: null")); + it= new Item_null(); + } + else + { + /* There's some difference between Item::new_item() and the + * constructor; the former crashes, the latter works... weird. */ + uint8 decimals= it->decimals; + uint32 max_length= it->max_length; + DBUG_PRINT("info", ("REAL_RESULT: %g", d)); + it= new Item_real(it->val_real()); + it->decimals= decimals; + it->max_length= max_length; + } + break; + } + default: + { + char buffer[MAX_FIELD_WIDTH]; + String tmp(buffer, sizeof(buffer), it->collation.collation); + String *s= it->val_str(&tmp); + + if (it->null_value) + { + DBUG_PRINT("info", ("default result: null")); + it= new Item_null(); + } + else + { + DBUG_PRINT("info",("default result: %*s",s->length(),s->c_ptr_quick())); + it= new Item_string(thd->strmake(s->c_ptr_quick(), s->length()), + s->length(), it->collation.collation); + } + break; + } + } + } + + DBUG_RETURN(it); +} + + +/* + * + * sp_name + * + */ + +void +sp_name::init_qname(THD *thd) +{ + m_qname.length= m_db.length+m_name.length+1; + m_qname.str= thd->alloc(m_qname.length+1); + sprintf(m_qname.str, "%*s.%*s", + m_db.length, (m_db.length ? m_db.str : ""), + m_name.length, m_name.str); +} + +sp_name * +sp_name_current_db_new(THD *thd, LEX_STRING name) +{ + sp_name *qname; + + if (! 
thd->db) + qname= new sp_name(name); + else + { + LEX_STRING db; + + db.length= strlen(thd->db); + db.str= thd->strmake(thd->db, db.length); + qname= new sp_name(db, name); + } + qname->init_qname(thd); + return qname; +} + + +/* ------------------------------------------------------------------ */ + + +/* + * + * sp_head + * + */ + +void * +sp_head::operator new(size_t size) +{ + DBUG_ENTER("sp_head::operator new"); + MEM_ROOT own_root; + sp_head *sp; + + init_alloc_root(&own_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC); + sp= (sp_head *) alloc_root(&own_root, size); + sp->main_mem_root= own_root; + DBUG_PRINT("info", ("mem_root 0x%lx", (ulong) &sp->mem_root)); + DBUG_RETURN(sp); +} + +void +sp_head::operator delete(void *ptr, size_t size) +{ + DBUG_ENTER("sp_head::operator delete"); + MEM_ROOT own_root; + sp_head *sp= (sp_head *) ptr; + + /* Make a copy of main_mem_root as free_root will free the sp */ + own_root= sp->main_mem_root; + DBUG_PRINT("info", ("mem_root 0x%lx moved to 0x%lx", + (ulong) &sp->mem_root, (ulong) &own_root)); + free_root(&own_root, MYF(0)); + + DBUG_VOID_RETURN; +} + + +sp_head::sp_head() + :Item_arena((bool)FALSE), m_returns_cs(NULL), m_has_return(FALSE), + m_simple_case(FALSE), m_multi_results(FALSE), m_in_handler(FALSE) +{ + DBUG_ENTER("sp_head::sp_head"); + + state= INITIALIZED; + m_backpatch.empty(); + m_lex.empty(); + DBUG_VOID_RETURN; +} + + +void +sp_head::init(LEX *lex) +{ + DBUG_ENTER("sp_head::init"); + + lex->spcont= m_pcont= new sp_pcontext(NULL); + /* + Altough trg_table_fields list is used only in triggers we init for all + types of stored procedures to simplify reset_lex()/restore_lex() code. + */ + lex->trg_table_fields.empty(); + my_init_dynamic_array(&m_instr, sizeof(sp_instr *), 16, 8); + m_param_begin= m_param_end= m_returns_begin= m_returns_end= m_body_begin= 0; + m_qname.str= m_db.str= m_name.str= m_params.str= m_retstr.str= + m_body.str= m_defstr.str= 0; + m_qname.length= m_db.length= m_name.length= m_params.length= + m_retstr.length= m_body.length= m_defstr.length= 0; + m_returns_cs= NULL; + DBUG_VOID_RETURN; +} + +void +sp_head::init_strings(THD *thd, LEX *lex, sp_name *name) +{ + DBUG_ENTER("sp_head::init_strings"); + uint n; /* Counter for nul trimming */ + /* During parsing, we must use thd->mem_root */ + MEM_ROOT *root= thd->mem_root; + + /* We have to copy strings to get them into the right memroot */ + if (name) + { + m_db.length= name->m_db.length; + if (name->m_db.length == 0) + m_db.str= NULL; + else + m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); + m_name.length= name->m_name.length; + m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); + + if (name->m_qname.length == 0) + name->init_qname(thd); + m_qname.length= name->m_qname.length; + m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); + } + else if (thd->db) + { + m_db.length= thd->db_length; + m_db.str= strmake_root(root, thd->db, m_db.length); + } + + if (m_param_begin && m_param_end) + { + m_params.length= m_param_end - m_param_begin; + m_params.str= strmake_root(root, + (char *)m_param_begin, m_params.length); + } + + if (m_returns_begin && m_returns_end) + { + /* QQ KLUDGE: We can't seem to cut out just the type in the parser + (without the RETURNS), so we'll have to do it here. :-( + Furthermore, if there's a character type as well, it's not include + (beyond the m_returns_end pointer), in which case we need + m_returns_cs. 
*/ + char *p= (char *)m_returns_begin+strspn((char *)m_returns_begin,"\t\n\r "); + p+= strcspn(p, "\t\n\r "); + p+= strspn(p, "\t\n\r "); + if (p < (char *)m_returns_end) + m_returns_begin= (uchar *)p; + /* While we're at it, trim the end too. */ + p= (char *)m_returns_end-1; + while (p > (char *)m_returns_begin && + (*p == '\t' || *p == '\n' || *p == '\r' || *p == ' ')) + p-= 1; + m_returns_end= (uchar *)p+1; + if (m_returns_cs) + { + String s((char *)m_returns_begin, m_returns_end - m_returns_begin, + system_charset_info); + + s.append(' '); + s.append(m_returns_cs->csname); + m_retstr.length= s.length(); + m_retstr.str= strmake_root(root, s.ptr(), m_retstr.length); + } + else + { + m_retstr.length= m_returns_end - m_returns_begin; + m_retstr.str= strmake_root(root, + (char *)m_returns_begin, m_retstr.length); + } + } + m_body.length= lex->ptr - m_body_begin; + /* Trim nuls at the end */ + n= 0; + while (m_body.length && m_body_begin[m_body.length-1] == '\0') + { + m_body.length-= 1; + n+= 1; + } + m_body.str= strmake_root(root, (char *)m_body_begin, m_body.length); + m_defstr.length= lex->ptr - lex->buf; + m_defstr.length-= n; + m_defstr.str= strmake_root(root, (char *)lex->buf, m_defstr.length); + DBUG_VOID_RETURN; +} + +int +sp_head::create(THD *thd) +{ + DBUG_ENTER("sp_head::create"); + int ret; + + DBUG_PRINT("info", ("type: %d name: %s params: %s body: %s", + m_type, m_name.str, m_params.str, m_body.str)); + +#ifndef DBUG_OFF + optimize(); + { + String s; + sp_instr *i; + uint ip= 0; + while ((i = get_instr(ip))) + { + char buf[8]; + + sprintf(buf, "%4u: ", ip); + s.append(buf); + i->print(&s); + s.append('\n'); + ip+= 1; + } + s.append('\0'); + DBUG_PRINT("info", ("Code %s\n%s", m_qname.str, s.ptr())); + } +#endif + + if (m_type == TYPE_ENUM_FUNCTION) + ret= sp_create_function(thd, this); + else + ret= sp_create_procedure(thd, this); + + DBUG_RETURN(ret); +} + +sp_head::~sp_head() +{ + destroy(); + if (m_thd) + restore_thd_mem_root(m_thd); +} + +void +sp_head::destroy() +{ + DBUG_ENTER("sp_head::destroy"); + DBUG_PRINT("info", ("name: %s", m_name.str)); + sp_instr *i; + LEX *lex; + + for (uint ip = 0 ; (i = get_instr(ip)) ; ip++) + delete i; + delete_dynamic(&m_instr); + m_pcont->destroy(); + free_items(free_list); + while ((lex= (LEX *)m_lex.pop())) + { + if (lex != &m_thd->main_lex) // We got interrupted and have lex'es left + delete lex; + } + DBUG_VOID_RETURN; +} + +int +sp_head::execute(THD *thd) +{ + DBUG_ENTER("sp_head::execute"); + char olddb[128]; + bool dbchanged; + sp_rcontext *ctx; + int ret= 0; + uint ip= 0; + Item_arena *old_arena; + + +#ifndef EMBEDDED_LIBRARY + if (check_stack_overrun(thd, olddb)) + { + DBUG_RETURN(-1); + } +#endif + + dbchanged= FALSE; + if (m_db.length && + (ret= sp_use_new_db(thd, m_db.str, olddb, sizeof(olddb), 0, &dbchanged))) + goto done; + + if ((ctx= thd->spcont)) + ctx->clear_handler(); + thd->query_error= 0; + old_arena= thd->current_arena; + thd->current_arena= this; + + do + { + sp_instr *i; + uint hip; // Handler ip + + i = get_instr(ip); // Returns NULL when we're done. + if (i == NULL) + break; + DBUG_PRINT("execute", ("Instruction %u", ip)); + ret= i->execute(thd, &ip); + if (i->free_list) + cleanup_items(i->free_list); + // Check if an exception has occurred and a handler has been found + // Note: We havo to check even if ret==0, since warnings (and some + // errors don't return a non-zero value. 
+ if (!thd->killed && ctx) + { + uint hf; + + switch (ctx->found_handler(&hip, &hf)) + { + case SP_HANDLER_NONE: + break; + case SP_HANDLER_CONTINUE: + ctx->save_variables(hf); + ctx->push_hstack(ip); + // Fall through + default: + ip= hip; + ret= 0; + ctx->clear_handler(); + ctx->in_handler= TRUE; + thd->clear_error(); + continue; + } + } + } while (ret == 0 && !thd->killed); + + cleanup_items(thd->current_arena->free_list); + thd->current_arena= old_arena; + + done: + DBUG_PRINT("info", ("ret=%d killed=%d query_error=%d", + ret, thd->killed, thd->query_error)); + + if (thd->killed) + ret= -1; + /* If the DB has changed, the pointer has changed too, but the + original thd->db will then have been freed */ + if (dbchanged) + { + if (! thd->killed) + ret= sp_change_db(thd, olddb, 0); + } + DBUG_RETURN(ret); +} + + +int +sp_head::execute_function(THD *thd, Item **argp, uint argcount, Item **resp) +{ + DBUG_ENTER("sp_head::execute_function"); + DBUG_PRINT("info", ("function %s", m_name.str)); + uint csize = m_pcont->max_pvars(); + uint params = m_pcont->current_pvars(); + uint hmax = m_pcont->max_handlers(); + uint cmax = m_pcont->max_cursors(); + sp_rcontext *octx = thd->spcont; + sp_rcontext *nctx = NULL; + uint i; + int ret; + + if (argcount != params) + { + // Need to use my_printf_error here, or it will not terminate the + // invoking query properly. + my_error(ER_SP_WRONG_NO_OF_ARGS, MYF(0), + "FUNCTION", m_name.str, params, argcount); + DBUG_RETURN(-1); + } + + // QQ Should have some error checking here? (types, etc...) + nctx= new sp_rcontext(csize, hmax, cmax); + for (i= 0 ; i < params && i < argcount ; i++) + { + sp_pvar_t *pvar = m_pcont->find_pvar(i); + Item *it= sp_eval_func_item(thd, *argp++, pvar->type); + + if (it) + nctx->push_item(it); + else + { + DBUG_RETURN(-1); + } + } +#ifdef NOT_WORKING + /* + Close tables opened for subselect in argument list + This can't be done as this will close all other tables used + by the query. + */ + close_thread_tables(thd); +#endif + // The rest of the frame are local variables which are all IN. + // Default all variables to null (those with default clauses will + // be set by an set instruction). + { + Item_null *nit= NULL; // Re-use this, and only create if needed + for (; i < csize ; i++) + { + if (! 
nit) + nit= new Item_null(); + nctx->push_item(nit); + } + } + thd->spcont= nctx; + + ret= execute(thd); + + if (m_type == TYPE_ENUM_FUNCTION && ret == 0) + { + /* We need result only in function but not in trigger */ + Item *it= nctx->get_result(); + + if (it) + *resp= it; + else + { + my_error(ER_SP_NORETURNEND, MYF(0), m_name.str); + ret= -1; + } + } + + nctx->pop_all_cursors(); // To avoid memory leaks after an error + thd->spcont= octx; + DBUG_RETURN(ret); +} + +int +sp_head::execute_procedure(THD *thd, List<Item> *args) +{ + DBUG_ENTER("sp_head::execute_procedure"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + int ret= 0; + uint csize = m_pcont->max_pvars(); + uint params = m_pcont->current_pvars(); + uint hmax = m_pcont->max_handlers(); + uint cmax = m_pcont->max_cursors(); + sp_rcontext *octx = thd->spcont; + sp_rcontext *nctx = NULL; + my_bool tmp_octx = FALSE; // True if we have allocated a temporary octx + + if (args->elements != params) + { + my_error(ER_SP_WRONG_NO_OF_ARGS, MYF(0), "PROCEDURE", + m_name.str, params, args->elements); + DBUG_RETURN(-1); + } + + if (csize > 0 || hmax > 0 || cmax > 0) + { + Item_null *nit= NULL; // Re-use this, and only create if needed + uint i; + List_iterator_fast<Item> li(*args); + Item *it; + + nctx= new sp_rcontext(csize, hmax, cmax); + if (! octx) + { // Create a temporary old context + octx= new sp_rcontext(csize, hmax, cmax); + tmp_octx= TRUE; + } + // QQ: Should do type checking? + for (i = 0 ; (it= li++) && i < params ; i++) + { + sp_pvar_t *pvar = m_pcont->find_pvar(i); + + if (! pvar) + nctx->set_oindex(i, -1); // Shouldn't happen + else + { + if (pvar->mode == sp_param_out) + { + if (! nit) + nit= new Item_null(); + nctx->push_item(nit); // OUT + } + else + { + Item *it2= sp_eval_func_item(thd, it,pvar->type); + + if (it2) + nctx->push_item(it2); // IN or INOUT + else + { + ret= -1; // Eval failed + break; + } + } + // Note: If it's OUT or INOUT, it must be a variable. + // QQ: We can check for global variables here, or should we do it + // while parsing? + if (pvar->mode == sp_param_in) + nctx->set_oindex(i, -1); // IN + else // OUT or INOUT + nctx->set_oindex(i, static_cast<Item_splocal *>(it)->get_offset()); + } + } + // Clean up the joins before closing the tables. + thd->lex->unit.cleanup(); + // Close tables opened for subselect in argument list + close_thread_tables(thd); + + // The rest of the frame are local variables which are all IN. + // Default all variables to null (those with default clauses will + // be set by an set instruction). + for (; i < csize ; i++) + { + if (! nit) + nit= new Item_null(); + nctx->push_item(nit); + } + thd->spcont= nctx; + } + + if (! ret) + ret= execute(thd); + + if (!ret && csize > 0) + { + List_iterator_fast<Item> li(*args); + Item *it; + + // Copy back all OUT or INOUT values to the previous frame, or + // set global user variables + for (uint i = 0 ; (it= li++) && i < params ; i++) + { + int oi = nctx->get_oindex(i); + + if (oi >= 0) + { + if (! tmp_octx) + octx->set_item(nctx->get_oindex(i), nctx->get_item(i)); + else + { + // QQ Currently we just silently ignore non-user-variable arguments. 
+ // We should check this during parsing, when setting up the call + // above + if (it->type() == Item::FUNC_ITEM) + { + Item_func *fi= static_cast<Item_func*>(it); + + if (fi->functype() == Item_func::GUSERVAR_FUNC) + { // A global user variable + Item *item= nctx->get_item(i); + Item_func_set_user_var *suv; + Item_func_get_user_var *guv= + static_cast<Item_func_get_user_var*>(fi); + + suv= new Item_func_set_user_var(guv->get_name(), item); + suv->fix_fields(thd, NULL, &item); + suv->fix_length_and_dec(); + suv->check(); + suv->update(); + } + } + } + } + } + } + + if (tmp_octx) + octx= NULL; + if (nctx) + nctx->pop_all_cursors(); // To avoid memory leaks after an error + thd->spcont= octx; + + DBUG_RETURN(ret); +} + + +// Reset lex during parsing, before we parse a sub statement. +void +sp_head::reset_lex(THD *thd) +{ + DBUG_ENTER("sp_head::reset_lex"); + LEX *sublex; + LEX *oldlex= thd->lex; + + (void)m_lex.push_front(oldlex); + thd->lex= sublex= new st_lex; + + /* Reset most stuff. The length arguments doesn't matter here. */ + lex_start(thd, oldlex->buf, (ulong) (oldlex->end_of_query - oldlex->ptr)); + + /* We must reset ptr and end_of_query again */ + sublex->ptr= oldlex->ptr; + sublex->end_of_query= oldlex->end_of_query; + sublex->tok_start= oldlex->tok_start; + sublex->yylineno= oldlex->yylineno; + /* And keep the SP stuff too */ + sublex->sphead= oldlex->sphead; + sublex->spcont= oldlex->spcont; + /* And trigger related stuff too */ + sublex->trg_chistics= oldlex->trg_chistics; + sublex->trg_table_fields.empty(); + sublex->sp_lex_in_use= FALSE; + DBUG_VOID_RETURN; +} + +// Restore lex during parsing, after we have parsed a sub statement. +void +sp_head::restore_lex(THD *thd) +{ + DBUG_ENTER("sp_head::restore_lex"); + LEX *sublex= thd->lex; + LEX *oldlex= (LEX *)m_lex.pop(); + + if (! oldlex) + return; // Nothing to restore + + // Update some state in the old one first + oldlex->ptr= sublex->ptr; + oldlex->next_state= sublex->next_state; + oldlex->trg_table_fields.push_back(&sublex->trg_table_fields); + + // Collect some data from the sub statement lex. + sp_merge_funs(oldlex, sublex); +#ifdef NOT_USED_NOW + // QQ We're not using this at the moment. + if (sublex.sql_command == SQLCOM_CALL) + { + // It would be slightly faster to keep the list sorted, but we need + // an "insert before" method to do that. + char *proc= sublex.udf.name.str; + + List_iterator_fast<char *> li(m_calls); + char **it; + + while ((it= li++)) + if (my_strcasecmp(system_charset_info, proc, *it) == 0) + break; + if (! it) + m_calls.push_back(&proc); + + } + // Merge used tables + // QQ ...or just open tables in thd->open_tables? + // This is not entirerly clear at the moment, but for now, we collect + // tables here. + for (sl= sublex.all_selects_list ; + sl ; + sl= sl->next_select()) + { + for (TABLE_LIST *tables= sl->get_table_list() ; + tables ; + tables= tables->next) + { + List_iterator_fast<char *> li(m_tables); + char **tb; + + while ((tb= li++)) + if (my_strcasecmp(system_charset_info, tables->real_name, *tb) == 0) + break; + if (! tb) + m_tables.push_back(&tables->real_name); + } + } +#endif + if (! 
sublex->sp_lex_in_use) + delete sublex; + thd->lex= oldlex; + DBUG_VOID_RETURN; +} + +void +sp_head::push_backpatch(sp_instr *i, sp_label_t *lab) +{ + bp_t *bp= (bp_t *)sql_alloc(sizeof(bp_t)); + + if (bp) + { + bp->lab= lab; + bp->instr= i; + (void)m_backpatch.push_front(bp); + } +} + +void +sp_head::backpatch(sp_label_t *lab) +{ + bp_t *bp; + uint dest= instructions(); + List_iterator_fast<bp_t> li(m_backpatch); + + while ((bp= li++)) + { + if (bp->lab == lab || + (bp->lab->type == SP_LAB_REF && + my_strcasecmp(system_charset_info, bp->lab->name, lab->name) == 0)) + { + if (bp->lab->type != SP_LAB_REF) + bp->instr->backpatch(dest, lab->ctx); + else + { + sp_label_t *dstlab= bp->lab->ctx->find_label(lab->name); + + if (dstlab) + { + bp->lab= lab; + bp->instr->backpatch(dest, dstlab->ctx); + } + } + } + } +} + +int +sp_head::check_backpatch(THD *thd) +{ + bp_t *bp; + List_iterator_fast<bp_t> li(m_backpatch); + + while ((bp= li++)) + { + if (bp->lab->type == SP_LAB_REF) + { + my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "GOTO", bp->lab->name); + return -1; + } + } + return 0; +} + +void +sp_head::set_info(char *definer, uint definerlen, + longlong created, longlong modified, + st_sp_chistics *chistics, ulong sql_mode) +{ + char *p= strchr(definer, '@'); + uint len; + + if (! p) + p= definer; // Weird... + len= p-definer; + m_definer_user.str= strmake_root(mem_root, definer, len); + m_definer_user.length= len; + len= definerlen-len-1; + m_definer_host.str= strmake_root(mem_root, p+1, len); + m_definer_host.length= len; + m_created= created; + m_modified= modified; + m_chistics= (st_sp_chistics *) memdup_root(mem_root, (char*) chistics, + sizeof(*chistics)); + if (m_chistics->comment.length == 0) + m_chistics->comment.str= 0; + else + m_chistics->comment.str= strmake_root(mem_root, + m_chistics->comment.str, + m_chistics->comment.length); + m_sql_mode= sql_mode; +} + +void +sp_head::reset_thd_mem_root(THD *thd) +{ + DBUG_ENTER("sp_head::reset_thd_mem_root"); + m_thd_root= thd->mem_root; + thd->mem_root= &main_mem_root; + DBUG_PRINT("info", ("mem_root 0x%lx moved to thd mem root 0x%lx", + (ulong) &mem_root, (ulong) &thd->mem_root)); + free_list= thd->free_list; // Keep the old list + thd->free_list= NULL; // Start a new one + /* Copy the db, since substatements will point to it */ + m_thd_db= thd->db; + thd->db= thd->strmake(thd->db, thd->db_length); + m_thd= thd; + DBUG_VOID_RETURN; +} + +void +sp_head::restore_thd_mem_root(THD *thd) +{ + DBUG_ENTER("sp_head::restore_thd_mem_root"); + Item *flist= free_list; // The old list + set_item_arena(thd); // Get new free_list and mem_root + state= INITIALIZED; + + DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx", + (ulong) &mem_root, (ulong) &thd->mem_root)); + thd->free_list= flist; // Restore the old one + thd->db= m_thd_db; // Restore the original db pointer + thd->mem_root= m_thd_root; + m_thd= NULL; + DBUG_VOID_RETURN; +} + + +int +sp_head::show_create_procedure(THD *thd) +{ + Protocol *protocol= thd->protocol; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); + int res; + List<Item> field_list; + ulong old_sql_mode; + sys_var *sql_mode_var; + byte *sql_mode_str; + ulong sql_mode_len; + + DBUG_ENTER("sp_head::show_create_procedure"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + LINT_INIT(sql_mode_str); + LINT_INIT(sql_mode_len); + + old_sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode= m_sql_mode; + sql_mode_var= find_sys_var("SQL_MODE", 8); + if (sql_mode_var) + { + sql_mode_str= 
sql_mode_var->value_ptr(thd, OPT_SESSION, 0); + sql_mode_len= strlen((char*) sql_mode_str); + } + + field_list.push_back(new Item_empty_string("Procedure", NAME_LEN)); + if (sql_mode_var) + field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + // 1024 is for not to confuse old clients + field_list.push_back(new Item_empty_string("Create Procedure", + max(buffer.length(), 1024))); + if (protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)) + { + res= 1; + goto done; + } + protocol->prepare_for_resend(); + protocol->store(m_name.str, m_name.length, system_charset_info); + if (sql_mode_var) + protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info); + protocol->store(m_defstr.str, m_defstr.length, system_charset_info); + res= protocol->write(); + send_eof(thd); + + done: + thd->variables.sql_mode= old_sql_mode; + DBUG_RETURN(res); +} + + +/* + Add instruction to SP + + SYNOPSIS + sp_head::add_instr() + instr Instruction +*/ + +void sp_head::add_instr(sp_instr *instr) +{ + instr->free_list= m_thd->free_list; + m_thd->free_list= 0; + insert_dynamic(&m_instr, (gptr)&instr); +} + + +int +sp_head::show_create_function(THD *thd) +{ + Protocol *protocol= thd->protocol; + char buff[2048]; + String buffer(buff, sizeof(buff), system_charset_info); + int res; + List<Item> field_list; + ulong old_sql_mode; + sys_var *sql_mode_var; + byte *sql_mode_str; + ulong sql_mode_len; + DBUG_ENTER("sp_head::show_create_function"); + DBUG_PRINT("info", ("procedure %s", m_name.str)); + LINT_INIT(sql_mode_str); + LINT_INIT(sql_mode_len); + + old_sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode= m_sql_mode; + sql_mode_var= find_sys_var("SQL_MODE", 8); + if (sql_mode_var) + { + sql_mode_str= sql_mode_var->value_ptr(thd, OPT_SESSION, 0); + sql_mode_len= strlen((char*) sql_mode_str); + } + + field_list.push_back(new Item_empty_string("Function",NAME_LEN)); + if (sql_mode_var) + field_list.push_back(new Item_empty_string("sql_mode", sql_mode_len)); + field_list.push_back(new Item_empty_string("Create Function", + max(buffer.length(),1024))); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + { + res= 1; + goto done; + } + protocol->prepare_for_resend(); + protocol->store(m_name.str, m_name.length, system_charset_info); + if (sql_mode_var) + protocol->store((char*) sql_mode_str, sql_mode_len, system_charset_info); + protocol->store(m_defstr.str, m_defstr.length, system_charset_info); + res= protocol->write(); + send_eof(thd); + + done: + thd->variables.sql_mode= old_sql_mode; + DBUG_RETURN(res); +} + +void +sp_head::optimize() +{ + List<sp_instr> bp; + sp_instr *i; + uint src, dst; + + opt_mark(0); + + bp.empty(); + src= dst= 0; + while ((i= get_instr(src))) + { + if (! 
i->marked) + { + delete i; + src+= 1; + } + else + { + if (src != dst) + { + sp_instr *ibp; + List_iterator_fast<sp_instr> li(bp); + + set_dynamic(&m_instr, (gptr)&i, dst); + while ((ibp= li++)) + { + sp_instr_jump *ji= static_cast<sp_instr_jump *>(ibp); + if (ji->m_dest == src) + ji->m_dest= dst; + } + } + i->opt_move(dst, &bp); + src+= 1; + dst+= 1; + } + } + m_instr.elements= dst; + bp.empty(); +} + +void +sp_head::opt_mark(uint ip) +{ + sp_instr *i; + + while ((i= get_instr(ip)) && !i->marked) + ip= i->opt_mark(this); +} + +// ------------------------------------------------------------------ + +// +// sp_instr_stmt +// +sp_instr_stmt::~sp_instr_stmt() +{ + if (m_lex) + delete m_lex; +} + +int +sp_instr_stmt::execute(THD *thd, uint *nextp) +{ + char *query; + uint32 query_length; + DBUG_ENTER("sp_instr_stmt::execute"); + DBUG_PRINT("info", ("command: %d", m_lex->sql_command)); + int res; + + query= thd->query; + query_length= thd->query_length; + if (!(res= alloc_query(thd, m_query.str, m_query.length+1))) + { + if (query_cache_send_result_to_client(thd, + thd->query, thd->query_length) <= 0) + { + res= exec_stmt(thd, m_lex); + query_cache_end_of_result(thd); + } + thd->query= query; + thd->query_length= query_length; + } + *nextp = m_ip+1; + DBUG_RETURN(res); +} + +void +sp_instr_stmt::print(String *str) +{ + str->reserve(12); + str->append("stmt "); + str->qs_append((uint)m_lex->sql_command); +} + + +int +sp_instr_stmt::exec_stmt(THD *thd, LEX *lex) +{ + LEX *olex; // The other lex + int res; + + olex= thd->lex; // Save the other lex + thd->lex= lex; // Use my own lex + thd->lex->thd = thd; // QQ Not reentrant! + thd->lex->unit.thd= thd; // QQ Not reentrant + thd->free_list= NULL; + + VOID(pthread_mutex_lock(&LOCK_thread_count)); + thd->query_id= query_id++; + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + + reset_stmt_for_execute(thd, lex); + + res= mysql_execute_command(thd); + + lex->unit.cleanup(); + if (thd->lock || thd->open_tables || thd->derived_tables) + { + thd->proc_info="closing tables"; + close_thread_tables(thd); /* Free tables */ + } + + thd->lex= olex; // Restore the other lex + + return res; +} + +// +// sp_instr_set +// +int +sp_instr_set::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_set::execute"); + DBUG_PRINT("info", ("offset: %u", m_offset)); + Item *it; + int res; + + if (tables && + ((res= check_table_access(thd, SELECT_ACL, tables, 0)) || + (res= open_and_lock_tables(thd, tables)))) + DBUG_RETURN(res); + + it= sp_eval_func_item(thd, m_value, m_type); + if (! it) + res= -1; + else + { + res= 0; + thd->spcont->set_item(m_offset, it); + } + *nextp = m_ip+1; + if (tables && (thd->lock || thd->open_tables || thd->derived_tables)) + close_thread_tables(thd); + DBUG_RETURN(res); +} + +void +sp_instr_set::print(String *str) +{ + str->reserve(12); + str->append("set "); + str->qs_append(m_offset); + str->append(' '); + m_value->print(str); +} + +// +// sp_instr_set_user_var +// +int +sp_instr_set_user_var::execute(THD *thd, uint *nextp) +{ + int res= 0; + + DBUG_ENTER("sp_instr_set_user_var::execute"); + /* + It is ok to pass 0 as 3rd argument to fix_fields() since + Item_func_set_user_var::fix_fields() won't use it. + QQ: Still unsure what should we return in case of error 1 or -1 ? 
+ */ + if (!m_set_var_item.fixed && m_set_var_item.fix_fields(thd, 0, 0) || + m_set_var_item.check() || m_set_var_item.update()) + res= -1; + *nextp= m_ip + 1; + DBUG_RETURN(res); +} + +void +sp_instr_set_user_var::print(String *str) +{ + m_set_var_item.print_as_stmt(str); +} + +// +// sp_instr_set_trigger_field +// +int +sp_instr_set_trigger_field::execute(THD *thd, uint *nextp) +{ + int res= 0; + + DBUG_ENTER("sp_instr_set_trigger_field::execute"); + /* QQ: Still unsure what should we return in case of error 1 or -1 ? */ + if (!value->fixed && value->fix_fields(thd, 0, &value) || + trigger_field.fix_fields(thd, 0, 0) || + (value->save_in_field(trigger_field.field, 0) < 0)) + res= -1; + *nextp= m_ip + 1; + DBUG_RETURN(res); +} + +void +sp_instr_set_trigger_field::print(String *str) +{ + str->append("set ", 4); + trigger_field.print(str); + str->append(":=", 2); + value->print(str); +} + +// +// sp_instr_jump +// +int +sp_instr_jump::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_jump::execute"); + DBUG_PRINT("info", ("destination: %u", m_dest)); + + *nextp= m_dest; + DBUG_RETURN(0); +} + +void +sp_instr_jump::print(String *str) +{ + str->reserve(12); + str->append("jump "); + str->qs_append(m_dest); +} + +uint +sp_instr_jump::opt_mark(sp_head *sp) +{ + m_dest= opt_shortcut_jump(sp, this); + if (m_dest != m_ip+1) /* Jumping to following instruction? */ + marked= 1; + m_optdest= sp->get_instr(m_dest); + return m_dest; +} + +uint +sp_instr_jump::opt_shortcut_jump(sp_head *sp, sp_instr *start) +{ + uint dest= m_dest; + sp_instr *i; + + while ((i= sp->get_instr(dest))) + { + uint ndest; + + if (start == i) + break; + ndest= i->opt_shortcut_jump(sp, start); + if (ndest == dest) + break; + dest= ndest; + } + return dest; +} + +void +sp_instr_jump::opt_move(uint dst, List<sp_instr> *bp) +{ + if (m_dest > m_ip) + bp->push_back(this); // Forward + else if (m_optdest) + m_dest= m_optdest->m_ip; // Backward + m_ip= dst; +} + +// +// sp_instr_jump_if +// +int +sp_instr_jump_if::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_jump_if::execute"); + DBUG_PRINT("info", ("destination: %u", m_dest)); + Item *it; + int res; + + if (tables && + ((res= check_table_access(thd, SELECT_ACL, tables, 0)) || + (res= open_and_lock_tables(thd, tables)))) + DBUG_RETURN(res); + + it= sp_eval_func_item(thd, m_expr, MYSQL_TYPE_TINY); + if (!it) + res= -1; + else + { + res= 0; + if (it->val_int()) + *nextp = m_dest; + else + *nextp = m_ip+1; + } + if (tables && (thd->lock || thd->open_tables || thd->derived_tables)) + close_thread_tables(thd); + DBUG_RETURN(res); +} + +void +sp_instr_jump_if::print(String *str) +{ + str->reserve(12); + str->append("jump_if "); + str->qs_append(m_dest); + str->append(' '); + m_expr->print(str); +} + +uint +sp_instr_jump_if::opt_mark(sp_head *sp) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_dest))) + { + m_dest= i->opt_shortcut_jump(sp, this); + m_optdest= sp->get_instr(m_dest); + } + sp->opt_mark(m_dest); + return m_ip+1; +} + +// +// sp_instr_jump_if_not +// +int +sp_instr_jump_if_not::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_jump_if_not::execute"); + DBUG_PRINT("info", ("destination: %u", m_dest)); + Item *it; + int res; + + if (tables && + ((res= check_table_access(thd, SELECT_ACL, tables, 0)) || + (res= open_and_lock_tables(thd, tables)))) + DBUG_RETURN(res); + + it= sp_eval_func_item(thd, m_expr, MYSQL_TYPE_TINY); + if (! it) + res= -1; + else + { + res= 0; + if (! 
it->val_int()) + *nextp = m_dest; + else + *nextp = m_ip+1; + } + if (tables && (thd->lock || thd->open_tables || thd->derived_tables)) + close_thread_tables(thd); + DBUG_RETURN(res); +} + +void +sp_instr_jump_if_not::print(String *str) +{ + str->reserve(16); + str->append("jump_if_not "); + str->qs_append(m_dest); + str->append(' '); + m_expr->print(str); +} + +uint +sp_instr_jump_if_not::opt_mark(sp_head *sp) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_dest))) + { + m_dest= i->opt_shortcut_jump(sp, this); + m_optdest= sp->get_instr(m_dest); + } + sp->opt_mark(m_dest); + return m_ip+1; +} + +// +// sp_instr_freturn +// +int +sp_instr_freturn::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_freturn::execute"); + Item *it; + int res; + + if (tables && + ((res= check_table_access(thd, SELECT_ACL, tables, 0)) || + (res= open_and_lock_tables(thd, tables)))) + DBUG_RETURN(res); + + it= sp_eval_func_item(thd, m_value, m_type); + if (! it) + res= -1; + else + { + res= 0; + thd->spcont->set_result(it); + } + *nextp= UINT_MAX; + DBUG_RETURN(res); +} + +void +sp_instr_freturn::print(String *str) +{ + str->reserve(12); + str->append("freturn "); + str->qs_append((uint)m_type); + str->append(' '); + m_value->print(str); +} + +// +// sp_instr_hpush_jump +// +int +sp_instr_hpush_jump::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hpush_jump::execute"); + List_iterator_fast<sp_cond_type_t> li(m_cond); + sp_cond_type_t *p; + + while ((p= li++)) + thd->spcont->push_handler(p, m_handler, m_type, m_frame); + + *nextp= m_dest; + DBUG_RETURN(0); +} + +void +sp_instr_hpush_jump::print(String *str) +{ + str->reserve(32); + str->append("hpush_jump "); + str->qs_append(m_dest); + str->append(" t="); + str->qs_append(m_type); + str->append(" f="); + str->qs_append(m_frame); + str->append(" h="); + str->qs_append(m_handler); +} + +uint +sp_instr_hpush_jump::opt_mark(sp_head *sp) +{ + sp_instr *i; + + marked= 1; + if ((i= sp->get_instr(m_dest))) + { + m_dest= i->opt_shortcut_jump(sp, this); + m_optdest= sp->get_instr(m_dest); + } + sp->opt_mark(m_dest); + return m_ip+1; +} + +// +// sp_instr_hpop +// +int +sp_instr_hpop::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hpop::execute"); + thd->spcont->pop_handlers(m_count); + *nextp= m_ip+1; + DBUG_RETURN(0); +} + +void +sp_instr_hpop::print(String *str) +{ + str->reserve(12); + str->append("hpop "); + str->qs_append(m_count); +} + +void +sp_instr_hpop::backpatch(uint dest, sp_pcontext *dst_ctx) +{ + m_count= m_ctx->diff_handlers(dst_ctx); +} + + +// +// sp_instr_hreturn +// +int +sp_instr_hreturn::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_hreturn::execute"); + if (m_dest) + *nextp= m_dest; + else + { + thd->spcont->restore_variables(m_frame); + *nextp= thd->spcont->pop_hstack(); + } + thd->spcont->in_handler= FALSE; + DBUG_RETURN(0); +} + +void +sp_instr_hreturn::print(String *str) +{ + str->reserve(16); + str->append("hreturn "); + str->qs_append(m_frame); + if (m_dest) + str->qs_append(m_dest); +} + +uint +sp_instr_hreturn::opt_mark(sp_head *sp) +{ + if (m_dest) + return sp_instr_jump::opt_mark(sp); + else + { + marked= 1; + return UINT_MAX; + } +} + + +// +// sp_instr_cpush +// +int +sp_instr_cpush::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_cpush::execute"); + thd->spcont->push_cursor(m_lex); + *nextp= m_ip+1; + DBUG_RETURN(0); +} + +sp_instr_cpush::~sp_instr_cpush() +{ + if (m_lex) + delete m_lex; +} + +void +sp_instr_cpush::print(String *str) +{ + str->append("cpush"); +} + +// +// 
sp_instr_cpop +// +int +sp_instr_cpop::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_cpop::execute"); + thd->spcont->pop_cursors(m_count); + *nextp= m_ip+1; + DBUG_RETURN(0); +} + +void +sp_instr_cpop::print(String *str) +{ + str->reserve(12); + str->append("cpop "); + str->qs_append(m_count); +} + +void +sp_instr_cpop::backpatch(uint dest, sp_pcontext *dst_ctx) +{ + m_count= m_ctx->diff_cursors(dst_ctx); +} + +// +// sp_instr_copen +// +int +sp_instr_copen::execute(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + DBUG_ENTER("sp_instr_copen::execute"); + + if (! c) + res= -1; + else + { + LEX *lex= c->pre_open(thd); + + if (! lex) + res= -1; + else + res= exec_stmt(thd, lex); + c->post_open(thd, (lex ? TRUE : FALSE)); + } + + *nextp= m_ip+1; + DBUG_RETURN(res); +} + +void +sp_instr_copen::print(String *str) +{ + str->reserve(12); + str->append("copen "); + str->qs_append(m_cursor); +} + +// +// sp_instr_cclose +// +int +sp_instr_cclose::execute(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + DBUG_ENTER("sp_instr_cclose::execute"); + + if (! c) + res= -1; + else + res= c->close(thd); + *nextp= m_ip+1; + DBUG_RETURN(res); +} + +void +sp_instr_cclose::print(String *str) +{ + str->reserve(12); + str->append("cclose "); + str->qs_append(m_cursor); +} + +// +// sp_instr_cfetch +// +int +sp_instr_cfetch::execute(THD *thd, uint *nextp) +{ + sp_cursor *c= thd->spcont->get_cursor(m_cursor); + int res; + DBUG_ENTER("sp_instr_cfetch::execute"); + + if (! c) + res= -1; + else + res= c->fetch(thd, &m_varlist); + *nextp= m_ip+1; + DBUG_RETURN(res); +} + +void +sp_instr_cfetch::print(String *str) +{ + List_iterator_fast<struct sp_pvar> li(m_varlist); + sp_pvar_t *pv; + + str->reserve(12); + str->append("cfetch "); + str->qs_append(m_cursor); + while ((pv= li++)) + { + str->reserve(8); + str->append(' '); + str->qs_append(pv->offset); + } +} + +// +// sp_instr_error +// +int +sp_instr_error::execute(THD *thd, uint *nextp) +{ + DBUG_ENTER("sp_instr_error::execute"); + + my_message(m_errcode, ER(m_errcode), MYF(0)); + *nextp= m_ip+1; + DBUG_RETURN(-1); +} + +void +sp_instr_error::print(String *str) +{ + str->reserve(12); + str->append("error "); + str->qs_append(m_errcode); +} + +/* ------------------------------------------------------------------ */ + + +// +// Security context swapping +// + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +void +sp_change_security_context(THD *thd, sp_head *sp, st_sp_security_context *ctxp) +{ + ctxp->changed= (sp->m_chistics->suid != SP_IS_NOT_SUID && + (strcmp(sp->m_definer_user.str, thd->priv_user) || + strcmp(sp->m_definer_host.str, thd->priv_host))); + + if (ctxp->changed) + { + ctxp->master_access= thd->master_access; + ctxp->db_access= thd->db_access; + ctxp->priv_user= thd->priv_user; + strncpy(ctxp->priv_host, thd->priv_host, sizeof(ctxp->priv_host)); + ctxp->user= thd->user; + ctxp->host= thd->host; + ctxp->ip= thd->ip; + + /* Change thise just to do the acl_getroot_no_password */ + thd->user= sp->m_definer_user.str; + thd->host= thd->ip = sp->m_definer_host.str; + + if (acl_getroot_no_password(thd)) + { // Failed, run as invoker for now + ctxp->changed= FALSE; + thd->master_access= ctxp->master_access; + thd->db_access= ctxp->db_access; + thd->priv_user= ctxp->priv_user; + strncpy(thd->priv_host, ctxp->priv_host, sizeof(thd->priv_host)); + } + + /* Restore these immiediately */ + thd->user= ctxp->user; + thd->host= ctxp->host; + thd->ip= ctxp->ip; + } +} + +void 
+sp_restore_security_context(THD *thd, sp_head *sp, st_sp_security_context *ctxp) +{ + if (ctxp->changed) + { + ctxp->changed= FALSE; + thd->master_access= ctxp->master_access; + thd->db_access= ctxp->db_access; + thd->priv_user= ctxp->priv_user; + strncpy(thd->priv_host, ctxp->priv_host, sizeof(thd->priv_host)); + } +} + +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ diff --git a/sql/sp_head.h b/sql/sp_head.h new file mode 100644 index 00000000000..c4d2068661c --- /dev/null +++ b/sql/sp_head.h @@ -0,0 +1,900 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_HEAD_H_ +#define _SP_HEAD_H_ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +#include <stddef.h> + +// Values for the type enum. This reflects the order of the enum declaration +// in the CREATE TABLE command. +#define TYPE_ENUM_FUNCTION 1 +#define TYPE_ENUM_PROCEDURE 2 +#define TYPE_ENUM_TRIGGER 3 + +Item_result +sp_map_result_type(enum enum_field_types type); + +bool +sp_multi_results_command(enum enum_sql_command cmd); + +struct sp_label; +class sp_instr; +struct sp_cond_type; +struct sp_pvar; + +class sp_name : public Sql_alloc +{ +public: + + LEX_STRING m_db; + LEX_STRING m_name; + LEX_STRING m_qname; + + sp_name(LEX_STRING name) + : m_name(name) + { + m_db.str= m_qname.str= 0; + m_db.length= m_qname.length= 0; + } + + sp_name(LEX_STRING db, LEX_STRING name) + : m_db(db), m_name(name) + { + m_qname.str= 0; + m_qname.length= 0; + } + + // Init. the qualified name from the db and name. + void init_qname(THD *thd); // thd for memroot allocation + + ~sp_name() + {} +}; + +sp_name * +sp_name_current_db_new(THD *thd, LEX_STRING name); + + +class sp_head :private Item_arena +{ + sp_head(const sp_head &); /* Prevent use of these */ + void operator=(sp_head &); + +public: + + int m_type; // TYPE_ENUM_FUNCTION or TYPE_ENUM_PROCEDURE + enum enum_field_types m_returns; // For FUNCTIONs only + CHARSET_INFO *m_returns_cs; // For FUNCTIONs only + my_bool m_has_return; // For FUNCTIONs only + my_bool m_simple_case; // TRUE if parsing simple case, FALSE otherwise + my_bool m_multi_results; // TRUE if a procedure with SELECT(s) + my_bool m_in_handler; // TRUE if parser in a handler body + uchar *m_tmp_query; // Temporary pointer to sub query string + uint m_old_cmq; // Old CLIENT_MULTI_QUERIES value + st_sp_chistics *m_chistics; + ulong m_sql_mode; // For SHOW CREATE +#if NOT_USED_NOW + // QQ We're not using this at the moment. + List<char *> m_calls; // Called procedures. + List<char *> m_tables; // Used tables. 
+#endif + LEX_STRING m_qname; // db.name + LEX_STRING m_db; + LEX_STRING m_name; + LEX_STRING m_params; + LEX_STRING m_retstr; // For FUNCTIONs only + LEX_STRING m_body; + LEX_STRING m_defstr; + LEX_STRING m_definer_user; + LEX_STRING m_definer_host; + longlong m_created; + longlong m_modified; + // Pointers set during parsing + uchar *m_param_begin, *m_param_end, *m_returns_begin, *m_returns_end, + *m_body_begin; + + static void * + operator new(size_t size); + + static void + operator delete(void *ptr, size_t size); + + sp_head(); + + // Initialize after we have reset mem_root + void + init(LEX *lex); + + // Initialize strings after parsing header + void + init_strings(THD *thd, LEX *lex, sp_name *name); + + int + create(THD *thd); + + virtual ~sp_head(); + + // Free memory + void + destroy(); + + int + execute_function(THD *thd, Item **args, uint argcount, Item **resp); + + int + execute_procedure(THD *thd, List<Item> *args); + + int + show_create_procedure(THD *thd); + + int + show_create_function(THD *thd); + + void + add_instr(sp_instr *instr); + + inline uint + instructions() + { + return m_instr.elements; + } + + inline sp_instr * + last_instruction() + { + sp_instr *i; + + get_dynamic(&m_instr, (gptr)&i, m_instr.elements-1); + return i; + } + + // Resets lex in 'thd' and keeps a copy of the old one. + void + reset_lex(THD *thd); + + // Restores lex in 'thd' from our copy, but keeps some status from the + // one in 'thd', like ptr, tables, fields, etc. + void + restore_lex(THD *thd); + + // Put the instruction on the backpatch list, associated with the label. + void + push_backpatch(sp_instr *, struct sp_label *); + + // Update all instruction with this label in the backpatch list to + // the current position. + void + backpatch(struct sp_label *); + + // Check that no unresolved references exist. + // If none found, 0 is returned, otherwise errors have been issued + // and -1 is returned. + // This is called by the parser at the end of a create procedure/function. + int + check_backpatch(THD *thd); + + char *name(uint *lenp = 0) const + { + if (lenp) + *lenp= m_name.length; + return m_name.str; + } + + char *create_string(THD *thd, ulong *lenp); + + inline Item_result result() + { + return sp_map_result_type(m_returns); + } + + void set_info(char *definer, uint definerlen, + longlong created, longlong modified, + st_sp_chistics *chistics, ulong sql_mode); + + void reset_thd_mem_root(THD *thd); + + void restore_thd_mem_root(THD *thd); + + void optimize(); + void opt_mark(uint ip); + + inline sp_instr * + get_instr(uint i) + { + sp_instr *ip; + + if (i < m_instr.elements) + get_dynamic(&m_instr, (gptr)&ip, i); + else + ip= NULL; + return ip; + } + +private: + + MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root + THD *m_thd; // Set if we have reset mem_root + char *m_thd_db; // Original thd->db pointer + + sp_pcontext *m_pcont; // Parse context + List<LEX> m_lex; // Temp. store for the other lex + DYNAMIC_ARRAY m_instr; // The "instructions" + typedef struct + { + struct sp_label *lab; + sp_instr *instr; + } bp_t; + List<bp_t> m_backpatch; // Instructions needing backpatching + + int + execute(THD *thd); + +}; // class sp_head : public Sql_alloc + + +// +// "Instructions"... 
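(Editorial note, not part of the patch.) The classes declared below form a small "bytecode": sp_head keeps a dynamic array of sp_instr objects, and sp_head::execute() in sp_head.cc above repeatedly fetches the instruction at the current index and calls its execute(thd, &nextp) method, which decides where control goes next. Plain statements set *nextp to m_ip+1; jumps set it to m_dest. A reduced sketch of that dispatch contract, with invented toy_* names, could read:

/*
  Sketch only: the ip/nextp handshake behind sp_instr and sp_head::execute().
*/
#include <vector>
#include <cstdio>

struct toy_instr
{
  virtual ~toy_instr() {}
  // Returns 0 on success and stores the index of the successor in *nextp.
  virtual int execute(unsigned *nextp) = 0;
};

struct toy_print : toy_instr             // rough analogue of sp_instr_stmt
{
  unsigned ip;
  const char *msg;
  toy_print(unsigned i, const char *m) : ip(i), msg(m) {}
  int execute(unsigned *nextp)
  {
    std::printf("%s\n", msg);
    *nextp= ip + 1;                      // fall through to the next instruction
    return 0;
  }
};

struct toy_jump : toy_instr              // rough analogue of sp_instr_jump
{
  unsigned dest;
  toy_jump(unsigned d) : dest(d) {}
  int execute(unsigned *nextp) { *nextp= dest; return 0; }
};

// The dispatch loop: fetch, execute, follow the reported successor.
int run(const std::vector<toy_instr*> &code)
{
  unsigned ip= 0;
  int ret= 0;
  while (ret == 0 && ip < code.size())
    ret= code[ip]->execute(&ip);
  return ret;
}

Everything else in the hierarchy that follows (conditional jumps, handler push/pop, cursor open/fetch/close) is a variation on the same contract: do the work, then report the successor index.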
+// + +class sp_instr : public Sql_alloc +{ + sp_instr(const sp_instr &); /* Prevent use of these */ + void operator=(sp_instr &); + +public: + + uint marked; + Item *free_list; // My Items + uint m_ip; // My index + sp_pcontext *m_ctx; // My parse context + + // Should give each a name or type code for debugging purposes? + sp_instr(uint ip, sp_pcontext *ctx) + :Sql_alloc(), marked(0), free_list(0), m_ip(ip), m_ctx(ctx) + {} + + virtual ~sp_instr() + { free_items(free_list); } + + // Execute this instrution. '*nextp' will be set to the index of the next + // instruction to execute. (For most instruction this will be the + // instruction following this one.) + // Returns 0 on success, non-zero if some error occured. + virtual int execute(THD *thd, uint *nextp) = 0; + + virtual void print(String *str) = 0; + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx) + {} + + virtual uint opt_mark(sp_head *sp) + { + marked= 1; + return m_ip+1; + } + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + + virtual void opt_move(uint dst, List<sp_instr> *ibp) + { + m_ip= dst; + } + +}; // class sp_instr : public Sql_alloc + + +// +// Call out to some prepared SQL statement. +// +class sp_instr_stmt : public sp_instr +{ + sp_instr_stmt(const sp_instr_stmt &); /* Prevent use of these */ + void operator=(sp_instr_stmt &); + +public: + + LEX_STRING m_query; // For thd->query + + sp_instr_stmt(uint ip, sp_pcontext *ctx) + : sp_instr(ip, ctx), m_lex(NULL) + { + m_query.str= 0; + m_query.length= 0; + } + + virtual ~sp_instr_stmt(); + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + inline void + set_lex(LEX *lex) + { + m_lex= lex; + } + + inline LEX * + get_lex() + { + return m_lex; + } + +protected: + + int exec_stmt(THD *thd, LEX *lex); // Execute a statement + +private: + + LEX *m_lex; // My own lex + +}; // class sp_instr_stmt : public sp_instr + + +class sp_instr_set : public sp_instr +{ + sp_instr_set(const sp_instr_set &); /* Prevent use of these */ + void operator=(sp_instr_set &); + +public: + + TABLE_LIST *tables; + + sp_instr_set(uint ip, sp_pcontext *ctx, + uint offset, Item *val, enum enum_field_types type) + : sp_instr(ip, ctx), + tables(NULL), m_offset(offset), m_value(val), m_type(type) + {} + + virtual ~sp_instr_set() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_offset; // Frame offset + Item *m_value; + enum enum_field_types m_type; // The declared type + +}; // class sp_instr_set : public sp_instr + + +/* + Set user variable instruction. + Used in functions and triggers to set user variables because we don't + want use sp_instr_stmt + "SET @a:=..." statement in this case since + latter will close all tables and thus will ruin execution of statement + calling/invoking this function/trigger. +*/ +class sp_instr_set_user_var : public sp_instr +{ + sp_instr_set_user_var(const sp_instr_set_user_var &); + void operator=(sp_instr_set_user_var &); + +public: + + sp_instr_set_user_var(uint ip, sp_pcontext *ctx, LEX_STRING var, Item *val) + : sp_instr(ip, ctx), m_set_var_item(var, val) + {} + + virtual ~sp_instr_set_user_var() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + Item_func_set_user_var m_set_var_item; +}; // class sp_instr_set_user_var : public sp_instr + + +/* + Set NEW/OLD row field value instruction. Used in triggers. 
+*/ +class sp_instr_set_trigger_field : public sp_instr +{ + sp_instr_set_trigger_field(const sp_instr_set_trigger_field &); + void operator=(sp_instr_set_trigger_field &); + +public: + + sp_instr_set_trigger_field(uint ip, sp_pcontext *ctx, + LEX_STRING field_name, Item *val) + : sp_instr(ip, ctx), + trigger_field(Item_trigger_field::NEW_ROW, field_name.str), + value(val) + {} + + virtual ~sp_instr_set_trigger_field() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + Item_trigger_field trigger_field; + +private: + Item *value; +}; // class sp_instr_trigger_field : public sp_instr + + +class sp_instr_jump : public sp_instr +{ + sp_instr_jump(const sp_instr_jump &); /* Prevent use of these */ + void operator=(sp_instr_jump &); + +public: + + uint m_dest; // Where we will go + + sp_instr_jump(uint ip, sp_pcontext *ctx) + : sp_instr(ip, ctx), m_dest(0), m_optdest(0) + {} + + sp_instr_jump(uint ip, sp_pcontext *ctx, uint dest) + : sp_instr(ip, ctx), m_dest(dest), m_optdest(0) + {} + + virtual ~sp_instr_jump() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start); + + virtual void opt_move(uint dst, List<sp_instr> *ibp); + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx) + { + if (m_dest == 0) // Don't reset + m_dest= dest; + } + +protected: + + sp_instr *m_optdest; // Used during optimization + +}; // class sp_instr_jump : public sp_instr + + +class sp_instr_jump_if : public sp_instr_jump +{ + sp_instr_jump_if(const sp_instr_jump_if &); /* Prevent use of these */ + void operator=(sp_instr_jump_if &); + +public: + + TABLE_LIST *tables; + + sp_instr_jump_if(uint ip, sp_pcontext *ctx, Item *i) + : sp_instr_jump(ip, ctx), tables(NULL), m_expr(i) + {} + + sp_instr_jump_if(uint ip, sp_pcontext *ctx, Item *i, uint dest) + : sp_instr_jump(ip, ctx, dest), tables(NULL), m_expr(i) + {} + + virtual ~sp_instr_jump_if() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + +private: + + Item *m_expr; // The condition + +}; // class sp_instr_jump_if : public sp_instr_jump + + +class sp_instr_jump_if_not : public sp_instr_jump +{ + sp_instr_jump_if_not(const sp_instr_jump_if_not &); /* Prevent use of these */ + void operator=(sp_instr_jump_if_not &); + +public: + + TABLE_LIST *tables; + + sp_instr_jump_if_not(uint ip, sp_pcontext *ctx, Item *i) + : sp_instr_jump(ip, ctx), tables(NULL), m_expr(i) + {} + + sp_instr_jump_if_not(uint ip, sp_pcontext *ctx, Item *i, uint dest) + : sp_instr_jump(ip, ctx, dest), tables(NULL), m_expr(i) + {} + + virtual ~sp_instr_jump_if_not() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + +private: + + Item *m_expr; // The condition + +}; // class sp_instr_jump_if_not : public sp_instr_jump + + +class sp_instr_freturn : public sp_instr +{ + sp_instr_freturn(const sp_instr_freturn &); /* Prevent use of these */ + void operator=(sp_instr_freturn &); + +public: + + TABLE_LIST *tables; + + sp_instr_freturn(uint ip, sp_pcontext *ctx, + Item *val, enum enum_field_types type) + : sp_instr(ip, ctx), tables(NULL), m_value(val), m_type(type) + {} + + virtual 
~sp_instr_freturn() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp) + { + marked= 1; + return UINT_MAX; + } + +protected: + + Item *m_value; + enum enum_field_types m_type; + +}; // class sp_instr_freturn : public sp_instr + + +class sp_instr_hpush_jump : public sp_instr_jump +{ + sp_instr_hpush_jump(const sp_instr_hpush_jump &); /* Prevent use of these */ + void operator=(sp_instr_hpush_jump &); + +public: + + sp_instr_hpush_jump(uint ip, sp_pcontext *ctx, int htype, uint fp) + : sp_instr_jump(ip, ctx), m_type(htype), m_frame(fp) + { + m_handler= ip+1; + m_cond.empty(); + } + + virtual ~sp_instr_hpush_jump() + { + m_cond.empty(); + } + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + + virtual uint opt_shortcut_jump(sp_head *sp, sp_instr *start) + { + return m_ip; + } + + inline void add_condition(struct sp_cond_type *cond) + { + m_cond.push_front(cond); + } + +private: + + int m_type; // Handler type + uint m_frame; + uint m_handler; // Location of handler + List<struct sp_cond_type> m_cond; + +}; // class sp_instr_hpush_jump : public sp_instr_jump + + +class sp_instr_hpop : public sp_instr +{ + sp_instr_hpop(const sp_instr_hpop &); /* Prevent use of these */ + void operator=(sp_instr_hpop &); + +public: + + sp_instr_hpop(uint ip, sp_pcontext *ctx, uint count) + : sp_instr(ip, ctx), m_count(count) + {} + + virtual ~sp_instr_hpop() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx); + + virtual uint opt_mark(sp_head *sp) + { + if (m_count) + marked= 1; + return m_ip+1; + } + +private: + + uint m_count; + +}; // class sp_instr_hpop : public sp_instr + + +class sp_instr_hreturn : public sp_instr_jump +{ + sp_instr_hreturn(const sp_instr_hreturn &); /* Prevent use of these */ + void operator=(sp_instr_hreturn &); + +public: + + sp_instr_hreturn(uint ip, sp_pcontext *ctx, uint fp) + : sp_instr_jump(ip, ctx), m_frame(fp) + {} + + virtual ~sp_instr_hreturn() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp); + +private: + + uint m_frame; + +}; // class sp_instr_hreturn : public sp_instr + + +class sp_instr_cpush : public sp_instr +{ + sp_instr_cpush(const sp_instr_cpush &); /* Prevent use of these */ + void operator=(sp_instr_cpush &); + +public: + + sp_instr_cpush(uint ip, sp_pcontext *ctx, LEX *lex) + : sp_instr(ip, ctx), m_lex(lex) + {} + + virtual ~sp_instr_cpush(); + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + LEX *m_lex; + +}; // class sp_instr_cpush : public sp_instr + + +class sp_instr_cpop : public sp_instr +{ + sp_instr_cpop(const sp_instr_cpop &); /* Prevent use of these */ + void operator=(sp_instr_cpop &); + +public: + + sp_instr_cpop(uint ip, sp_pcontext *ctx, uint count) + : sp_instr(ip, ctx), m_count(count) + {} + + virtual ~sp_instr_cpop() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual void backpatch(uint dest, sp_pcontext *dst_ctx); + + virtual uint opt_mark(sp_head *sp) + { + if (m_count) + marked= 1; + return m_ip+1; + } + +private: + + uint m_count; + +}; // class sp_instr_cpop : public sp_instr + + +class sp_instr_copen : public sp_instr_stmt +{ + sp_instr_copen(const sp_instr_copen &); /* Prevent use of these */ + void 
operator=(sp_instr_copen &); + +public: + + sp_instr_copen(uint ip, sp_pcontext *ctx, uint c) + : sp_instr_stmt(ip, ctx), m_cursor(c) + {} + + virtual ~sp_instr_copen() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_cursor; // Stack index + +}; // class sp_instr_copen : public sp_instr_stmt + + +class sp_instr_cclose : public sp_instr +{ + sp_instr_cclose(const sp_instr_cclose &); /* Prevent use of these */ + void operator=(sp_instr_cclose &); + +public: + + sp_instr_cclose(uint ip, sp_pcontext *ctx, uint c) + : sp_instr(ip, ctx), m_cursor(c) + {} + + virtual ~sp_instr_cclose() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + +private: + + uint m_cursor; + +}; // class sp_instr_cclose : public sp_instr + + +class sp_instr_cfetch : public sp_instr +{ + sp_instr_cfetch(const sp_instr_cfetch &); /* Prevent use of these */ + void operator=(sp_instr_cfetch &); + +public: + + sp_instr_cfetch(uint ip, sp_pcontext *ctx, uint c) + : sp_instr(ip, ctx), m_cursor(c) + { + m_varlist.empty(); + } + + virtual ~sp_instr_cfetch() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + void add_to_varlist(struct sp_pvar *var) + { + m_varlist.push_back(var); + } + +private: + + uint m_cursor; + List<struct sp_pvar> m_varlist; + +}; // class sp_instr_cfetch : public sp_instr + + +class sp_instr_error : public sp_instr +{ + sp_instr_error(const sp_instr_error &); /* Prevent use of these */ + void operator=(sp_instr_error &); + +public: + + sp_instr_error(uint ip, sp_pcontext *ctx, int errcode) + : sp_instr(ip, ctx), m_errcode(errcode) + {} + + virtual ~sp_instr_error() + {} + + virtual int execute(THD *thd, uint *nextp); + + virtual void print(String *str); + + virtual uint opt_mark(sp_head *sp) + { + marked= 1; + return UINT_MAX; + } + +private: + + int m_errcode; + +}; // class sp_instr_error : public sp_instr + + +struct st_sp_security_context +{ + bool changed; + uint master_access; + uint db_access; + char *priv_user; + char priv_host[MAX_HOSTNAME]; + char *user; + char *host; + char *ip; +}; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +void +sp_change_security_context(THD *thd, sp_head *sp, st_sp_security_context *ctxp); +void +sp_restore_security_context(THD *thd, sp_head *sp,st_sp_security_context *ctxp); +#endif /* NO_EMBEDDED_ACCESS_CHECKS */ + +#endif /* _SP_HEAD_H_ */ diff --git a/sql/sp_pcontext.cc b/sql/sp_pcontext.cc new file mode 100644 index 00000000000..3b60f0936f9 --- /dev/null +++ b/sql/sp_pcontext.cc @@ -0,0 +1,274 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
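(Editorial note, not part of the patch.) st_sp_security_context and the sp_change_security_context()/sp_restore_security_context() pair declared at the end of sp_head.h above implement the familiar definer-versus-invoker swap for SUID routines: stash the caller's privilege identity, install the definer's identity for the duration of the call, then copy the stash back. Reduced to invented stand-in types (toy_session, toy_saved_context, not server APIs), the pattern is roughly:

/*
  Sketch only: save/swap/restore of the acting privilege identity.
*/
#include <string>

struct toy_session                 // stands in for the relevant THD fields
{
  std::string priv_user;
  std::string priv_host;
};

struct toy_saved_context           // stands in for st_sp_security_context
{
  bool changed;
  std::string priv_user;
  std::string priv_host;
};

void swap_to_definer(toy_session *s, const std::string &def_user,
                     const std::string &def_host, toy_saved_context *save)
{
  save->changed= (s->priv_user != def_user || s->priv_host != def_host);
  if (save->changed)
  {
    save->priv_user= s->priv_user;   // remember the invoker...
    save->priv_host= s->priv_host;
    s->priv_user= def_user;          // ...and act as the definer
    s->priv_host= def_host;
  }
}

void restore_invoker(toy_session *s, toy_saved_context *save)
{
  if (save->changed)
  {
    s->priv_user= save->priv_user;
    s->priv_host= save->priv_host;
    save->changed= false;
  }
}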
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef __GNUC__ +#pragma implementation +#endif + +#if defined(WIN32) || defined(__WIN__) +#undef SAFEMALLOC /* Problems with threads */ +#endif + +#include "mysql_priv.h" +#include "sp_pcontext.h" +#include "sp_head.h" + +sp_pcontext::sp_pcontext(sp_pcontext *prev) + : Sql_alloc(), m_psubsize(0), m_csubsize(0), m_hsubsize(0), + m_handlers(0), m_parent(prev) +{ + VOID(my_init_dynamic_array(&m_pvar, sizeof(sp_pvar_t *), 16, 8)); + VOID(my_init_dynamic_array(&m_cond, sizeof(sp_cond_type_t *), 16, 8)); + VOID(my_init_dynamic_array(&m_cursor, sizeof(LEX_STRING), 16, 8)); + m_label.empty(); + m_children.empty(); + if (!prev) + m_poffset= m_coffset= 0; + else + { + m_poffset= prev->current_pvars(); + m_coffset= prev->current_cursors(); + } +} + +void +sp_pcontext::destroy() +{ + List_iterator_fast<sp_pcontext> li(m_children); + sp_pcontext *child; + + while ((child= li++)) + child->destroy(); + + m_children.empty(); + m_label.empty(); + delete_dynamic(&m_pvar); + delete_dynamic(&m_cond); + delete_dynamic(&m_cursor); +} + +sp_pcontext * +sp_pcontext::push_context() +{ + sp_pcontext *child= new sp_pcontext(this); + + if (child) + m_children.push_back(child); + return child; +} + +sp_pcontext * +sp_pcontext::pop_context() +{ + uint submax= max_pvars(); + + if (submax > m_parent->m_psubsize) + m_parent->m_psubsize= submax; + submax= max_handlers(); + if (submax > m_parent->m_hsubsize) + m_parent->m_hsubsize= submax; + submax= max_cursors(); + if (submax > m_parent->m_csubsize) + m_parent->m_csubsize= submax; + return m_parent; +} + +uint +sp_pcontext::diff_handlers(sp_pcontext *ctx) +{ + uint n= 0; + sp_pcontext *pctx= this; + + while (pctx && pctx != ctx) + { + n+= pctx->m_handlers; + pctx= pctx->parent_context(); + } + if (pctx) + return n; + return 0; // Didn't find ctx +} + +uint +sp_pcontext::diff_cursors(sp_pcontext *ctx) +{ + uint n= 0; + sp_pcontext *pctx= this; + + while (pctx && pctx != ctx) + pctx= pctx->parent_context(); + if (pctx) + return ctx->current_cursors() - pctx->current_cursors(); + return 0; // Didn't find ctx +} + +/* This does a linear search (from newer to older variables, in case +** we have shadowed names). +** It's possible to have a more efficient allocation and search method, +** but it might not be worth it. The typical number of parameters and +** variables will in most cases be low (a handful). +** ...and, this is only called during parsing. +*/ +sp_pvar_t * +sp_pcontext::find_pvar(LEX_STRING *name, my_bool scoped) +{ + uint i= m_pvar.elements; + + while (i--) + { + sp_pvar_t *p; + + get_dynamic(&m_pvar, (gptr)&p, i); + if (my_strnncoll(system_charset_info, + (const uchar *)name->str, name->length, + (const uchar *)p->name.str, p->name.length) == 0) + { + return p; + } + } + if (!scoped && m_parent) + return m_parent->find_pvar(name, scoped); + return NULL; +} + +void +sp_pcontext::push_pvar(LEX_STRING *name, enum enum_field_types type, + sp_param_mode_t mode) +{ + sp_pvar_t *p= (sp_pvar_t *)sql_alloc(sizeof(sp_pvar_t)); + + if (p) + { + if (m_pvar.elements == m_psubsize) + m_psubsize+= 1; + p->name.str= name->str; + p->name.length= name->length; + p->type= type; + p->mode= mode; + p->offset= current_pvars(); + p->isset= (mode == sp_param_out ? 
FALSE : TRUE); + p->dflt= NULL; + insert_dynamic(&m_pvar, (gptr)&p); + } +} + +sp_label_t * +sp_pcontext::push_label(char *name, uint ip) +{ + sp_label_t *lab = (sp_label_t *)sql_alloc(sizeof(sp_label_t)); + + if (lab) + { + lab->name= name; + lab->ip= ip; + lab->type= SP_LAB_GOTO; + lab->ctx= this; + m_label.push_front(lab); + } + return lab; +} + +sp_label_t * +sp_pcontext::find_label(char *name) +{ + List_iterator_fast<sp_label_t> li(m_label); + sp_label_t *lab; + + while ((lab= li++)) + if (my_strcasecmp(system_charset_info, name, lab->name) == 0) + return lab; + + if (m_parent) + return m_parent->find_label(name); + return NULL; +} + +void +sp_pcontext::push_cond(LEX_STRING *name, sp_cond_type_t *val) +{ + sp_cond_t *p= (sp_cond_t *)sql_alloc(sizeof(sp_cond_t)); + + if (p) + { + p->name.str= name->str; + p->name.length= name->length; + p->val= val; + insert_dynamic(&m_cond, (gptr)&p); + } +} + +/* + * See comment for find_pvar() above + */ +sp_cond_type_t * +sp_pcontext::find_cond(LEX_STRING *name, my_bool scoped) +{ + uint i= m_cond.elements; + + while (i--) + { + sp_cond_t *p; + + get_dynamic(&m_cond, (gptr)&p, i); + if (my_strnncoll(system_charset_info, + (const uchar *)name->str, name->length, + (const uchar *)p->name.str, p->name.length) == 0) + { + return p->val; + } + } + if (!scoped && m_parent) + return m_parent->find_cond(name, scoped); + return NULL; +} + +void +sp_pcontext::push_cursor(LEX_STRING *name) +{ + LEX_STRING n; + + if (m_cursor.elements == m_csubsize) + m_csubsize+= 1; + n.str= name->str; + n.length= name->length; + insert_dynamic(&m_cursor, (gptr)&n); +} + +/* + * See comment for find_pvar() above + */ +my_bool +sp_pcontext::find_cursor(LEX_STRING *name, uint *poff, my_bool scoped) +{ + uint i= m_cursor.elements; + + while (i--) + { + LEX_STRING n; + + get_dynamic(&m_cursor, (gptr)&n, i); + if (my_strnncoll(system_charset_info, + (const uchar *)name->str, name->length, + (const uchar *)n.str, n.length) == 0) + { + *poff= i; + return TRUE; + } + } + if (!scoped && m_parent) + return m_parent->find_cursor(name, poff, scoped); + return FALSE; +} diff --git a/sql/sp_pcontext.h b/sql/sp_pcontext.h new file mode 100644 index 00000000000..66f631f4938 --- /dev/null +++ b/sql/sp_pcontext.h @@ -0,0 +1,298 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_PCONTEXT_H_ +#define _SP_PCONTEXT_H_ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +typedef enum +{ + sp_param_in, + sp_param_out, + sp_param_inout +} sp_param_mode_t; + +typedef struct sp_pvar +{ + LEX_STRING name; + enum enum_field_types type; + sp_param_mode_t mode; + uint offset; // Offset in current frame + my_bool isset; + Item *dflt; +} sp_pvar_t; + + +#define SP_LAB_REF 0 // Unresolved reference (for goto) +#define SP_LAB_GOTO 1 // Free goto label +#define SP_LAB_BEGIN 2 // Label at BEGIN +#define SP_LAB_ITER 3 // Label at iteration control + +typedef struct sp_label +{ + char *name; + uint ip; // Instruction index + int type; // begin/iter or ref/free + sp_pcontext *ctx; // The label's context +} sp_label_t; + +typedef struct sp_cond_type +{ + enum { number, state, warning, notfound, exception } type; + char sqlstate[6]; + uint mysqlerr; +} sp_cond_type_t; + +typedef struct sp_cond +{ + LEX_STRING name; + sp_cond_type_t *val; +} sp_cond_t; + +class sp_pcontext : public Sql_alloc +{ + sp_pcontext(const sp_pcontext &); /* Prevent use of these */ + void operator=(sp_pcontext &); + + public: + + sp_pcontext(sp_pcontext *prev); + + // Free memory + void + destroy(); + + sp_pcontext * + push_context(); + + // Returns the previous context, not the one we pop + sp_pcontext * + pop_context(); + + sp_pcontext * + parent_context() + { + return m_parent; + } + + uint + diff_handlers(sp_pcontext *ctx); + + uint + diff_cursors(sp_pcontext *ctx); + + + // + // Parameters and variables + // + + inline uint + max_pvars() + { + return m_psubsize + m_pvar.elements; + } + + inline uint + current_pvars() + { + return m_poffset + m_pvar.elements; + } + + inline uint + context_pvars() + { + return m_pvar.elements; + } + + inline uint + pvar_context2index(uint i) + { + return m_poffset + i; + } + + inline void + set_type(uint i, enum enum_field_types type) + { + sp_pvar_t *p= find_pvar(i); + + if (p) + p->type= type; + } + + inline void + set_isset(uint i, my_bool val) + { + sp_pvar_t *p= find_pvar(i); + + if (p) + p->isset= val; + } + + inline void + set_default(uint i, Item *it) + { + sp_pvar_t *p= find_pvar(i); + + if (p) + p->dflt= it; + } + + void + push_pvar(LEX_STRING *name, enum enum_field_types type, sp_param_mode_t mode); + + // Pop the last 'num' slots of the frame + inline void + pop_pvar(uint num = 1) + { + while (num--) + pop_dynamic(&m_pvar); + } + + // Find by name + sp_pvar_t * + find_pvar(LEX_STRING *name, my_bool scoped=0); + + // Find by index + sp_pvar_t * + find_pvar(uint i) + { + sp_pvar_t *p; + + if (i < m_pvar.elements) + get_dynamic(&m_pvar, (gptr)&p, i); + else + p= NULL; + return p; + } + + // + // Labels + // + + sp_label_t * + push_label(char *name, uint ip); + + sp_label_t * + find_label(char *name); + + inline sp_label_t * + last_label() + { + sp_label_t *lab= m_label.head(); + + if (!lab && m_parent) + lab= m_parent->last_label(); + return lab; + } + + inline sp_label_t * + pop_label() + { + return m_label.pop(); + } + + // + // Conditions + // + + void + push_cond(LEX_STRING *name, sp_cond_type_t *val); + + inline void + pop_cond(uint num) + { + while (num--) + pop_dynamic(&m_cond); + } + + sp_cond_type_t * + find_cond(LEX_STRING *name, my_bool scoped=0); + + // + // Handlers + // + + inline void + add_handler() + { + 
m_handlers+= 1; + } + + inline uint + max_handlers() + { + return m_hsubsize + m_handlers; + } + + inline void + push_handlers(uint n) + { + m_handlers+= n; + } + + // + // Cursors + // + + void + push_cursor(LEX_STRING *name); + + my_bool + find_cursor(LEX_STRING *name, uint *poff, my_bool scoped=0); + + inline uint + max_cursors() + { + return m_csubsize + m_cursor.elements; + } + + inline uint + current_cursors() + { + return m_coffset + m_cursor.elements; + } + +protected: + + // The maximum sub context's framesizes + uint m_psubsize; + uint m_csubsize; + uint m_hsubsize; + uint m_handlers; // No. of handlers in this context + +private: + + sp_pcontext *m_parent; // Parent context + + uint m_poffset; // Variable offset for this context + uint m_coffset; // Cursor offset for this context + + DYNAMIC_ARRAY m_pvar; // Parameters/variables + DYNAMIC_ARRAY m_cond; // Conditions + DYNAMIC_ARRAY m_cursor; // Cursors + + List<sp_label_t> m_label; // The label list + + List<sp_pcontext> m_children; // Children contexts, used for destruction + +}; // class sp_pcontext : public Sql_alloc + + +#endif /* _SP_PCONTEXT_H_ */ diff --git a/sql/sp_rcontext.cc b/sql/sp_rcontext.cc new file mode 100644 index 00000000000..609882b84c6 --- /dev/null +++ b/sql/sp_rcontext.cc @@ -0,0 +1,271 @@ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifdef __GNUC__ +#pragma implementation +#endif + +#if defined(WIN32) || defined(__WIN__) +#undef SAFEMALLOC /* Problems with threads */ +#endif + +#include "mysql_priv.h" +#include "mysql.h" +#include "sp_head.h" +#include "sp_rcontext.h" +#include "sp_pcontext.h" + +sp_rcontext::sp_rcontext(uint fsize, uint hmax, uint cmax) + : m_count(0), m_fsize(fsize), m_result(NULL), m_hcount(0), m_hsp(0), + m_hfound(-1), m_ccount(0) +{ + in_handler= FALSE; + m_frame= (Item **)sql_alloc(fsize * sizeof(Item*)); + m_outs= (int *)sql_alloc(fsize * sizeof(int)); + m_handler= (sp_handler_t *)sql_alloc(hmax * sizeof(sp_handler_t)); + m_hstack= (uint *)sql_alloc(hmax * sizeof(uint)); + m_cstack= (sp_cursor **)sql_alloc(cmax * sizeof(sp_cursor *)); + m_saved.empty(); +} + +int +sp_rcontext::set_item_eval(uint idx, Item *i, enum_field_types type) +{ + extern Item *sp_eval_func_item(THD *thd, Item *it, enum_field_types type); + Item *it= sp_eval_func_item(current_thd, i, type); + + if (! 
it) + return -1; + else + { + set_item(idx, it); + return 0; + } +} + +bool +sp_rcontext::find_handler(uint sql_errno, + MYSQL_ERROR::enum_warning_level level) +{ + if (in_handler) + return 0; // Already executing a handler + if (m_hfound >= 0) + return 1; // Already got one + + const char *sqlstate= mysql_errno_to_sqlstate(sql_errno); + int i= m_hcount, found= -1; + + while (i--) + { + sp_cond_type_t *cond= m_handler[i].cond; + + switch (cond->type) + { + case sp_cond_type_t::number: + if (sql_errno == cond->mysqlerr) + found= i; // Always the most specific + break; + case sp_cond_type_t::state: + if (strcmp(sqlstate, cond->sqlstate) == 0 && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::number)) + found= i; + break; + case sp_cond_type_t::warning: + if ((sqlstate[0] == '0' && sqlstate[1] == '1' || + level == MYSQL_ERROR::WARN_LEVEL_WARN) && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::state)) + found= i; + break; + case sp_cond_type_t::notfound: + if (sqlstate[0] == '0' && sqlstate[1] == '2' && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::state)) + found= i; + break; + case sp_cond_type_t::exception: + if ((sqlstate[0] != '0' || sqlstate[1] > '2' || + level == MYSQL_ERROR::WARN_LEVEL_ERROR) && + (found < 0 || m_handler[found].cond->type > sp_cond_type_t::state)) + found= i; + break; + } + } + if (found < 0) + return FALSE; + m_hfound= found; + return TRUE; +} + +void +sp_rcontext::save_variables(uint fp) +{ + while (fp < m_count) + m_saved.push_front(m_frame[fp++]); +} + +void +sp_rcontext::restore_variables(uint fp) +{ + uint i= m_count; + + while (i-- > fp) + m_frame[i]= m_saved.pop(); +} + +void +sp_rcontext::push_cursor(LEX *lex) +{ + m_cstack[m_ccount++]= new sp_cursor(lex); +} + +void +sp_rcontext::pop_cursors(uint count) +{ + while (count--) + { + delete m_cstack[--m_ccount]; + } +} + + +/* + * + * sp_cursor + * + */ + +// We have split this in two to make it easy for sp_instr_copen +// to reuse the sp_instr::exec_stmt() code. +LEX * +sp_cursor::pre_open(THD *thd) +{ + if (m_isopen) + { + my_message(ER_SP_CURSOR_ALREADY_OPEN, ER(ER_SP_CURSOR_ALREADY_OPEN), + MYF(0)); + return NULL; + } + + bzero((char *)&m_mem_root, sizeof(m_mem_root)); + init_alloc_root(&m_mem_root, MEM_ROOT_BLOCK_SIZE, MEM_ROOT_PREALLOC); + if ((m_prot= new Protocol_cursor(thd, &m_mem_root)) == NULL) + return NULL; + + m_oprot= thd->protocol; // Save the original protocol + thd->protocol= m_prot; + + m_nseof= thd->net.no_send_eof; + thd->net.no_send_eof= TRUE; + return m_lex; +} + +void +sp_cursor::post_open(THD *thd, my_bool was_opened) +{ + thd->net.no_send_eof= m_nseof; // Restore the originals + thd->protocol= m_oprot; + if (was_opened) + { + m_isopen= was_opened; + m_current_row= m_prot->data; + } +} + +int +sp_cursor::close(THD *thd) +{ + if (! m_isopen) + { + my_message(ER_SP_CURSOR_NOT_OPEN, ER(ER_SP_CURSOR_NOT_OPEN), MYF(0)); + return -1; + } + destroy(); + return 0; +} + +void +sp_cursor::destroy() +{ + if (m_prot) + { + delete m_prot; + m_prot= NULL; + free_root(&m_mem_root, MYF(0)); + bzero((char *)&m_mem_root, sizeof(m_mem_root)); + } + m_isopen= FALSE; +} + +int +sp_cursor::fetch(THD *thd, List<struct sp_pvar> *vars) +{ + List_iterator_fast<struct sp_pvar> li(*vars); + sp_pvar_t *pv; + MYSQL_ROW row; + uint fldcount; + + if (! 
m_isopen) + { + my_message(ER_SP_CURSOR_NOT_OPEN, ER(ER_SP_CURSOR_NOT_OPEN), MYF(0)); + return -1; + } + if (m_current_row == NULL) + { + my_message(ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA), MYF(0)); + return -1; + } + + row= m_current_row->data; + for (fldcount= 0 ; (pv= li++) ; fldcount++) + { + Item *it; + const char *s; + + if (fldcount >= m_prot->get_field_count()) + { + my_message(ER_SP_WRONG_NO_OF_FETCH_ARGS, + ER(ER_SP_WRONG_NO_OF_FETCH_ARGS), MYF(0)); + return -1; + } + s= row[fldcount]; + if (!s) + it= new Item_null(); + else + switch (sp_map_result_type(pv->type)) + { + case INT_RESULT: + it= new Item_int(s); + break; + case REAL_RESULT: + it= new Item_real(s, strlen(s)); + break; + default: + { + uint len= strlen(s); + it= new Item_string(thd->strmake(s, len), len, thd->db_charset); + break; + } + } + thd->spcont->set_item(pv->offset, it); + } + if (fldcount < m_prot->get_field_count()) + { + my_message(ER_SP_WRONG_NO_OF_FETCH_ARGS, + ER(ER_SP_WRONG_NO_OF_FETCH_ARGS), MYF(0)); + return -1; + } + m_current_row= m_current_row->next; + return 0; +} diff --git a/sql/sp_rcontext.h b/sql/sp_rcontext.h new file mode 100644 index 00000000000..8e818ab76d1 --- /dev/null +++ b/sql/sp_rcontext.h @@ -0,0 +1,255 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2002 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef _SP_RCONTEXT_H_ +#define _SP_RCONTEXT_H_ + +#ifdef __GNUC__ +#pragma interface /* gcc class implementation */ +#endif + +struct sp_cond_type; +class sp_cursor; +struct sp_pvar; + +#define SP_HANDLER_NONE 0 +#define SP_HANDLER_EXIT 1 +#define SP_HANDLER_CONTINUE 2 +#define SP_HANDLER_UNDO 3 + +typedef struct +{ + struct sp_cond_type *cond; + uint handler; // Location of handler + int type; + uint foffset; // Frame offset for the handlers declare level +} sp_handler_t; + +class sp_rcontext : public Sql_alloc +{ + sp_rcontext(const sp_rcontext &); /* Prevent use of these */ + void operator=(sp_rcontext &); + + public: + + bool in_handler; + + sp_rcontext(uint fsize, uint hmax, uint cmax); + + ~sp_rcontext() + { + // Not needed? 
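+ // (Assumption, based on the constructor above: the frame and the handler/cursor + // stacks are allocated with sql_alloc(), i.e. on the current mem_root, so they + // are presumably reclaimed with the root rather than freed explicitly here.)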
+ //sql_element_free(m_frame); + //m_saved.empty(); + } + + inline void + push_item(Item *i) + { + if (m_count < m_fsize) + m_frame[m_count++] = i; + } + + inline void + set_item(uint idx, Item *i) + { + if (idx < m_count) + m_frame[idx] = i; + } + + /* Returns 0 on success, -1 on (eval) failure */ + int + set_item_eval(uint idx, Item *i, enum_field_types type); + + inline Item * + get_item(uint idx) + { + return m_frame[idx]; + } + + inline void + set_oindex(uint idx, int oidx) + { + m_outs[idx] = oidx; + } + + inline int + get_oindex(uint idx) + { + return m_outs[idx]; + } + + inline void + set_result(Item *it) + { + m_result= it; + } + + inline Item * + get_result() + { + return m_result; + } + + inline void + push_handler(struct sp_cond_type *cond, uint h, int type, uint f) + { + m_handler[m_hcount].cond= cond; + m_handler[m_hcount].handler= h; + m_handler[m_hcount].type= type; + m_handler[m_hcount].foffset= f; + m_hcount+= 1; + } + + inline void + pop_handlers(uint count) + { + m_hcount-= count; + } + + // Returns 1 if a handler was found, 0 otherwise. + bool + find_handler(uint sql_errno,MYSQL_ERROR::enum_warning_level level); + + // Returns handler type and sets *ip to location if one was found + inline int + found_handler(uint *ip, uint *fp) + { + if (m_hfound < 0) + return SP_HANDLER_NONE; + *ip= m_handler[m_hfound].handler; + *fp= m_handler[m_hfound].foffset; + return m_handler[m_hfound].type; + } + + // Clears the handler find state + inline void + clear_handler() + { + m_hfound= -1; + } + + inline void + push_hstack(uint h) + { + m_hstack[m_hsp++]= h; + } + + inline uint + pop_hstack() + { + return m_hstack[--m_hsp]; + } + + // Save variables starting at fp and up + void + save_variables(uint fp); + + // Restore variables down to fp + void + restore_variables(uint fp); + + void + push_cursor(LEX *lex); + + void + pop_cursors(uint count); + + void + pop_all_cursors() + { + pop_cursors(m_ccount); + } + + inline sp_cursor * + get_cursor(uint i) + { + return m_cstack[i]; + } + +private: + + uint m_count; + uint m_fsize; + Item **m_frame; + int *m_outs; + + Item *m_result; // For FUNCTIONs + + sp_handler_t *m_handler; + uint m_hcount; + uint *m_hstack; + uint m_hsp; + int m_hfound; // Set by find_handler; -1 if not found + List<Item> m_saved; // Saved variables + + sp_cursor **m_cstack; + uint m_ccount; + +}; // class sp_rcontext : public Sql_alloc + + +class sp_cursor : public Sql_alloc +{ +public: + + sp_cursor(LEX *lex) + : m_lex(lex), m_prot(NULL), m_isopen(0), m_current_row(NULL) + { + /* Empty */ + } + + virtual ~sp_cursor() + { + destroy(); + } + + // We have split this in two to make it easy for sp_instr_copen + // to reuse the sp_instr::exec_stmt() code. 
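+ // pre_open() swaps in a Protocol_cursor that buffers the result set in + // m_mem_root and suppresses the end-of-data packet; it returns the cursor's + // LEX for the caller to execute, or NULL on error (e.g. cursor already open). + // post_open() restores the original protocol and, if the open succeeded, + // positions m_current_row at the first buffered row for later FETCH.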
+ LEX * + pre_open(THD *thd); + void + post_open(THD *thd, my_bool was_opened); + + int + close(THD *thd); + + inline my_bool + is_open() + { + return m_isopen; + } + + int + fetch(THD *, List<struct sp_pvar> *vars); + +private: + + MEM_ROOT m_mem_root; // My own mem_root + LEX *m_lex; + Protocol_cursor *m_prot; + my_bool m_isopen; + my_bool m_nseof; // Original no_send_eof + Protocol *m_oprot; // Original protocol + MYSQL_ROWS *m_current_row; + + void + destroy(); + +}; // class sp_cursor : public Sql_alloc + +#endif /* _SP_RCONTEXT_H_ */ diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 2c11d1c87ad..ba00c1d3fa7 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -17,8 +17,8 @@ /* The privileges are saved in the following tables: - mysql/user ; super user who are allowed to do almoust anything - mysql/host ; host priviliges. This is used if host is empty in mysql/db. + mysql/user ; super user who are allowed to do almost anything + mysql/host ; host privileges. This is used if host is empty in mysql/db. mysql/db ; database privileges / user data in tables is sorted according to how many not-wild-cards there is @@ -32,6 +32,8 @@ #endif #include <m_ctype.h> #include <stdarg.h> +#include "sp_head.h" +#include "sp.h" #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -58,7 +60,7 @@ static DYNAMIC_ARRAY acl_hosts,acl_users,acl_dbs; static MEM_ROOT mem, memex; static bool initialized=0; static bool allow_all_hosts=1; -static HASH acl_check_hosts, column_priv_hash; +static HASH acl_check_hosts, column_priv_hash, proc_priv_hash; static DYNAMIC_ARRAY acl_wild_hosts; static hash_filo *acl_cache; static uint grant_version=0; @@ -139,7 +141,6 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) MYSQL_LOCK *lock; my_bool return_val=1; bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE; - DBUG_ENTER("acl_init"); if (!acl_cache) @@ -152,6 +153,7 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) } priv_version++; /* Privileges updated */ + mysql_proc_table_exists= 1; // Assume mysql.proc exists /* To be able to run this from boot, we allocate a temporary THD @@ -167,8 +169,8 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) tables[0].alias=tables[0].real_name=(char*) "host"; tables[1].alias=tables[1].real_name=(char*) "user"; tables[2].alias=tables[2].real_name=(char*) "db"; - tables[0].next=tables+1; - tables[1].next=tables+2; + tables[0].next_local= tables[0].next_global= tables+1; + tables[1].next_local= tables[1].next_global= tables+2; tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_READ; tables[0].db=tables[1].db=tables[2].db=thd->db; @@ -300,9 +302,26 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) { uint next_field; user.access= get_access(table,3,&next_field) & GLOBAL_ACLS; + /* + if it is pre 5.0.1 privilege table then map CREATE privilege on + CREATE VIEW & SHOW VIEW privileges + */ + if (table->fields <= 31 && (user.access & CREATE_ACL)) + user.access|= (CREATE_VIEW_ACL | SHOW_VIEW_ACL); + + /* + if it is pre 5.0.2 privilege table then map CREATE/ALTER privilege on + CREATE PROCEDURE & ALTER PROCEDURE privileges + */ + if (table->fields <= 33 && (user.access & CREATE_ACL)) + user.access|= CREATE_PROC_ACL; + if (table->fields <= 33 && (user.access & ALTER_ACL)) + user.access|= ALTER_PROC_ACL; + user.sort= get_sort(2,user.host.hostname,user.user); user.hostname_length= (user.host.hostname ? 
(uint) strlen(user.host.hostname) : 0); + if (table->fields >= 31) /* Starting from 4.0.2 we have more fields */ { char *ssl_type=get_field(&mem, table->field[next_field++]); @@ -324,10 +343,19 @@ my_bool acl_init(THD *org_thd, bool dont_read_acl_tables) ptr = get_field(&mem, table->field[next_field++]); user.user_resource.updates=ptr ? atoi(ptr) : 0; ptr = get_field(&mem, table->field[next_field++]); - user.user_resource.connections=ptr ? atoi(ptr) : 0; + user.user_resource.conn_per_hour= ptr ? atoi(ptr) : 0; if (user.user_resource.questions || user.user_resource.updates || - user.user_resource.connections) + user.user_resource.conn_per_hour) mqh_used=1; + + if (table->fields >= 36) + { + /* Starting from 5.0.3 we have max_user_connections field */ + ptr= get_field(&mem, table->field[next_field++]); + user.user_resource.user_conn= ptr ? atoi(ptr) : 0; + } + else + user.user_resource.user_conn= 0; } else { @@ -438,7 +466,8 @@ void acl_free(bool end) SYNOPSIS acl_reload() - thd Thread handle + thd Thread handle. Note that this may be NULL if we refresh + because we got a signal */ void acl_reload(THD *thd) @@ -491,8 +520,8 @@ void acl_reload(THD *thd) Get all access bits from table after fieldnr IMPLEMENTATION - We know that the access privileges ends when there is no more fields - or the field is not an enum with two elements. + We know that the access privileges ends when there is no more fields + or the field is not an enum with two elements. SYNOPSIS get_access() @@ -595,7 +624,7 @@ static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b) thd->host, thd->ip, thd->user are used for checks. mqh user resources; on success mqh is reset, else unchanged - passwd scrambled & crypted password, recieved from client + passwd scrambled & crypted password, received from client (to check): thd->scramble or thd->scramble_323 is used to decrypt passwd, so they must contain original random string, @@ -606,7 +635,7 @@ static int acl_compare(ACL_ACCESS *a,ACL_ACCESS *b) RETURN VALUE 0 success: thd->priv_user, thd->priv_host, thd->master_access, mqh are updated - 1 user not found or authentification failure + 1 user not found or authentication failure 2 user found, has long (4.1.1) salt, but passwd is in old (3.23) format. -1 user found, has short (3.23) salt, but passwd is in new (4.1.1) format. */ @@ -746,7 +775,7 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, break; } DBUG_PRINT("info",("checkpoint 2")); - /* If X509 issuer is speified, we check it... */ + /* If X509 issuer is specified, we check it... */ if (acl_user->x509_issuer) { DBUG_PRINT("info",("checkpoint 3")); @@ -805,6 +834,86 @@ int acl_getroot(THD *thd, USER_RESOURCES *mqh, } +/* + * This is like acl_getroot() above, but it doesn't check password, + * and we don't care about the user resources. + * Used to get access rights for SQL SECURITY DEFINER invocation of + * stored procedures. + */ +int acl_getroot_no_password(THD *thd) +{ + ulong user_access= NO_ACCESS; + int res= 1; + uint i; + ACL_USER *acl_user= 0; + DBUG_ENTER("acl_getroot_no_password"); + + if (!initialized) + { + /* + here if mysqld's been started with --skip-grant-tables option. + */ + thd->priv_user= (char *) ""; // privileges for + *thd->priv_host= '\0'; // the user are unknown + thd->master_access= ~NO_ACCESS; // everything is allowed + DBUG_RETURN(0); + } + + VOID(pthread_mutex_lock(&acl_cache->lock)); + + thd->master_access= 0; + thd->db_access= 0; + + /* + Find acl entry in user database. 
+ This is specially tailored to suit the check we do for CALL of + a stored procedure; thd->user is set to what is actually a + priv_user, which can be ''. + */ + for (i=0 ; i < acl_users.elements ; i++) + { + acl_user= dynamic_element(&acl_users,i,ACL_USER*); + if ((!acl_user->user && (!thd->user || !thd->user[0])) || + (acl_user->user && strcmp(thd->user, acl_user->user) == 0)) + { + if (compare_hostname(&acl_user->host, thd->host, thd->ip)) + { + res= 0; + break; + } + } + } + + if (acl_user) + { + for (i=0 ; i < acl_dbs.elements ; i++) + { + ACL_DB *acl_db= dynamic_element(&acl_dbs, i, ACL_DB*); + if (!acl_db->user || + (thd->user && thd->user[0] && !strcmp(thd->user, acl_db->user))) + { + if (compare_hostname(&acl_db->host, thd->host, thd->ip)) + { + if (!acl_db->db || (thd->db && !strcmp(acl_db->db, thd->db))) + { + thd->db_access= acl_db->access; + break; + } + } + } + } + thd->master_access= acl_user->access; + thd->priv_user= acl_user->user ? thd->user : (char *) ""; + + if (acl_user->host.hostname) + strmake(thd->priv_host, acl_user->host.hostname, MAX_HOSTNAME); + else + *thd->priv_host= 0; + } + VOID(pthread_mutex_unlock(&acl_cache->lock)); + DBUG_RETURN(res); +} + static byte* check_get_key(ACL_USER *buff,uint *length, my_bool not_used __attribute__((unused))) { @@ -831,15 +940,17 @@ static void acl_update_user(const char *user, const char *host, { if (!acl_user->host.hostname && !host[0] || acl_user->host.hostname && - !my_strcasecmp(&my_charset_latin1, host, acl_user->host.hostname)) + !my_strcasecmp(system_charset_info, host, acl_user->host.hostname)) { acl_user->access=privileges; - if (mqh->bits & 1) + if (mqh->specified_limits & USER_RESOURCES::QUERIES_PER_HOUR) acl_user->user_resource.questions=mqh->questions; - if (mqh->bits & 2) + if (mqh->specified_limits & USER_RESOURCES::UPDATES_PER_HOUR) acl_user->user_resource.updates=mqh->updates; - if (mqh->bits & 4) - acl_user->user_resource.connections=mqh->connections; + if (mqh->specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR) + acl_user->user_resource.conn_per_hour= mqh->conn_per_hour; + if (mqh->specified_limits & USER_RESOURCES::USER_CONNECTIONS) + acl_user->user_resource.user_conn= mqh->user_conn; if (ssl_type != SSL_TYPE_NOT_SPECIFIED) { acl_user->ssl_type= ssl_type; @@ -910,7 +1021,7 @@ static void acl_update_db(const char *user, const char *host, const char *db, { if (!acl_db->host.hostname && !host[0] || acl_db->host.hostname && - !my_strcasecmp(&my_charset_latin1, host, acl_db->host.hostname)) + !my_strcasecmp(system_charset_info, host, acl_db->host.hostname)) { if (!acl_db->db && !db[0] || acl_db->db && !strcmp(db,acl_db->db)) @@ -964,7 +1075,7 @@ static void acl_insert_db(const char *user, const char *host, const char *db, ulong acl_get(const char *host, const char *ip, const char *user, const char *db, my_bool db_is_pattern) { - ulong host_access= ~0,db_access= 0; + ulong host_access= ~0, db_access= 0; uint i,key_length; char key[ACL_KEY_LENGTH],*tmp_db,*end; acl_entry *entry; @@ -1052,7 +1163,7 @@ static void init_check_host(void) DBUG_ENTER("init_check_host"); VOID(my_init_dynamic_array(&acl_wild_hosts,sizeof(struct acl_host_and_ip), acl_users.elements,1)); - VOID(hash_init(&acl_check_hosts,&my_charset_latin1,acl_users.elements,0,0, + VOID(hash_init(&acl_check_hosts,system_charset_info,acl_users.elements,0,0, (hash_get_key) check_get_key,0,0)); if (!allow_all_hosts) { @@ -1068,7 +1179,7 @@ static void init_check_host(void) { // Check if host already exists acl_host_and_ip 
*acl=dynamic_element(&acl_wild_hosts,j, acl_host_and_ip *); - if (!my_strcasecmp(&my_charset_latin1, + if (!my_strcasecmp(system_charset_info, acl_user->host.hostname, acl->hostname)) break; // already stored } @@ -1139,29 +1250,27 @@ bool check_change_password(THD *thd, const char *host, const char *user, { if (!initialized) { - net_printf(thd,ER_OPTION_PREVENTS_STATEMENT, - "--skip-grant-tables"); + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); return(1); } if (!thd->slave_thread && (strcmp(thd->user,user) || - my_strcasecmp(&my_charset_latin1, host, thd->host_or_ip))) + my_strcasecmp(system_charset_info, host, thd->host_or_ip))) { if (check_access(thd, UPDATE_ACL, "mysql",0,1,0)) return(1); } if (!thd->slave_thread && !thd->user[0]) { - send_error(thd, ER_PASSWORD_ANONYMOUS_USER); + my_message(ER_PASSWORD_ANONYMOUS_USER, ER(ER_PASSWORD_ANONYMOUS_USER), + MYF(0)); return(1); } uint len=strlen(new_password); if (len && len != SCRAMBLED_PASSWORD_CHAR_LENGTH && len != SCRAMBLED_PASSWORD_CHAR_LENGTH_323) { - net_printf(thd, 0, - "Password hash should be a %d-digit hexadecimal number", - SCRAMBLED_PASSWORD_CHAR_LENGTH); + my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); return -1; } return(0); @@ -1199,7 +1308,7 @@ bool change_password(THD *thd, const char *host, const char *user, if (!(acl_user= find_acl_user(host, user))) { VOID(pthread_mutex_unlock(&acl_cache->lock)); - send_error(thd, ER_PASSWORD_NO_MATCH); + my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), MYF(0)); DBUG_RETURN(1); } /* update loaded acl entry: */ @@ -1212,7 +1321,6 @@ bool change_password(THD *thd, const char *host, const char *user, new_password, new_password_len)) { VOID(pthread_mutex_unlock(&acl_cache->lock)); /* purecov: deadcode */ - send_error(thd,0); /* purecov: deadcode */ DBUG_RETURN(1); /* purecov: deadcode */ } @@ -1227,7 +1335,6 @@ bool change_password(THD *thd, const char *host, const char *user, acl_user->host.hostname ? 
acl_user->host.hostname : "", new_password)); thd->clear_error(); - mysql_update_log.write(thd, buff, query_length); Query_log_event qinfo(thd, buff, query_length, 0, FALSE); mysql_bin_log.write(&qinfo); DBUG_RETURN(0); @@ -1317,7 +1424,7 @@ static bool compare_hostname(const acl_host_and_ip *host, const char *hostname, return (tmp & host->ip_mask) == host->ip; } return (!host->hostname || - (hostname && !wild_case_compare(&my_charset_latin1, + (hostname && !wild_case_compare(system_charset_info, hostname,host->hostname)) || (ip && !wild_compare(ip,host->hostname,0))); } @@ -1330,7 +1437,7 @@ bool hostname_requires_resolving(const char *hostname) int namelen= strlen(hostname); int lhlen= strlen(my_localhost); if ((namelen == lhlen) && - !my_strnncoll(&my_charset_latin1, (const uchar *)hostname, namelen, + !my_strnncoll(system_charset_info, (const uchar *)hostname, namelen, (const uchar *)my_localhost, strlen(my_localhost))) return FALSE; for (; (cur=*hostname); hostname++) @@ -1352,6 +1459,7 @@ static bool update_user_table(THD *thd, const char *host, const char *user, TABLE_LIST tables; TABLE *table; bool error=1; + char user_key[MAX_KEY_LENGTH]; DBUG_ENTER("update_user_table"); DBUG_PRINT("enter",("user: %s host: %s",user,host)); @@ -1379,20 +1487,22 @@ static bool update_user_table(THD *thd, const char *host, const char *user, if (!(table=open_ltable(thd,&tables,TL_WRITE))) DBUG_RETURN(1); /* purecov: deadcode */ - table->field[0]->store(host,(uint) strlen(host), &my_charset_latin1); - table->field[1]->store(user,(uint) strlen(user), &my_charset_latin1); + table->field[0]->store(host,(uint) strlen(host), system_charset_info); + table->field[1]->store(user,(uint) strlen(user), system_charset_info); + key_copy(user_key, table->record[0], table->key_info, + table->key_info->key_length); table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, + if (table->file->index_read_idx(table->record[0], 0, + user_key, table->key_info->key_length, HA_READ_KEY_EXACT)) { - my_error(ER_PASSWORD_NO_MATCH,MYF(0)); /* purecov: deadcode */ + my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), + MYF(0)); /* purecov: deadcode */ DBUG_RETURN(1); /* purecov: deadcode */ } store_record(table,record[1]); - table->field[2]->store(new_password, new_password_len, &my_charset_latin1); + table->field[2]->store(new_password, new_password_len, system_charset_info); if ((error=table->file->update_row(table->record[1],table->record[0]))) { table->file->print_error(error,MYF(0)); /* purecov: deadcode */ @@ -1444,7 +1554,10 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, const char *password= ""; uint password_len= 0; char what= (revoke_grant) ? 
'N' : 'Y'; + byte user_key[MAX_KEY_LENGTH]; + LEX *lex= thd->lex; DBUG_ENTER("replace_user_table"); + safe_mutex_assert_owner(&acl_cache->lock); if (combo.password.str && combo.password.str[0]) @@ -1452,49 +1565,76 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, if (combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH && combo.password.length != SCRAMBLED_PASSWORD_CHAR_LENGTH_323) { - my_printf_error(ER_UNKNOWN_ERROR, - "Password hash should be a %d-digit hexadecimal number", - MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); + my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); DBUG_RETURN(-1); } password_len= combo.password.length; password=combo.password.str; } - table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); - table->field[1]->store(combo.user.str,combo.user.length, &my_charset_latin1); + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(combo.user.str,combo.user.length, system_charset_info); + key_copy(user_key, table->record[0], table->key_info, + table->key_info->key_length); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0], 0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, - HA_READ_KEY_EXACT)) + user_key, table->key_info->key_length, + HA_READ_KEY_EXACT)) { - if (!create_user) + /* what == 'N' means revoke */ + if (what == 'N') { - if (what == 'N') - my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str); - else - my_error(ER_NO_PERMISSION_TO_CREATE_USER, MYF(0), - thd->user, thd->host_or_ip); + my_error(ER_NONEXISTING_GRANT, MYF(0), combo.user.str, combo.host.str); + goto end; + } + /* + There are four options which affect the process of creation of + a new user(mysqld option --safe-create-user, 'insert' privilege + on 'mysql.user' table, using 'GRANT' with 'IDENTIFIED BY' and + SQL_MODE flag NO_AUTO_CREATE_USER). Below is the simplified rule + how it should work. + if (safe-user-create && ! 
INSERT_priv) => reject + else if (identified_by) => create + else if (no_auto_create_user) => reject + else create + */ + else if (((thd->variables.sql_mode & MODE_NO_AUTO_CREATE_USER) && + !password_len) || !create_user) + { + my_error(ER_NO_PERMISSION_TO_CREATE_USER, MYF(0), + thd->user, thd->host_or_ip); goto end; } old_row_exists = 0; restore_record(table,default_values); // cp empty row from default_values table->field[0]->store(combo.host.str,combo.host.length, - &my_charset_latin1); + system_charset_info); table->field[1]->store(combo.user.str,combo.user.length, - &my_charset_latin1); + system_charset_info); table->field[2]->store(password, password_len, - &my_charset_latin1); + system_charset_info); } else { + /* + Check that the user isn't trying to change a password for another + user if he doesn't have UPDATE privilege to the MySQL database + */ + DBUG_ASSERT(combo.host.str); + if (thd->user && combo.password.str && + (strcmp(thd->user,combo.user.str) || + my_strcasecmp(system_charset_info, + combo.host.str, thd->host_or_ip)) && + check_access(thd, UPDATE_ACL, "mysql",0,1,0)) + goto end; old_row_exists = 1; store_record(table,record[1]); // Save copy for update if (combo.password.str) // If password given - table->field[2]->store(password, password_len, &my_charset_latin1); - else if (!rights && !revoke_grant && thd->lex->ssl_type == SSL_TYPE_NOT_SPECIFIED && - !thd->lex->mqh.bits) + table->field[2]->store(password, password_len, system_charset_info); + else if (!rights && !revoke_grant && + lex->ssl_type == SSL_TYPE_NOT_SPECIFIED && + !lex->mqh.specified_limits) { DBUG_RETURN(0); } @@ -1518,7 +1658,7 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, if (table->fields >= 31) /* From 4.0.0 we have more fields */ { /* We write down SSL related ACL stuff */ - switch (thd->lex->ssl_type) { + switch (lex->ssl_type) { case SSL_TYPE_ANY: table->field[next_field]->store("ANY", 3, &my_charset_latin1); table->field[next_field+1]->store("", 0, &my_charset_latin1); @@ -1536,18 +1676,15 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, table->field[next_field+1]->store("", 0, &my_charset_latin1); table->field[next_field+2]->store("", 0, &my_charset_latin1); table->field[next_field+3]->store("", 0, &my_charset_latin1); - if (thd->lex->ssl_cipher) - table->field[next_field+1]->store(thd->lex->ssl_cipher, - strlen(thd->lex->ssl_cipher), - &my_charset_latin1); - if (thd->lex->x509_issuer) - table->field[next_field+2]->store(thd->lex->x509_issuer, - strlen(thd->lex->x509_issuer), - &my_charset_latin1); - if (thd->lex->x509_subject) - table->field[next_field+3]->store(thd->lex->x509_subject, - strlen(thd->lex->x509_subject), - &my_charset_latin1); + if (lex->ssl_cipher) + table->field[next_field+1]->store(lex->ssl_cipher, + strlen(lex->ssl_cipher), system_charset_info); + if (lex->x509_issuer) + table->field[next_field+2]->store(lex->x509_issuer, + strlen(lex->x509_issuer), system_charset_info); + if (lex->x509_subject) + table->field[next_field+3]->store(lex->x509_subject, + strlen(lex->x509_subject), system_charset_info); break; case SSL_TYPE_NOT_SPECIFIED: break; @@ -1558,18 +1695,19 @@ static int replace_user_table(THD *thd, TABLE *table, const LEX_USER &combo, table->field[next_field+3]->store("", 0, &my_charset_latin1); break; } + next_field+=4; - /* Skip over SSL related fields to first user limits related field */ - next_field+= 4; - - USER_RESOURCES mqh= thd->lex->mqh; - if (mqh.bits & 1) + USER_RESOURCES mqh= lex->mqh; + if 
(mqh.specified_limits & USER_RESOURCES::QUERIES_PER_HOUR) table->field[next_field]->store((longlong) mqh.questions); - if (mqh.bits & 2) + if (mqh.specified_limits & USER_RESOURCES::UPDATES_PER_HOUR) table->field[next_field+1]->store((longlong) mqh.updates); - if (mqh.bits & 4) - table->field[next_field+2]->store((longlong) mqh.connections); - mqh_used = mqh_used || mqh.questions || mqh.updates || mqh.connections; + if (mqh.specified_limits & USER_RESOURCES::CONNECTIONS_PER_HOUR) + table->field[next_field+2]->store((longlong) mqh.conn_per_hour); + if (table->fields >= 36 && + (mqh.specified_limits & USER_RESOURCES::USER_CONNECTIONS)) + table->field[next_field+3]->store((longlong) mqh.user_conn); + mqh_used= mqh_used || mqh.questions || mqh.updates || mqh.conn_per_hour; } if (old_row_exists) { @@ -1605,19 +1743,19 @@ end: if (old_row_exists) acl_update_user(combo.user.str, combo.host.str, combo.password.str, password_len, - thd->lex->ssl_type, - thd->lex->ssl_cipher, - thd->lex->x509_issuer, - thd->lex->x509_subject, - &thd->lex->mqh, + lex->ssl_type, + lex->ssl_cipher, + lex->x509_issuer, + lex->x509_subject, + &lex->mqh, rights); else acl_insert_user(combo.user.str, combo.host.str, password, password_len, - thd->lex->ssl_type, - thd->lex->ssl_cipher, - thd->lex->x509_issuer, - thd->lex->x509_subject, - &thd->lex->mqh, + lex->ssl_type, + lex->ssl_cipher, + lex->x509_issuer, + lex->x509_subject, + &lex->mqh, rights); } DBUG_RETURN(error); @@ -1637,6 +1775,7 @@ static int replace_db_table(TABLE *table, const char *db, bool old_row_exists=0; int error; char what= (revoke_grant) ? 'N' : 'Y'; + byte user_key[MAX_KEY_LENGTH]; DBUG_ENTER("replace_db_table"); if (!initialized) @@ -1648,18 +1787,20 @@ static int replace_db_table(TABLE *table, const char *db, /* Check if there is such a user in user table in memory? 
*/ if (!find_acl_user(combo.host.str,combo.user.str)) { - my_error(ER_PASSWORD_NO_MATCH,MYF(0)); + my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), MYF(0)); DBUG_RETURN(-1); } - table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); - table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); - table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(db,(uint) strlen(db), system_charset_info); + table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); + key_copy(user_key, table->record[0], table->key_info, + table->key_info->key_length); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, - HA_READ_KEY_EXACT)) + user_key, table->key_info->key_length, + HA_READ_KEY_EXACT)) { if (what == 'N') { // no row, no revoke @@ -1668,9 +1809,9 @@ static int replace_db_table(TABLE *table, const char *db, } old_row_exists = 0; restore_record(table,default_values); // cp empty row from default_values - table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); - table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); - table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(db,(uint) strlen(db), system_charset_info); + table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); } else { @@ -1746,30 +1887,43 @@ static byte* get_key_column(GRANT_COLUMN *buff,uint *length, } -class GRANT_TABLE :public Sql_alloc +class GRANT_NAME :public Sql_alloc { public: char *host,*db, *user, *tname, *hash_key, *orig_host; - ulong privs, cols; + ulong privs; ulong sort; uint key_length; + GRANT_NAME(const char *h, const char *d,const char *u, + const char *t, ulong p); + GRANT_NAME (TABLE *form); + virtual ~GRANT_NAME() {}; + virtual bool ok() { return privs != 0; } +}; + + +class GRANT_TABLE :public GRANT_NAME +{ +public: + ulong cols; HASH hash_columns; GRANT_TABLE(const char *h, const char *d,const char *u, const char *t, ulong p, ulong c); GRANT_TABLE (TABLE *form, TABLE *col_privs); + ~GRANT_TABLE(); bool ok() { return privs != 0 || cols != 0; } }; -GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, - const char *t, ulong p, ulong c) - :privs(p), cols(c) +GRANT_NAME::GRANT_NAME(const char *h, const char *d,const char *u, + const char *t, ulong p) + :privs(p) { /* Host given by user */ orig_host= strdup_root(&memex,h); - /* Convert empty hostname to '%' for easy comparision */ + /* Convert empty hostname to '%' for easy comparison */ host= orig_host[0] ? 
orig_host : (char*) "%"; db = strdup_root(&memex,d); user = strdup_root(&memex,u); @@ -1783,15 +1937,20 @@ GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, key_length =(uint) strlen(d)+(uint) strlen(u)+(uint) strlen(t)+3; hash_key = (char*) alloc_root(&memex,key_length); strmov(strmov(strmov(hash_key,user)+1,db)+1,tname); - (void) hash_init(&hash_columns,&my_charset_latin1, - 0,0,0, (hash_get_key) get_key_column,0,0); } -GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) +GRANT_TABLE::GRANT_TABLE(const char *h, const char *d,const char *u, + const char *t, ulong p, ulong c) + :GRANT_NAME(h,d,u,t,p), cols(c) { - byte key[MAX_KEY_LENGTH]; + (void) hash_init(&hash_columns,system_charset_info, + 0,0,0, (hash_get_key) get_key_column,0,0); +} + +GRANT_NAME::GRANT_NAME(TABLE *form) +{ orig_host= host= get_field(&memex, form->field[0]); db= get_field(&memex,form->field[1]); user= get_field(&memex,form->field[2]); @@ -1807,7 +1966,7 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) if (!db || !tname) { /* Wrong table row; Ignore it */ - privs = cols = 0; /* purecov: inspected */ + privs= 0; return; /* purecov: inspected */ } if (lower_case_table_names) @@ -1820,30 +1979,48 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) hash_key = (char*) alloc_root(&memex,key_length); strmov(strmov(strmov(hash_key,user)+1,db)+1,tname); privs = (ulong) form->field[6]->val_int(); - cols = (ulong) form->field[7]->val_int(); privs = fix_rights_for_table(privs); +} + + +GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) + :GRANT_NAME(form) +{ + byte key[MAX_KEY_LENGTH]; + + if (!db || !tname) + { + /* Wrong table row; Ignore it */ + hash_clear(&hash_columns); /* allow for destruction */ + cols= 0; + return; + } + cols= (ulong) form->field[7]->val_int(); cols = fix_rights_for_column(cols); - (void) hash_init(&hash_columns,&my_charset_latin1, + (void) hash_init(&hash_columns,system_charset_info, 0,0,0, (hash_get_key) get_key_column,0,0); if (cols) { - int key_len; + uint key_prefix_len; + KEY_PART_INFO *key_part= col_privs->key_info->key_part; col_privs->field[0]->store(orig_host,(uint) strlen(orig_host), - &my_charset_latin1); - col_privs->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); - col_privs->field[2]->store(user,(uint) strlen(user), &my_charset_latin1); - col_privs->field[3]->store(tname,(uint) strlen(tname), &my_charset_latin1); - key_len=(col_privs->field[0]->pack_length()+ - col_privs->field[1]->pack_length()+ - col_privs->field[2]->pack_length()+ - col_privs->field[3]->pack_length()); - key_copy(key,col_privs,0,key_len); + system_charset_info); + col_privs->field[1]->store(db,(uint) strlen(db), system_charset_info); + col_privs->field[2]->store(user,(uint) strlen(user), system_charset_info); + col_privs->field[3]->store(tname,(uint) strlen(tname), system_charset_info); + + key_prefix_len= (key_part[0].store_length + + key_part[1].store_length + + key_part[2].store_length + + key_part[3].store_length); + key_copy(key, col_privs->record[0], col_privs->key_info, key_prefix_len); col_privs->field[4]->store("",0, &my_charset_latin1); + col_privs->file->ha_index_init(0); if (col_privs->file->index_read(col_privs->record[0], - (byte*) col_privs->field[0]->ptr, - key_len, HA_READ_KEY_EXACT)) + (byte*) key, + key_prefix_len, HA_READ_KEY_EXACT)) { cols = 0; /* purecov: deadcode */ col_privs->file->ha_index_end(); @@ -1865,13 +2042,19 @@ GRANT_TABLE::GRANT_TABLE(TABLE *form, TABLE *col_privs) } my_hash_insert(&hash_columns, (byte *) mem_check); } while 
(!col_privs->file->index_next(col_privs->record[0]) && - !key_cmp_if_same(col_privs,key,0,key_len)); + !key_cmp_if_same(col_privs,key,0,key_prefix_len)); col_privs->file->ha_index_end(); } } -static byte* get_grant_table(GRANT_TABLE *buff,uint *length, +GRANT_TABLE::~GRANT_TABLE() +{ + hash_free(&hash_columns); +} + + +static byte* get_grant_table(GRANT_NAME *buff,uint *length, my_bool not_used __attribute__((unused))) { *length=buff->key_length; @@ -1887,44 +2070,62 @@ void free_grant_table(GRANT_TABLE *grant_table) /* Search after a matching grant. Prefer exact grants before not exact ones */ -static GRANT_TABLE *table_hash_search(const char *host,const char* ip, +static GRANT_NAME *name_hash_search(HASH *name_hash, + const char *host,const char* ip, const char *db, const char *user, const char *tname, bool exact) { char helping [NAME_LEN*2+USERNAME_LENGTH+3]; uint len; - GRANT_TABLE *grant_table,*found=0; + GRANT_NAME *grant_name,*found=0; len = (uint) (strmov(strmov(strmov(helping,user)+1,db)+1,tname)-helping)+ 1; - for (grant_table=(GRANT_TABLE*) hash_search(&column_priv_hash, + for (grant_name=(GRANT_NAME*) hash_search(name_hash, (byte*) helping, len) ; - grant_table ; - grant_table= (GRANT_TABLE*) hash_next(&column_priv_hash,(byte*) helping, + grant_name ; + grant_name= (GRANT_NAME*) hash_next(name_hash,(byte*) helping, len)) { if (exact) { if ((host && - !my_strcasecmp(&my_charset_latin1, host, grant_table->host)) || - (ip && !strcmp(ip,grant_table->host))) - return grant_table; + !my_strcasecmp(system_charset_info, host, grant_name->host)) || + (ip && !strcmp(ip,grant_name->host))) + return grant_name; } else { - if (((host && !wild_case_compare(&my_charset_latin1, - host,grant_table->host)) || - (ip && !wild_case_compare(&my_charset_latin1, - ip,grant_table->host))) && - (!found || found->sort < grant_table->sort)) - found=grant_table; // Host ok + if (((host && !wild_case_compare(system_charset_info, + host,grant_name->host)) || + (ip && !wild_case_compare(system_charset_info, + ip,grant_name->host))) && + (!found || found->sort < grant_name->sort)) + found=grant_name; // Host ok } } return found; } +inline GRANT_NAME * +proc_hash_search(const char *host, const char *ip, const char *db, + const char *user, const char *tname, bool exact) +{ + return (GRANT_TABLE*) name_hash_search(&proc_priv_hash, host, ip, db, + user, tname, exact); +} + + +inline GRANT_TABLE * +table_hash_search(const char *host, const char *ip, const char *db, + const char *user, const char *tname, bool exact) +{ + return (GRANT_TABLE*) name_hash_search(&column_priv_hash, host, ip, db, + user, tname, exact); +} + inline GRANT_COLUMN * column_hash_search(GRANT_TABLE *t, const char *cname, uint length) @@ -1940,19 +2141,22 @@ static int replace_column_table(GRANT_TABLE *g_t, ulong rights, bool revoke_grant) { int error=0,result=0; - uint key_length; byte key[MAX_KEY_LENGTH]; + uint key_prefix_length; + KEY_PART_INFO *key_part= table->key_info->key_part; DBUG_ENTER("replace_column_table"); - table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); - table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); - table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); - table->field[3]->store(table_name,(uint) strlen(table_name), &my_charset_latin1); - key_length=(table->field[0]->pack_length()+ table->field[1]->pack_length()+ - table->field[2]->pack_length()+ table->field[3]->pack_length()); - key_copy(key,table,0,key_length); + 
table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(db,(uint) strlen(db), system_charset_info); + table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); + table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info); - rights &= COL_ACLS; // Only ACL for columns + /* Get length of 3 first key parts */ + key_prefix_length= (key_part[0].store_length + key_part[1].store_length + + key_part[2].store_length + key_part[3].store_length); + key_copy(key, table->record[0], table->key_info, key_prefix_length); + + rights&= COL_ACLS; // Only ACL for columns /* first fix privileges for all columns in column list */ @@ -1963,27 +2167,35 @@ static int replace_column_table(GRANT_TABLE *g_t, { ulong privileges = xx->rights; bool old_row_exists=0; - key_restore(table,key,0,key_length); + byte user_key[MAX_KEY_LENGTH]; + + key_restore(table->record[0],key,table->key_info, + key_prefix_length); table->field[4]->store(xx->column.ptr(),xx->column.length(), - &my_charset_latin1); + system_charset_info); + /* Get key for the first 4 columns */ + key_copy(user_key, table->record[0], table->key_info, + table->key_info->key_length); table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (table->file->index_read(table->record[0],(byte*) table->field[0]->ptr, - table->key_info[0].key_length, - HA_READ_KEY_EXACT)) + if (table->file->index_read(table->record[0], user_key, + table->key_info->key_length, + HA_READ_KEY_EXACT)) { if (revoke_grant) { my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), - combo.user.str, combo.host.str, table_name); /* purecov: inspected */ + combo.user.str, combo.host.str, + table_name); /* purecov: inspected */ result= -1; /* purecov: inspected */ continue; /* purecov: inspected */ } old_row_exists = 0; restore_record(table,default_values); // Get empty record - key_restore(table,key,0,key_length); + key_restore(table->record[0],key,table->key_info, + key_prefix_length); table->field[4]->store(xx->column.ptr(),xx->column.length(), - &my_charset_latin1); + system_charset_info); } else { @@ -2038,10 +2250,14 @@ static int replace_column_table(GRANT_TABLE *g_t, if (revoke_grant) { + byte user_key[MAX_KEY_LENGTH]; + key_copy(user_key, table->record[0], table->key_info, + key_prefix_length); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (table->file->index_read(table->record[0], (byte*) table->field[0]->ptr, - key_length, - HA_READ_KEY_EXACT)) + if (table->file->index_read(table->record[0], user_key, + key_prefix_length, + HA_READ_KEY_EXACT)) goto end; /* Scan through all rows with the same host,db,user and table */ @@ -2055,7 +2271,8 @@ static int replace_column_table(GRANT_TABLE *g_t, { GRANT_COLUMN *grant_column = NULL; char colum_name_buf[HOSTNAME_LENGTH+1]; - String column_name(colum_name_buf,sizeof(colum_name_buf),&my_charset_latin1); + String column_name(colum_name_buf,sizeof(colum_name_buf), + system_charset_info); privileges&= ~rights; table->field[6]->store((longlong) @@ -2091,7 +2308,7 @@ static int replace_column_table(GRANT_TABLE *g_t, } } } while (!table->file->index_next(table->record[0]) && - !key_cmp_if_same(table,key,0,key_length)); + !key_cmp_if_same(table, key, 0, key_prefix_length)); } end: @@ -2110,6 +2327,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, int old_row_exists = 1; int error=0; ulong store_table_rights, store_col_rights; + byte user_key[MAX_KEY_LENGTH]; DBUG_ENTER("replace_table_table"); strxmov(grantor, thd->user, "@", thd->host_or_ip, 
NullS); @@ -2120,20 +2338,23 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, */ if (!find_acl_user(combo.host.str,combo.user.str)) { - my_error(ER_PASSWORD_NO_MATCH,MYF(0)); /* purecov: deadcode */ + my_message(ER_PASSWORD_NO_MATCH, ER(ER_PASSWORD_NO_MATCH), + MYF(0)); /* purecov: deadcode */ DBUG_RETURN(-1); /* purecov: deadcode */ } restore_record(table,default_values); // Get empty record - table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); - table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); - table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); - table->field[3]->store(table_name,(uint) strlen(table_name), &my_charset_latin1); + table->field[0]->store(combo.host.str,combo.host.length, system_charset_info); + table->field[1]->store(db,(uint) strlen(db), system_charset_info); + table->field[2]->store(combo.user.str,combo.user.length, system_charset_info); + table->field[3]->store(table_name,(uint) strlen(table_name), system_charset_info); store_record(table,record[1]); // store at pos 1 + key_copy(user_key, table->record[0], table->key_info, + table->key_info->key_length); + table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (table->file->index_read_idx(table->record[0],0, - (byte*) table->field[0]->ptr, - table->key_info[0].key_length, + if (table->file->index_read_idx(table->record[0], 0, + user_key, table->key_info->key_length, HA_READ_KEY_EXACT)) { /* @@ -2145,7 +2366,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, { // no row, no revoke my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), combo.user.str, combo.host.str, - table_name); /* purecov: deadcode */ + table_name); /* purecov: deadcode */ DBUG_RETURN(-1); /* purecov: deadcode */ } old_row_exists = 0; @@ -2173,7 +2394,7 @@ static int replace_table_table(THD *thd, GRANT_TABLE *grant_table, } } - table->field[4]->store(grantor,(uint) strlen(grantor), &my_charset_latin1); + table->field[4]->store(grantor,(uint) strlen(grantor), system_charset_info); table->field[6]->store((longlong) store_table_rights); table->field[7]->store((longlong) store_col_rights); rights=fix_rights_for_table(store_table_rights); @@ -2214,6 +2435,117 @@ table_error: } +static int replace_proc_table(THD *thd, GRANT_NAME *grant_name, + TABLE *table, const LEX_USER &combo, + const char *db, const char *proc_name, + ulong rights, bool revoke_grant) +{ + char grantor[HOSTNAME_LENGTH+USERNAME_LENGTH+2]; + int old_row_exists= 1; + int error=0; + ulong store_proc_rights; + DBUG_ENTER("replace_proc_table"); + + if (!initialized) + { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); + DBUG_RETURN(-1); + } + + strxmov(grantor, thd->user, "@", thd->host_or_ip, NullS); + + /* + The following should always succeed as new users are created before + this function is called! 
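The hunks above replace the old "raw field pointer as key" calls with explicit key images: a prefix length is summed from the leading key parts (host, db, user and table name in these tables) and the prefix is saved and restored around the per-column loop with key_copy()/key_restore(). The following is a minimal standalone sketch of that idea only, not the server's key_copy()/key_restore(); the KeyPart layout, offsets and lengths are invented for illustration.

#include <cstring>
#include <cstdio>

struct KeyPart { unsigned offset; unsigned store_length; };

static unsigned key_prefix_length(const KeyPart *parts, unsigned n)
{
  unsigned len= 0;
  for (unsigned i= 0; i < n; i++)
    len+= parts[i].store_length;
  return len;
}

/* Save 'length' bytes of key image out of the record; the key parts are
   assumed to lie contiguously starting at parts[0].offset. */
static void key_copy_sketch(char *key, const char *record,
                            const KeyPart *parts, unsigned length)
{
  memcpy(key, record + parts[0].offset, length);
}

static void key_restore_sketch(char *record, const char *key,
                               const KeyPart *parts, unsigned length)
{
  memcpy(record + parts[0].offset, key, length);
}

int main()
{
  /* Hypothetical record layout: host(60), db(64), user(16), table_name(64). */
  KeyPart parts[]= { {0,60}, {60,64}, {124,16}, {140,64} };
  char record[256]= "localhost";
  char key[256];

  unsigned plen= key_prefix_length(parts, 4);     /* leading key parts only */
  key_copy_sketch(key, record, parts, plen);      /* save the key prefix    */
  memset(record, 0, sizeof(record));              /* row buffer overwritten */
  key_restore_sketch(record, key, parts, plen);   /* ...and restored        */
  printf("prefix length: %u, restored host: %s\n", plen, record);
  return 0;
}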
+ */ + if (!find_acl_user(combo.host.str,combo.user.str)) + { + my_error(ER_PASSWORD_NO_MATCH,MYF(0)); + DBUG_RETURN(-1); + } + + restore_record(table,default_values); // Get empty record + table->field[0]->store(combo.host.str,combo.host.length, &my_charset_latin1); + table->field[1]->store(db,(uint) strlen(db), &my_charset_latin1); + table->field[2]->store(combo.user.str,combo.user.length, &my_charset_latin1); + table->field[3]->store(proc_name,(uint) strlen(proc_name), &my_charset_latin1); + store_record(table,record[1]); // store at pos 1 + + if (table->file->index_read_idx(table->record[0],0, + (byte*) table->field[0]->ptr,0, + HA_READ_KEY_EXACT)) + { + /* + The following should never happen as we first check the in memory + grant tables for the user. There is however always a small change that + the user has modified the grant tables directly. + */ + if (revoke_grant) + { // no row, no revoke + my_error(ER_NONEXISTING_PROC_GRANT, MYF(0), + combo.user.str, combo.host.str, proc_name); + DBUG_RETURN(-1); + } + old_row_exists= 0; + restore_record(table,record[1]); // Get saved record + } + + store_proc_rights= get_rights_for_procedure(rights); + if (old_row_exists) + { + ulong j; + store_record(table,record[1]); + j= (ulong) table->field[6]->val_int(); + + if (revoke_grant) + { + /* column rights are already fixed in mysql_table_grant */ + store_proc_rights=j & ~store_proc_rights; + } + else + { + store_proc_rights|= j; + } + } + + table->field[4]->store(grantor,(uint) strlen(grantor), &my_charset_latin1); + table->field[6]->store((longlong) store_proc_rights); + rights=fix_rights_for_procedure(store_proc_rights); + + if (old_row_exists) + { + if (store_proc_rights) + { + if ((error=table->file->update_row(table->record[1],table->record[0]))) + goto table_error; + } + else if ((error= table->file->delete_row(table->record[1]))) + goto table_error; + } + else + { + error=table->file->write_row(table->record[0]); + if (error && error != HA_ERR_FOUND_DUPP_KEY) + goto table_error; + } + + if (rights) + { + grant_name->privs= rights; + } + else + { + hash_delete(&proc_priv_hash,(byte*) grant_name); + } + DBUG_RETURN(0); + + /* This should never happen */ +table_error: + table->file->print_error(error,MYF(0)); + DBUG_RETURN(-1); +} + + /* Store table level and column level grants in the privilege tables @@ -2227,11 +2559,11 @@ table_error: revoke_grant Set to 1 if this is a REVOKE command RETURN - 0 ok - 1 error + FALSE ok + TRUE error */ -int mysql_table_grant(THD *thd, TABLE_LIST *table_list, +bool mysql_table_grant(THD *thd, TABLE_LIST *table_list, List <LEX_USER> &user_list, List <LEX_COLUMN> &columns, ulong rights, bool revoke_grant) @@ -2241,45 +2573,48 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, LEX_USER *Str; TABLE_LIST tables[3]; bool create_new_users=0; + char *db_name, *real_name; DBUG_ENTER("mysql_table_grant"); if (!initialized) { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); /* purecov: inspected */ - DBUG_RETURN(-1); /* purecov: inspected */ + DBUG_RETURN(TRUE); /* purecov: inspected */ } if (rights & ~TABLE_ACLS) { - my_error(ER_ILLEGAL_GRANT_FOR_TABLE,MYF(0)); - DBUG_RETURN(-1); + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), + MYF(0)); + DBUG_RETURN(TRUE); } if (!revoke_grant) { - if (columns.elements && !revoke_grant) + if (columns.elements) { - TABLE *table; class LEX_COLUMN *column; List_iterator <LEX_COLUMN> column_iter(columns); + int res; + + if (open_and_lock_tables(thd, table_list)) + DBUG_RETURN(TRUE); 
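replace_proc_table() merges the requested rights into the stored bitmask for GRANT and clears them for REVOKE, deleting the procs_priv row when nothing is left. A reduced sketch of that bit arithmetic, with made-up bit constants (the server's ACL bit values differ):

#include <cstdio>

/* Hypothetical bit assignments, for illustration only. */
static const unsigned long EXECUTE_BIT=    1UL << 0;
static const unsigned long ALTER_PROC_BIT= 1UL << 1;
static const unsigned long GRANT_BIT=      1UL << 2;

/* Returns the bitmask to store; 0 means "delete the privilege row". */
static unsigned long combine_rights(unsigned long existing,
                                    unsigned long requested, bool revoke)
{
  return revoke ? (existing & ~requested)  /* drop the revoked bits */
                : (existing | requested);  /* add the granted bits  */
}

int main()
{
  unsigned long row= EXECUTE_BIT | GRANT_BIT;
  row= combine_rights(row, ALTER_PROC_BIT, false);         /* GRANT ALTER ROUTINE */
  printf("after grant:  %#lx\n", row);
  row= combine_rights(row, EXECUTE_BIT | GRANT_BIT, true); /* REVOKE EXECUTE, GRANT OPTION */
  printf("after revoke: %#lx (0 would mean: delete the row)\n", row);
  return 0;
}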
- if (!(table=open_ltable(thd,table_list,TL_READ))) - DBUG_RETURN(-1); while ((column = column_iter++)) { uint unused_field_idx= NO_CACHED_FIELD_INDEX; - Field *f= find_field_in_table(thd,table,column->column.ptr(), - column->column.length(),1,0,&unused_field_idx); - if (!f) + Field *f=find_field_in_table(thd, table_list, column->column.ptr(), + column->column.ptr(), + column->column.length(), 0, 1, 1, 0, + &unused_field_idx, FALSE); + if (f == (Field*)0) { my_error(ER_BAD_FIELD_ERROR, MYF(0), column->column.c_ptr(), table_list->alias); - DBUG_RETURN(-1); - } - if (f == (Field*)-1) - { - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } + if (f == (Field *)-1) + DBUG_RETURN(TRUE); column_priv|= column->rights; } close_thread_tables(thd); @@ -2295,7 +2630,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, if (access(buf,F_OK)) { my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, table_list->alias); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } if (table_list->grant.want_privilege) @@ -2304,7 +2639,7 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, get_privilege_desc(command, sizeof(command), table_list->grant.want_privilege); my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), - command, thd->priv_user, thd->host_or_ip, table_list->alias); + command, thd->priv_user, thd->host_or_ip, table_list->alias); DBUG_RETURN(-1); } } @@ -2316,11 +2651,13 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, tables[0].alias=tables[0].real_name= (char*) "user"; tables[1].alias=tables[1].real_name= (char*) "tables_priv"; tables[2].alias=tables[2].real_name= (char*) "columns_priv"; - tables[0].next=tables+1; + tables[0].next_local= tables[0].next_global= tables+1; /* Don't open column table if we don't need it ! */ - tables[1].next=((column_priv || - (revoke_grant && ((rights & COL_ACLS) || columns.elements))) - ? tables+2 : 0); + tables[1].next_local= + tables[1].next_global= ((column_priv || + (revoke_grant && + ((rights & COL_ACLS) || columns.elements))) + ? tables+2 : 0); tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_WRITE; tables[0].db=tables[1].db=tables[2].db=(char*) "mysql"; @@ -2337,19 +2674,19 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, */ tables[0].updating= tables[1].updating= tables[2].updating= 1; if (!tables_ok(0, tables)) - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } #endif if (simple_open_n_lock_tables(thd,tables)) { // Should never happen close_thread_tables(thd); /* purecov: deadcode */ - DBUG_RETURN(-1); /* purecov: deadcode */ + DBUG_RETURN(TRUE); /* purecov: deadcode */ } if (!revoke_grant) create_new_users= test_if_create_new_users(thd); - int result=0; + bool result= FALSE; rw_wrlock(&LOCK_grant); MEM_ROOT *old_root= thd->mem_root; thd->mem_root= &memex; @@ -2361,8 +2698,9 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, if (Str->host.length > HOSTNAME_LENGTH || Str->user.length > USERNAME_LENGTH) { - my_error(ER_GRANT_WRONG_HOST_OR_USER,MYF(0)); - result= -1; + my_message(ER_GRANT_WRONG_HOST_OR_USER, ER(ER_GRANT_WRONG_HOST_OR_USER), + MYF(0)); + result= TRUE; continue; } /* Create user if needed */ @@ -2372,31 +2710,36 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, pthread_mutex_unlock(&acl_cache->lock); if (error) { - result= -1; // Remember error + result= TRUE; // Remember error continue; // Add next user } + db_name= (table_list->view_db.length ? + table_list->view_db.str : + table_list->db); + real_name= (table_list->view_name.length ? 
+ table_list->view_name.str : + table_list->real_name); + /* Find/create cached table grant */ - grant_table= table_hash_search(Str->host.str,NullS,table_list->db, - Str->user.str, - table_list->real_name,1); + grant_table= table_hash_search(Str->host.str, NullS, db_name, + Str->user.str, real_name, 1); if (!grant_table) { if (revoke_grant) { my_error(ER_NONEXISTING_TABLE_GRANT, MYF(0), Str->user.str, Str->host.str, table_list->real_name); - result= -1; + result= TRUE; continue; } - grant_table = new GRANT_TABLE (Str->host.str,table_list->db, - Str->user.str, - table_list->real_name, + grant_table = new GRANT_TABLE (Str->host.str, db_name, + Str->user.str, real_name, rights, column_priv); if (!grant_table) // end of memory { - result= -1; /* purecov: deadcode */ + result= TRUE; /* purecov: deadcode */ continue; /* purecov: deadcode */ } my_hash_insert(&column_priv_hash,(byte*) grant_table); @@ -2436,23 +2779,21 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, /* update table and columns */ - if (replace_table_table(thd,grant_table,tables[1].table,*Str, - table_list->db, - table_list->real_name, + if (replace_table_table(thd, grant_table, tables[1].table, *Str, + db_name, real_name, rights, column_priv, revoke_grant)) { /* Should only happen if table is crashed */ - result= -1; /* purecov: deadcode */ + result= TRUE; /* purecov: deadcode */ } else if (tables[2].table) { - if ((replace_column_table(grant_table,tables[2].table, *Str, + if ((replace_column_table(grant_table, tables[2].table, *Str, columns, - table_list->db, - table_list->real_name, + db_name, real_name, rights, revoke_grant))) { - result= -1; + result= TRUE; } } } @@ -2466,8 +2807,163 @@ int mysql_table_grant(THD *thd, TABLE_LIST *table_list, } -int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, - ulong rights, bool revoke_grant) +/* + Store procedure level grants in the privilege tables + + SYNOPSIS + mysql_procedure_grant() + thd Thread handle + table_list List of procedures to give grant + user_list List of users to give grant + rights Table level grant + revoke_grant Set to 1 if this is a REVOKE command + + RETURN + 0 ok + 1 error +*/ + +bool mysql_procedure_grant(THD *thd, TABLE_LIST *table_list, + List <LEX_USER> &user_list, ulong rights, + bool revoke_grant, bool no_error) +{ + List_iterator <LEX_USER> str_list (user_list); + LEX_USER *Str; + TABLE_LIST tables[2]; + bool create_new_users=0, result=0; + char *db_name, *real_name; + DBUG_ENTER("mysql_procedure_grant"); + + if (!initialized) + { + if (!no_error) + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), + "--skip-grant-tables"); + DBUG_RETURN(TRUE); + } + if (rights & ~PROC_ACLS) + { + if (!no_error) + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), + MYF(0)); + DBUG_RETURN(TRUE); + } + + if (!revoke_grant) + { + if (sp_exists_routine(thd, table_list, 0, no_error)<0) + DBUG_RETURN(TRUE); + } + + /* open the mysql.user and mysql.procs_priv tables */ + + bzero((char*) &tables,sizeof(tables)); + tables[0].alias=tables[0].real_name= (char*) "user"; + tables[1].alias=tables[1].real_name= (char*) "procs_priv"; + tables[0].next_local= tables[0].next_global= tables+1; + tables[0].lock_type=tables[1].lock_type=TL_WRITE; + tables[0].db=tables[1].db=(char*) "mysql"; + +#ifdef HAVE_REPLICATION + /* + GRANT and REVOKE are applied the slave in/exclusion rules as they are + some kind of updates to the mysql.% tables. 
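The table grant is then looked up in (or added to) column_priv_hash, which is keyed by "user\0db\0name" and, on lookup, prefers an exact host match over a wildcard one (see name_hash_search() earlier in this file). Below is a self-contained sketch of that lookup under simplifying assumptions: a std::multimap stands in for the server's HASH, "%" is the only wildcard pattern handled, and sort is just a specificity rank.

#include <map>
#include <string>
#include <cstdio>

struct GrantEntry
{
  std::string host;      /* host name or pattern, e.g. "%" or "localhost" */
  unsigned long privs;
  int sort;              /* higher = more specific pattern (simplified)   */
};

typedef std::multimap<std::string, GrantEntry> GrantCache;

/* Mirrors the strmov() chain: three names glued with '\0' separators. */
static std::string make_key(const std::string &user, const std::string &db,
                            const std::string &name)
{
  return user + '\0' + db + '\0' + name;
}

static const GrantEntry *cache_search(const GrantCache &cache,
                                      const std::string &user,
                                      const std::string &db,
                                      const std::string &name,
                                      const std::string &host, bool exact)
{
  const GrantEntry *found= 0;
  std::pair<GrantCache::const_iterator, GrantCache::const_iterator> range=
    cache.equal_range(make_key(user, db, name));
  for (GrantCache::const_iterator it= range.first; it != range.second; ++it)
  {
    const GrantEntry &e= it->second;
    if (exact)
    {
      if (e.host == host)
        return &e;                         /* exact host match wins at once */
    }
    else if ((e.host == "%" || e.host == host) &&
             (!found || found->sort < e.sort))
      found= &e;                           /* keep the most specific match  */
  }
  return found;
}

int main()
{
  GrantCache cache;
  GrantEntry any=   { "%",         0x1, 0 };
  GrantEntry local= { "localhost", 0x3, 1 };
  cache.insert(std::make_pair(make_key("joe", "test", "t1"), any));
  cache.insert(std::make_pair(make_key("joe", "test", "t1"), local));

  const GrantEntry *e= cache_search(cache, "joe", "test", "t1", "localhost", false);
  printf("privs: %#lx via host pattern '%s'\n",
         e ? e->privs : 0UL, e ? e->host.c_str() : "none");
  return 0;
}

The embedded '\0' separators keep "user, db, name" unambiguous even when the individual parts could otherwise run together, which is why the real code builds the key the same way.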
+ */ + if (thd->slave_thread && table_rules_on) + { + /* + The tables must be marked "updating" so that tables_ok() takes them into + account in tests. + */ + tables[0].updating= tables[1].updating= 1; + if (!tables_ok(0, tables)) + DBUG_RETURN(FALSE); + } +#endif + + if (simple_open_n_lock_tables(thd,tables)) + { // Should never happen + close_thread_tables(thd); + DBUG_RETURN(TRUE); + } + + if (!revoke_grant) + create_new_users= test_if_create_new_users(thd); + rw_wrlock(&LOCK_grant); + MEM_ROOT *old_root= thd->mem_root; + thd->mem_root= &memex; + + DBUG_PRINT("info",("now time to iterate and add users")); + + while ((Str= str_list++)) + { + int error; + GRANT_NAME *grant_name; + if (Str->host.length > HOSTNAME_LENGTH || + Str->user.length > USERNAME_LENGTH) + { + if (!no_error) + my_message(ER_GRANT_WRONG_HOST_OR_USER, ER(ER_GRANT_WRONG_HOST_OR_USER), + MYF(0)); + result= TRUE; + continue; + } + /* Create user if needed */ + pthread_mutex_lock(&acl_cache->lock); + error=replace_user_table(thd, tables[0].table, *Str, + 0, revoke_grant, create_new_users); + pthread_mutex_unlock(&acl_cache->lock); + if (error) + { + result= TRUE; // Remember error + continue; // Add next user + } + + db_name= table_list->db; + real_name= table_list->real_name; + + grant_name= proc_hash_search(Str->host.str, NullS, db_name, + Str->user.str, real_name, 1); + if (!grant_name) + { + if (revoke_grant) + { + if (!no_error) + my_error(ER_NONEXISTING_PROC_GRANT, MYF(0), + Str->user.str, Str->host.str, real_name); + result= TRUE; + continue; + } + grant_name= new GRANT_NAME(Str->host.str, db_name, + Str->user.str, real_name, + rights); + if (!grant_name) + { + result= TRUE; + continue; + } + my_hash_insert(&proc_priv_hash,(byte*) grant_name); + } + + if (replace_proc_table(thd, grant_name, tables[1].table, *Str, + db_name, real_name, rights, revoke_grant)) + { + result= TRUE; + continue; + } + } + grant_option=TRUE; + thd->mem_root= old_root; + rw_unlock(&LOCK_grant); + if (!result && !no_error) + send_ok(thd); + /* Tables are automatically closed */ + DBUG_RETURN(result); +} + + +bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, + ulong rights, bool revoke_grant) { List_iterator <LEX_USER> str_list (list); LEX_USER *Str; @@ -2479,7 +2975,7 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); /* purecov: tested */ - DBUG_RETURN(-1); /* purecov: tested */ + DBUG_RETURN(TRUE); /* purecov: tested */ } if (lower_case_table_names && db) @@ -2493,11 +2989,9 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, bzero((char*) &tables,sizeof(tables)); tables[0].alias=tables[0].real_name=(char*) "user"; tables[1].alias=tables[1].real_name=(char*) "db"; - tables[0].next=tables+1; - tables[1].next=0; + tables[0].next_local= tables[0].next_global= tables+1; tables[0].lock_type=tables[1].lock_type=TL_WRITE; tables[0].db=tables[1].db=(char*) "mysql"; - tables[0].table=tables[1].table=0; #ifdef HAVE_REPLICATION /* @@ -2512,14 +3006,14 @@ int mysql_grant(THD *thd, const char *db, List <LEX_USER> &list, */ tables[0].updating= tables[1].updating= 1; if (!tables_ok(0, tables)) - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } #endif if (simple_open_n_lock_tables(thd,tables)) { // This should never happen close_thread_tables(thd); /* purecov: deadcode */ - DBUG_RETURN(-1); /* purecov: deadcode */ + DBUG_RETURN(TRUE); /* purecov: deadcode */ } if (!revoke_grant) @@ -2536,7 +3030,8 @@ int mysql_grant(THD *thd, const char *db, 
List <LEX_USER> &list, if (Str->host.length > HOSTNAME_LENGTH || Str->user.length > USERNAME_LENGTH) { - my_error(ER_GRANT_WRONG_HOST_OR_USER,MYF(0)); + my_message(ER_GRANT_WRONG_HOST_OR_USER, ER(ER_GRANT_WRONG_HOST_OR_USER), + MYF(0)); result= -1; continue; } @@ -2579,6 +3074,7 @@ void grant_free(void) DBUG_ENTER("grant_free"); grant_option = FALSE; hash_free(&column_priv_hash); + hash_free(&proc_priv_hash); free_root(&memex,MYF(0)); DBUG_VOID_RETURN; } @@ -2589,18 +3085,21 @@ void grant_free(void) my_bool grant_init(THD *org_thd) { THD *thd; - TABLE_LIST tables[2]; + TABLE_LIST tables[3]; MYSQL_LOCK *lock; MEM_ROOT *memex_ptr; my_bool return_val= 1; - TABLE *t_table, *c_table; + TABLE *t_table, *c_table, *p_table; bool check_no_resolve= specialflag & SPECIAL_NO_RESOLVE; DBUG_ENTER("grant_init"); grant_option = FALSE; - (void) hash_init(&column_priv_hash,&my_charset_latin1, + (void) hash_init(&column_priv_hash,system_charset_info, 0,0,0, (hash_get_key) get_grant_table, (hash_free_key) free_grant_table,0); + (void) hash_init(&proc_priv_hash,system_charset_info, + 0,0,0, (hash_get_key) get_grant_table, + 0,0); init_sql_alloc(&memex, ACL_ALLOC_BLOCK_SIZE, 0); /* Don't do anything if running with --skip-grant */ @@ -2615,66 +3114,110 @@ my_bool grant_init(THD *org_thd) bzero((char*) &tables, sizeof(tables)); tables[0].alias=tables[0].real_name= (char*) "tables_priv"; tables[1].alias=tables[1].real_name= (char*) "columns_priv"; - tables[0].next=tables+1; - tables[0].lock_type=tables[1].lock_type=TL_READ; - tables[0].db=tables[1].db=thd->db; + tables[2].alias=tables[2].real_name= (char*) "procs_priv"; + tables[0].next_local= tables[0].next_global= tables+1; + tables[1].next_local= tables[1].next_global= tables+2; + tables[0].lock_type=tables[1].lock_type=tables[2].lock_type=TL_READ; + tables[0].db=tables[1].db=tables[2].db=thd->db; uint counter; if (open_tables(thd, tables, &counter)) goto end; - TABLE *ptr[2]; // Lock tables for quick update + TABLE *ptr[3]; // Lock tables for quick update ptr[0]= tables[0].table; ptr[1]= tables[1].table; - if (!(lock=mysql_lock_tables(thd,ptr,2))) + ptr[2]= tables[2].table; + if (!(lock=mysql_lock_tables(thd,ptr,3))) goto end; t_table = tables[0].table; c_table = tables[1].table; + p_table= tables[2].table; t_table->file->ha_index_init(0); - if (t_table->file->index_first(t_table->record[0])) + p_table->file->ha_index_init(0); + if (!t_table->file->index_first(t_table->record[0])) { - return_val= 0; - goto end_unlock; - } - grant_option= TRUE; + /* Will be restored by org_thd->store_globals() */ + memex_ptr= &memex; + my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr); + do + { + GRANT_TABLE *mem_check; + if (!(mem_check=new GRANT_TABLE(t_table,c_table))) + { + /* This could only happen if we are out memory */ + grant_option= FALSE; + goto end_unlock; + } - /* Will be restored by org_thd->store_globals() */ - memex_ptr= &memex; - my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr); - do + if (check_no_resolve) + { + if (hostname_requires_resolving(mem_check->host)) + { + sql_print_warning("'tables_priv' entry '%s %s@%s' " + "ignored in --skip-name-resolve mode.", + mem_check->tname, mem_check->user, + mem_check->host, mem_check->host); + continue; + } + } + + if (! 
mem_check->ok()) + delete mem_check; + else if (my_hash_insert(&column_priv_hash,(byte*) mem_check)) + { + delete mem_check; + grant_option= FALSE; + goto end_unlock; + } + } + while (!t_table->file->index_next(t_table->record[0])); + } + if (!p_table->file->index_first(p_table->record[0])) { - GRANT_TABLE *mem_check; - if (!(mem_check=new GRANT_TABLE(t_table,c_table))) + /* Will be restored by org_thd->store_globals() */ + memex_ptr= &memex; + my_pthread_setspecific_ptr(THR_MALLOC, &memex_ptr); + do { - /* This could only happen if we are out memory */ - grant_option= FALSE; /* purecov: deadcode */ - goto end_unlock; - } + GRANT_NAME *mem_check; + if (!(mem_check=new GRANT_NAME(p_table))) + { + /* This could only happen if we are out memory */ + grant_option= FALSE; + goto end_unlock; + } - if (check_no_resolve) - { - if (hostname_requires_resolving(mem_check->host)) + if (check_no_resolve) { - sql_print_warning("'tables_priv' entry '%s %s@%s' " - "ignored in --skip-name-resolve mode.", - mem_check->tname, mem_check->user, - mem_check->host, mem_check->host); - continue; + if (hostname_requires_resolving(mem_check->host)) + { + sql_print_warning("'procs_priv' entry '%s %s@%s' " + "ignored in --skip-name-resolve mode.", + mem_check->tname, mem_check->user, + mem_check->host, mem_check->host); + continue; + } } - } - if (mem_check->ok() && my_hash_insert(&column_priv_hash,(byte*) mem_check)) - { - grant_option= FALSE; - goto end_unlock; + mem_check->privs= fix_rights_for_procedure(mem_check->privs); + if (! mem_check->ok()) + delete mem_check; + else if (my_hash_insert(&proc_priv_hash,(byte*) mem_check)) + { + delete mem_check; + grant_option= FALSE; + goto end_unlock; + } } + while (!p_table->file->index_next(p_table->record[0])); } - while (!t_table->file->index_next(t_table->record[0])); - + grant_option= TRUE; return_val=0; // Return ok end_unlock: t_table->file->ha_index_end(); + p_table->file->ha_index_end(); mysql_unlock_tables(thd, lock); thd->version--; // Force close to free memory @@ -2705,7 +3248,7 @@ end: void grant_reload(THD *thd) { - HASH old_column_priv_hash; + HASH old_column_priv_hash, old_proc_priv_hash; bool old_grant_option; MEM_ROOT old_mem; DBUG_ENTER("grant_reload"); @@ -2713,6 +3256,7 @@ void grant_reload(THD *thd) rw_wrlock(&LOCK_grant); grant_version++; old_column_priv_hash= column_priv_hash; + old_proc_priv_hash= proc_priv_hash; old_grant_option= grant_option; old_mem= memex; @@ -2721,12 +3265,14 @@ void grant_reload(THD *thd) DBUG_PRINT("error",("Reverting to old privileges")); grant_free(); /* purecov: deadcode */ column_priv_hash= old_column_priv_hash; /* purecov: deadcode */ + proc_priv_hash= old_proc_priv_hash; grant_option= old_grant_option; /* purecov: deadcode */ memex= old_mem; /* purecov: deadcode */ } else { hash_free(&old_column_priv_hash); + hash_free(&old_proc_priv_hash); free_root(&old_mem,MYF(0)); } rw_unlock(&LOCK_grant); @@ -2736,7 +3282,22 @@ void grant_reload(THD *thd) /**************************************************************************** Check table level grants - All errors are written directly to the client if no_errors is given ! + + SYNOPSIS + bool check_grant() + thd Thread handler + want_access Bits of privileges user needs to have + tables List of tables to check. The user should have 'want_access' + to all tables in list. + show_table <> 0 if we are in show table. In this case it's enough to have + any privilege for the table + number Check at most this number of tables. + no_errors If 0 then we write an error. 
The error is sent directly to + the client + + RETURN + 0 ok + 1 Error: User did not have the requested privileges ****************************************************************************/ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, @@ -2745,23 +3306,28 @@ bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, TABLE_LIST *table; char *user = thd->priv_user; DBUG_ENTER("check_grant"); + DBUG_ASSERT(number > 0); - want_access &= ~thd->master_access; + want_access&= ~thd->master_access; if (!want_access) DBUG_RETURN(0); // ok rw_rdlock(&LOCK_grant); - for (table= tables; table && number--; table= table->next) + for (table= tables; table && number--; table= table->next_global) { - if (!(~table->grant.privilege & want_access) || table->derived) + GRANT_TABLE *grant_table; + if (!(~table->grant.privilege & want_access) || + table->derived || table->schema_table) { - table->grant.want_privilege=0; + /* + It is subquery in the FROM clause. VIEW set table->derived after + table opening, but this function always called before table opening. + */ + table->grant.want_privilege= 0; continue; // Already checked } - GRANT_TABLE *grant_table = table_hash_search(thd->host,thd->ip, - table->db,user, - table->real_name,0); - if (!grant_table) + if (!(grant_table= table_hash_search(thd->host,thd->ip, + table->db,user, table->real_name,0))) { want_access &= ~table->grant.privilege; goto err; // No grants @@ -2793,137 +3359,134 @@ err: { char command[128]; get_privilege_desc(command, sizeof(command), want_access); - net_printf(thd,ER_TABLEACCESS_DENIED_ERROR, - command, - thd->priv_user, - thd->host_or_ip, - table ? table->real_name : "unknown"); + my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), + command, + thd->priv_user, + thd->host_or_ip, + table ? 
table->real_name : "unknown"); } DBUG_RETURN(1); } -bool check_grant_column(THD *thd,TABLE *table, const char *name, - uint length, uint show_tables) +bool check_grant_column(THD *thd, GRANT_INFO *grant, + char *db_name, char *table_name, + const char *name, uint length, uint show_tables) { GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; + ulong want_access= grant->want_privilege & ~grant->privilege; + DBUG_ENTER("check_grant_column"); + DBUG_PRINT("enter", ("table: %s want_access: %u", table_name, want_access)); - ulong want_access=table->grant.want_privilege; if (!want_access) - return 0; // Already checked + DBUG_RETURN(0); // Already checked rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->table_cache_key, + grant->grant_table= + table_hash_search(thd->host, thd->ip, db_name, thd->priv_user, - table->real_name, 0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ } - if (!(grant_table=table->grant.grant_table)) + if (!(grant_table= grant->grant_table)) goto err; /* purecov: deadcode */ grant_column=column_hash_search(grant_table, name, length); if (grant_column && !(~grant_column->rights & want_access)) { rw_unlock(&LOCK_grant); - return 0; + DBUG_RETURN(0); } #ifdef NOT_USED - if (show_tables && (grant_column || table->grant.privilege & COL_ACLS)) + if (show_tables && (grant_column || grant->privilege & COL_ACLS)) { rw_unlock(&LOCK_grant); /* purecov: deadcode */ - return 0; /* purecov: deadcode */ + DBUG_RETURN(0); /* purecov: deadcode */ } #endif - /* We must use my_printf_error() here! */ err: rw_unlock(&LOCK_grant); if (!show_tables) { char command[128]; get_privilege_desc(command, sizeof(command), want_access); - my_printf_error(ER_COLUMNACCESS_DENIED_ERROR, - ER(ER_COLUMNACCESS_DENIED_ERROR), - MYF(0), - command, - thd->priv_user, - thd->host_or_ip, - name, - table ? 
table->real_name : "unknown"); + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + command, + thd->priv_user, + thd->host_or_ip, + name, + table_name); } - return 1; + DBUG_RETURN(1); } -bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table) +bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, + char* db_name, char *table_name, + Field_iterator *fields) { GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; - Field *field=0,**ptr; + Field *field=0; - want_access &= ~table->grant.privilege; + want_access &= ~grant->privilege; if (!want_access) return 0; // Already checked if (!grant_option) - { - field= table->field[0]; // To give a meaningful error message goto err2; - } rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->table_cache_key, + grant->grant_table= + table_hash_search(thd->host, thd->ip, db_name, thd->priv_user, - table->real_name,0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ } /* The following should always be true */ - if (!(grant_table=table->grant.grant_table)) + if (!(grant_table= grant->grant_table)) goto err; /* purecov: inspected */ - for (ptr=table->field; (field= *ptr) ; ptr++) + for (; !fields->end_of_fields(); fields->next()) { - grant_column=column_hash_search(grant_table, field->field_name, - (uint) strlen(field->field_name)); + const char *field_name= fields->name(); + grant_column= column_hash_search(grant_table, field_name, + (uint) strlen(field_name)); if (!grant_column || (~grant_column->rights & want_access)) goto err; } rw_unlock(&LOCK_grant); return 0; - /* We must use my_printf_error() here! */ err: rw_unlock(&LOCK_grant); err2: char command[128]; get_privilege_desc(command, sizeof(command), want_access); - my_printf_error(ER_COLUMNACCESS_DENIED_ERROR, - ER(ER_COLUMNACCESS_DENIED_ERROR), - MYF(0), - command, - thd->priv_user, - thd->host_or_ip, - field ? field->field_name : "unknown", - table->real_name); + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + command, + thd->priv_user, + thd->host_or_ip, + fields->name(), + table_name); return 1; } /* Check if a user has the right to access a database - Access is accepted if he has a grant for any table in the database + Access is accepted if he has a grant for any table/routine in the database Return 1 if access is denied */ @@ -2942,9 +3505,9 @@ bool check_grant_db(THD *thd,const char *db) idx); if (len < grant_table->key_length && !memcmp(grant_table->hash_key,helping,len) && - (thd->host && !wild_case_compare(&my_charset_latin1, + (thd->host && !wild_case_compare(system_charset_info, thd->host,grant_table->host) || - (thd->ip && !wild_case_compare(&my_charset_latin1, + (thd->ip && !wild_case_compare(system_charset_info, thd->ip,grant_table->host)))) { error=0; // Found match @@ -2955,6 +3518,72 @@ bool check_grant_db(THD *thd,const char *db) return error; } + +/**************************************************************************** + Check procedure level grants + + SYNPOSIS + bool check_grant_procedure() + thd Thread handler + want_access Bits of privileges user needs to have + procs List of procedures to check. The user should have 'want_access' + no_errors If 0 then we write an error. 
The error is sent directly to + the client + + RETURN + 0 ok + 1 Error: User did not have the requested privielges +****************************************************************************/ + +bool check_grant_procedure(THD *thd, ulong want_access, + TABLE_LIST *procs, bool no_errors) +{ + TABLE_LIST *table; + char *user= thd->priv_user; + char *host= thd->priv_host; + DBUG_ENTER("check_grant_procedure"); + + want_access&= ~thd->master_access; + if (!want_access) + DBUG_RETURN(0); // ok + + rw_rdlock(&LOCK_grant); + for (table= procs; table; table= table->next_global) + { + GRANT_NAME *grant_proc; + if ((grant_proc= proc_hash_search(host,thd->ip, + table->db, user, table->real_name, 0))) + table->grant.privilege|= grant_proc->privs; + + if (want_access & ~table->grant.privilege) + { + want_access &= ~table->grant.privilege; + goto err; + } + } + rw_unlock(&LOCK_grant); + DBUG_RETURN(0); +err: + rw_unlock(&LOCK_grant); + if (!no_errors) + { + char buff[1024]; + const char *command=""; + if (table) + strxmov(buff, table->db, ".", table->real_name, NullS); + if (want_access & EXECUTE_ACL) + command= "execute"; + else if (want_access & ALTER_PROC_ACL) + command= "alter procedure"; + else if (want_access & GRANT_ACL) + command= "grant"; + my_error(ER_PROCACCESS_DENIED_ERROR, MYF(0), + command, user, host, table ? buff : "unknown"); + } + DBUG_RETURN(1); +} + + /***************************************************************************** Functions to retrieve the grant for a table/column (for SHOW functions) *****************************************************************************/ @@ -2983,7 +3612,9 @@ ulong get_table_grant(THD *thd, TABLE_LIST *table) } -ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field) +ulong get_column_grant(THD *thd, GRANT_INFO *grant, + const char *db_name, const char *table_name, + const char *field_name) { GRANT_TABLE *grant_table; GRANT_COLUMN *grant_column; @@ -2991,30 +3622,31 @@ ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field) rw_rdlock(&LOCK_grant); /* reload table if someone has modified any grants */ - if (table->grant.version != grant_version) + if (grant->version != grant_version) { - table->grant.grant_table= - table_hash_search(thd->host, thd->ip, table->db, + grant->grant_table= + table_hash_search(thd->host, thd->ip, db_name, thd->priv_user, - table->real_name,0); /* purecov: inspected */ - table->grant.version=grant_version; /* purecov: inspected */ + table_name, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ } - if (!(grant_table=table->grant.grant_table)) - priv=table->grant.privilege; + if (!(grant_table= grant->grant_table)) + priv= grant->privilege; else { - grant_column=column_hash_search(grant_table, field->field_name, - (uint) strlen(field->field_name)); + grant_column= column_hash_search(grant_table, field_name, + (uint) strlen(field_name)); if (!grant_column) - priv=table->grant.privilege; + priv= grant->privilege; else - priv=table->grant.privilege | grant_column->rights; + priv= grant->privilege | grant_column->rights; } rw_unlock(&LOCK_grant); return priv; } + /* Help function for mysql_show_grants */ static void add_user_option(String *grant, ulong value, const char *name) @@ -3032,15 +3664,16 @@ static void add_user_option(String *grant, ulong value, const char *name) static const char *command_array[]= { - "SELECT", "INSERT","UPDATE","DELETE","CREATE", "DROP", "RELOAD","SHUTDOWN", - "PROCESS","FILE","GRANT","REFERENCES","INDEX", "ALTER", "SHOW DATABASES", - 
"SUPER", "CREATE TEMPORARY TABLES", "LOCK TABLES", "EXECUTE", - "REPLICATION SLAVE", "REPLICATION CLIENT", + "SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "RELOAD", + "SHUTDOWN", "PROCESS","FILE", "GRANT", "REFERENCES", "INDEX", + "ALTER", "SHOW DATABASES", "SUPER", "CREATE TEMPORARY TABLES", + "LOCK TABLES", "EXECUTE", "REPLICATION SLAVE", "REPLICATION CLIENT", + "CREATE VIEW", "SHOW VIEW", "CREATE ROUTINE", "ALTER ROUTINE", }; static uint command_lengths[]= { - 6,6,6,6,6,4,6,8,7,4,5,10,5,5,14,5,23,11,7,17,18 + 6, 6, 6, 6, 6, 4, 6, 8, 7, 4, 5, 10, 5, 5, 14, 5, 23, 11, 7, 17, 18, 11, 9, 14, 13 }; @@ -3051,7 +3684,7 @@ static uint command_lengths[]= Send to client grant-like strings depicting user@host privileges */ -int mysql_show_grants(THD *thd,LEX_USER *lex_user) +bool mysql_show_grants(THD *thd,LEX_USER *lex_user) { ulong want_access; uint counter,index; @@ -3066,7 +3699,7 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (!initialized) { my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } if (!lex_user->host.str) @@ -3077,8 +3710,9 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (lex_user->host.length > HOSTNAME_LENGTH || lex_user->user.length > USERNAME_LENGTH) { - my_error(ER_GRANT_WRONG_HOST_OR_USER,MYF(0)); - DBUG_RETURN(-1); + my_message(ER_GRANT_WRONG_HOST_OR_USER, ER(ER_GRANT_WRONG_HOST_OR_USER), + MYF(0)); + DBUG_RETURN(TRUE); } for (counter=0 ; counter < acl_users.elements ; counter++) @@ -3090,14 +3724,14 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (!(host=acl_user->host.hostname)) host= ""; if (!strcmp(lex_user->user.str,user) && - !my_strcasecmp(&my_charset_latin1, lex_user->host.str, host)) + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) break; } if (counter == acl_users.elements) { my_error(ER_NONEXISTING_GRANT, MYF(0), lex_user->user.str, lex_user->host.str); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } Item_string *field=new Item_string("",0,&my_charset_latin1); @@ -3107,8 +3741,9 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) strxmov(buff,"Grants for ",lex_user->user.str,"@", lex_user->host.str,NullS); field_list.push_back(field); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); rw_wrlock(&LOCK_grant); VOID(pthread_mutex_lock(&acl_cache->lock)); @@ -3143,7 +3778,8 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) global.append(lex_user->user.str, lex_user->user.length, system_charset_info); global.append ("'@'",3); - global.append(lex_user->host.str,lex_user->host.length); + global.append(lex_user->host.str,lex_user->host.length, + system_charset_info); global.append ('\''); if (acl_user->salt_len) { @@ -3177,7 +3813,8 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (ssl_options++) global.append(' '); global.append("SUBJECT \'",9); - global.append(acl_user->x509_subject,strlen(acl_user->x509_subject)); + global.append(acl_user->x509_subject,strlen(acl_user->x509_subject), + system_charset_info); global.append('\''); } if (acl_user->ssl_cipher) @@ -3185,13 +3822,16 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) if (ssl_options++) global.append(' '); global.append("CIPHER '",8); - global.append(acl_user->ssl_cipher,strlen(acl_user->ssl_cipher)); + global.append(acl_user->ssl_cipher,strlen(acl_user->ssl_cipher), + system_charset_info); global.append('\''); } } if ((want_access & GRANT_ACL) || - 
(acl_user->user_resource.questions | acl_user->user_resource.updates | - acl_user->user_resource.connections)) + (acl_user->user_resource.questions || + acl_user->user_resource.updates || + acl_user->user_resource.conn_per_hour || + acl_user->user_resource.user_conn)) { global.append(" WITH",5); if (want_access & GRANT_ACL) @@ -3200,8 +3840,10 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) "MAX_QUERIES_PER_HOUR"); add_user_option(&global, acl_user->user_resource.updates, "MAX_UPDATES_PER_HOUR"); - add_user_option(&global, acl_user->user_resource.connections, + add_user_option(&global, acl_user->user_resource.conn_per_hour, "MAX_CONNECTIONS_PER_HOUR"); + add_user_option(&global, acl_user->user_resource.user_conn, + "MAX_USER_CONNECTIONS"); } protocol->prepare_for_resend(); protocol->store(global.ptr(),global.length(),global.charset()); @@ -3224,7 +3866,7 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) host= ""; if (!strcmp(lex_user->user.str,user) && - !my_strcasecmp(&my_charset_latin1, lex_user->host.str, host)) + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) { want_access=acl_db->access; if (want_access) @@ -3258,7 +3900,8 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) db.append(lex_user->user.str, lex_user->user.length, system_charset_info); db.append ("'@'",3); - db.append(lex_user->host.str, lex_user->host.length); + db.append(lex_user->host.str, lex_user->host.length, + system_charset_info); db.append ('\''); if (want_access & GRANT_ACL) db.append(" WITH GRANT OPTION",18); @@ -3284,7 +3927,7 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) user= ""; if (!strcmp(lex_user->user.str,user) && - !my_strcasecmp(&my_charset_latin1, lex_user->host.str, + !my_strcasecmp(system_charset_info, lex_user->host.str, grant_table->orig_host)) { ulong table_access= grant_table->privs; @@ -3364,7 +4007,8 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) global.append(lex_user->user.str, lex_user->user.length, system_charset_info); global.append("'@'",3); - global.append(lex_user->host.str,lex_user->host.length); + global.append(lex_user->host.str,lex_user->host.length, + system_charset_info); global.append('\''); if (table_access & GRANT_ACL) global.append(" WITH GRANT OPTION",18); @@ -3378,6 +4022,74 @@ int mysql_show_grants(THD *thd,LEX_USER *lex_user) } } } + + /* Add procedure access */ + for (index=0 ; index < proc_priv_hash.records ; index++) + { + const char *user; + GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(&proc_priv_hash, + index); + + if (!(user=grant_proc->user)) + user= ""; + + if (!strcmp(lex_user->user.str,user) && + !my_strcasecmp(system_charset_info, lex_user->host.str, + grant_proc->orig_host)) + { + ulong proc_access= grant_proc->privs; + if (proc_access != 0) + { + String global(buff, sizeof(buff), system_charset_info); + ulong test_access= proc_access & ~GRANT_ACL; + + global.length(0); + global.append("GRANT ",6); + + if (!test_access) + global.append("USAGE",5); + else + { + /* Add specific procedure access */ + int found= 0; + ulong j; + + for (counter= 0, j= SELECT_ACL; j <= PROC_ACLS; counter++, j<<= 1) + { + if (test_access & j) + { + if (found) + global.append(", ",2); + found= 1; + global.append(command_array[counter],command_lengths[counter]); + } + } + } + global.append(" ON ",4); + append_identifier(thd, &global, grant_proc->db, + strlen(grant_proc->db)); + global.append('.'); + append_identifier(thd, &global, grant_proc->tname, + strlen(grant_proc->tname)); + global.append(" TO '",5); + 
global.append(lex_user->user.str, lex_user->user.length, + system_charset_info); + global.append("'@'",3); + global.append(lex_user->host.str,lex_user->host.length, + system_charset_info); + global.append('\''); + if (proc_access & GRANT_ACL) + global.append(" WITH GRANT OPTION",18); + protocol->prepare_for_resend(); + protocol->store(global.ptr(),global.length(),global.charset()); + if (protocol->write()) + { + error= -1; + break; + } + } + } + } end: VOID(pthread_mutex_unlock(&acl_cache->lock)); rw_unlock(&LOCK_grant); @@ -3424,28 +4136,53 @@ void get_mqh(const char *user, const char *host, USER_CONN *uc) bzero((char*) &uc->user_resources, sizeof(uc->user_resources)); } +/* + Open the grant tables. + + SYNOPSIS + open_grant_tables() + thd The current thread. + tables (out) The 4 elements array for the opened tables. + + DESCRIPTION + Tables are numbered as follows: + 0 user + 1 db + 2 tables_priv + 3 columns_priv + + RETURN + 1 Skip GRANT handling during replication. + 0 OK. + < 0 Error. +*/ + +#define GRANT_TABLES 5 int open_grant_tables(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("open_grant_tables"); if (!initialized) { - net_printf(thd,ER_OPTION_PREVENTS_STATEMENT, "--skip-grant-tables"); + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); DBUG_RETURN(-1); } - bzero((char*) tables, 4*sizeof(*tables)); + bzero((char*) tables, GRANT_TABLES*sizeof(*tables)); tables->alias= tables->real_name= (char*) "user"; (tables+1)->alias= (tables+1)->real_name= (char*) "db"; (tables+2)->alias= (tables+2)->real_name= (char*) "tables_priv"; (tables+3)->alias= (tables+3)->real_name= (char*) "columns_priv"; - tables->next= tables+1; - (tables+1)->next= tables+2; - (tables+2)->next= tables+3; - (tables+3)->next= 0; + (tables+4)->alias= (tables+4)->real_name= (char*) "procs_priv"; + tables->next_local= tables->next_global= tables+1; + (tables+1)->next_local= (tables+1)->next_global= tables+2; + (tables+2)->next_local= (tables+2)->next_global= tables+3; + (tables+3)->next_local= (tables+3)->next_global= tables+4; tables->lock_type= (tables+1)->lock_type= - (tables+2)->lock_type= (tables+3)->lock_type= TL_WRITE; - tables->db= (tables+1)->db= (tables+2)->db= (tables+3)->db=(char*) "mysql"; + (tables+2)->lock_type= (tables+3)->lock_type= + (tables+4)->lock_type= TL_WRITE; + tables->db= (tables+1)->db= (tables+2)->db= + (tables+3)->db= (tables+4)->db= (char*) "mysql"; #ifdef HAVE_REPLICATION /* @@ -3458,10 +4195,12 @@ int open_grant_tables(THD *thd, TABLE_LIST *tables) The tables must be marked "updating" so that tables_ok() takes them into account in tests. */ - tables[0].updating=tables[1].updating=tables[2].updating=tables[3].updating=1; + tables[0].updating=tables[1].updating=tables[2].updating= + tables[3].updating=tables[4].updating=1; if (!tables_ok(0, tables)) DBUG_RETURN(1); - tables[0].updating=tables[1].updating=tables[2].updating=tables[3].updating=0; + tables[0].updating=tables[1].updating=tables[2].updating= + tables[3].updating=tables[4].updating=0;; } #endif @@ -3500,111 +4239,667 @@ ACL_USER *check_acl_user(LEX_USER *user_name, } -int mysql_drop_user(THD *thd, List <LEX_USER> &list) +/* + Modify a privilege table. + + SYNOPSIS + modify_grant_table() + table The table to modify. + host_field The host name field. + user_field The user name field. + user_to The new name for the user if to be renamed, + NULL otherwise. + + DESCRIPTION + Update user/host in the current record if user_to is not NULL. + Delete the current record if user_to is NULL. + + RETURN + 0 OK. + != 0 Error. 
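mysql_show_grants() builds each GRANT line by walking command_array in bit order and appending the name of every privilege bit that is set, falling back to USAGE when none are. A compact sketch of that loop with a shortened, purely illustrative command list:

#include <string>
#include <cstdio>

/* Shortened, illustrative command list; command_array above is the real one. */
static const char *commands[]=
{ "SELECT", "INSERT", "UPDATE", "DELETE", "CREATE", "DROP", "EXECUTE" };

static std::string privileges_to_string(unsigned long access)
{
  std::string out;
  unsigned long bit= 1;
  for (unsigned i= 0; i < sizeof(commands)/sizeof(commands[0]); i++, bit<<= 1)
  {
    if (access & bit)
    {
      if (!out.empty())
        out+= ", ";
      out+= commands[i];            /* bit i set -> append its command name */
    }
  }
  return out.empty() ? std::string("USAGE") : out;  /* no bits => GRANT USAGE */
}

int main()
{
  /* SELECT (bit 0) plus EXECUTE (bit 6) in this toy numbering. */
  printf("GRANT %s ON db.proc TO 'joe'@'localhost'\n",
         privileges_to_string((1UL << 0) | (1UL << 6)).c_str());
  return 0;
}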
+*/ + +static int modify_grant_table(TABLE *table, Field *host_field, + Field *user_field, LEX_USER *user_to) { - uint counter, acl_userd; - int result; - ACL_USER *acl_user; - ACL_DB *acl_db; - TABLE_LIST tables[4]; + int error; + DBUG_ENTER("modify_grant_table"); - DBUG_ENTER("mysql_drop_user"); + if (user_to) + { + /* rename */ + store_record(table, record[1]); + host_field->store(user_to->host.str, user_to->host.length, + system_charset_info); + user_field->store(user_to->user.str, user_to->user.length, + system_charset_info); + if ((error= table->file->update_row(table->record[1], table->record[0]))) + table->file->print_error(error, MYF(0)); + } + else + { + /* delete */ + if ((error=table->file->delete_row(table->record[0]))) + table->file->print_error(error, MYF(0)); + } - if ((result= open_grant_tables(thd, tables))) - DBUG_RETURN(result == 1 ? 0 : 1); + DBUG_RETURN(error); +} - rw_wrlock(&LOCK_grant); - VOID(pthread_mutex_lock(&acl_cache->lock)); - LEX_USER *user_name; - List_iterator <LEX_USER> user_list(list); - while ((user_name=user_list++)) +/* + Handle a privilege table. + + SYNOPSIS + handle_grant_table() + tables The array with the four open tables. + table_no The number of the table to handle (0..4). + drop If user_from is to be dropped. + user_from The the user to be searched/dropped/renamed. + user_to The new name for the user if to be renamed, + NULL otherwise. + + DESCRIPTION + Scan through all records in a grant table and apply the requested + operation. For the "user" table, a single index access is sufficient, + since there is an unique index on (host, user). + Delete from grant table if drop is true. + Update in grant table if drop is false and user_to is not NULL. + Search in grant table if drop is false and user_to is NULL. + Tables are numbered as follows: + 0 user + 1 db + 2 tables_priv + 3 columns_priv + 4 procs_priv + + RETURN + > 0 At least one record matched. + 0 OK, but no record matched. + < 0 Error. +*/ + +static int handle_grant_table(TABLE_LIST *tables, uint table_no, bool drop, + LEX_USER *user_from, LEX_USER *user_to) +{ + int result= 0; + int error; + TABLE *table= tables[table_no].table; + Field *host_field= table->field[0]; + Field *user_field= table->field[table_no ? 2 : 1]; + char *host_str= user_from->host.str; + char *user_str= user_from->user.str; + const char *host; + const char *user; + byte user_key[MAX_KEY_LENGTH]; + uint key_prefix_length; + DBUG_ENTER("handle_grant_table"); + + if (! table_no) // mysql.user table { - if (!(acl_user= check_acl_user(user_name, &counter))) + /* + The 'user' table has an unique index on (host, user). + Thus, we can handle everything with a single index access. + The host- and user fields are consecutive in the user table records. + So we set host- and user fields of table->record[0] and use the + pointer to the host field as key. + index_read_idx() will replace table->record[0] (its first argument) + by the searched record, if it exists. 
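modify_grant_table() is the single row-level helper behind both RENAME USER and DROP USER: it rewrites host/user in place when a rename target is given and deletes the row otherwise. The same dispatch in miniature, using a plain vector instead of a storage engine:

#include <vector>
#include <string>
#include <cstdio>

struct Row { std::string host, user; };

/* user_to == NULL: delete the row; otherwise rewrite host/user in place. */
static void modify_row(std::vector<Row> &table, size_t idx, const Row *user_to)
{
  if (user_to)
  {
    table[idx].host= user_to->host;
    table[idx].user= user_to->user;
  }
  else
    table.erase(table.begin() + idx);
}

int main()
{
  std::vector<Row> user_table;
  Row joe= { "localhost", "joe" };
  user_table.push_back(joe);

  Row renamed= { "%", "joe2" };
  modify_row(user_table, 0, &renamed);   /* RENAME USER 'joe'@'localhost' TO 'joe2'@'%' */
  printf("%s@%s\n", user_table[0].user.c_str(), user_table[0].host.c_str());

  modify_row(user_table, 0, 0);          /* DROP USER 'joe2'@'%' */
  printf("rows left: %d\n", (int) user_table.size());
  return 0;
}

Folding delete and rename into one helper lets handle_grant_table() drive every privilege table with the same search loop and decide per call which operation applies.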
+ */ + DBUG_PRINT("info",("read table: '%s' search: '%s'@'%s'", + table->real_name, user_str, host_str)); + host_field->store(host_str, user_from->host.length, system_charset_info); + user_field->store(user_str, user_from->user.length, system_charset_info); + + key_prefix_length= (table->key_info->key_part[0].store_length + + table->key_info->key_part[1].store_length); + key_copy(user_key, table->record[0], table->key_info, key_prefix_length); + + if ((error= table->file->index_read_idx(table->record[0], 0, + user_key, key_prefix_length, + HA_READ_KEY_EXACT))) { - sql_print_error("DROP USER: Can't drop user: '%s'@'%s'; No such user", - user_name->user.str, - user_name->host.str); - result= -1; - continue; + if (error != HA_ERR_KEY_NOT_FOUND) + { + table->file->print_error(error, MYF(0)); + result= -1; + } } - if ((acl_user->access & ~0)) + else + { + /* If requested, delete or update the record. */ + result= ((drop || user_to) && + modify_grant_table(table, host_field, user_field, user_to)) ? + -1 : 1; /* Error or found. */ + } + DBUG_PRINT("info",("read result: %d", result)); + } + else + { + /* + The non-'user' table do not have indexes on (host, user). + And their host- and user fields are not consecutive. + Thus, we need to do a table scan to find all matching records. + */ + if ((error= table->file->ha_rnd_init(1))) { - sql_print_error("DROP USER: Can't drop user: '%s'@'%s'; Global privileges exists", - user_name->user.str, - user_name->host.str); + table->file->print_error(error, MYF(0)); result= -1; - continue; } - acl_userd= counter; + else + { +#ifdef EXTRA_DEBUG + DBUG_PRINT("info",("scan table: '%s' search: '%s'@'%s'", + table->real_name, user_str, host_str)); +#endif + while ((error= table->file->rnd_next(table->record[0])) != + HA_ERR_END_OF_FILE) + { + if (error) + { + /* Most probable 'deleted record'. */ + DBUG_PRINT("info",("scan error: %d", error)); + continue; + } + if (! (host= get_field(&mem, host_field))) + host= ""; + if (! (user= get_field(&mem, user_field))) + user= ""; + +#ifdef EXTRA_DEBUG + DBUG_PRINT("loop",("scan fields: '%s'@'%s' '%s' '%s' '%s'", + user, host, + get_field(&mem, table->field[1]) /*db*/, + get_field(&mem, table->field[3]) /*table*/, + get_field(&mem, table->field[4]) /*column*/)); +#endif + if (strcmp(user_str, user) || + my_strcasecmp(system_charset_info, host_str, host)) + continue; + + /* If requested, delete or update the record. */ + result= ((drop || user_to) && + modify_grant_table(table, host_field, user_field, user_to)) ? + -1 : result ? result : 1; /* Error or keep result or found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + break ; + } + (void) table->file->ha_rnd_end(); + DBUG_PRINT("info",("scan result: %d", result)); + } + } - for (counter= 0 ; counter < acl_dbs.elements ; counter++) + DBUG_RETURN(result); +} + + +/* + Handle an in-memory privilege structure. + + SYNOPSIS + handle_grant_struct() + struct_no The number of the structure to handle (0..2). + drop If user_from is to be dropped. + user_from The the user to be searched/dropped/renamed. + user_to The new name for the user if to be renamed, + NULL otherwise. + + DESCRIPTION + Scan through all elements in an in-memory grant structure and apply + the requested operation. + Delete from grant structure if drop is true. + Update in grant structure if drop is false and user_to is not NULL. + Search in grant structure if drop is false and user_to is NULL. 
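For the tables without a (host, user) index, handle_grant_table() falls back to a full scan and keeps only rows whose user matches exactly and whose host matches case-insensitively. A small sketch of that matching rule; host_equal() here merely stands in for my_strcasecmp(), which the real code uses:

#include <cstring>
#include <cctype>
#include <cstdio>

/* Case-insensitive comparison, standing in for my_strcasecmp(). */
static bool host_equal(const char *a, const char *b)
{
  for (; *a && *b; a++, b++)
    if (tolower((unsigned char) *a) != tolower((unsigned char) *b))
      return false;
  return *a == *b;
}

struct PrivRow { const char *host; const char *user; };

static int count_matches(const PrivRow *rows, int n,
                         const char *user, const char *host)
{
  int matches= 0;
  for (int i= 0; i < n; i++)
    if (strcmp(user, rows[i].user) == 0 &&   /* user: exact match      */
        host_equal(host, rows[i].host))      /* host: case-insensitive */
      matches++;
  return matches;
}

int main()
{
  PrivRow rows[]= { {"LocalHost", "joe"}, {"%", "joe"}, {"localhost", "Joe"} };
  /* Only the first row matches 'joe'@'localhost'. */
  printf("matching rows: %d\n", count_matches(rows, 3, "joe", "localhost"));
  return 0;
}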
+ Structures are numbered as follows: + 0 acl_users + 1 acl_dbs + 2 column_priv_hash + 3 procs_priv_hash + + RETURN + > 0 At least one element matched. + 0 OK, but no element matched. +*/ + +static int handle_grant_struct(uint struct_no, bool drop, + LEX_USER *user_from, LEX_USER *user_to) +{ + int result= 0; + uint idx; + uint elements; + const char *user; + const char *host; + ACL_USER *acl_user; + ACL_DB *acl_db; + GRANT_NAME *grant_name; + DBUG_ENTER("handle_grant_struct"); + LINT_INIT(acl_user); + LINT_INIT(acl_db); + LINT_INIT(grant_name); + DBUG_PRINT("info",("scan struct: %u search: '%s'@'%s'", + struct_no, user_from->user.str, user_from->host.str)); + + /* Get the number of elements in the in-memory structure. */ + switch (struct_no) + { + case 0: + elements= acl_users.elements; + break; + case 1: + elements= acl_dbs.elements; + break; + case 2: + elements= column_priv_hash.records; + break; + case 3: + elements= proc_priv_hash.records; + break; + default: + DBUG_ASSERT((struct_no < 0) || (struct_no > 3)); + return -1; + } + +#ifdef EXTRA_DEBUG + DBUG_PRINT("loop",("scan struct: %u search user: '%s' host: '%s'", + struct_no, user_from->user.str, user_from->host.str)); +#endif + /* Loop over all elements. */ + for (idx= 0; idx < elements; idx++) + { + /* + Get a pointer to the element. + Unfortunaltely, the host default differs for the structures. + */ + switch (struct_no) + { + case 0: + acl_user= dynamic_element(&acl_users, idx, ACL_USER*); + user= acl_user->user; + if (!(host= acl_user->host.hostname)) + host= "%"; + break; + + case 1: + acl_db= dynamic_element(&acl_dbs, idx, ACL_DB*); + user= acl_db->user; + host= acl_db->host.hostname; + break; + + case 2: + grant_name= (GRANT_NAME*) hash_element(&column_priv_hash, idx); + user= grant_name->user; + host= grant_name->host; + break; + + case 3: + grant_name= (GRANT_NAME*) hash_element(&proc_priv_hash, idx); + user= grant_name->user; + host= grant_name->host; + break; + } + if (! user) + user= ""; + if (! host) + host= ""; +#ifdef EXTRA_DEBUG + DBUG_PRINT("loop",("scan struct: %u index: %u user: '%s' host: '%s'", + struct_no, idx, user, host)); +#endif + if (strcmp(user_from->user.str, user) || + my_strcasecmp(system_charset_info, user_from->host.str, host)) + continue; + + result= 1; /* At least one element found. 
*/ + if ( drop ) { - const char *user,*host; - acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); - if (!(user= acl_db->user)) - user= ""; - if (!(host= acl_db->host.hostname)) - host= ""; + switch ( struct_no ) + { + case 0: + delete_dynamic_element(&acl_users, idx); + break; - if (!strcmp(user_name->user.str,user) && - !my_strcasecmp(system_charset_info, user_name->host.str, host)) + case 1: + delete_dynamic_element(&acl_dbs, idx); + break; + + case 2: + hash_delete(&column_priv_hash, (byte*) grant_name); + break; + + case 3: + hash_delete(&proc_priv_hash, (byte*) grant_name); break; + } + elements--; + idx--; } - if (counter != acl_dbs.elements) + else if ( user_to ) { - sql_print_error("DROP USER: Can't drop user: '%s'@'%s'; Database privileges exists", - user_name->user.str, - user_name->host.str); - result= -1; - continue; + switch ( struct_no ) + { + case 0: + acl_user->user= strdup_root(&mem, user_to->user.str); + acl_user->host.hostname= strdup_root(&mem, user_to->host.str); + break; + + case 1: + acl_db->user= strdup_root(&mem, user_to->user.str); + acl_db->host.hostname= strdup_root(&mem, user_to->host.str); + break; + + case 2: + case 3: + grant_name->user= strdup_root(&mem, user_to->user.str); + grant_name->host= strdup_root(&mem, user_to->host.str); + break; + } + } + else + { + /* If search is requested, we do not need to search further. */ + break; + } + } +#ifdef EXTRA_DEBUG + DBUG_PRINT("loop",("scan struct: %u result %d", struct_no, result)); +#endif + + DBUG_RETURN(result); +} + + +/* + Handle all privilege tables and in-memory privilege structures. + + SYNOPSIS + handle_grant_data() + tables The array with the four open tables. + drop If user_from is to be dropped. + user_from The the user to be searched/dropped/renamed. + user_to The new name for the user if to be renamed, + NULL otherwise. + + DESCRIPTION + Go through all grant tables and in-memory grant structures and apply + the requested operation. + Delete from grant data if drop is true. + Update in grant data if drop is false and user_to is not NULL. + Search in grant data if drop is false and user_to is NULL. + + RETURN + > 0 At least one element matched. + 0 OK, but no element matched. + < 0 Error. +*/ + +static int handle_grant_data(TABLE_LIST *tables, bool drop, + LEX_USER *user_from, LEX_USER *user_to) +{ + int result= 0; + int found; + DBUG_ENTER("handle_grant_data"); + + /* Handle user table. */ + if ((found= handle_grant_table(tables, 0, drop, user_from, user_to)) < 0) + { + /* Handle of table failed, don't touch the in-memory array. */ + result= -1; + } + else + { + /* Handle user array. */ + if ((handle_grant_struct(0, drop, user_from, user_to) && ! result) || + found) + { + result= 1; /* At least one record/element found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; } + } - for (counter= 0 ; counter < column_priv_hash.records ; counter++) + /* Handle db table. */ + if ((found= handle_grant_table(tables, 1, drop, user_from, user_to)) < 0) + { + /* Handle of table failed, don't touch the in-memory array. */ + result= -1; + } + else + { + /* Handle db array. */ + if (((handle_grant_struct(1, drop, user_from, user_to) && ! result) || + found) && ! result) { - const char *user,*host; - GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, - counter); - if (!(user=grant_table->user)) - user= ""; - if (!(host=grant_table->host)) - host= ""; + result= 1; /* At least one record/element found. 
*/ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; + } + } - if (!strcmp(user_name->user.str,user) && - !my_strcasecmp(system_charset_info, user_name->host.str, host)) - break; + /* Handle procedures table. */ + if ((found= handle_grant_table(tables, 4, drop, user_from, user_to)) < 0) + { + /* Handle of table failed, don't touch in-memory array. */ + result= -1; + } + else + { + /* Handle procs array. */ + if (((handle_grant_struct(3, drop, user_from, user_to) && ! result) || + found) && ! result) + { + result= 1; /* At least one record/element found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; } - if (counter != column_priv_hash.records) + } + + /* Handle tables table. */ + if ((found= handle_grant_table(tables, 2, drop, user_from, user_to)) < 0) + { + /* Handle of table failed, don't touch columns and in-memory array. */ + result= -1; + } + else + { + if (found && ! result) + { + result= 1; /* At least one record found. */ + /* If search is requested, we do not need to search further. */ + if (! drop && ! user_to) + goto end; + } + + /* Handle columns table. */ + if ((found= handle_grant_table(tables, 3, drop, user_from, user_to)) < 0) { - sql_print_error("DROP USER: Can't drop user: '%s'@'%s'; Table privileges exists", - user_name->user.str, - user_name->host.str); + /* Handle of table failed, don't touch the in-memory array. */ result= -1; + } + else + { + /* Handle columns hash. */ + if (((handle_grant_struct(2, drop, user_from, user_to) && ! result) || + found) && ! result) + result= 1; /* At least one record/element found. */ + } + } + end: + DBUG_RETURN(result); +} + + +static void append_user(String *str, LEX_USER *user) +{ + if (str->length()) + str->append(','); + str->append('\''); + str->append(user->user.str); + str->append("'@'"); + str->append(user->host.str); + str->append('\''); +} + + +/* + Create a list of users. + + SYNOPSIS + mysql_create_user() + thd The current thread. + list The users to create. + + RETURN + FALSE OK. + TRUE Error. +*/ + +bool mysql_create_user(THD *thd, List <LEX_USER> &list) +{ + int result; + int found; + String wrong_users; + ulong sql_mode; + LEX_USER *user_name; + List_iterator <LEX_USER> user_list(list); + TABLE_LIST tables[GRANT_TABLES]; + DBUG_ENTER("mysql_create_user"); + + /* CREATE USER may be skipped on replication client. */ + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result != 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + while ((user_name= user_list++)) + { + /* + Search all in-memory structures and grant tables + for a mention of the new user name. 
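append_user() above accumulates the comma-separated list of offending accounts that is later passed to ER_CANNOT_USER. A rough standalone equivalent, with std::string standing in for the server's String class and an invented name (append_account):

#include <iostream>
#include <string>

/* Append 'user'@'host' to a comma-separated list, mirroring append_user(). */
static void append_account(std::string &out,
                           const std::string &user, const std::string &host)
{
  if (!out.empty())
    out+= ',';
  out+= '\'';
  out+= user;
  out+= "'@'";
  out+= host;
  out+= '\'';
}

int main()
{
  std::string wrong_users;
  append_account(wrong_users, "joe", "%");
  append_account(wrong_users, "ann", "localhost");
  std::cout << wrong_users << '\n';     /* 'joe'@'%','ann'@'localhost' */
}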
+ */ + if ((found= handle_grant_data(tables, 0, user_name, NULL))) + { + append_user(&wrong_users, user_name); + result= TRUE; continue; } - tables[0].table->field[0]->store(user_name->host.str,(uint) - user_name->host.length, - system_charset_info); - tables[0].table->field[1]->store(user_name->user.str,(uint) - user_name->user.length, - system_charset_info); - tables[0].table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (!tables[0].table->file->index_read_idx(tables[0].table->record[0],0, - (byte*) tables[0].table-> - field[0]->ptr, - tables[0].table-> - key_info[0].key_length, - HA_READ_KEY_EXACT)) - { - int error; - if ((error = tables[0].table->file->delete_row(tables[0].table-> - record[0]))) - { - tables[0].table->file->print_error(error, MYF(0)); - DBUG_RETURN(-1); - } - delete_dynamic_element(&acl_users, acl_userd); + sql_mode= thd->variables.sql_mode; + thd->variables.sql_mode&= ~MODE_NO_AUTO_CREATE_USER; + if (replace_user_table(thd, tables[0].table, *user_name, 0, 0, 1)) + { + append_user(&wrong_users, user_name); + result= TRUE; + } + thd->variables.sql_mode= sql_mode; + } + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + rw_unlock(&LOCK_grant); + close_thread_tables(thd); + if (result) + my_error(ER_CANNOT_USER, MYF(0), "CREATE USER", wrong_users.c_ptr()); + DBUG_RETURN(result); +} + + +/* + Drop a list of users and all their privileges. + + SYNOPSIS + mysql_drop_user() + thd The current thread. + list The users to drop. + + RETURN + FALSE OK. + TRUE Error. +*/ + +bool mysql_drop_user(THD *thd, List <LEX_USER> &list) +{ + int result; + int found; + String wrong_users; + LEX_USER *user_name; + List_iterator <LEX_USER> user_list(list); + TABLE_LIST tables[GRANT_TABLES]; + DBUG_ENTER("mysql_drop_user"); + + /* DROP USER may be skipped on replication client. */ + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result != 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + while ((user_name= user_list++)) + { + if ((found= handle_grant_data(tables, 1, user_name, NULL)) <= 0) + { + append_user(&wrong_users, user_name); + result= TRUE; + } + } + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + rw_unlock(&LOCK_grant); + close_thread_tables(thd); + if (result) + my_error(ER_CANNOT_USER, MYF(0), "DROP USER", wrong_users.c_ptr_safe()); + DBUG_RETURN(result); +} + + +/* + Rename a user. + + SYNOPSIS + mysql_rename_user() + thd The current thread. + list The user name pairs: (from, to). + + RETURN + FALSE OK. + TRUE Error. +*/ + +bool mysql_rename_user(THD *thd, List <LEX_USER> &list) +{ + int result= 0; + int found; + String wrong_users; + LEX_USER *user_from; + LEX_USER *user_to; + List_iterator <LEX_USER> user_list(list); + TABLE_LIST tables[GRANT_TABLES]; + DBUG_ENTER("mysql_rename_user"); + + /* RENAME USER may be skipped on replication client. */ + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result != 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + while ((user_from= user_list++)) + { + user_to= user_list++; + DBUG_ASSERT(user_to); /* Syntax enforces pairs of users. */ + + /* + Search all in-memory structures and grant tables + for a mention of the new user name. 
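The check that follows is the whole contract of RENAME USER at this level: the target account must not be mentioned anywhere in the grant data, and the source must match (and be renamed in) at least one record, otherwise the pair is added to wrong_users. A compact sketch of that decision, with handle_grant_data() replaced by an invented lookup callback (GrantLookup, rename_fails):

#include <functional>
#include <iostream>
#include <string>

/*
  Illustrative sketch.  Result convention borrowed from handle_grant_data():
    > 0 matched,  0 no match,  < 0 error.
*/
using GrantLookup= std::function<int(const std::string &name,
                                     const std::string *rename_to)>;

/*
  True when the (from -> to) rename must be reported: the target name is
  already mentioned somewhere, or the source cannot be found and renamed.
*/
static bool rename_fails(const GrantLookup &lookup,
                         const std::string &from, const std::string &to)
{
  if (lookup(to, nullptr) != 0)         /* target exists, or lookup error */
    return true;
  return lookup(from, &to) <= 0;        /* source missing, or rename error */
}

int main()
{
  /* Toy grant data: only 'old_user' exists. */
  GrantLookup lookup= [](const std::string &name, const std::string *)
  { return name == "old_user" ? 1 : 0; };

  std::cout << rename_fails(lookup, "old_user", "new_user") << '\n';  /* 0 */
  std::cout << rename_fails(lookup, "missing",  "new_user") << '\n';  /* 1 */
}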
+ */ + if (handle_grant_data(tables, 0, user_to, NULL) || + handle_grant_data(tables, 0, user_from, user_to) <= 0) + { + append_user(&wrong_users, user_from); + result= TRUE; } } @@ -3612,20 +4907,35 @@ int mysql_drop_user(THD *thd, List <LEX_USER> &list) rw_unlock(&LOCK_grant); close_thread_tables(thd); if (result) - my_error(ER_DROP_USER, MYF(0)); + my_error(ER_CANNOT_USER, MYF(0), "RENAME USER", wrong_users.c_ptr()); DBUG_RETURN(result); } -int mysql_revoke_all(THD *thd, List <LEX_USER> &list) + +/* + Revoke all privileges from a list of users. + + SYNOPSIS + mysql_revoke_all() + thd The current thread. + list The users to revoke all privileges from. + + RETURN + > 0 Error. Error message already sent. + 0 OK. + < 0 Error. Error message not yet sent. +*/ + +bool mysql_revoke_all(THD *thd, List <LEX_USER> &list) { uint counter, revoked; int result; ACL_DB *acl_db; - TABLE_LIST tables[4]; + TABLE_LIST tables[GRANT_TABLES]; DBUG_ENTER("mysql_revoke_all"); if ((result= open_grant_tables(thd, tables))) - DBUG_RETURN(result == 1 ? 0 : 1); + DBUG_RETURN(result != 1); rw_wrlock(&LOCK_grant); VOID(pthread_mutex_lock(&acl_cache->lock)); @@ -3636,9 +4946,8 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list) { if (!check_acl_user(lex_user, &counter)) { - sql_print_error("REVOKE ALL PRIVILEGES, GRANT: User '%s'@'%s' not exists", - lex_user->user.str, - lex_user->host.str); + sql_print_error("REVOKE ALL PRIVILEGES, GRANT: User '%s'@'%s' does not " + "exists", lex_user->user.str, lex_user->host.str); result= -1; continue; } @@ -3661,13 +4970,13 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list) for (counter= 0, revoked= 0 ; counter < acl_dbs.elements ; ) { const char *user,*host; - + acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); if (!(user=acl_db->user)) user= ""; if (!(host=acl_db->host.hostname)) host= ""; - + if (!strcmp(lex_user->user.str,user) && !my_strcasecmp(system_charset_info, lex_user->host.str, host)) { @@ -3698,7 +5007,7 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list) user= ""; if (!(host=grant_table->host)) host= ""; - + if (!strcmp(lex_user->user.str,user) && !my_strcasecmp(system_charset_info, lex_user->host.str, host)) { @@ -3732,15 +5041,167 @@ int mysql_revoke_all(THD *thd, List <LEX_USER> &list) counter++; } } while (revoked); + + /* Remove procedure access */ + do { + for (counter= 0, revoked= 0 ; counter < proc_priv_hash.records ; ) + { + const char *user,*host; + GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(&proc_priv_hash, + counter); + if (!(user=grant_proc->user)) + user= ""; + if (!(host=grant_proc->host)) + host= ""; + + if (!strcmp(lex_user->user.str,user) && + !my_strcasecmp(system_charset_info, lex_user->host.str, host)) + { + if (!replace_proc_table(thd,grant_proc,tables[4].table,*lex_user, + grant_proc->db, + grant_proc->tname, + ~0, 1)) + { + revoked= 1; + continue; + } + result= -1; // Something went wrong + } + counter++; + } + } while (revoked); } - + VOID(pthread_mutex_unlock(&acl_cache->lock)); rw_unlock(&LOCK_grant); close_thread_tables(thd); - + if (result) - my_error(ER_REVOKE_GRANTS, MYF(0)); + my_message(ER_REVOKE_GRANTS, ER(ER_REVOKE_GRANTS), MYF(0)); + + DBUG_RETURN(result); +} + + +/* + Revoke privileges for all users on a stored procedure + + SYNOPSIS + sp_revoke_privileges() + thd The current thread. + db DB of the stored procedure + name Name of the stored procedure + + RETURN + 0 OK. + < 0 Error. Error message not yet sent. 
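The revocation loops in mysql_revoke_all() above, and the procedure loop that follows, all share one shape: an inner scan wrapped in do { ... } while (revoked), restarted whenever a pass removed something, because a successful replace call (e.g. replace_proc_table() above) deletes the entry under the cursor and can reshuffle the records that remain. A small standalone sketch of that rescan pattern (container and predicate are invented):

#include <iostream>
#include <string>
#include <vector>

/*
  Illustrative sketch: keep rescanning from the start until a full pass
  removes nothing, because a removal may move the remaining entries.
*/
static void revoke_all_matching(std::vector<std::string> &grants,
                                const std::string &user)
{
  bool revoked;
  do
  {
    revoked= false;
    for (size_t i= 0; i < grants.size(); )
    {
      if (grants[i] == user)
      {
        grants.erase(grants.begin() + i);   /* may move later entries */
        revoked= true;
        continue;                           /* re-check the refilled slot */
      }
      i++;
    }
  } while (revoked);
}

int main()
{
  std::vector<std::string> grants= { "u1", "u2", "u1", "u3" };
  revoke_all_matching(grants, "u1");
  std::cout << grants.size() << '\n';       /* prints 2 */
}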
+*/ + +bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name) +{ + uint counter, revoked; + int result; + ACL_DB *acl_db; + TABLE_LIST tables[GRANT_TABLES]; + DBUG_ENTER("sp_revoke_privileges"); + + if ((result= open_grant_tables(thd, tables))) + DBUG_RETURN(result != 1); + + rw_wrlock(&LOCK_grant); + VOID(pthread_mutex_lock(&acl_cache->lock)); + + /* Remove procedure access */ + do { + for (counter= 0, revoked= 0 ; counter < proc_priv_hash.records ; ) + { + const char *db,*name; + GRANT_NAME *grant_proc= (GRANT_NAME*) hash_element(&proc_priv_hash, + counter); + if (!my_strcasecmp(system_charset_info, grant_proc->db, sp_db) && + !my_strcasecmp(system_charset_info, grant_proc->tname, sp_name)) + { + LEX_USER lex_user; + lex_user.user.str= grant_proc->user; + lex_user.user.length= strlen(grant_proc->user); + lex_user.host.str= grant_proc->host; + lex_user.host.length= strlen(grant_proc->host); + if (!replace_proc_table(thd,grant_proc,tables[4].table,lex_user, + grant_proc->db, grant_proc->tname, ~0, 1)) + { + revoked= 1; + continue; + } + result= -1; // Something went wrong + } + counter++; + } + } while (revoked); + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + rw_unlock(&LOCK_grant); + close_thread_tables(thd); + + if (result) + my_message(ER_REVOKE_GRANTS, ER(ER_REVOKE_GRANTS), MYF(0)); + + DBUG_RETURN(result); +} + + +/* + Grant EXECUTE,ALTER privilege for a stored procedure + + SYNOPSIS + sp_grant_privileges() + thd The current thread. + db DB of the stored procedure + name Name of the stored procedure + + RETURN + 0 OK. + < 0 Error. Error message not yet sent. +*/ + +bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name) +{ + LEX_USER *combo; + TABLE_LIST tables[1]; + List<LEX_USER> user_list; + bool result; + DBUG_ENTER("sp_grant_privileges"); + + if (!(combo=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) + DBUG_RETURN(TRUE); + + combo->user.str= thd->user; + if (!find_acl_user(combo->host.str=(char*)thd->host_or_ip, combo->user.str) && + !find_acl_user(combo->host.str=(char*)thd->host, combo->user.str) && + !find_acl_user(combo->host.str=(char*)thd->ip, combo->user.str) && + !find_acl_user(combo->host.str=(char*)"%", combo->user.str)) + DBUG_RETURN(TRUE); + + bzero((char*)tables, sizeof(TABLE_LIST)); + user_list.empty(); + + tables->db= (char*)sp_db; + tables->real_name= tables->alias= (char*)sp_name; + + combo->host.length= strlen(combo->host.str); + combo->user.length= strlen(combo->user.str); + combo->host.str= thd->strmake(combo->host.str,combo->host.length); + combo->user.str= thd->strmake(combo->user.str,combo->user.length); + combo->password.str= (char*)""; + combo->password.length= 0; + + if (user_list.push_back(combo)) + DBUG_RETURN(TRUE); + + thd->lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; + + result= mysql_procedure_grant(thd, tables, user_list, + DEFAULT_CREATE_PROC_ACLS, 0, 1); DBUG_RETURN(result); } @@ -3802,3 +5263,280 @@ int wild_case_compare(CHARSET_INFO *cs, const char *str,const char *wildstr) DBUG_RETURN (*str != '\0'); } + +void update_schema_privilege(TABLE *table, char *buff, const char* db, + const char* t_name, const char* column, + uint col_length, const char *priv, + uint priv_length, const char* is_grantable) +{ + int i= 2; + CHARSET_INFO *cs= system_charset_info; + restore_record(table, default_values); + table->field[0]->store(buff, strlen(buff), cs); + if (db) + table->field[i++]->store(db, strlen(db), cs); + if (t_name) + table->field[i++]->store(t_name, strlen(t_name), cs); + if (column) + 
table->field[i++]->store(column, col_length, cs); + table->field[i++]->store(priv, priv_length, cs); + table->field[i]->store(is_grantable, strlen(is_grantable), cs); + table->file->write_row(table->record[0]); +} + + +int fill_schema_user_privileges(THD *thd, TABLE_LIST *tables, COND *cond) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint counter; + ACL_USER *acl_user; + ulong want_access; + + char buff[100]; + TABLE *table= tables->table; + DBUG_ENTER("fill_schema_user_privileges"); + for (counter=0 ; counter < acl_users.elements ; counter++) + { + const char *user,*host, *is_grantable="YES"; + acl_user=dynamic_element(&acl_users,counter,ACL_USER*); + if (!(user=acl_user->user)) + user= ""; + if (!(host=acl_user->host.hostname)) + host= ""; + want_access= acl_user->access; + if (!(want_access & GRANT_ACL)) + is_grantable= "NO"; + + strxmov(buff,"'",user,"'@'",host,"'",NullS); + if (!(want_access & ~GRANT_ACL)) + update_schema_privilege(table, buff, 0, 0, 0, 0, "USAGE", 5, is_grantable); + else + { + uint priv_id; + ulong j,test_access= want_access & ~GRANT_ACL; + for (priv_id=0, j = SELECT_ACL;j <= GLOBAL_ACLS; priv_id++,j <<= 1) + { + if (test_access & j) + update_schema_privilege(table, buff, 0, 0, 0, 0, + command_array[priv_id], + command_lengths[priv_id], is_grantable); + } + } + } + DBUG_RETURN(0); +#else + return(0); +#endif +} + + +int fill_schema_schema_privileges(THD *thd, TABLE_LIST *tables, COND *cond) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint counter; + ACL_DB *acl_db; + ulong want_access; + char buff[100]; + TABLE *table= tables->table; + DBUG_ENTER("fill_schema_schema_privileges"); + + for (counter=0 ; counter < acl_dbs.elements ; counter++) + { + const char *user, *host, *is_grantable="YES"; + + acl_db=dynamic_element(&acl_dbs,counter,ACL_DB*); + if (!(user=acl_db->user)) + user= ""; + if (!(host=acl_db->host.hostname)) + host= ""; + + want_access=acl_db->access; + if (want_access) + { + if (!(want_access & GRANT_ACL)) + { + is_grantable= "NO"; + } + strxmov(buff,"'",user,"'@'",host,"'",NullS); + if (!(want_access & ~GRANT_ACL)) + update_schema_privilege(table, buff, acl_db->db, 0, 0, + 0, "USAGE", 5, is_grantable); + else + { + int cnt; + ulong j,test_access= want_access & ~GRANT_ACL; + for (cnt=0, j = SELECT_ACL; j <= DB_ACLS; cnt++,j <<= 1) + if (test_access & j) + update_schema_privilege(table, buff, acl_db->db, 0, 0, 0, + command_array[cnt], command_lengths[cnt], + is_grantable); + } + } + } + DBUG_RETURN(0); +#else + return (0); +#endif +} + + +int fill_schema_table_privileges(THD *thd, TABLE_LIST *tables, COND *cond) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint index; + char buff[100]; + TABLE *table= tables->table; + DBUG_ENTER("fill_schema_table_privileges"); + + for (index=0 ; index < column_priv_hash.records ; index++) + { + const char *user, *is_grantable= "YES"; + GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, + index); + if (!(user=grant_table->user)) + user= ""; + ulong table_access= grant_table->privs; + if (table_access) + { + ulong test_access= table_access & ~GRANT_ACL; + /* + We should skip 'usage' privilege on table if + we have any privileges on column(s) of this table + */ + if (!test_access && grant_table->cols) + continue; + if (!(table_access & GRANT_ACL)) + is_grantable= "NO"; + + strxmov(buff,"'",user,"'@'",grant_table->orig_host,"'",NullS); + if (!test_access) + update_schema_privilege(table, buff, grant_table->db, grant_table->tname, + 0, 0, "USAGE", 5, is_grantable); + else + { + ulong j; + int cnt; + for (cnt= 0, j= 
SELECT_ACL; j <= TABLE_ACLS; cnt++, j<<= 1) + { + if (test_access & j) + update_schema_privilege(table, buff, grant_table->db, + grant_table->tname, 0, 0, command_array[cnt], + command_lengths[cnt], is_grantable); + } + } + } + } + DBUG_RETURN(0); +#else + return (0); +#endif +} + + +int fill_schema_column_privileges(THD *thd, TABLE_LIST *tables, COND *cond) +{ +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint index; + char buff[100]; + TABLE *table= tables->table; + DBUG_ENTER("fill_schema_table_privileges"); + + for (index=0 ; index < column_priv_hash.records ; index++) + { + const char *user, *is_grantable= "YES"; + GRANT_TABLE *grant_table= (GRANT_TABLE*) hash_element(&column_priv_hash, + index); + if (!(user=grant_table->user)) + user= ""; + ulong table_access= grant_table->cols; + if (table_access != 0) + { + if (!(grant_table->privs & GRANT_ACL)) + is_grantable= "NO"; + + ulong test_access= table_access & ~GRANT_ACL; + strxmov(buff,"'",user,"'@'",grant_table->orig_host,"'",NullS); + if (!test_access) + continue; + else + { + ulong j; + int cnt; + for (cnt= 0, j= SELECT_ACL; j <= TABLE_ACLS; cnt++, j<<= 1) + { + if (test_access & j) + { + for (uint col_index=0 ; + col_index < grant_table->hash_columns.records ; + col_index++) + { + GRANT_COLUMN *grant_column = (GRANT_COLUMN*) + hash_element(&grant_table->hash_columns,col_index); + if ((grant_column->rights & j) && (table_access & j)) + update_schema_privilege(table, buff, grant_table->db, + grant_table->tname, + grant_column->column, + grant_column->key_length, + command_array[cnt], + command_lengths[cnt], is_grantable); + } + } + } + } + } + } + DBUG_RETURN(0); +#else + return (0); +#endif +} + + +#ifndef NO_EMBEDDED_ACCESS_CHECKS +/* + fill effective privileges for table + + SYNOPSIS + fill_effective_table_privileges() + thd thread handler + grant grants table descriptor + db db name + table table name +*/ + +void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant, + const char *db, const char *table) +{ + /* --skip-grants */ + if (!initialized) + { + grant->privilege= ~NO_ACCESS; // everything is allowed + return; + } + + /* global privileges */ + grant->privilege= thd->master_access; + + /* db privileges */ + grant->privilege|= acl_get(thd->host, thd->ip, thd->priv_user, db, 0); + + if (!grant_option) + return; + + /* table privileges */ + if (grant->version != grant_version) + { + rw_rdlock(&LOCK_grant); + grant->grant_table= + table_hash_search(thd->host, thd->ip, db, + thd->priv_user, + table, 0); /* purecov: inspected */ + grant->version= grant_version; /* purecov: inspected */ + rw_unlock(&LOCK_grant); + } + if (grant->grant_table != 0) + { + grant->privilege|= grant->grant_table->privs; + } +} +#endif diff --git a/sql/sql_acl.h b/sql/sql_acl.h index 68cb1476eb5..4ebc3ad7707 100644 --- a/sql/sql_acl.h +++ b/sql/sql_acl.h @@ -35,7 +35,10 @@ #define EXECUTE_ACL (1L << 18) #define REPL_SLAVE_ACL (1L << 19) #define REPL_CLIENT_ACL (1L << 20) - +#define CREATE_VIEW_ACL (1L << 21) +#define SHOW_VIEW_ACL (1L << 22) +#define CREATE_PROC_ACL (1L << 23) +#define ALTER_PROC_ACL (1L << 24) /* don't forget to update static struct show_privileges_st sys_privileges[] @@ -45,44 +48,73 @@ #define DB_ACLS \ (UPDATE_ACL | SELECT_ACL | INSERT_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ - GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | LOCK_TABLES_ACL) + GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_TMP_ACL | \ + LOCK_TABLES_ACL | EXECUTE_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL | \ + CREATE_PROC_ACL | 
ALTER_PROC_ACL) #define TABLE_ACLS \ (SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ - GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL) + GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL | CREATE_VIEW_ACL | \ + SHOW_VIEW_ACL) #define COL_ACLS \ (SELECT_ACL | INSERT_ACL | UPDATE_ACL | REFERENCES_ACL) +#define PROC_ACLS \ +(ALTER_PROC_ACL | EXECUTE_ACL | GRANT_ACL) + #define GLOBAL_ACLS \ (SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL | CREATE_ACL | DROP_ACL | \ RELOAD_ACL | SHUTDOWN_ACL | PROCESS_ACL | FILE_ACL | GRANT_ACL | \ REFERENCES_ACL | INDEX_ACL | ALTER_ACL | SHOW_DB_ACL | SUPER_ACL | \ CREATE_TMP_ACL | LOCK_TABLES_ACL | REPL_SLAVE_ACL | REPL_CLIENT_ACL | \ - EXECUTE_ACL) + EXECUTE_ACL | CREATE_VIEW_ACL | SHOW_VIEW_ACL | CREATE_PROC_ACL | \ + ALTER_PROC_ACL ) #define EXTRA_ACL (1L << 29) #define NO_ACCESS (1L << 30) +#define DEFAULT_CREATE_PROC_ACLS \ +(ALTER_PROC_ACL | EXECUTE_ACL) + /* Defines to change the above bits to how things are stored in tables This is needed as the 'host' and 'db' table is missing a few privileges */ /* Continius bit-segments that needs to be shifted */ -#define DB_REL1 (RELOAD_ACL | SHUTDOWN_ACL | PROCESS_ACL | FILE_ACL) -#define DB_REL2 (GRANT_ACL | REFERENCES_ACL) +#define DB_REL1 ((1L << 6) | (1L << 7) | (1L << 8) | (1L << 9)) +#define DB_REL2 ((1L << 10) | (1L << 11)) +#define DB_REL3 ((1L << 12) | (1L << 13) | (1L << 14) | (1L << 15)) +#define DB_REL4 ((1L << 16)) /* Privileges that needs to be reallocated (in continous chunks) */ #define DB_CHUNK1 (GRANT_ACL | REFERENCES_ACL | INDEX_ACL | ALTER_ACL) #define DB_CHUNK2 (CREATE_TMP_ACL | LOCK_TABLES_ACL) - -#define fix_rights_for_db(A) (((A) & 63) | (((A) & DB_REL1) << 4) | (((A) & DB_REL2) << 6)) -#define get_rights_for_db(A) (((A) & 63) | (((A) & DB_CHUNK1) >> 4) | (((A) & DB_CHUNK2) >> 6)) +#define DB_CHUNK3 (CREATE_VIEW_ACL | SHOW_VIEW_ACL | \ + CREATE_PROC_ACL | ALTER_PROC_ACL ) +#define DB_CHUNK4 (EXECUTE_ACL) + +#define fix_rights_for_db(A) (((A) & 63) | \ + (((A) & DB_REL1) << 4) | \ + (((A) & DB_REL2) << 6) | \ + (((A) & DB_REL3) << 9) | \ + (((A) & DB_REL4) << 2)) +#define get_rights_for_db(A) (((A) & 63) | \ + (((A) & DB_CHUNK1) >> 4) | \ + (((A) & DB_CHUNK2) >> 6) | \ + (((A) & DB_CHUNK3) >> 9) | \ + (((A) & DB_CHUNK4) >> 2)) #define fix_rights_for_table(A) (((A) & 63) | (((A) & ~63) << 4)) #define get_rights_for_table(A) (((A) & 63) | (((A) & ~63) >> 4)) #define fix_rights_for_column(A) (((A) & 7) | (((A) & ~7) << 8)) #define get_rights_for_column(A) (((A) & 7) | ((A) >> 8)) +#define fix_rights_for_procedure(A) ((((A) << 18) & EXECUTE_ACL) | \ + (((A) << 23) & ALTER_PROC_ACL) | \ + (((A) << 8) & GRANT_ACL)) +#define get_rights_for_procedure(A) ((((A) & EXECUTE_ACL) >> 18) | \ + (((A) & ALTER_PROC_ACL) >> 23) | \ + (((A) & GRANT_ACL) >> 8)) /* Classes */ @@ -141,32 +173,49 @@ ulong acl_get(const char *host, const char *ip, const char *user, const char *db, my_bool db_is_pattern); int acl_getroot(THD *thd, USER_RESOURCES *mqh, const char *passwd, uint passwd_len); +int acl_getroot_no_password(THD *thd); bool acl_check_host(const char *host, const char *ip); bool check_change_password(THD *thd, const char *host, const char *user, char *password); bool change_password(THD *thd, const char *host, const char *user, char *password); -int mysql_grant(THD *thd, const char *db, List <LEX_USER> &user_list, - ulong rights, bool revoke); -int mysql_table_grant(THD *thd, TABLE_LIST *table, List <LEX_USER> &user_list, - List <LEX_COLUMN> &column_list, ulong 
rights, - bool revoke); +bool mysql_grant(THD *thd, const char *db, List <LEX_USER> &user_list, + ulong rights, bool revoke); +bool mysql_table_grant(THD *thd, TABLE_LIST *table, List <LEX_USER> &user_list, + List <LEX_COLUMN> &column_list, ulong rights, + bool revoke); +bool mysql_procedure_grant(THD *thd, TABLE_LIST *table, + List <LEX_USER> &user_list, ulong rights, + bool revoke, bool no_error); my_bool grant_init(THD *thd); void grant_free(void); void grant_reload(THD *thd); bool check_grant(THD *thd, ulong want_access, TABLE_LIST *tables, uint show_command, uint number, bool dont_print_error); -bool check_grant_column (THD *thd,TABLE *table, const char *name, uint length, - uint show_command=0); -bool check_grant_all_columns(THD *thd, ulong want_access, TABLE *table); +bool check_grant_column (THD *thd, GRANT_INFO *grant, + char *db_name, char *table_name, + const char *name, uint length, uint show_command=0); +bool check_grant_all_columns(THD *thd, ulong want_access, GRANT_INFO *grant, + char* db_name, char *table_name, + Field_iterator *fields); +bool check_grant_procedure(THD *thd, ulong want_access, + TABLE_LIST *procs, bool no_error); bool check_grant_db(THD *thd,const char *db); ulong get_table_grant(THD *thd, TABLE_LIST *table); -ulong get_column_grant(THD *thd, TABLE_LIST *table, Field *field); -int mysql_show_grants(THD *thd, LEX_USER *user); +ulong get_column_grant(THD *thd, GRANT_INFO *grant, + const char *db_name, const char *table_name, + const char *field_name); +bool mysql_show_grants(THD *thd, LEX_USER *user); void get_privilege_desc(char *to, uint max_length, ulong access); void get_mqh(const char *user, const char *host, USER_CONN *uc); -int mysql_drop_user(THD *thd, List <LEX_USER> &list); -int mysql_revoke_all(THD *thd, List <LEX_USER> &list); +bool mysql_create_user(THD *thd, List <LEX_USER> &list); +bool mysql_drop_user(THD *thd, List <LEX_USER> &list); +bool mysql_rename_user(THD *thd, List <LEX_USER> &list); +bool mysql_revoke_all(THD *thd, List <LEX_USER> &list); +void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant, + const char *db, const char *table); +bool sp_revoke_privileges(THD *thd, const char *sp_db, const char *sp_name); +bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name); #ifdef NO_EMBEDDED_ACCESS_CHECKS #define check_grant(A,B,C,D,E,F) 0 diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index 1e0aebbc1ec..2259d3bead8 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -78,7 +78,7 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, { // first parameter if ((*param->item)->type() != Item::INT_ITEM || - (*param->item)->val() < 0) + (*param->item)->val_real() < 0) { delete pc; my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); @@ -93,7 +93,7 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, } // second parameter if ((*param->item)->type() != Item::INT_ITEM || - (*param->item)->val() < 0) + (*param->item)->val_real() < 0) { delete pc; my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); @@ -102,7 +102,7 @@ proc_analyse_init(THD *thd, ORDER *param, select_result *result, pc->max_treemem = (uint) (*param->item)->val_int(); } else if ((*param->item)->type() != Item::INT_ITEM || - (*param->item)->val() < 0) + (*param->item)->val_real() < 0) { delete pc; my_error(ER_WRONG_PARAMETERS_TO_PROCEDURE, MYF(0), proc_name); @@ -364,7 +364,7 @@ void field_str::add() void field_real::add() { char buff[MAX_FIELD_WIDTH], *ptr, *end; - double num = item->val(); + 
double num= item->val_real(); uint length, zero_count, decs; TREE_ELEMENT *element; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 5c71049e565..b6510e53679 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -19,6 +19,8 @@ #include "mysql_priv.h" #include "sql_select.h" +#include "sp_head.h" +#include "sql_trigger.h" #include <m_ctype.h> #include <my_dir.h> #include <hash.h> @@ -31,11 +33,17 @@ TABLE *unused_tables; /* Used by mysql_test */ HASH open_cache; /* Used by mysql_test */ HASH assign_cache; -static int open_unireg_entry(THD *thd,TABLE *entry,const char *db, - const char *name, const char *alias); +static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, + const char *name, const char *alias, + TABLE_LIST *table_list, MEM_ROOT *mem_root); static void free_cache_entry(TABLE *entry); static void mysql_rm_tmp_tables(void); - +static my_bool open_new_frm(const char *path, const char *alias, + const char *db, const char *table_name, + uint db_stat, uint prgflag, + uint ha_open_flags, TABLE *outparam, + TABLE_LIST *table_desc, MEM_ROOT *mem_root); +static void relink_tables_for_multidelete(THD *thd); extern "C" byte *table_cache_key(const byte *record,uint *length, my_bool not_used __attribute__((unused))) @@ -200,6 +208,7 @@ OPEN_TABLE_LIST *list_open_tables(THD *thd, const char *wild) void intern_close_table(TABLE *table) { // Free all structures free_io_cache(table); + delete table->triggers; if (table->file) VOID(closefrm(table)); // close file } @@ -276,7 +285,7 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, else { bool found=0; - for (TABLE_LIST *table=tables ; table ; table=table->next) + for (TABLE_LIST *table= tables; table; table= table->next_local) { if (remove_table_from_cache(thd, table->db, table->real_name, 1)) found=1; @@ -327,7 +336,7 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, result=reopen_tables(thd,1,1); thd->in_lock_tables=0; /* Set version for table */ - for (TABLE *table=thd->open_tables; table ; table=table->next) + for (TABLE *table=thd->open_tables; table ; table= table->next) table->version=refresh_version; } VOID(pthread_mutex_unlock(&LOCK_open)); @@ -360,7 +369,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, Put all normal tables used by thread in free list. */ -void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) +void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived, + TABLE *stopper) { bool found_old_table; DBUG_ENTER("close_thread_tables"); @@ -395,10 +405,10 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) VOID(pthread_mutex_lock(&LOCK_open)); safe_mutex_assert_owner(&LOCK_open); - DBUG_PRINT("info", ("thd->open_tables=%p", thd->open_tables)); + DBUG_PRINT("info", ("thd->open_tables: %p", thd->open_tables)); found_old_table= 0; - while (thd->open_tables) + while (thd->open_tables != stopper) found_old_table|=close_thread_table(thd, &thd->open_tables); thd->some_tables_deleted=0; @@ -426,6 +436,7 @@ bool close_thread_table(THD *thd, TABLE **table_ptr) bool found_old_table= 0; TABLE *table= *table_ptr; DBUG_ASSERT(table->key_read == 0); + DBUG_ASSERT(table->file->inited == handler::NONE); *table_ptr=table->next; if (table->version != refresh_version || @@ -539,56 +550,119 @@ void close_temporary_tables(THD *thd) thd->temporary_tables=0; } + /* - Find first suitable table by alias in given list. + Find table in list. 
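find_table_in_list() below walks either the next_local or the next_global chain of the same TABLE_LIST nodes, and it does so with a single loop by taking a byte offset to the link field instead of duplicating the code. A hedged standalone illustration of that offset trick, using offsetof() and an invented Node type:

#include <stddef.h>   /* offsetof */
#include <stdio.h>
#include <string.h>

/* Illustrative sketch only: one node, two independent "next" chains. */
struct Node
{
  const char *name;
  Node *next_local;
  Node *next_global;
};

/*
  Follow whichever "next" pointer lives at byte offset 'offset', the same
  way find_table_in_list() selects next_local vs next_global.
*/
static Node *find_in_chain(Node *head, size_t offset, const char *name)
{
  for (; head; head= *(Node **) ((char *) head + offset))
    if (strcmp(head->name, name) == 0)
      break;
  return head;
}

int main()
{
  Node c= { "c", NULL, NULL };
  Node b= { "b", &c, NULL };
  Node a= { "a", &b, &c };                  /* the global chain skips b */

  printf("%s\n", find_in_chain(&a, offsetof(Node, next_local), "b")
                   ? "b found on local chain" : "b not on local chain");
  printf("%s\n", find_in_chain(&a, offsetof(Node, next_global), "b")
                   ? "b found on global chain" : "b not on global chain");
  return 0;
}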
SYNOPSIS find_table_in_list() - table - pointer to table list - db_name - data base name or 0 for any - table_name - table name or 0 for any + table Pointer to table list + offset Offset to which list in table structure to use + db_name Data base name + table_name Table name + + NOTES: + This is called by find_table_in_local_list() and + find_table_in_global_list(). RETURN VALUES NULL Table not found # Pointer to found table. */ -TABLE_LIST * find_table_in_list(TABLE_LIST *table, - const char *db_name, const char *table_name) +TABLE_LIST *find_table_in_list(TABLE_LIST *table, + uint offset, + const char *db_name, + const char *table_name) { - for (; table; table= table->next) - if ((!db_name || !strcmp(table->db, db_name)) && - (!table_name || !my_strcasecmp(table_alias_charset, - table->alias, table_name))) - break; + if (lower_case_table_names) + { + for (; table; table= *(TABLE_LIST **) ((char*) table + offset)) + { + if ((table->table == 0 || table->table->tmp_table == NO_TMP_TABLE) && + ((!strcmp(table->db, db_name) && + !strcmp(table->real_name, table_name)) || + (table->view && + !my_strcasecmp(table_alias_charset, + table->table->table_cache_key, db_name) && + !my_strcasecmp(table_alias_charset, + table->table->table_name, table_name)))) + break; + } + } + else + { + for (; table; table= *(TABLE_LIST **) ((char*) table + offset)) + { + if ((table->table == 0 || table->table->tmp_table == NO_TMP_TABLE) && + ((!strcmp(table->db, db_name) && + !strcmp(table->real_name, table_name)) || + (table->view && + !strcmp(table->table->table_cache_key, db_name) && + !strcmp(table->table->table_name, table_name)))) + break; + } + } return table; } + /* - Find real table in given list. + Test that table is unique SYNOPSIS - find_real_table_in_list() - table - pointer to table list - db_name - data base name - table_name - table name + unique_table() + table table which should be chaked + table_list list of tables - RETURN VALUES - NULL Table not found - # Pointer to found table. + RETURN + found duplicate + 0 if table is unique */ -TABLE_LIST * find_real_table_in_list(TABLE_LIST *table, - const char *db_name, - const char *table_name) +TABLE_LIST* unique_table(TABLE_LIST *table, TABLE_LIST *table_list) { - for (; table; table= table->next) - if (!strcmp(table->db, db_name) && - !strcmp(table->real_name, table_name)) + DBUG_ENTER("unique_table"); + DBUG_PRINT("enter", ("table alias: %s", table->alias)); + TABLE_LIST *res; + const char *d_name= table->db, *t_name= table->real_name; + char d_name_buff[MAX_ALIAS_NAME], t_name_buff[MAX_ALIAS_NAME]; + /* temporary table is always unique */ + if (table->table && table->table->tmp_table != NO_TMP_TABLE) + return 0; + if (table->view) + { + /* it is view and table opened */ + if (lower_case_table_names) + { + strmov(t_name_buff, table->table->table_name); + my_casedn_str(files_charset_info, t_name_buff); + t_name= t_name_buff; + strmov(d_name_buff, table->table->table_cache_key); + my_casedn_str(files_charset_info, d_name_buff); + d_name= d_name_buff; + } + else + { + d_name= table->table->table_cache_key; + t_name= table->table->table_name; + } + } + + DBUG_PRINT("info", ("real table: %s.%s", d_name, t_name)); + for(;;) + { + if (!(res= find_table_in_global_list(table_list, d_name, t_name)) || + !res->table || res->table != table->table) break; - return table; + /* if we found entry of this table try again. 
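unique_table() above answers one question: does some other entry in the global table list resolve to the same db.name? Entries backed by the very same open table (normally the entry for the table being checked) are skipped and the search continues. A minimal sketch with invented types (Entry, find_duplicate):

#include <stdio.h>
#include <string.h>

/* Illustrative sketch only, not the server's TABLE_LIST. */
struct Entry
{
  const char *db;
  const char *name;
  const void *opened;       /* identity of the underlying open table */
  Entry *next_global;
};

/*
  Return the first entry in 'list' naming the same db.name as 'self' but
  backed by a different open table, or NULL if 'self' is unique.
*/
static Entry *find_duplicate(const Entry *self, Entry *list)
{
  for (Entry *e= list; e; e= e->next_global)
  {
    if (strcmp(e->db, self->db) || strcmp(e->name, self->name))
      continue;
    if (e->opened != self->opened)      /* a genuinely different use */
      return e;
    /* same open instance (usually 'self' itself): keep looking */
  }
  return NULL;
}

int main()
{
  int open_a, open_b;                   /* stand-ins for two open tables */
  Entry t2= { "test", "t1", &open_b, NULL };
  Entry t1= { "test", "t1", &open_a, &t2 };
  printf("%s\n", find_duplicate(&t1, &t1) ? "duplicate" : "unique");
  return 0;
}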
*/ + table_list= res->next_global; + DBUG_PRINT("info", ("found same copy of table")); + } + DBUG_RETURN(res); } + TABLE **find_temporary_table(THD *thd, const char *db, const char *table_name) { char key[MAX_DBKEY_LENGTH]; @@ -748,10 +822,12 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) key_length=(uint) (strmov(strmov(key,db)+1,table_name)-key)+1; pthread_mutex_lock(&LOCK_open); - if (open_unireg_entry(thd, table, db, table_name, table_name) || + if (open_unireg_entry(thd, table, db, table_name, table_name, 0, + thd->mem_root) || !(table->table_cache_key =memdup_root(&table->mem_root,(char*) key, key_length))) { + delete table->triggers; closefrm(table); pthread_mutex_unlock(&LOCK_open); DBUG_RETURN(0); @@ -786,12 +862,13 @@ TABLE *reopen_name_locked_table(THD* thd, TABLE_LIST* table_list) ******************************************************************************/ -TABLE *open_table(THD *thd,const char *db,const char *table_name, - const char *alias,bool *refresh) +TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, + bool *refresh) { reg1 TABLE *table; char key[MAX_DBKEY_LENGTH]; uint key_length; + char *alias= table_list->alias; DBUG_ENTER("open_table"); /* find a unused table in the open table cache */ @@ -799,27 +876,30 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, *refresh=0; if (thd->killed) DBUG_RETURN(0); - key_length= (uint) (strmov(strmov(key,db)+1,table_name)-key)+1; + key_length= (uint) (strmov(strmov(key, table_list->db)+1, + table_list->real_name)-key)+1; int4store(key + key_length, thd->server_id); int4store(key + key_length + 4, thd->variables.pseudo_thread_id); - for (table=thd->temporary_tables; table ; table=table->next) + if (!table_list->skip_temporary) { - if (table->key_length == key_length + TMP_TABLE_KEY_EXTRA && - !memcmp(table->table_cache_key, key, - key_length + TMP_TABLE_KEY_EXTRA)) + for (table= thd->temporary_tables; table ; table=table->next) { - if (table->query_id == thd->query_id) + if (table->key_length == key_length + TMP_TABLE_KEY_EXTRA && + !memcmp(table->table_cache_key, key, + key_length + TMP_TABLE_KEY_EXTRA)) { - my_printf_error(ER_CANT_REOPEN_TABLE, - ER(ER_CANT_REOPEN_TABLE),MYF(0),table->table_name); - DBUG_RETURN(0); + if (table->query_id == thd->query_id) + { + my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->table_name); + DBUG_RETURN(0); + } + table->query_id= thd->query_id; + table->clear_query_id= 1; + thd->tmp_table_used= 1; + DBUG_PRINT("info",("Using temporary table")); + goto reset; } - table->query_id=thd->query_id; - table->clear_query_id=1; - thd->tmp_table_used= 1; - DBUG_PRINT("info",("Using temporary table")); - goto reset; } } @@ -837,7 +917,38 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, goto reset; } } - my_printf_error(ER_TABLE_NOT_LOCKED,ER(ER_TABLE_NOT_LOCKED),MYF(0),alias); + /* + is it view? 
+ (it is work around to allow to open view with locked tables, + real fix will be made after definition cache will be made) + */ + { + char path[FN_REFLEN]; + strxnmov(path, FN_REFLEN, mysql_data_home, "/", table_list->db, "/", + table_list->real_name, reg_ext, NullS); + (void) unpack_filename(path, path); + if (mysql_frm_type(path) == FRMTYPE_VIEW) + { + TABLE tab;// will not be used (because it's VIEW) but have to be passed + table= &tab; + VOID(pthread_mutex_lock(&LOCK_open)); + if (open_unireg_entry(thd, table, table_list->db, + table_list->real_name, + alias, table_list, mem_root)) + { + table->next=table->prev=table; + free_cache_entry(table); + } + else + { + DBUG_ASSERT(table_list->view); + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(0); // VIEW + } + VOID(pthread_mutex_unlock(&LOCK_open)); + } + } + my_error(ER_TABLE_NOT_LOCKED, MYF(0), alias); DBUG_RETURN(0); } @@ -854,7 +965,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, } /* close handler tables which are marked for flush */ - mysql_ha_flush(thd, (TABLE_LIST*) NULL, MYSQL_HA_REOPEN_ON_USAGE); + if (thd->handler_tables) + mysql_ha_flush(thd, (TABLE_LIST*) NULL, MYSQL_HA_REOPEN_ON_USAGE); for (table=(TABLE*) hash_search(&open_cache,(byte*) key,key_length) ; table && table->in_use ; @@ -870,7 +982,9 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, if (table->in_use != thd) wait_for_refresh(thd); else + { VOID(pthread_mutex_unlock(&LOCK_open)); + } if (refresh) *refresh=1; DBUG_RETURN(0); @@ -886,7 +1000,7 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, } table->prev->next=table->next; /* Remove from unused list */ table->next->prev=table->prev; - + table->in_use= thd; } else { @@ -900,15 +1014,23 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(NULL); } - if (open_unireg_entry(thd, table,db,table_name,alias) || - !(table->table_cache_key=memdup_root(&table->mem_root,(char*) key, - key_length))) + if (open_unireg_entry(thd, table, table_list->db, table_list->real_name, + alias, table_list, mem_root) || + (!table_list->view && + !(table->table_cache_key= memdup_root(&table->mem_root, (char*) key, + key_length)))) { table->next=table->prev=table; free_cache_entry(table); VOID(pthread_mutex_unlock(&LOCK_open)); DBUG_RETURN(NULL); } + if (table_list->view) + { + my_free((gptr)table, MYF(0)); + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(0); // VIEW + } table->key_length=key_length; table->version=refresh_version; table->flush_version=flush_version; @@ -916,9 +1038,8 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, VOID(my_hash_insert(&open_cache,(byte*) table)); } - table->in_use=thd; check_unused(); // Debugging call - + VOID(pthread_mutex_unlock(&LOCK_open)); if (refresh) { @@ -928,8 +1049,11 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, table->reginfo.lock_type=TL_READ; /* Assume read */ reset: + if (thd->lex->need_correct_ident()) + table->alias_name_used= my_strcasecmp(table_alias_charset, + table->real_name, alias); /* Fix alias if table name changes */ - if (strcmp(table->table_name,alias)) + if (strcmp(table->table_name, alias)) { uint length=(uint) strlen(alias)+1; table->table_name= (char*) my_realloc(table->table_name,length, @@ -938,31 +1062,6 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, for (uint i=0 ; i < table->fields ; i++) table->field[i]->table_name=table->table_name; } -#if MYSQL_VERSION_ID < 
40100 - /* - If per-connection "new" variable (represented by variables.new_mode) - is set then we should pretend that the length of TIMESTAMP field is 19. - The cheapest (from perfomance viewpoint) way to achieve that is to set - field_length of all Field_timestamp objects in a table after opening - it (to 19 if new_mode is true or to original field length otherwise). - We save value of new_mode variable in TABLE::timestamp_mode to - not perform this setup if new_mode value is the same between sequential - table opens. - */ - my_bool new_mode= thd->variables.new_mode; - if (table->timestamp_mode != new_mode) - { - for (uint i=0 ; i < table->fields ; i++) - { - Field *field= table->field[i]; - - if (field->type() == FIELD_TYPE_TIMESTAMP) - field->field_length= new_mode ? 19 : - ((Field_timestamp *)(field))->orig_field_length; - } - table->timestamp_mode= new_mode; - } -#endif /* These variables are also set in reopen_table() */ table->tablenr=thd->current_tablenr++; table->used_fields=0; @@ -970,9 +1069,11 @@ TABLE *open_table(THD *thd,const char *db,const char *table_name, table->outer_join= table->null_row= table->maybe_null= table->force_index= 0; table->status=STATUS_NO_RECORD; table->keys_in_use_for_query= table->keys_in_use; + table->insert_values= 0; table->used_keys= table->keys_for_keyread; if (table->timestamp_field) table->timestamp_field_type= table->timestamp_field->get_auto_set_type(); + table_list->updatable= 1; // It is not derived table nor non-updatable VIEW DBUG_ASSERT(table->key_read == 0); DBUG_RETURN(table); } @@ -1019,13 +1120,15 @@ bool reopen_table(TABLE *table,bool locked) VOID(pthread_mutex_lock(&LOCK_open)); safe_mutex_assert_owner(&LOCK_open); - if (open_unireg_entry(current_thd,&tmp,db,table_name,table->table_name)) + if (open_unireg_entry(table->in_use, &tmp, db, table_name, + table->table_name, 0, table->in_use->mem_root)) goto end; free_io_cache(table); if (!(tmp.table_cache_key= memdup_root(&tmp.mem_root,db, table->key_length))) { + delete tmp.triggers; closefrm(&tmp); // End of memory goto end; } @@ -1054,6 +1157,7 @@ bool reopen_table(TABLE *table,bool locked) tmp.next= table->next; tmp.prev= table->prev; + delete table->triggers; if (table->file) VOID(closefrm(table)); // close file, free everything @@ -1136,7 +1240,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) next=table->next; if (!tables || (!db_stat && reopen_table(table,1))) { - my_error(ER_CANT_REOPEN_TABLE,MYF(0),table->table_name); + my_error(ER_CANT_REOPEN_TABLE, MYF(0), table->table_name); VOID(hash_delete(&open_cache,(byte*) table)); error=1; } @@ -1336,6 +1440,8 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) db Database name name Table name alias Alias name + table_desc TABLE_LIST descriptor (used with views) + mem_root temporary mem_root for parsing NOTES Extra argument for open is taken from thd->open_options @@ -1344,9 +1450,9 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) 0 ok # Error */ - static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, - const char *name, const char *alias) + const char *name, const char *alias, + TABLE_LIST *table_desc, MEM_ROOT *mem_root) { char path[FN_REFLEN]; int error; @@ -1354,19 +1460,28 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, DBUG_ENTER("open_unireg_entry"); strxmov(path, mysql_data_home, "/", db, "/", name, NullS); - while (openfrm(path,alias, - (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | - HA_TRY_READ_ONLY), - 
READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, - thd->open_options, entry)) + while ((error= openfrm(thd, path, alias, + (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | + HA_GET_INDEX | HA_TRY_READ_ONLY | + NO_ERR_ON_NEW_FRM), + READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, + thd->open_options, entry)) && + (error != 5 || + (fn_format(path, path, 0, reg_ext, MY_UNPACK_FILENAME), + open_new_frm(path, alias, db, name, + (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | + HA_GET_INDEX | HA_TRY_READ_ONLY), + READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, + thd->open_options, entry, table_desc, mem_root)))) + { if (!entry->crashed) { /* - Frm file could not be found on disk - Since it does not exist, no one can be using it - LOCK_open has been locked to protect from someone else - trying to discover the table at the same time. + Frm file could not be found on disk + Since it does not exist, no one can be using it + LOCK_open has been locked to protect from someone else + trying to discover the table at the same time. */ if (discover_retry_count++ != 0) goto err; @@ -1400,7 +1515,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, pthread_mutex_unlock(&LOCK_open); thd->clear_error(); // Clear error message error= 0; - if (openfrm(path,alias, + if (openfrm(thd, path, alias, (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX | HA_TRY_READ_ONLY), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, @@ -1425,6 +1540,13 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, goto err; break; } + + if (error == 5) + DBUG_RETURN(0); // we have just opened VIEW + + if (Table_triggers_list::check_n_load(thd, db, name, entry)) + goto err; + /* If we are here, there was no fatal error (but error may be still unitialized). @@ -1453,6 +1575,7 @@ static int open_unireg_entry(THD *thd, TABLE *entry, const char *db, */ sql_print_error("When opening HEAP table, could not allocate \ memory to write 'DELETE FROM `%s`.`%s`' to the binary log",db,name); + delete entry->triggers; if (entry->file) closefrm(entry); goto err; @@ -1461,6 +1584,14 @@ memory to write 'DELETE FROM `%s`.`%s`' to the binary log",db,name); } DBUG_RETURN(0); err: + /* Hide "Table doesn't exist" errors if table belong to view */ + if (thd->net.last_errno == ER_NO_SUCH_TABLE && + table_desc && table_desc->belong_to_view) + { + TABLE_LIST * view= table_desc->belong_to_view; + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), view->view_db.str, view->view_name.str); + } DBUG_RETURN(1); } @@ -1484,12 +1615,18 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) bool refresh; int result=0; DBUG_ENTER("open_tables"); + MEM_ROOT new_frm_mem; + /* + temporary mem_root for new .frm parsing. + TODO: variables for size + */ + init_alloc_root(&new_frm_mem, 8024, 8024); thd->current_tablenr= 0; restart: *counter= 0; thd->proc_info="Opening tables"; - for (tables=start ; tables ; tables=tables->next) + for (tables= start; tables ;tables= tables->next_global) { /* Ignore placeholders for derived tables. 
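The new_frm_mem root set up at the top of open_tables() is a classic arena: everything allocated while parsing one new-style .frm is released in a single free_root() call per table, while the preallocated block is kept for the next iteration. A rough standalone analogue using a reusable bump allocator (sizes, class name and methods are illustrative only):

#include <stddef.h>
#include <vector>

/*
  Toy arena: bump-allocate from one preallocated block and recycle it
  between iterations, loosely analogous to init_alloc_root()/free_root()
  with MY_KEEP_PREALLOC in open_tables().
*/
class Arena
{
  std::vector<unsigned char> block;
  size_t used= 0;
public:
  explicit Arena(size_t size) : block(size) {}
  void *alloc(size_t n)
  {
    if (used + n > block.size())
      return NULL;                      /* real code would chain more blocks */
    void *p= block.data() + used;
    used+= n;
    return p;
  }
  void reset() { used= 0; }             /* free everything, keep the block */
};

int main()
{
  Arena frm_mem(8 * 1024);
  for (int table= 0; table < 3; table++)
  {
    void *scratch= frm_mem.alloc(512);  /* per-table parse scratch space */
    (void) scratch;
    frm_mem.reset();                    /* like free_root(MY_KEEP_PREALLOC) */
  }
  return 0;
}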
After derived tables @@ -1497,13 +1634,23 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) */ if (tables->derived) continue; + if (tables->schema_table) + { + if (!mysql_schema_table(thd, thd->lex, tables)) + continue; + DBUG_RETURN(-1); + } (*counter)++; if (!tables->table && - !(tables->table= open_table(thd, - tables->db, - tables->real_name, - tables->alias, &refresh))) + !(tables->table= open_table(thd, tables, &new_frm_mem, &refresh))) { + free_root(&new_frm_mem, MYF(MY_KEEP_PREALLOC)); + if (tables->view) + { + (*counter)--; + continue; //VIEW placeholder + } + if (refresh) // Refresh in progress { /* close all 'old' tables used by this thread */ @@ -1513,7 +1660,7 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) thd->version=refresh_version; TABLE **prev_table= &thd->open_tables; bool found=0; - for (TABLE_LIST *tmp=start ; tmp ; tmp=tmp->next) + for (TABLE_LIST *tmp= start; tmp; tmp= tmp->next_global) { /* Close normal (not temporary) changed tables */ if (tmp->table && ! tmp->table->tmp_table) @@ -1541,11 +1688,15 @@ int open_tables(THD *thd, TABLE_LIST *start, uint *counter) result= -1; // Fatal error break; } + else + free_root(&new_frm_mem, MYF(MY_KEEP_PREALLOC)); + if (tables->lock_type != TL_UNLOCK && ! thd->locked_tables) tables->table->reginfo.lock_type=tables->lock_type; tables->table->grant= tables->grant; } thd->proc_info=0; + free_root(&new_frm_mem, MYF(0)); // Free pre-alloced block DBUG_RETURN(result); } @@ -1573,9 +1724,7 @@ static bool check_lock_and_start_stmt(THD *thd, TABLE *table, if ((int) lock_type >= (int) TL_WRITE_ALLOW_READ && (int) table->reginfo.lock_type < (int) TL_WRITE_ALLOW_READ) { - my_printf_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, - ER(ER_TABLE_NOT_LOCKED_FOR_WRITE), - MYF(0),table->table_name); + my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0),table->table_name); DBUG_RETURN(1); } if ((error=table->file->start_stmt(thd))) @@ -1613,9 +1762,11 @@ TABLE *open_ltable(THD *thd, TABLE_LIST *table_list, thr_lock_type lock_type) thd->proc_info="Opening table"; thd->current_tablenr= 0; - while (!(table=open_table(thd,table_list->db, - table_list->real_name,table_list->alias, - &refresh)) && refresh) ; + /* open_ltable can be used only for BASIC TABLEs */ + table_list->required_type= FRMTYPE_TABLE; + while (!(table= open_table(thd, table_list, thd->mem_root, &refresh)) && + refresh) + ; if (table) { @@ -1684,42 +1835,49 @@ int simple_open_n_lock_tables(THD *thd, TABLE_LIST *tables) tables - list of tables for open&locking RETURN - 0 - ok - -1 - error + FALSE - ok + TRUE - error NOTE The lock will automaticly be freed by close_thread_tables() */ -int open_and_lock_tables(THD *thd, TABLE_LIST *tables) +bool open_and_lock_tables(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("open_and_lock_tables"); uint counter; - if (open_tables(thd, tables, &counter) || lock_tables(thd, tables, counter)) - DBUG_RETURN(-1); /* purecov: inspected */ - relink_tables_for_derived(thd); - DBUG_RETURN(mysql_handle_derived(thd->lex)); + if (open_tables(thd, tables, &counter) || + lock_tables(thd, tables, counter) || + mysql_handle_derived(thd->lex, &mysql_derived_prepare) || + (thd->fill_derived_tables() && + mysql_handle_derived(thd->lex, &mysql_derived_filling))) + DBUG_RETURN(TRUE); /* purecov: inspected */ + relink_tables_for_multidelete(thd); + DBUG_RETURN(0); } /* Let us propagate pointers to open tables from global table list - to table lists in particular selects if needed. 
+ to table lists for multi-delete */ -void relink_tables_for_derived(THD *thd) +static void relink_tables_for_multidelete(THD *thd) { - if (thd->lex->all_selects_list->next_select_in_list() || - thd->lex->time_zone_tables_used) + if (thd->lex->all_selects_list->next_select_in_list()) { for (SELECT_LEX *sl= thd->lex->all_selects_list; sl; sl= sl->next_select_in_list()) + { for (TABLE_LIST *cursor= (TABLE_LIST *) sl->table_list.first; cursor; - cursor=cursor->next) - if (cursor->table_list) - cursor->table= cursor->table_list->table; + cursor=cursor->next_local) + { + if (cursor->correspondent_table) + cursor->table= cursor->correspondent_table->table; + } + } } } @@ -1755,9 +1913,9 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count) TABLE **start,**ptr; if (!(ptr=start=(TABLE**) sql_alloc(sizeof(TABLE*)*count))) return -1; - for (table = tables ; table ; table=table->next) + for (table= tables; table; table= table->next_global) { - if (!table->derived) + if (!table->placeholder() && !table->schema_table) *(ptr++)= table->table; } if (!(thd->lock=mysql_lock_tables(thd,start, (uint) (ptr - start)))) @@ -1765,9 +1923,9 @@ int lock_tables(THD *thd, TABLE_LIST *tables, uint count) } else { - for (table = tables ; table ; table=table->next) + for (table= tables; table; table= table->next_global) { - if (!table->derived && + if (!table->placeholder() && check_lock_and_start_stmt(thd, table->table, table->lock_type)) { ha_rollback_stmt(thd); @@ -1803,7 +1961,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, MYF(MY_WME)))) DBUG_RETURN(0); /* purecov: inspected */ - if (openfrm(path, table_name, + if (openfrm(thd, path, table_name, (uint) (HA_OPEN_KEYFILE | HA_OPEN_RNDFILE | HA_GET_INDEX), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, ha_open_options, @@ -1814,7 +1972,6 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, } tmp_table->reginfo.lock_type=TL_WRITE; // Simulate locked - tmp_table->in_use= thd; tmp_table->tmp_table = (tmp_table->file->has_transactions() ? TRANSACTIONAL_TMP_TABLE : TMP_TABLE); tmp_table->table_cache_key=(char*) (tmp_table+1); @@ -1867,11 +2024,128 @@ bool rm_temporary_table(enum db_type base, char *path) ** return unique field ******************************************************************************/ +/* Special Field pointers for find_field_in_tables returning */ +Field *not_found_field= (Field*) 0x1; +Field *view_ref_found= (Field*) 0x2; + #define WRONG_GRANT (Field*) -1 -Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, - bool check_grants, bool allow_rowid, - uint *cached_field_index_ptr) + +/* + Find field in table or view + + SYNOPSIS + find_field_in_table() + thd thread handler + table_list table where to find + name name of field + item_name name of item if it will be created (VIEW) + length length of name + ref expression substituted in VIEW should be + passed using this reference (return + view_ref_found) + check_grants_table do check columns grants for table? + check_grants_view do check columns grants for view? + allow_rowid do allow finding of "_rowid" field? 
+ cached_field_index_ptr cached position in field list (used to + speedup prepared tables field finding) + register_tree_change TRUE if ref is not stack variable and we + need register changes in item tree + + RETURN + 0 field is not found + view_ref_found found value in VIEW (real result is in *ref) + # pointer to field +*/ + +Field * +find_field_in_table(THD *thd, TABLE_LIST *table_list, + const char *name, const char *item_name, + uint length, Item **ref, + bool check_grants_table, bool check_grants_view, + bool allow_rowid, + uint *cached_field_index_ptr, + bool register_tree_change) +{ + Field *fld; + DBUG_ENTER("find_field_in_table"); + DBUG_PRINT("enter", ("table: '%s' name: '%s' item name: '%s' ref 0x%lx", + table_list->alias, name, item_name, (ulong) ref)); + if (table_list->field_translation) + { + DBUG_ASSERT(ref != 0 && table_list->view != 0); + uint num= table_list->view->select_lex.item_list.elements; + Field_translator *trans= table_list->field_translation; + for (uint i= 0; i < num; i ++) + { + if (strcmp(trans[i].name, name) == 0) + { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (check_grants_view && + check_grant_column(thd, &table_list->grant, + table_list->view_db.str, + table_list->view_name.str, + name, length)) + DBUG_RETURN(WRONG_GRANT); +#endif + if (thd->lex->current_select->no_wrap_view_item) + *ref= trans[i].item; + else + { + Item_ref *item_ref= new Item_ref(&trans[i].item, + table_list->view_name.str, + item_name); + /* as far as Item_ref have defined reference it do not need tables */ + if (register_tree_change && item_ref) + thd->change_item_tree(ref, item_ref); + } + DBUG_RETURN((Field*) view_ref_found); + } + } + DBUG_RETURN(0); + } + fld= find_field_in_real_table(thd, table_list->table, name, length, + check_grants_table, allow_rowid, + cached_field_index_ptr); +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* check for views with temporary table algorithm */ + if (check_grants_view && table_list->view && + fld && fld != WRONG_GRANT && + check_grant_column(thd, &table_list->grant, + table_list->view_db.str, + table_list->view_name.str, + name, length)) + { + DBUG_RETURN(WRONG_GRANT); + } +#endif + DBUG_RETURN(fld); +} + + +/* + Find field in table + + SYNOPSIS + find_field_in_real_table() + thd thread handler + table_list table where to find + name name of field + length length of name + check_grants do check columns grants? + allow_rowid do allow finding of "_rowid" field? 
+ cached_field_index_ptr cached position in field list (used to + speedup prepared tables field finding) + + RETURN + 0 field is not found + # pointer to field +*/ + +Field *find_field_in_real_table(THD *thd, TABLE *table, + const char *name, uint length, + bool check_grants, bool allow_rowid, + uint *cached_field_index_ptr) { Field **field_ptr, *field; uint cached_field_index= *cached_field_index_ptr; @@ -1918,7 +2192,9 @@ Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, thd->dupp_field=field; } #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (check_grants && check_grant_column(thd,table,name,length)) + if (check_grants && check_grant_column(thd, &table->grant, + table->table_cache_key, + table->real_name, name, length)) return WRONG_GRANT; #endif return field; @@ -1932,26 +2208,33 @@ Field *find_field_in_table(THD *thd,TABLE *table,const char *name,uint length, find_field_in_tables() thd Pointer to current thread structure item Field item that should be found - tables Tables for scanning - where Table where field found will be returned via - this parameter - report_error If FALSE then do not report error if item not found - and return not_found_field + tables Tables to be searched for item + ref If 'item' is resolved to a view field, ref is set to + point to the found view field + report_error Degree of error reporting: + - IGNORE_ERRORS then do not report any error + - IGNORE_EXCEPT_NON_UNIQUE report only non-unique + fields, suppress all other errors + - REPORT_EXCEPT_NON_UNIQUE report all other errors + except when non-unique fields were found + - REPORT_ALL_ERRORS + check_privileges need to check privileges RETURN VALUES - 0 Field is not found or field is not unique- error - message is reported - not_found_field Function was called with report_error == FALSE and - field was not found. no error message reported. - found field + 0 If error: the found field is not unique, or there are + no sufficient access priviliges for the found field, + or the field is qualified with non-existing table. + not_found_field The function was called with report_error == + (IGNORE_ERRORS || IGNORE_EXCEPT_NON_UNIQUE) and a + field was not found. + view_ref_found View field is found, item passed through ref parameter + found field If a item was resolved to some field */ -// Special Field pointer for find_field_in_tables returning -const Field *not_found_field= (Field*) 0x1; - Field * find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, - TABLE_LIST **where, bool report_error) + Item **ref, find_item_error_report_type report_error, + bool check_privileges) { Field *found=0; const char *db=item->db_name; @@ -1968,20 +2251,38 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, TABLE_LIST *tables is not changed during query execution (which is true for all queries except RENAME but luckily RENAME doesn't use fields...) so we can rely on reusing pointer to its member. - With this optimisation we also miss case when addition of one more - field makes some prepared query ambiguous and so erronous, but we + With this optimization we also miss case when addition of one more + field makes some prepared query ambiguous and so erroneous, but we accept this trade off. 
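The cached_field_index_ptr parameter threaded through these lookups is a one-slot cache: try the position remembered from the previous execution first, and only fall back to a full scan of the field list when it no longer matches. A small hedged sketch of the same idea over a plain array of names (find_field and its arguments are invented for illustration):

#include <stdio.h>
#include <string.h>

/*
  Illustrative sketch: look up 'name', trying the cached position first
  and updating it on success.
*/
static int find_field(const char *const *fields, int count,
                      const char *name, unsigned *cached_index)
{
  if (*cached_index < (unsigned) count &&
      strcmp(fields[*cached_index], name) == 0)
    return (int) *cached_index;         /* cache hit, no scan needed */

  for (int i= 0; i < count; i++)
  {
    if (strcmp(fields[i], name) == 0)
    {
      *cached_index= (unsigned) i;      /* remember for the next execution */
      return i;
    }
  }
  return -1;
}

int main()
{
  const char *fields[]= { "id", "name", "created" };
  unsigned cached= 0;
  printf("%d\n", find_field(fields, 3, "name", &cached));   /* 1 (scan) */
  printf("%d\n", find_field(fields, 3, "name", &cached));   /* 1 (cache hit) */
  return 0;
}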
*/ - found= find_field_in_table(thd, item->cached_table->table, name, length, - test(item->cached_table-> - table->grant.want_privilege), - 1, &(item->cached_field_index)); + if (item->cached_table->table) + { + found= find_field_in_real_table(thd, item->cached_table->table, + name, length, + test(item->cached_table-> + table->grant.want_privilege) && + check_privileges, + 1, &(item->cached_field_index)); + } + else + { + TABLE_LIST *table= item->cached_table; + Field *find= find_field_in_table(thd, table, name, item->name, length, + ref, + (table->table && + test(table->table->grant. + want_privilege) && + check_privileges), + (test(table->grant.want_privilege) && + check_privileges), + 1, &(item->cached_field_index), + TRUE); + } if (found) { - (*where)= tables; if (found == WRONG_GRANT) - return (Field*) 0; + return (Field*) 0; return found; } } @@ -1989,7 +2290,7 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, if (db && lower_case_table_names) { /* - convert database to lower case for comparision. + convert database to lower case for comparison. We can't do this in Item_field as this would change the 'name' of the item which may be used in the select list */ @@ -2001,19 +2302,25 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, if (table_name && table_name[0]) { /* Qualified field */ bool found_table=0; - for (; tables ; tables=tables->next) + for (; tables; tables= tables->next_local) { if (!my_strcasecmp(table_alias_charset, tables->alias, table_name) && (!db || !tables->db || !tables->db[0] || !strcmp(db,tables->db))) { found_table=1; - Field *find=find_field_in_table(thd,tables->table,name,length, - test(tables->table->grant. - want_privilege), - 1, &(item->cached_field_index)); + Field *find= find_field_in_table(thd, tables, name, item->name, + length, ref, + (tables->table && + test(tables->table->grant. 
+ want_privilege) && + check_privileges), + (test(tables->grant.want_privilege) && + check_privileges), + 1, &(item->cached_field_index), + TRUE); if (find) { - (*where)= item->cached_table= tables; + item->cached_table= tables; if (!tables->cacheable_table) item->cached_table= 0; if (find == WRONG_GRANT) @@ -2022,8 +2329,10 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, return find; if (found) { - my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), - item->full_name(),thd->where); + if (report_error == REPORT_ALL_ERRORS || + report_error == IGNORE_EXCEPT_NON_UNIQUE) + my_error(ER_NON_UNIQ_ERROR, MYF(0), + item->full_name(),thd->where); return (Field*) 0; } found=find; @@ -2032,7 +2341,8 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, } if (found) return found; - if (!found_table && report_error) + if (!found_table && (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE)) { char buff[NAME_LEN*2+1]; if (db && db[0]) @@ -2040,49 +2350,57 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, strxnmov(buff,sizeof(buff)-1,db,".",table_name,NullS); table_name=buff; } - if (report_error) - { - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - table_name, thd->where); - } + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) + my_error(ER_UNKNOWN_TABLE, MYF(0), table_name, thd->where); else return (Field*) not_found_field; } else - if (report_error) - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), - item->full_name(),thd->where); + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) + my_error(ER_BAD_FIELD_ERROR, MYF(0), item->full_name(),thd->where); else return (Field*) not_found_field; return (Field*) 0; } - bool allow_rowid= tables && !tables->next; // Only one table - for (; tables ; tables=tables->next) + + bool allow_rowid= tables && !tables->next_local; // Only one table + for (; tables ; tables= tables->next_local) { - if (!tables->table) + if (!tables->table && !tables->ancestor) { - if (report_error) - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR),MYF(0), - item->full_name(),thd->where); + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) + my_error(ER_BAD_FIELD_ERROR, MYF(0), item->full_name(),thd->where); return (Field*) not_found_field; } - Field *field=find_field_in_table(thd,tables->table,name,length, - test(tables->table->grant.want_privilege), - allow_rowid, &(item->cached_field_index)); + Field *field= find_field_in_table(thd, tables, name, item->name, + length, ref, + (tables->table && + test(tables->table->grant. 
+ want_privilege) && + check_privileges), + (test(tables->grant.want_privilege) && + check_privileges), + allow_rowid, + &(item->cached_field_index), + TRUE); if (field) { if (field == WRONG_GRANT) return (Field*) 0; - (*where)= item->cached_table= tables; + item->cached_table= tables; if (!tables->cacheable_table) item->cached_table= 0; if (found) { if (!thd->where) // Returns first found break; - my_printf_error(ER_NON_UNIQ_ERROR,ER(ER_NON_UNIQ_ERROR),MYF(0), - name,thd->where); + if (report_error == REPORT_ALL_ERRORS || + report_error == IGNORE_EXCEPT_NON_UNIQUE) + my_error(ER_NON_UNIQ_ERROR, MYF(0), name, thd->where); return (Field*) 0; } found=field; @@ -2090,9 +2408,9 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, } if (found) return found; - if (report_error) - my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), - MYF(0), item->full_name(), thd->where); + if (report_error == REPORT_ALL_ERRORS || + report_error == REPORT_EXCEPT_NON_UNIQUE) + my_error(ER_BAD_FIELD_ERROR, MYF(0), item->full_name(), thd->where); else return (Field*) not_found_field; return (Field*) 0; @@ -2129,8 +2447,8 @@ find_field_in_tables(THD *thd, Item_ident *item, TABLE_LIST *tables, found field */ -// Special Item pointer for find_item_in_list returning -const Item **not_found_item= (const Item**) 0x1; +/* Special Item pointer to serve as a return value from find_item_in_list(). */ +Item **not_found_item= (Item**) 0x1; Item ** @@ -2145,6 +2463,8 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, bool found_unaliased_non_uniq= 0; uint unaliased_counter; + LINT_INIT(unaliased_counter); // Dependent on found_unaliased + *unaliased= FALSE; if (find->type() == Item::FIELD_ITEM || find->type() == Item::REF_ITEM) @@ -2204,8 +2524,8 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, unaliased names only and will have duplicate error anyway. 
*/ if (report_error != IGNORE_ERRORS) - my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), - MYF(0), find->full_name(), current_thd->where); + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); return (Item**) 0; } found_unaliased= li.ref(); @@ -2229,8 +2549,8 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, if ((*found)->eq(item, 0)) continue; // Same field twice if (report_error != IGNORE_ERRORS) - my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), - MYF(0), find->full_name(), current_thd->where); + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); return (Item**) 0; } found= li.ref(); @@ -2273,8 +2593,8 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, if (found_unaliased_non_uniq) { if (report_error != IGNORE_ERRORS) - my_printf_error(ER_NON_UNIQ_ERROR, ER(ER_NON_UNIQ_ERROR), MYF(0), - find->full_name(), current_thd->where); + my_error(ER_NON_UNIQ_ERROR, MYF(0), + find->full_name(), current_thd->where); return (Item **) 0; } if (found_unaliased) @@ -2289,8 +2609,8 @@ find_item_in_list(Item *find, List<Item> &items, uint *counter, if (report_error != REPORT_EXCEPT_NOT_FOUND) { if (report_error == REPORT_ALL_ERRORS) - my_printf_error(ER_BAD_FIELD_ERROR, ER(ER_BAD_FIELD_ERROR), MYF(0), - find->full_name(), current_thd->where); + my_error(ER_BAD_FIELD_ERROR, MYF(0), + find->full_name(), current_thd->where); return (Item **) 0; } else @@ -2305,28 +2625,29 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, List<Item> *sum_func_list, uint wild_num) { - bool is_stmt_prepare; - DBUG_ENTER("setup_wild"); if (!wild_num) - DBUG_RETURN(0); + return(0); - reg2 Item *item; + Item *item; List_iterator<Item> it(fields); Item_arena *arena, backup; + DBUG_ENTER("setup_wild"); + /* - If we are in preparing prepared statement phase then we have change - temporary mem_root to statement mem root to save changes of SELECT list + Don't use arena if we are not in prepared statements or stored procedures + For PS/SP we have to use arena to remember the changes */ arena= thd->change_arena_if_needed(&backup); while (wild_num && (item= it++)) - { + { if (item->type() == Item::FIELD_ITEM && ((Item_field*) item)->field_name && ((Item_field*) item)->field_name[0] == '*' && !((Item_field*) item)->field) { uint elem= fields.elements; + bool any_privileges= ((Item_field *) item)->any_privileges; Item_subselect *subsel= thd->lex->current_select->master_unit()->item; if (subsel && subsel->substype() == Item_subselect::EXISTS_SUBS) @@ -2339,9 +2660,10 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, it.replace(new Item_int("Not_used", (longlong) 1, 21)); } else if (insert_fields(thd,tables,((Item_field*) item)->db_name, - ((Item_field*) item)->table_name, &it)) + ((Item_field*) item)->table_name, &it, + any_privileges, arena != 0)) { - if (arena) + if (arena) thd->restore_backup_item_arena(arena, &backup); DBUG_RETURN(-1); } @@ -2358,7 +2680,14 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, } } if (arena) + { + /* make * substituting permanent */ + SELECT_LEX *select_lex= thd->lex->current_select; + select_lex->with_wild= 0; + select_lex->item_list= fields; + thd->restore_backup_item_arena(arena, &backup); + } DBUG_RETURN(0); } @@ -2366,12 +2695,13 @@ int setup_wild(THD *thd, TABLE_LIST *tables, List<Item> &fields, ** Check that all given fields exists and fill struct with current data ****************************************************************************/ 
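[Editor's note, not part of the patch: the hunks above change find_field_in_tables() from a boolean report_error flag to a graded find_item_error_report_type. A minimal standalone sketch of the reporting policy those conditions implement follows; the enum member names mirror the patch, but the enum definition itself lives elsewhere in the tree, so its shape here is an assumption and the rest is purely illustrative.]

// Standalone illustration of the error-reporting degrees used above.
// Enum member names mirror the patch; their definition here is assumed.
#include <cstdio>

enum find_item_error_report_type {
  IGNORE_ERRORS,              // never raise a user-visible error
  IGNORE_EXCEPT_NON_UNIQUE,   // stay quiet except for ambiguous columns
  REPORT_EXCEPT_NON_UNIQUE,   // report everything except ambiguity
  REPORT_ALL_ERRORS           // report every resolution failure
};

enum resolve_failure { FIELD_NOT_FOUND, FIELD_NON_UNIQUE };

// Mirrors the conditions guarding the my_error(ER_NON_UNIQ_ERROR, ...) and
// my_error(ER_BAD_FIELD_ERROR / ER_UNKNOWN_TABLE, ...) calls in the hunks above.
static bool should_report(find_item_error_report_type mode, resolve_failure failure)
{
  if (failure == FIELD_NON_UNIQUE)
    return mode == REPORT_ALL_ERRORS || mode == IGNORE_EXCEPT_NON_UNIQUE;
  /* FIELD_NOT_FOUND (unknown column or table) */
  return mode == REPORT_ALL_ERRORS || mode == REPORT_EXCEPT_NON_UNIQUE;
}

int main()
{
  // Ambiguous column: still reported under IGNORE_EXCEPT_NON_UNIQUE.
  std::printf("%d\n", should_report(IGNORE_EXCEPT_NON_UNIQUE, FIELD_NON_UNIQUE)); // 1
  // Unknown column: suppressed under IGNORE_EXCEPT_NON_UNIQUE.
  std::printf("%d\n", should_report(IGNORE_EXCEPT_NON_UNIQUE, FIELD_NOT_FOUND));  // 0
  return 0;
}

[End of editor's note; the patch resumes below.]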
-int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, - List<Item> &fields, bool set_query_id, - List<Item> *sum_func_list, bool allow_sum_func) +bool setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + List<Item> &fields, bool set_query_id, + List<Item> *sum_func_list, bool allow_sum_func) { reg2 Item *item; List_iterator<Item> it(fields); + SELECT_LEX *select_lex= thd->lex->current_select; DBUG_ENTER("setup_fields"); thd->set_query_id=set_query_id; @@ -2383,7 +2713,9 @@ int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, { if (!item->fixed && item->fix_fields(thd, tables, it.ref()) || (item= *(it.ref()))->check_cols(1)) - DBUG_RETURN(-1); /* purecov: inspected */ + { + DBUG_RETURN(TRUE); /* purecov: inspected */ + } if (ref) *(ref++)= item; if (item->with_sum_func && item->type() != Item::SUM_FUNC_ITEM && @@ -2396,34 +2728,89 @@ int setup_fields(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, /* + make list of leaves of join table tree + + SYNOPSIS + make_leaves_list() + list pointer to pointer on list first element + tables table list + + RETURN pointer on pointer to next_leaf of last element +*/ + +TABLE_LIST **make_leaves_list(TABLE_LIST **list, TABLE_LIST *tables) +{ + for (TABLE_LIST *table= tables; table; table= table->next_local) + { + if (table->view && !table->table) + { + /* it is for multi table views only, check it */ + DBUG_ASSERT(table->ancestor->next_local); + list= make_leaves_list(list, table->ancestor); + } + else + { + *list= table; + list= &table->next_leaf; + } + } + return list; +} + +/* prepare tables SYNOPSIS setup_tables() - tables table list + thd Thread handler + tables Table list + conds Condition of current SELECT (can be changed by VIEW) + leaves List of join table leaves list + refresh It is onle refresh for subquery + select_insert It is SELECT ... INSERT command + NOTE + Check also that the 'used keys' and 'ignored keys' exists and set up the + table structure accordingly + Create leaf tables list - NOTE - Remap table numbers if INSERT ... SELECT - Check also that the 'used keys' and 'ignored keys' exists and set up the - table structure accordingly - - This has to be called for all tables that are used by items, as otherwise - table->map is not set and all Item_field will be regarded as const items. + This has to be called for all tables that are used by items, as otherwise + table->map is not set and all Item_field will be regarded as const items. - RETURN - 0 ok; In this case *map will includes the choosed index - 1 error + RETURN + FALSE ok; In this case *map will includes the choosed index + TRUE error */ -bool setup_tables(TABLE_LIST *tables) +bool setup_tables(THD *thd, TABLE_LIST *tables, Item **conds, + TABLE_LIST **leaves, bool refresh, bool select_insert) { + uint tablenr= 0; DBUG_ENTER("setup_tables"); - uint tablenr=0; - for (TABLE_LIST *table_list=tables ; table_list ; - table_list=table_list->next,tablenr++) + /* + this is used for INSERT ... SELECT. + For select we setup tables except first (and its underlying tables) + */ + TABLE_LIST *first_select_table= (select_insert ? + tables->next_local: + 0); + if (!(*leaves)) + make_leaves_list(leaves, tables); + + for (TABLE_LIST *table_list= *leaves; + table_list; + table_list= table_list->next_leaf, tablenr++) { TABLE *table= table_list->table; + if (first_select_table && + (table_list->belong_to_view ? + table_list->belong_to_view : + table_list) == first_select_table) + { + /* new counting for SELECT of INSERT ... 
SELECT command */ + first_select_table= 0; + tablenr= 0; + } setup_table_map(table, table_list, tablenr); table->used_keys= table->keys_for_keyread; if (table_list->use_index) @@ -2449,6 +2836,18 @@ bool setup_tables(TABLE_LIST *tables) my_error(ER_TOO_MANY_TABLES,MYF(0),MAX_TABLES); DBUG_RETURN(1); } + if (!refresh) + { + for (TABLE_LIST *table_list= tables; + table_list; + table_list= table_list->next_local) + { + if (table_list->ancestor && + table_list->setup_ancestor(thd, conds, + table_list->effective_with_check)) + DBUG_RETURN(1); + } + } DBUG_RETURN(0); } @@ -2477,11 +2876,12 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, map->clear_all(); while ((name=it++)) { - if ((pos= find_type(&table->keynames, name->ptr(), name->length(), 1)) <= - 0) + if (table->keynames.type_names == 0 || + (pos= find_type(&table->keynames, name->ptr(), name->length(), 1)) <= + 0) { - my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), name->c_ptr(), - table->real_name); + my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), + name->c_ptr(), table->real_name); map->set_all(); return 1; } @@ -2491,17 +2891,37 @@ bool get_key_map_from_key_list(key_map *map, TABLE *table, } -/**************************************************************************** - This just drops in all fields instead of current '*' field - Returns pointer to last inserted field if ok -****************************************************************************/ +/* + Drops in all fields instead of current '*' field + + SYNOPSIS + insert_fields() + thd Thread handler + tables List of tables + db_name Database name in case of 'database_name.table_name.*' + table_name Table name in case of 'table_name.*' + it Pointer to '*' + any_privileges 0 If we should ensure that we have SELECT privileges + for all columns + 1 If any privilege is ok + allocate_view_names if true view names will be copied to current Item_arena + memory (made for SP/PS) + RETURN + 0 ok + 'it' is updated to point at last inserted + 1 error. 
Error message is generated but not sent to client +*/ bool -insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, - const char *table_name, List_iterator<Item> *it) +insert_fields(THD *thd, TABLE_LIST *tables, const char *db_name, + const char *table_name, List_iterator<Item> *it, + bool any_privileges, bool allocate_view_names) { - char name_buff[NAME_LEN+1]; + /* allocate variables on stack to avoid pool alloaction */ + Field_iterator_table table_iter; + Field_iterator_view view_iter; uint found; + char name_buff[NAME_LEN+1]; DBUG_ENTER("insert_fields"); if (db_name && lower_case_table_names) @@ -2516,216 +2936,417 @@ insert_fields(THD *thd,TABLE_LIST *tables, const char *db_name, db_name= name_buff; } - - found=0; - for (; tables ; tables=tables->next) + found= 0; + for (; tables; tables= tables->next_local) { - TABLE *table=tables->table; + Field_iterator *iterator; + TABLE_LIST *natural_join_table; + Field *field; + TABLE_LIST *embedded; + TABLE_LIST *last; + TABLE_LIST *embedding; + TABLE *table= tables->table; + if (!table_name || (!my_strcasecmp(table_alias_charset, table_name, tables->alias) && (!db_name || !strcmp(tables->db,db_name)))) { + bool view; #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Ensure that we have access right to all columns */ - if (!(table->grant.privilege & SELECT_ACL) && - check_grant_all_columns(thd,SELECT_ACL,table)) - DBUG_RETURN(-1); + if (!((table && (table->grant.privilege & SELECT_ACL) || + tables->view && (tables->grant.privilege & SELECT_ACL))) && + !any_privileges) + { + if (tables->view) + { + view_iter.set(tables); + if (check_grant_all_columns(thd, SELECT_ACL, &tables->grant, + tables->view_db.str, + tables->view_name.str, + &view_iter)) + goto err; + } + else if (!tables->schema_table) + { + DBUG_ASSERT(table != 0); + table_iter.set(tables); + if (check_grant_all_columns(thd, SELECT_ACL, &table->grant, + table->table_cache_key, table->real_name, + &table_iter)) + goto err; + } + } #endif - Field **ptr=table->field,*field; - TABLE *natural_join_table= 0; + if (table) + thd->used_tables|= table->map; + else + { + view_iter.set(tables); + for (; !view_iter.end_of_fields(); view_iter.next()) + { + thd->used_tables|= view_iter.item(thd)->used_tables(); + } + } + natural_join_table= 0; + last= embedded= tables; - thd->used_tables|=table->map; - if (!table->outer_join && - tables->natural_join && - !tables->natural_join->table->outer_join) - natural_join_table= tables->natural_join->table; + while ((embedding= embedded->embedding) && + embedding->join_list->elements != 1) + { + TABLE_LIST *next; + List_iterator_fast<TABLE_LIST> it(embedding->nested_join->join_list); + last= it++; + while ((next= it++)) + last= next; + if (last != tables) + break; + embedded= embedding; + } + + if (tables == last && + !embedded->outer_join && + embedded->natural_join && + !embedded->natural_join->outer_join) + { + embedding= embedded->natural_join; + while (embedding->nested_join) + embedding= embedding->nested_join->join_list.head(); + natural_join_table= embedding; + } + if (tables->field_translation) + { + iterator= &view_iter; + view= 1; + } + else + { + iterator= &table_iter; + view= 0; + } + iterator->set(tables); - while ((field = *ptr++)) + for (; !iterator->end_of_fields(); iterator->next()) { + Item *not_used_item; uint not_used_field_index= NO_CACHED_FIELD_INDEX; + const char *field_name= iterator->name(); /* Skip duplicate field names if NATURAL JOIN is used */ if (!natural_join_table || - !find_field_in_table(thd, natural_join_table, field->field_name, - 
strlen(field->field_name), 0, 0, - ¬_used_field_index)) + !find_field_in_table(thd, natural_join_table, field_name, + field_name, + strlen(field_name), ¬_used_item, 0, 0, 0, + ¬_used_field_index, TRUE)) { - Item_field *item= new Item_field(thd, field); + Item *item= iterator->item(thd); if (!found++) (void) it->replace(item); // Replace '*' else it->after(item); + if (view && !thd->lex->current_select->no_wrap_view_item) + { + item= new Item_ref(it->ref(), tables->view_name.str, + field_name); + } +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (any_privileges) + { + /* + In time of view creation MEGRGE algorithm for underlying + VIEWs can't be used => it should be Item_field + */ + DBUG_ASSERT(item->type() == Item::FIELD_ITEM); + Item_field *fld= (Item_field*)item; + char *db, *tab; + if (tables->view) + { + db= tables->view_db.str; + tab= tables->view_name.str; + } + else + { + db= tables->db; + tab= tables->real_name; + } + if (!tables->schema_table && + !(fld->have_privileges= (get_column_grant(thd, + &table->grant, + db, + tab, + fld->field_name) & + VIEW_ANY_ACL))) + { + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + "ANY", + thd->priv_user, + thd->host_or_ip, + fld->field_name, + tab); + goto err; + } + } +#endif + } + if ((field= iterator->field())) + { + /* + Mark if field used before in this select. + Used by 'insert' to verify if a field name is used twice + */ + if (field->query_id == thd->query_id) + thd->dupp_field=field; + field->query_id=thd->query_id; + table->used_keys.intersect(field->part_of_key); + } + else if (allocate_view_names && + thd->lex->current_select->first_execution) + { + Item_field *item= new Item_field(thd->strdup(tables->view_db.str), + thd->strdup(tables->view_name.str), + thd->strdup(field_name)); + /* + during cleunup() this item will be put in list to replace + expression from VIEW + */ + thd->nocheck_register_item_tree_change(it->ref(), item, + thd->mem_root); } - /* - Mark if field used before in this select. 
- Used by 'insert' to verify if a field name is used twice - */ - if (field->query_id == thd->query_id) - thd->dupp_field=field; - field->query_id=thd->query_id; - table->used_keys.intersect(field->part_of_key); } - /* All fields are used */ - table->used_fields=table->fields; + /* + All fields are used in case if usual tables (in case of view used + fields marked in setup_tables during fix_fields of view columns + */ + if (table) + table->used_fields=table->fields; } } - if (!found) - { - if (!table_name) - my_error(ER_NO_TABLES_USED,MYF(0)); - else - my_error(ER_BAD_TABLE_ERROR,MYF(0),table_name); - } - DBUG_RETURN(!found); + if (found) + DBUG_RETURN(0); + + if (!table_name) + my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0)); + else + my_error(ER_BAD_TABLE_ERROR, MYF(0), table_name); +#ifndef NO_EMBEDDED_ACCESS_CHECKS +err: +#endif + DBUG_RETURN(1); } /* -** Fix all conditions and outer join expressions + Fix all conditions and outer join expressions + + SYNOPSIS + setup_conds() + thd thread handler + tables list of tables for name resolving + leaves list of leaves of join table tree */ -int setup_conds(THD *thd,TABLE_LIST *tables,COND **conds) +int setup_conds(THD *thd, TABLE_LIST *tables, TABLE_LIST *leaves, COND **conds) { table_map not_null_tables= 0; - Item_arena *arena= 0, backup; + SELECT_LEX *select_lex= thd->lex->current_select; + Item_arena *arena= thd->current_arena, backup; + bool save_wrapper= thd->lex->current_select->no_wrap_view_item; + TABLE_LIST *table= NULL; // For HP compilers DBUG_ENTER("setup_conds"); + if (select_lex->conds_processed_with_permanent_arena || + arena->is_conventional()) + arena= 0; // For easier test + thd->set_query_id=1; - thd->lex->current_select->cond_count= 0; + + thd->lex->current_select->no_wrap_view_item= 1; + select_lex->cond_count= 0; if (*conds) { thd->where="where clause"; if (!(*conds)->fixed && (*conds)->fix_fields(thd, tables, conds) || (*conds)->check_cols(1)) - DBUG_RETURN(1); - not_null_tables= (*conds)->not_null_tables(); + goto err_no_arena; } - /* Check if we are using outer joins */ - for (TABLE_LIST *table=tables ; table ; table=table->next) + for (table= leaves; table; table= table->next_leaf) { - if (table->on_expr) + TABLE_LIST *embedded; + TABLE_LIST *embedding= table; + do { - /* Make a join an a expression */ - thd->where="on clause"; - - if (!table->on_expr->fixed && - table->on_expr->fix_fields(thd, tables, &table->on_expr) || - table->on_expr->check_cols(1)) - DBUG_RETURN(1); - thd->lex->current_select->cond_count++; - - /* - If it's a normal join or a LEFT JOIN which can be optimized away - add the ON/USING expression to the WHERE - */ - if (!table->outer_join || - ((table->table->map & not_null_tables) && - !(specialflag & SPECIAL_NO_NEW_FUNC))) + embedded= embedding; + if (embedded->on_expr) { - table->outer_join= 0; - arena= thd->change_arena_if_needed(&backup); - *conds= and_conds(*conds, table->on_expr); - table->on_expr=0; - if (arena) - { - thd->restore_backup_item_arena(arena, &backup); - arena= 0; // Safety if goto err - } - if ((*conds) && !(*conds)->fixed && - (*conds)->fix_fields(thd, tables, conds)) - DBUG_RETURN(1); + /* Make a join an a expression */ + thd->where="on clause"; + if (!embedded->on_expr->fixed && + embedded->on_expr->fix_fields(thd, tables, &embedded->on_expr) || + embedded->on_expr->check_cols(1)) + goto err_no_arena; + select_lex->cond_count++; } - } - if (table->natural_join) - { - arena= thd->change_arena_if_needed(&backup); - /* Make a join of all fields with have the same 
name */ - TABLE *t1= table->table; - TABLE *t2= table->natural_join->table; - Item_cond_and *cond_and= new Item_cond_and(); - if (!cond_and) // If not out of memory - goto err; - cond_and->top_level_item(); - - Field **t1_field, *t2_field; - for (t1_field= t1->field; (*t1_field); t1_field++) + if (embedded->natural_join) { - const char *t1_field_name= (*t1_field)->field_name; - uint not_used_field_index= NO_CACHED_FIELD_INDEX; - - if ((t2_field= find_field_in_table(thd, t2, t1_field_name, - strlen(t1_field_name), 0, 0, - ¬_used_field_index))) + /* Make a join of all fields wich have the same name */ + TABLE_LIST *tab1= embedded; + TABLE_LIST *tab2= embedded->natural_join; + if (!(embedded->outer_join & JOIN_TYPE_RIGHT)) { - Item_func_eq *tmp=new Item_func_eq(new Item_field(thd, *t1_field), - new Item_field(thd, t2_field)); - if (!tmp) - goto err; - /* Mark field used for table cache */ - (*t1_field)->query_id= t2_field->query_id= thd->query_id; - cond_and->list.push_back(tmp); - t1->used_keys.intersect((*t1_field)->part_of_key); - t2->used_keys.intersect(t2_field->part_of_key); + while (tab1->nested_join) + { + TABLE_LIST *next; + List_iterator_fast<TABLE_LIST> it(tab1->nested_join->join_list); + tab1= it++; + while ((next= it++)) + tab1= next; + } + } + else + { + while (tab1->nested_join) + tab1= tab1->nested_join->join_list.head(); + } + if (embedded->outer_join & JOIN_TYPE_RIGHT) + { + while (tab2->nested_join) + { + TABLE_LIST *next; + List_iterator_fast<TABLE_LIST> it(tab2->nested_join->join_list); + tab2= it++; + while ((next= it++)) + tab2= next; + } + } + else + { + while (tab2->nested_join) + tab2= tab2->nested_join->join_list.head(); } - } - thd->lex->current_select->cond_count+= cond_and->list.elements; - // to prevent natural join processing during PS re-execution - table->natural_join= 0; + if (arena) + arena= thd->change_arena_if_needed(&backup); + + TABLE *t1=tab1->table; + TABLE *t2=tab2->table; + Field_iterator_table table_iter; + Field_iterator_view view_iter; + Field_iterator *iterator; + Field *t1_field, *t2_field; + Item *item_t2= 0; + Item_cond_and *cond_and= new Item_cond_and(); + + if (!cond_and) // If not out of memory + goto err_no_arena; + cond_and->top_level_item(); + + if (table->field_translation) + { + iterator= &view_iter; + view_iter.set(tab1); + } + else + { + iterator= &table_iter; + table_iter.set(tab1); + } - if (cond_and->list.elements) - { - if (!table->outer_join) // Not left join + for (; !iterator->end_of_fields(); iterator->next()) { - *conds= and_conds(*conds, cond_and); - // fix_fields() should be made with temporary memory pool - if (arena) - thd->restore_backup_item_arena(arena, &backup); - if (*conds && !(*conds)->fixed) + const char *t1_field_name= iterator->name(); + uint not_used_field_index= NO_CACHED_FIELD_INDEX; + + if ((t2_field= find_field_in_table(thd, tab2, t1_field_name, + t1_field_name, + strlen(t1_field_name), &item_t2, + 0, 0, 0, + ¬_used_field_index, + FALSE))) { - if (!(*conds)->fixed && - (*conds)->fix_fields(thd, tables, conds)) - DBUG_RETURN(1); + if (t2_field != view_ref_found) + { + if (!(item_t2= new Item_field(thd, t2_field))) + goto err; + /* Mark field used for table cache */ + t2_field->query_id= thd->query_id; + t2->used_keys.intersect(t2_field->part_of_key); + } + if ((t1_field= iterator->field())) + { + /* Mark field used for table cache */ + t1_field->query_id= thd->query_id; + t1->used_keys.intersect(t1_field->part_of_key); + } + Item_func_eq *tmp= new Item_func_eq(iterator->item(thd), + item_t2); + if (!tmp) + 
goto err; + cond_and->list.push_back(tmp); } } - else + select_lex->cond_count+= cond_and->list.elements; + + // to prevent natural join processing during PS re-execution + embedding->natural_join= 0; + + if (cond_and->list.elements) { - table->on_expr= and_conds(table->on_expr, cond_and); - // fix_fields() should be made with temporary memory pool - if (arena) - thd->restore_backup_item_arena(arena, &backup); - if (table->on_expr && !table->on_expr->fixed) + COND *on_expr= cond_and; + on_expr->fix_fields(thd, 0, &on_expr); + if (!embedded->outer_join) // Not left join + { + *conds= and_conds(*conds, cond_and); + // fix_fields() should be made with temporary memory pool + if (arena) + thd->restore_backup_item_arena(arena, &backup); + if (*conds && !(*conds)->fixed) + { + if ((*conds)->fix_fields(thd, tables, conds)) + goto err_no_arena; + } + } + else { - if (!table->on_expr->fixed && - table->on_expr->fix_fields(thd, tables, &table->on_expr)) - DBUG_RETURN(1); + embedded->on_expr= and_conds(embedded->on_expr, cond_and); + // fix_fields() should be made with temporary memory pool + if (arena) + thd->restore_backup_item_arena(arena, &backup); + if (embedded->on_expr && !embedded->on_expr->fixed) + { + if (embedded->on_expr->fix_fields(thd, tables, &table->on_expr)) + goto err_no_arena; + } } } + else if (arena) + thd->restore_backup_item_arena(arena, &backup); } - else if (arena) - { - thd->restore_backup_item_arena(arena, &backup); - arena= 0; // Safety if goto err - } + embedding= embedded->embedding; } + while (embedding && + embedding->nested_join->join_list.head() == embedded); } - if (thd->current_arena->is_stmt_prepare()) + if (!thd->current_arena->is_conventional()) { /* We are in prepared statement preparation code => we should store WHERE clause changing for next executions. - We do this ON -> WHERE transformation only once per PS statement. + We do this ON -> WHERE transformation only once per PS/SP statement. */ - thd->lex->current_select->where= *conds; + select_lex->where= *conds; + select_lex->conds_processed_with_permanent_arena= 1; } + thd->lex->current_select->no_wrap_view_item= save_wrapper; DBUG_RETURN(test(thd->net.report_error)); err: if (arena) thd->restore_backup_item_arena(arena, &backup); +err_no_arena: + thd->lex->current_select->no_wrap_view_item= save_wrapper; DBUG_RETURN(1); } @@ -2735,8 +3356,25 @@ err: ** Returns : 1 if some field has wrong type ******************************************************************************/ -int -fill_record(List<Item> &fields,List<Item> &values, bool ignore_errors) + +/* + Fill fields with given items. 
+ + SYNOPSIS + fill_record() + thd thread handler + fields Item_fields list to be filled + values values to fill with + ignore_errors TRUE if we should ignore errors + + RETURN + FALSE OK + TRUE error occured +*/ + +bool +fill_record(THD * thd, List<Item> &fields, List<Item> &values, + bool ignore_errors) { List_iterator_fast<Item> f(fields),v(values); Item *value; @@ -2751,14 +3389,32 @@ fill_record(List<Item> &fields,List<Item> &values, bool ignore_errors) if (rfield == table->next_number_field) table->auto_increment_field_not_null= TRUE; if ((value->save_in_field(rfield, 0) < 0) && !ignore_errors) - DBUG_RETURN(1); + { + my_message(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), MYF(0)); + DBUG_RETURN(TRUE); + } } - DBUG_RETURN(0); + DBUG_RETURN(thd->net.report_error); } -int -fill_record(Field **ptr,List<Item> &values, bool ignore_errors) +/* + Fill field buffer with values from Field list + + SYNOPSIS + fill_record() + thd thread handler + ptr pointer on pointer to record + values list of fields + ignore_errors TRUE if we should ignore errors + + RETURN + FALSE OK + TRUE error occured +*/ + +bool +fill_record(THD *thd, Field **ptr, List<Item> &values, bool ignore_errors) { List_iterator_fast<Item> v(values); Item *value; @@ -2772,9 +3428,12 @@ fill_record(Field **ptr,List<Item> &values, bool ignore_errors) if (field == table->next_number_field) table->auto_increment_field_not_null= TRUE; if ((value->save_in_field(field, 0) < 0) && !ignore_errors) - DBUG_RETURN(1); + { + my_message(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR), MYF(0)); + DBUG_RETURN(TRUE); + } } - DBUG_RETURN(0); + DBUG_RETURN(thd->net.report_error); } @@ -2899,7 +3558,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) && ! in_use->killed) { - in_use->killed=1; + in_use->killed= THD::KILL_CONNECTION; pthread_mutex_lock(&in_use->mysys_var->mutex); if (in_use->mysys_var->current_cond) { @@ -2965,3 +3624,62 @@ int init_ftfuncs(THD *thd, SELECT_LEX *select_lex, bool no_order) } return 0; } + + +/* + open new .frm format table + + SYNOPSIS + open_new_frm() + path path to .frm + alias alias for table + db database + table_name name of table + db_stat open flags (for example HA_OPEN_KEYFILE|HA_OPEN_RNDFILE..) + can be 0 (example in ha_example_table) + prgflag READ_ALL etc.. + ha_open_flags HA_OPEN_ABORT_IF_LOCKED etc.. 
+ outparam result table + table_desc TABLE_LIST descriptor + mem_root temporary MEM_ROOT for parsing +*/ + +static my_bool +open_new_frm(const char *path, const char *alias, + const char *db, const char *table_name, + uint db_stat, uint prgflag, + uint ha_open_flags, TABLE *outparam, TABLE_LIST *table_desc, + MEM_ROOT *mem_root) +{ + LEX_STRING pathstr; + File_parser *parser; + DBUG_ENTER("open_new_frm"); + + pathstr.str= (char*) path; + pathstr.length= strlen(path); + + if ((parser= sql_parse_prepare(&pathstr, mem_root, 1))) + { + if (!strncmp("VIEW", parser->type()->str, parser->type()->length)) + { + if (table_desc == 0 || table_desc->required_type == FRMTYPE_TABLE) + { + my_error(ER_WRONG_OBJECT, MYF(0), db, table_name, "BASE TABLE"); + goto err; + } + if (mysql_make_view(parser, table_desc)) + goto err; + } + else + { + /* only VIEWs are supported now */ + my_error(ER_FRM_UNKNOWN_TYPE, MYF(0), path, parser->type()->str); + goto err; + } + DBUG_RETURN(0); + } + +err: + bzero(outparam, sizeof(TABLE)); // do not run repair + DBUG_RETURN(1); +} diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index bd42a2c1720..26b1eff49e7 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -610,6 +610,7 @@ void query_cache_insert(NET *net, const char *packet, ulong length) DBUG_VOID_RETURN; } header->result(result); + header->last_pkt_nr= net->pkt_nr; BLOCK_UNLOCK_WR(query_block); } else @@ -934,11 +935,16 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) /* Test if the query is a SELECT - (pre-space is removed in dispatch_command) + (pre-space is removed in dispatch_command). + + First '/' looks like comment before command it is not + frequently appeared in real lihe, consequently we can + check all such queries, too. */ - if (my_toupper(system_charset_info, sql[0]) != 'S' || - my_toupper(system_charset_info, sql[1]) != 'E' || - my_toupper(system_charset_info,sql[2]) !='L') + if ((my_toupper(system_charset_info, sql[0]) != 'S' || + my_toupper(system_charset_info, sql[1]) != 'E' || + my_toupper(system_charset_info,sql[2]) !='L') && + sql[0] != '/') { DBUG_PRINT("qcache", ("The statement is not a SELECT; Not cached")); goto err; @@ -1114,6 +1120,7 @@ Query_cache::send_result_to_client(THD *thd, char *sql, uint query_length) ALIGN_SIZE(sizeof(Query_cache_result)))) break; // Client aborted result_block = result_block->next; + thd->net.pkt_nr= query->last_pkt_nr; // Keep packet number updated } while (result_block != first_result_block); #else { @@ -1152,7 +1159,7 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used, using_transactions = using_transactions && (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)); - for (; tables_used; tables_used=tables_used->next) + for (; tables_used; tables_used= tables_used->next_local) { DBUG_ASSERT(!using_transactions || tables_used->table!=0); if (tables_used->derived) @@ -1184,7 +1191,7 @@ void Query_cache::invalidate(CHANGED_TABLE_LIST *tables_used) if (query_cache_size > 0) { DUMP(this); - for (; tables_used; tables_used=tables_used->next) + for (; tables_used; tables_used= tables_used->next) { invalidate_table((byte*) tables_used->key, tables_used->key_length); DBUG_PRINT("qcache", (" db %s, table %s", tables_used->key, @@ -1217,7 +1224,7 @@ void Query_cache::invalidate_locked_for_write(TABLE_LIST *tables_used) if (query_cache_size > 0) { DUMP(this); - for (; tables_used; tables_used= tables_used->next) + for (; tables_used; tables_used= tables_used->next_local) { if (tables_used->lock_type & (TL_WRITE_LOW_PRIORITY | 
TL_WRITE)) invalidate_table(tables_used->table); @@ -2103,7 +2110,9 @@ my_bool Query_cache::register_all_tables(Query_cache_block *block, Query_cache_block_table *block_table = block->table(0); - for (n=0; tables_used; tables_used=tables_used->next, n++, block_table++) + for (n= 0; + tables_used; + tables_used= tables_used->next_global, n++, block_table++) { DBUG_PRINT("qcache", ("table %s, db %s, openinfo at 0x%lx, keylen %u, key at 0x%lx", @@ -2650,7 +2659,7 @@ TABLE_COUNTER_TYPE Query_cache::is_cacheable(THD *thd, uint32 query_len, lex->select_lex.options, (int) thd->variables.query_cache_type)); - for (; tables_used; tables_used= tables_used->next) + for (; tables_used; tables_used= tables_used->next_global) { table_count++; DBUG_PRINT("qcache", ("table %s, db %s, type %u", @@ -2717,7 +2726,7 @@ my_bool Query_cache::ask_handler_allowance(THD *thd, { DBUG_ENTER("Query_cache::ask_handler_allowance"); - for (; tables_used; tables_used= tables_used->next) + for (; tables_used; tables_used= tables_used->next_global) { TABLE *table= tables_used->table; if (!ha_caching_allowed(thd, table->table_cache_key, @@ -3166,7 +3175,7 @@ void Query_cache::wreck(uint line, const char *message) DBUG_PRINT("warning", ("%5d QUERY CACHE WRECK => DISABLED",line)); DBUG_PRINT("warning", ("==================================")); if (thd) - thd->killed = 1; + thd->killed= THD::KILL_CONNECTION; cache_dump(); /* check_integrity(0); */ /* Can't call it here because of locks */ bins_dump(); diff --git a/sql/sql_cache.h b/sql/sql_cache.h index c933a2349af..93d89aeae4f 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -112,6 +112,7 @@ struct Query_cache_query NET *wri; ulong len; uint8 tbls_type; + unsigned int last_pkt_nr; inline void init_n_lock(); void unlock_n_destroy(); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 596097d9dc2..d369e85d775 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -35,6 +35,9 @@ #endif #include <mysys_err.h> +#include "sp_rcontext.h" +#include "sp_cache.h" + /* The following is used to initialise Table_ident with a internal table name @@ -156,18 +159,22 @@ bool foreign_key_prefix(Key *a, Key *b) THD::THD() :user_time(0), global_read_lock(0), is_fatal_error(0), - last_insert_id_used(0), - insert_id_used(0), rand_used(0), time_zone_used(0), - in_lock_tables(0), bootstrap(0) + rand_used(0), time_zone_used(0), + last_insert_id_used(0), insert_id_used(0), clear_next_insert_id(0), + in_lock_tables(0), bootstrap(0), spcont(NULL) { current_arena= this; - host= user= priv_user= db= ip=0; +#ifndef DBUG_OFF + backup_arena= 0; +#endif + host= user= priv_user= db= ip= 0; + catalog= (char*)"std"; // the only catalog we have for now host_or_ip= "connecting host"; locked=some_tables_deleted=no_errors=password= 0; - killed=0; query_start_used= 0; count_cuted_fields= CHECK_FIELD_IGNORE; - db_length= col_access= 0; + killed= NOT_KILLED; + db_length= col_access=0; query_error= tmp_table_used= 0; next_insert_id=last_insert_id=0; open_tables= temporary_tables= handler_tables= derived_tables= 0; @@ -196,7 +203,7 @@ THD::THD() #endif net.last_error[0]=0; // If error on boot ull=0; - system_thread=cleanup_done=0; + system_thread= cleanup_done= abort_on_warning= 0; peer_port= 0; // For SHOW PROCESSLIST transaction.changed_tables = 0; #ifdef __WIN__ @@ -223,9 +230,12 @@ THD::THD() /* Initialize sub structures */ init_sql_alloc(&warn_root, WARN_ALLOC_BLOCK_SIZE, WARN_ALLOC_PREALLOC_SIZE); user_connect=(USER_CONN *)0; - hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, + 
hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, - (hash_free_key) free_user_var,0); + (hash_free_key) free_user_var, 0); + + sp_proc_cache= NULL; + sp_func_cache= NULL; /* For user vars replication*/ if (opt_bin_log) @@ -253,7 +263,7 @@ THD::THD() if (open_cached_file(&transaction.trans_log, mysql_tmpdir, LOG_PREFIX, binlog_cache_size, MYF(MY_WME))) - killed=1; + killed= KILL_CONNECTION; transaction.trans_log.end_of_file= max_binlog_cache_size; } #endif @@ -294,6 +304,7 @@ void THD::init(void) bzero((char*) warn_count, sizeof(warn_count)); total_warn_count= 0; update_charset(); + bzero((char *) &status_var, sizeof(status_var)); } @@ -332,9 +343,11 @@ void THD::change_user(void) cleanup_done= 0; init(); stmt_map.reset(); - hash_init(&user_vars, &my_charset_bin, USER_VARS_HASH_SIZE, 0, 0, + hash_init(&user_vars, system_charset_info, USER_VARS_HASH_SIZE, 0, 0, (hash_get_key) get_var_key, (hash_free_key) free_user_var, 0); + sp_cache_clear(&sp_proc_cache); + sp_cache_clear(&sp_func_cache); } @@ -358,6 +371,8 @@ void THD::cleanup(void) my_free((char*) variables.datetime_format, MYF(MY_ALLOW_ZERO_PTR)); delete_dynamic(&user_var_events); hash_free(&user_vars); + sp_cache_clear(&sp_proc_cache); + sp_cache_clear(&sp_func_cache); if (global_read_lock) unlock_global_read_lock(this); if (ull) @@ -367,6 +382,7 @@ void THD::cleanup(void) pthread_mutex_unlock(&LOCK_user_locks); ull= 0; } + cleanup_done=1; DBUG_VOID_RETURN; } @@ -379,6 +395,7 @@ THD::~THD() /* Ensure that no one is using THD */ pthread_mutex_lock(&LOCK_delete); pthread_mutex_unlock(&LOCK_delete); + add_to_status(&global_status_var, &status_var); /* Close connection */ #ifndef EMBEDDED_LIBRARY @@ -398,6 +415,9 @@ THD::~THD() } #endif + sp_cache_clear(&sp_proc_cache); + sp_cache_clear(&sp_func_cache); + DBUG_PRINT("info", ("freeing host")); if (host != my_localhost) // If not pointer to constant safeFree(host); @@ -410,7 +430,7 @@ THD::~THD() mysys_var=0; // Safety (shouldn't be needed) pthread_mutex_destroy(&LOCK_delete); #ifndef DBUG_OFF - dbug_sentry = THD_SENTRY_GONE; + dbug_sentry= THD_SENTRY_GONE; #endif /* Reset stmt_backup.mem_root to not double-free memory from thd.mem_root */ clear_alloc_root(&stmt_backup.main_mem_root); @@ -418,14 +438,35 @@ THD::~THD() } -void THD::awake(bool prepare_to_die) +/* + Add to one status variable another status variable + + NOTES + This function assumes that all variables are long/ulong. 
+ If this assumption will change, then we have to explictely add + the other variables after the while loop +*/ + +void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var) +{ + ulong *end= (ulong*) ((byte*) to_var + offsetof(STATUS_VAR, + last_system_status_var) + + sizeof(ulong)); + ulong *to= (ulong*) to_var, *from= (ulong*) from_var; + + while (to != end) + *(to++)+= *(from++); +} + + +void THD::awake(THD::killed_state state_to_set) { THD_CHECK_SENTRY(this); safe_mutex_assert_owner(&LOCK_delete); - if (prepare_to_die) - killed = 1; - thr_alarm_kill(real_id); + killed= state_to_set; + if (state_to_set != THD::KILL_QUERY) + thr_alarm_kill(real_id); #ifdef SIGNAL_WITH_VIO_CLOSE close_active_vio(); #endif @@ -484,6 +525,24 @@ bool THD::store_globals() } +/* Cleanup after a query */ + +void THD::cleanup_after_query() +{ + if (clear_next_insert_id) + { + clear_next_insert_id= 0; + next_insert_id= 0; + } + /* Free Items that were created during this execution */ + free_items(free_list); + /* + In the rest of code we assume that free_list never points to garbage: + Keep this predicate true. + */ + free_list= 0; +} + /* Convert a string to another character set @@ -645,8 +704,8 @@ CHANGED_TABLE_LIST* THD::changed_table_dup(const char *key, long key_length) if (!new_table) { my_error(EE_OUTOFMEMORY, MYF(ME_BELL), - ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1); - killed= 1; + ALIGN_SIZE(sizeof(TABLE_LIST)) + key_length + 1); + killed= KILL_CONNECTION; return 0; } @@ -673,15 +732,16 @@ int THD::send_explain_fields(select_result *result) item->maybe_null=1; field_list.push_back(item=new Item_empty_string("key", NAME_LEN, cs)); item->maybe_null=1; - field_list.push_back(item=new Item_return_int("key_len",3, - MYSQL_TYPE_LONGLONG)); + field_list.push_back(item=new Item_empty_string("key_len", + NAME_LEN*MAX_KEY)); item->maybe_null=1; field_list.push_back(item=new Item_empty_string("ref", NAME_LEN*MAX_REF_PARTS, cs)); item->maybe_null=1; field_list.push_back(new Item_return_int("rows", 10, MYSQL_TYPE_LONGLONG)); field_list.push_back(new Item_empty_string("Extra", 255, cs)); - return (result->send_fields(field_list,1)); + return (result->send_fields(field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)); } #ifdef SIGNAL_WITH_VIO_CLOSE @@ -742,10 +802,13 @@ void THD::rollback_item_tree_changes() { I_List_iterator<Item_change_record> it(change_list); Item_change_record *change; + DBUG_ENTER("rollback_item_tree_changes"); + while ((change= it++)) *change->place= change->old_value; /* We can forget about changes memory: it's allocated in runtime memroot */ change_list.empty(); + DBUG_VOID_RETURN; } @@ -760,7 +823,7 @@ select_result::select_result() void select_result::send_error(uint errcode,const char *err) { - ::send_error(thd, errcode, err); + my_message(errcode, err, MYF(0)); } @@ -782,9 +845,9 @@ sql_exchange::sql_exchange(char *name,bool flag) escaped= &default_escaped; } -bool select_send::send_fields(List<Item> &list,uint flag) +bool select_send::send_fields(List<Item> &list, uint flags) { - return thd->protocol->send_fields(&list,flag); + return thd->protocol->send_fields(&list, flags); } /* Send data to client. 
Returns 0 if ok */ @@ -864,7 +927,7 @@ bool select_send::send_eof() void select_to_file::send_error(uint errcode,const char *err) { - ::send_error(thd,errcode,err); + my_message(errcode, err, MYF(0)); if (file > 0) { (void) end_io_cache(&cache); @@ -1188,7 +1251,7 @@ bool select_dump::send_data(List<Item> &items) } if (row_count++ > 1) { - my_error(ER_TOO_MANY_ROWS, MYF(0)); + my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0)); goto err; } while ((item=li++)) @@ -1201,7 +1264,7 @@ bool select_dump::send_data(List<Item> &items) } else if (my_b_write(&cache,(byte*) res->ptr(),res->length())) { - my_error(ER_ERROR_ON_WRITE,MYF(0), path, my_errno); + my_error(ER_ERROR_ON_WRITE, MYF(0), path, my_errno); goto err; } } @@ -1285,7 +1348,7 @@ bool select_max_min_finder_subselect::send_data(List<Item> &items) bool select_max_min_finder_subselect::cmp_real() { Item *maxmin= ((Item_singlerow_subselect *)item)->el(0); - double val1= cache->val(), val2= maxmin->val(); + double val1= cache->val_real(), val2= maxmin->val_real(); if (fmax) return (cache->null_value && !maxmin->null_value) || (!cache->null_value && !maxmin->null_value && @@ -1352,27 +1415,36 @@ bool select_exists_subselect::send_data(List<Item> &items) int select_dumpvar::prepare(List<Item> &list, SELECT_LEX_UNIT *u) { List_iterator_fast<Item> li(list); - List_iterator_fast<LEX_STRING> gl(var_list); + List_iterator_fast<my_var> gl(var_list); Item *item; - LEX_STRING *ls; + + local_vars.empty(); // Clear list if SP + unit= u; + row_count= 0; + if (var_list.elements != list.elements) { - my_error(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, MYF(0)); + my_message(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT, + ER(ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT), MYF(0)); return 1; } - unit=u; while ((item=li++)) { - ls= gl++; - Item_func_set_user_var *xx = new Item_func_set_user_var(*ls,item); - /* - Item_func_set_user_var can't substitute something else on its place => - 0 can be passed as last argument (reference on item) - */ - xx->fix_fields(thd,(TABLE_LIST*) thd->lex->select_lex.table_list.first, - 0); - xx->fix_length_and_dec(); - vars.push_back(xx); + my_var *mv= gl++; + if (mv->local) + (void)local_vars.push_back(new Item_splocal(mv->s, mv->offset)); + else + { + Item_func_set_user_var *xx = new Item_func_set_user_var(mv->s, item); + /* + Item_func_set_user_var can't substitute something else on its place => + 0 can be passed as last argument (reference on item) + */ + xx->fix_fields(thd, (TABLE_LIST*) thd->lex->select_lex.table_list.first, + 0); + xx->fix_length_and_dec(); + vars.push_back(xx); + } } return 0; } @@ -1433,7 +1505,7 @@ Item_arena::Item_arena(bool init_mem_root) Item_arena::Type Item_arena::type() const { - DBUG_ASSERT("Item_arena::type()" == "abstract"); + DBUG_ASSERT(0); /* Should never be called */ return STATEMENT; } @@ -1449,7 +1521,8 @@ Statement::Statement(THD *thd) allow_sum_func(0), lex(&main_lex), query(0), - query_length(0) + query_length(0), + cursor(0) { name.str= NULL; } @@ -1467,7 +1540,8 @@ Statement::Statement() allow_sum_func(0), /* initialized later */ lex(&main_lex), query(0), /* these two are set */ - query_length(0) /* in alloc_query() */ + query_length(0), /* in alloc_query() */ + cursor(0) { } @@ -1486,6 +1560,7 @@ void Statement::set_statement(Statement *stmt) lex= stmt->lex; query= stmt->query; query_length= stmt->query_length; + cursor= stmt->cursor; } @@ -1510,8 +1585,8 @@ void THD::end_statement() lex_end(lex); delete lex->result; lex->result= 0; - free_items(free_list); - free_list= 0; + /* Note that 
free_list is freed in cleanup_after_query() */ + /* Don't free mem_root, as mem_root is freed in the end of dispatch_command (once for any command). @@ -1522,8 +1597,12 @@ void THD::end_statement() void Item_arena::set_n_backup_item_arena(Item_arena *set, Item_arena *backup) { DBUG_ENTER("Item_arena::set_n_backup_item_arena"); + DBUG_ASSERT(backup_arena == 0); backup->set_item_arena(this); set_item_arena(set); +#ifndef DBUG_OFF + backup_arena= 1; +#endif DBUG_VOID_RETURN; } @@ -1533,6 +1612,9 @@ void Item_arena::restore_backup_item_arena(Item_arena *set, Item_arena *backup) DBUG_ENTER("Item_arena::restore_backup_item_arena"); set->set_item_arena(this); set_item_arena(backup); +#ifndef DBUG_OFF + backup_arena= 0; +#endif #ifdef NOT_NEEDED_NOW /* Reset backup mem_root to avoid its freeing. @@ -1623,8 +1705,19 @@ int Statement_map::insert(Statement *statement) bool select_dumpvar::send_data(List<Item> &items) { List_iterator_fast<Item_func_set_user_var> li(vars); + List_iterator_fast<Item_splocal> var_li(local_vars); + List_iterator_fast<my_var> my_li(var_list); + List_iterator_fast<Item> it(items); Item_func_set_user_var *xx; + Item_splocal *yy; + Item *item; + my_var *zz; DBUG_ENTER("send_data"); + if (unit->offset_limit_cnt) + { // using limit offset,count + unit->offset_limit_cnt--; + DBUG_RETURN(0); + } if (unit->offset_limit_cnt) { // Using limit offset,count @@ -1633,29 +1726,38 @@ bool select_dumpvar::send_data(List<Item> &items) } if (row_count++) { - my_error(ER_TOO_MANY_ROWS, MYF(0)); + my_message(ER_TOO_MANY_ROWS, ER(ER_TOO_MANY_ROWS), MYF(0)); DBUG_RETURN(1); } - while ((xx=li++)) + while ((zz=my_li++) && (item=it++)) { - xx->check(); - xx->update(); + if (zz->local) + { + if ((yy=var_li++)) + { + if (thd->spcont->set_item_eval(yy->get_offset(), item, zz->type)) + DBUG_RETURN(1); + } + } + else + { + if ((xx=li++)) + { + xx->check(); + xx->update(); + } + } } DBUG_RETURN(0); } bool select_dumpvar::send_eof() { - if (row_count) - { - ::send_ok(thd,row_count); - return 0; - } - else - { - my_error(ER_EMPTY_QUERY,MYF(0)); - return 1; - } + if (! 
row_count) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_SP_FETCH_NO_DATA, ER(ER_SP_FETCH_NO_DATA)); + ::send_ok(thd,row_count); + return 0; } /**************************************************************************** @@ -1667,4 +1769,29 @@ void TMP_TABLE_PARAM::init() field_count= sum_func_count= func_count= hidden_field_count= 0; group_parts= group_length= group_null_parts= 0; quick_group= 1; + table_charset= 0; +} + + +void thd_increment_bytes_sent(ulong length) +{ + current_thd->status_var.bytes_sent+= length; +} + + +void thd_increment_bytes_received(ulong length) +{ + current_thd->status_var.bytes_received+= length; +} + + +void thd_increment_net_big_packet_count(ulong length) +{ + current_thd->status_var.net_big_packet_count+= length; +} + + +void THD::set_status_var_init() +{ + bzero((char*) &status_var, sizeof(status_var)); } diff --git a/sql/sql_class.h b/sql/sql_class.h index f7d25077238..1cf9f5b3f48 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -26,6 +26,9 @@ class Query_log_event; class Load_log_event; class Slave_log_event; +class Format_description_log_event; +class sp_rcontext; +class sp_cache; enum enum_enable_or_disable { LEAVE_AS_IS, ENABLE, DISABLE }; enum enum_ha_read_modes { RFIRST, RNEXT, RPREV, RLAST, RKEY, RNEXT_SAME }; @@ -38,6 +41,7 @@ enum enum_check_fields { CHECK_FIELD_IGNORE, CHECK_FIELD_WARN, CHECK_FIELD_ERROR_FOR_NULL }; extern char internal_table_name[2]; +extern const char **errmesg; /* log info errors */ #define LOG_INFO_EOF -1 @@ -97,7 +101,14 @@ class MYSQL_LOG enum cache_type io_cache_type; bool write_error, inited; bool need_start_event; - bool no_auto_events; // For relay binlog + /* + no_auto_events means we don't want any of these automatic events : + Start/Rotate/Stop. That is, in 4.x when we rotate a relay log, we don't want + a Rotate_log event to be written to the relay log. When we start a relay log + etc. So in 4.x this is 1 for relay logs, 0 for binlogs. + In 5.0 it's 0 for relay logs too! + */ + bool no_auto_events; /* The max size before rotation (usable only if log_type == LOG_BIN: binary logs and relay logs). @@ -114,6 +125,18 @@ class MYSQL_LOG public: MYSQL_LOG(); ~MYSQL_LOG(); + + /* + These describe the log's format. This is used only for relay logs. + _for_exec is used by the SQL thread, _for_queue by the I/O thread. It's + necessary to have 2 distinct objects, because the I/O thread may be reading + events in a different format from what the SQL thread is reading (consider + the case of a master which has been upgraded from 5.0 to 5.1 without doing + RESET MASTER, or from 4.x to 5.0). + */ + Format_description_log_event *description_event_for_exec, + *description_event_for_queue; + void reset_bytes_written() { bytes_written = 0; @@ -142,7 +165,8 @@ public: bool open(const char *log_name,enum_log_type log_type, const char *new_name, const char *index_file_name_arg, enum cache_type io_cache_type_arg, - bool no_auto_events_arg, ulong max_size); + bool no_auto_events_arg, ulong max_size, + bool null_created); void new_file(bool need_lock= 1); bool write(THD *thd, enum enum_server_command command, const char *format,...); @@ -205,6 +229,9 @@ typedef struct st_copy_info { /* for INSERT ... UPDATE */ List<Item> *update_fields; List<Item> *update_values; +/* for VIEW ... 
WITH CHECK OPTION */ + TABLE_LIST *view; + bool ignore; } COPY_INFO; @@ -351,6 +378,7 @@ struct system_variables ulonglong myisam_max_sort_file_size; ha_rows select_limit; ha_rows max_join_size; + ulong auto_increment_increment, auto_increment_offset; ulong bulk_insert_buff_size; ulong join_buff_size; ulong long_query_time; @@ -361,6 +389,7 @@ struct system_variables ulong max_sort_length; ulong max_tmp_tables; ulong max_insert_delayed_threads; + ulong multi_range_count; ulong myisam_repair_threads; ulong myisam_sort_buff_size; ulong net_buffer_length; @@ -369,6 +398,8 @@ struct system_variables ulong net_retry_count; ulong net_wait_timeout; ulong net_write_timeout; + ulong optimizer_prune_level; + ulong optimizer_search_depth; ulong preload_buff_size; ulong query_cache_type; ulong read_buff_size; @@ -379,6 +410,8 @@ struct system_variables ulong tx_isolation; /* Determines which non-standard SQL behaviour should be enabled */ ulong sql_mode; + /* check of key presence in updatable view */ + ulong updatable_views_with_limit; ulong default_week_format; ulong max_seeks_for_key; ulong range_alloc_block_size; @@ -425,6 +458,61 @@ struct system_variables DATE_TIME_FORMAT *time_format; }; + +/* per thread status variables */ + +typedef struct system_status_var +{ + ulong bytes_received; + ulong bytes_sent; + ulong com_other; + ulong com_stat[(uint) SQLCOM_END]; + ulong created_tmp_disk_tables; + ulong created_tmp_tables; + ulong ha_commit_count; + ulong ha_delete_count; + ulong ha_read_first_count; + ulong ha_read_last_count; + ulong ha_read_key_count; + ulong ha_read_next_count; + ulong ha_read_prev_count; + ulong ha_read_rnd_count; + ulong ha_read_rnd_next_count; + ulong ha_rollback_count; + ulong ha_update_count; + ulong ha_write_count; + + /* KEY_CACHE parts. These are copies of the original */ + ulong key_blocks_changed; + ulong key_blocks_used; + ulong key_cache_r_requests; + ulong key_cache_read; + ulong key_cache_w_requests; + ulong key_cache_write; + /* END OF KEY_CACHE parts */ + + ulong net_big_packet_count; + ulong opened_tables; + ulong select_full_join_count; + ulong select_full_range_join_count; + ulong select_range_count; + ulong select_range_check_count; + ulong select_scan_count; + ulong long_query_count; + ulong filesort_merge_passes; + ulong filesort_range_count; + ulong filesort_rows; + ulong filesort_scan_count; +} STATUS_VAR; + +/* + This is used for 'show status'. It must be updated to the last ulong + variable in system_status_var +*/ + +#define last_system_status_var filesort_scan_count + + void free_tmp_table(THD *thd, TABLE *entry); @@ -438,6 +526,9 @@ public: Item *free_list; MEM_ROOT main_mem_root; MEM_ROOT *mem_root; // Pointer to current memroot +#ifndef DBUG_OFF + bool backup_arena; +#endif enum enum_state { INITIALIZED= 0, PREPARED= 1, EXECUTED= 3, CONVENTIONAL_EXECUTION= 2, @@ -474,7 +565,7 @@ public: inline bool is_first_stmt_execute() const { return state == PREPARED; } inline bool is_stmt_execute() const { return state == PREPARED || state == EXECUTED; } - inline bool is_conventional_execution() const + inline bool is_conventional() const { return state == CONVENTIONAL_EXECUTION; } inline gptr alloc(unsigned int size) { return alloc_root(mem_root,size); } inline gptr calloc(unsigned int size) @@ -504,6 +595,8 @@ public: }; +class Cursor; + /* State of a single command executed against this connection. 
One connection can contain a lot of simultaneously running statements, @@ -577,6 +670,7 @@ public: */ char *query; uint32 query_length; // current query length + Cursor *cursor; public: @@ -682,7 +776,7 @@ typedef I_List<Item_change_record> Item_change_list; a thread/connection descriptor */ -class THD :public ilink, +class THD :public ilink, public Statement { public: @@ -707,6 +801,7 @@ public: struct sockaddr_in remote; // client socket address struct rand_struct rand; // used for authentication struct system_variables variables; // Changeable local variables + struct system_status_var status_var; // Per thread statistic vars pthread_mutex_t LOCK_delete; // Locked before thd is deleted /* all prepared statements and cursors of this connection */ Statement_map stmt_map; @@ -727,9 +822,17 @@ public: the connection priv_user - The user privilege we are using. May be '' for anonymous user. db - currently selected database + catalog - currently selected catalog ip - client IP + WARNING: some members of THD (currently 'db', 'catalog' and 'query') are + set and alloced by the slave SQL thread (for the THD of that thread); that + thread is (and must remain, for now) the only responsible for freeing these + 3 members. If you add members here, and you add code to set them in + replication, don't forget to free_them_and_set_them_to_0 in replication + properly. For details see the 'err:' label of the pthread_handler_decl of + the slave SQL thread, in sql/slave.cc. */ - char *host,*user,*priv_user,*db,*ip; + char *host,*user,*priv_user,*db,*catalog,*ip; char priv_host[MAX_HOSTNAME]; /* remote (peer) port */ uint16 peer_port; @@ -844,6 +947,8 @@ public: generated auto_increment value in handler.cc */ ulonglong next_insert_id; + /* Remember last next_insert_id to reset it if something went wrong */ + ulonglong prev_insert_id; /* The insert_id used for the last statement or set by SET LAST_INSERT_ID=# or SELECT LAST_INSERT_ID(#). 
Used for binary log and returned by @@ -898,6 +1003,9 @@ public: /* for user variables replication*/ DYNAMIC_ARRAY user_var_events; + enum killed_state { NOT_KILLED=0, KILL_BAD_DATA=1, KILL_CONNECTION=ER_SERVER_SHUTDOWN, KILL_QUERY=ER_QUERY_INTERRUPTED }; + killed_state volatile killed; + /* scramble - random string sent to client on handshake */ char scramble[SCRAMBLE_LENGTH+1]; @@ -905,14 +1013,18 @@ public: bool locked, some_tables_deleted; bool last_cuted_field; bool no_errors, password, is_fatal_error; - bool query_start_used,last_insert_id_used,insert_id_used,rand_used; - bool time_zone_used; + bool query_start_used, rand_used, time_zone_used; + bool last_insert_id_used,insert_id_used, clear_next_insert_id; bool in_lock_tables; bool query_error, bootstrap, cleanup_done; bool tmp_table_used; bool charset_is_system_charset, charset_is_collation_connection; bool slow_command; - my_bool volatile killed; + bool no_trans_update, abort_on_warning; + longlong row_count_func; /* For the ROW_COUNT() function */ + sp_rcontext *spcont; // SP runtime context + sp_cache *sp_proc_cache; + sp_cache *sp_func_cache; /* If we do a purge of binary logs, log index info of the threads @@ -944,6 +1056,7 @@ public: void init_for_queries(); void change_user(void); void cleanup(void); + void cleanup_after_query(); bool store_globals(); #ifdef SIGNAL_WITH_VIO_CLOSE inline void set_active_vio(Vio* vio) @@ -960,7 +1073,7 @@ public: } void close_active_vio(); #endif - void awake(bool prepare_to_die); + void awake(THD::killed_state state_to_set); /* For enter_cond() / exit_cond() to work the mutex must be got before enter_cond() (in 4.1 an assertion will soon ensure this); this mutex is @@ -1024,8 +1137,16 @@ public: return 0; #endif } - inline gptr trans_alloc(unsigned int size) - { + inline bool only_prepare() + { + return command == COM_PREPARE; + } + inline bool fill_derived_tables() + { + return !only_prepare() && !lex->only_view_structure(); + } + inline gptr trans_alloc(unsigned int size) + { return alloc_root(&transaction.mem_root,size); } @@ -1045,6 +1166,7 @@ public: net.last_error[0]= 0; net.last_errno= 0; net.report_error= 0; + query_error= 0; } inline bool vio_ok() const { return net.vio != 0; } #else @@ -1066,7 +1188,7 @@ public: use new arena if we are in a prepared statements and we have not already changed to use this arena. */ - if (current_arena->is_stmt_prepare() && + if (!current_arena->is_conventional() && mem_root != ¤t_arena->main_mem_root) { set_n_backup_item_arena(current_arena, backup); @@ -1078,7 +1200,7 @@ public: void change_item_tree(Item **place, Item *new_value) { /* TODO: check for OOM condition here */ - if (!current_arena->is_conventional_execution()) + if (!current_arena->is_conventional()) nocheck_register_item_tree_change(place, *place, mem_root); *place= new_value; } @@ -1091,6 +1213,23 @@ public: state after execution of a non-prepared SQL statement. */ void end_statement(); + inline int killed_errno() const + { + return killed != KILL_BAD_DATA ? 
killed : 0; + } + inline void send_kill_message() const + { + int err= killed_errno(); + my_message(err, ER(err), MYF(0)); + } + /* return TRUE if we will abort query if we make a warning now */ + inline bool really_abort_on_warning() + { + return (abort_on_warning && + (!no_trans_update || + (variables.sql_mode & MODE_STRICT_ALL_TABLES))); + } + void set_status_var_init(); }; #define tmp_disable_binlog(A) \ @@ -1129,8 +1268,6 @@ public: class JOIN; -void send_error(THD *thd, uint sql_errno=0, const char *err=0); - class select_result :public Sql_alloc { protected: THD *thd; @@ -1150,11 +1287,12 @@ public: */ virtual uint field_count(List<Item> &fields) const { return fields.elements; } - virtual bool send_fields(List<Item> &list,uint flag)=0; + virtual bool send_fields(List<Item> &list, uint flags)=0; virtual bool send_data(List<Item> &items)=0; virtual bool initialize_tables (JOIN *join=0) { return 0; } virtual void send_error(uint errcode,const char *err); virtual bool send_eof()=0; + virtual bool simple_select() { return 0; } virtual void abort() {} /* Cleanup instance of this class for next execution of a prepared @@ -1181,9 +1319,10 @@ public: class select_send :public select_result { public: select_send() {} - bool send_fields(List<Item> &list,uint flag); + bool send_fields(List<Item> &list, uint flags); bool send_data(List<Item> &items); bool send_eof(); + bool simple_select() { return 1; } }; @@ -1227,30 +1366,17 @@ public: class select_insert :public select_result_interceptor { public: + TABLE_LIST *table_list; TABLE *table; List<Item> *fields; ulonglong last_insert_id; COPY_INFO info; + bool insert_into_view; - select_insert(TABLE *table_par, List<Item> *fields_par, - enum_duplicates duplic, bool ignore) - :table(table_par), fields(fields_par), last_insert_id(0) - { - bzero((char*) &info,sizeof(info)); - info.ignore= ignore; - info.handle_duplicates=duplic; - } - select_insert(TABLE *table_par, List<Item> *fields_par, + select_insert(TABLE_LIST *table_list_par, + TABLE *table_par, List<Item> *fields_par, List<Item> *update_fields, List<Item> *update_values, - enum_duplicates duplic, bool ignore) - :table(table_par), fields(fields_par), last_insert_id(0) - { - bzero((char*) &info,sizeof(info)); - info.ignore= ignore; - info.handle_duplicates= duplic; - info.update_fields= update_fields; - info.update_values= update_values; - } + enum_duplicates duplic, bool ignore); ~select_insert(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); @@ -1264,22 +1390,21 @@ class select_insert :public select_result_interceptor { class select_create: public select_insert { ORDER *group; - const char *db; - const char *name; + TABLE_LIST *create_table; List<create_field> *extra_fields; List<Key> *keys; HA_CREATE_INFO *create_info; MYSQL_LOCK *lock; Field **field; public: - select_create(const char *db_name, const char *table_name, - HA_CREATE_INFO *create_info_par, - List<create_field> &fields_par, - List<Key> &keys_par, - List<Item> &select_fields,enum_duplicates duplic, bool ignore) - :select_insert (NULL, &select_fields, duplic, ignore), db(db_name), - name(table_name), extra_fields(&fields_par),keys(&keys_par), - create_info(create_info_par), lock(0) + select_create (TABLE_LIST *table, + HA_CREATE_INFO *create_info_par, + List<create_field> &fields_par, + List<Key> &keys_par, + List<Item> &select_fields,enum_duplicates duplic, bool ignore) + :select_insert (NULL, NULL, &select_fields, 0, 0, duplic, ignore), create_table(table), + 
extra_fields(&fields_par),keys(&keys_par), create_info(create_info_par), + lock(0) {} int prepare(List<Item> &list, SELECT_LEX_UNIT *u); void store_values(List<Item> &values); @@ -1317,6 +1442,7 @@ public: bool using_indirect_summary_function; /* If >0 convert all blob fields to varchar(convert_blob_length) */ uint convert_blob_length; + CHARSET_INFO *table_charset; TMP_TABLE_PARAM() :copy_funcs_it(copy_funcs), copy_field(0), group_parts(0), @@ -1463,8 +1589,13 @@ class user_var_entry DTCollation collation; }; - -/* Class for unique (removing of duplicates) */ +/* + Unique -- class for unique (removing of duplicates). + Puts all values to the TREE. If the tree becomes too big, + it's dumped to the file. User can request sorted values, or + just iterate through them. In the last case tree merging is performed in + memory simultaneously with iteration, so it should be ~2-3x faster. + */ class Unique :public Sql_alloc { @@ -1478,10 +1609,10 @@ class Unique :public Sql_alloc public: ulong elements; - Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, + Unique(qsort_cmp2 comp_func, void *comp_func_fixed_arg, uint size_arg, ulong max_in_memory_size_arg); ~Unique(); - inline bool unique_add(gptr ptr) + inline bool unique_add(void *ptr) { if (tree.elements_in_tree > max_elements && flush()) return 1; @@ -1489,6 +1620,18 @@ public: } bool get(TABLE *table); + static double get_use_cost(uint *buffer, uint nkeys, uint key_size, + ulong max_in_memory_size); + inline static int get_cost_calc_buff_size(ulong nkeys, uint key_size, + ulong max_in_memory_size) + { + register ulong max_elems_in_tree= + (1 + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size)); + return sizeof(uint)*(1 + nkeys/max_elems_in_tree); + } + + void reset(); + bool walk(tree_walk_action action, void *walk_action_arg); friend int unique_write_to_file(gptr key, element_count count, Unique *unique); friend int unique_write_to_ptrs(gptr key, element_count count, Unique *unique); @@ -1518,7 +1661,9 @@ public: class multi_update :public select_result_interceptor { - TABLE_LIST *all_tables, *update_tables, *table_being_updated; + TABLE_LIST *all_tables; /* query/update command tables */ + TABLE_LIST *leaves; /* list of leves of join table tree */ + TABLE_LIST *update_tables, *table_being_updated; THD *thd; TABLE **tmp_tables, *main_table, *table_to_update; TMP_TABLE_PARAM *tmp_table_param; @@ -1531,8 +1676,9 @@ class multi_update :public select_result_interceptor bool do_update, trans_safe, transactional_tables, log_delayed, ignore; public: - multi_update(THD *thd_arg, TABLE_LIST *ut, List<Item> *fields, - List<Item> *values, enum_duplicates handle_duplicates, bool ignore); + multi_update(THD *thd_arg, TABLE_LIST *ut, TABLE_LIST *leaves_list, + List<Item> *fields, List<Item> *values, + enum_duplicates handle_duplicates, bool ignore); ~multi_update(); int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); @@ -1542,16 +1688,32 @@ public: bool send_eof(); }; +class my_var : public Sql_alloc { +public: + LEX_STRING s; + bool local; + uint offset; + enum_field_types type; + my_var (LEX_STRING& j, bool i, uint o, enum_field_types t) + :s(j), local(i), offset(o), type(t) + {} + ~my_var() {} +}; class select_dumpvar :public select_result_interceptor { ha_rows row_count; public: - List<LEX_STRING> var_list; + List<my_var> var_list; List<Item_func_set_user_var> vars; - select_dumpvar(void) { var_list.empty(); vars.empty(); row_count=0;} + List<Item_splocal> local_vars; + select_dumpvar(void) { 
var_list.empty(); local_vars.empty(); vars.empty(); row_count=0;} ~select_dumpvar() {} int prepare(List<Item> &list, SELECT_LEX_UNIT *u); bool send_data(List<Item> &items); bool send_eof(); void cleanup(); }; + +/* Functions in sql_class.cc */ + +void add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var); diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 39c3f8586ed..d3b30de0bcd 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -19,6 +19,7 @@ #include "mysql_priv.h" #include <mysys_err.h> +#include "sp.h" #include <my_dir.h> #include <m_ctype.h> #ifdef __WIN__ @@ -32,7 +33,8 @@ static TYPELIB deletable_extentions= static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, const char *path, uint level); - +static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, const char *org_path); +static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error); /* Database options hash */ static HASH dboptions; static my_bool dboptions_init= 0; @@ -371,13 +373,13 @@ bool load_db_opt(THD *thd, const char *path, HA_CREATE_INFO *create) In this case the entry should not be logged. RETURN VALUES - 0 ok - -1 Error + FALSE ok + TRUE Error */ -int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, - bool silent) +bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, + bool silent) { char path[FN_REFLEN+16]; long result= 1; @@ -386,6 +388,13 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, uint create_options= create_info ? create_info->options : 0; uint path_len; DBUG_ENTER("mysql_create_db"); + + /* do not create 'information_schema' db */ + if (!my_strcasecmp(system_charset_info, db, information_schema_name.str)) + { + my_error(ER_DB_CREATE_EXISTS, MYF(0), db); + DBUG_RETURN(-1); + } VOID(pthread_mutex_lock(&LOCK_mysql_create_db)); @@ -405,7 +414,7 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, { if (!(create_options & HA_LEX_CREATE_IF_NOT_EXISTS)) { - my_error(ER_DB_CREATE_EXISTS,MYF(0),db); + my_error(ER_DB_CREATE_EXISTS, MYF(0), db); error= -1; goto exit; } @@ -415,12 +424,12 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, { if (my_errno != ENOENT) { - my_error(EE_STAT, MYF(0),path,my_errno); + my_error(EE_STAT, MYF(0), path, my_errno); goto exit; } if (my_mkdir(path,0777,MYF(0)) < 0) { - my_error(ER_CANT_CREATE_DB,MYF(0),db,my_errno); + my_error(ER_CANT_CREATE_DB, MYF(0), db, my_errno); error= -1; goto exit; } @@ -463,7 +472,6 @@ int mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create_info, query= thd->query; query_length= thd->query_length; } - mysql_update_log.write(thd, query, query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, query, query_length, 0, @@ -504,7 +512,7 @@ exit2: /* db-name is already validated when we come here */ -int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) +bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) { char path[FN_REFLEN+16]; long result=1; @@ -535,7 +543,6 @@ int mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create_info) thd->variables.collation_database= thd->db_charset; } - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0, @@ -558,7 +565,7 @@ exit: start_waiting_global_read_lock(thd); exit2: VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); - DBUG_RETURN(error ? 
-1 : 0); /* -1 to delegate send_error() */ + DBUG_RETURN(error); } @@ -574,11 +581,11 @@ exit2: silent Don't generate errors RETURN - 0 ok (Database dropped) - -1 Error generated + FALSE ok (Database dropped) + ERROR Error */ -int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) +bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) { long deleted=0; int error= 0; @@ -608,7 +615,7 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) if (!if_exists) { error= -1; - my_error(ER_DB_DROP_EXISTS,MYF(0),db); + my_error(ER_DB_DROP_EXISTS, MYF(0), db); goto exit; } else @@ -652,7 +659,6 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) query =thd->query; query_length= thd->query_length; } - mysql_update_log.write(thd, query, query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, query, query_length, 0, @@ -672,6 +678,7 @@ int mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) } exit: + (void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */ start_waiting_global_read_lock(thd); /* If this database was the client's selected database, we silently change the @@ -740,9 +747,9 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, continue; /* Check if file is a raid directory */ - if ((my_isdigit(&my_charset_latin1, file->name[0]) || + if ((my_isdigit(system_charset_info, file->name[0]) || (file->name[0] >= 'a' && file->name[0] <= 'f')) && - (my_isdigit(&my_charset_latin1, file->name[1]) || + (my_isdigit(system_charset_info, file->name[1]) || (file->name[1] >= 'a' && file->name[1] <= 'f')) && !file->name[2] && !level) { @@ -768,6 +775,25 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, found_other_files++; continue; } + else if (file->name[0] == 'a' && file->name[1] == 'r' && + file->name[2] == 'c' && file->name[3] == '\0') + { + /* .frm archive */ + char newpath[FN_REFLEN], *copy_of_path; + MY_DIR *new_dirp; + uint length; + strxmov(newpath, org_path, "/", "arc", NullS); + length= unpack_filename(newpath, newpath); + if ((new_dirp = my_dir(newpath, MYF(MY_DONT_SORT)))) + { + DBUG_PRINT("my",("Archive subdir found: %s", newpath)); + if ((mysql_rm_arc_files(thd, new_dirp, newpath)) < 0) + goto err; + continue; + } + found_other_files++; + continue; + } extension= fn_ext(file->name); if (find_type(extension, &deletable_extentions,1+2) <= 0) { @@ -790,7 +816,7 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, table_list->alias= table_list->real_name; // If lower_case_table_names=2 /* Link into list */ (*tot_list_next)= table_list; - tot_list_next= &table_list->next; + tot_list_next= &table_list->next_local; deleted++; } else @@ -827,44 +853,140 @@ static long mysql_rm_known_files(THD *thd, MY_DIR *dirp, const char *db, } else { - char tmp_path[FN_REFLEN], *pos; - char *path= tmp_path; - unpack_filename(tmp_path,org_path); + /* Don't give errors if we can't delete 'RAID' directory */ + if (rm_dir_w_symlink(org_path, level == 0)) + DBUG_RETURN(-1); + } + + DBUG_RETURN(deleted); + +err: + my_dirend(dirp); + DBUG_RETURN(-1); +} + + +/* + Remove directory with symlink + + SYNOPSIS + rm_dir_w_symlink() + org_path path of derictory + send_error send errors + RETURN + 0 OK + 1 ERROR +*/ + +static my_bool rm_dir_w_symlink(const char *org_path, my_bool send_error) +{ + char tmp_path[FN_REFLEN], tmp2_path[FN_REFLEN], *pos; + char *path= tmp_path; + DBUG_ENTER("rm_dir_w_symlink"); + unpack_filename(tmp_path, org_path); #ifdef HAVE_READLINK - int error; - - /* 
Remove end FN_LIBCHAR as this causes problem on Linux in readlink */ - pos=strend(path); - if (pos > path && pos[-1] == FN_LIBCHAR) - *--pos=0; + int error; - if ((error=my_readlink(filePath, path, MYF(MY_WME))) < 0) - DBUG_RETURN(-1); - if (!error) + /* Remove end FN_LIBCHAR as this causes problem on Linux in readlink */ + pos= strend(path); + if (pos > path && pos[-1] == FN_LIBCHAR) + *--pos=0; + + if ((error= my_readlink(tmp2_path, path, MYF(MY_WME))) < 0) + DBUG_RETURN(1); + if (!error) + { + if (my_delete(path, MYF(send_error ? MY_WME : 0))) { - if (my_delete(path,MYF(!level ? MY_WME : 0))) - { - /* Don't give errors if we can't delete 'RAID' directory */ - if (level) - DBUG_RETURN(deleted); - DBUG_RETURN(-1); - } - /* Delete directory symbolic link pointed at */ - path= filePath; + DBUG_RETURN(send_error); } + /* Delete directory symbolic link pointed at */ + path= tmp2_path; + } #endif - /* Remove last FN_LIBCHAR to not cause a problem on OS/2 */ - pos=strend(path); + /* Remove last FN_LIBCHAR to not cause a problem on OS/2 */ + pos= strend(path); - if (pos > path && pos[-1] == FN_LIBCHAR) - *--pos=0; - /* Don't give errors if we can't delete 'RAID' directory */ - if (rmdir(path) < 0 && !level) + if (pos > path && pos[-1] == FN_LIBCHAR) + *--pos=0; + if (rmdir(path) < 0 && send_error) + { + my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno); + DBUG_RETURN(-1); + } + DBUG_RETURN(0); +} + + +/* + Remove .frm archives from directory + + SYNOPSIS + thd thread handler + dirp list of files in archive directory + db data base name + org_path path of archive directory + + RETURN + > 0 number of removed files + -1 error +*/ +static long mysql_rm_arc_files(THD *thd, MY_DIR *dirp, + const char *org_path) +{ + long deleted= 0; + ulong found_other_files= 0; + char filePath[FN_REFLEN]; + DBUG_ENTER("mysql_rm_arc_files"); + DBUG_PRINT("enter", ("path: %s", org_path)); + + for (uint idx=0 ; + idx < (uint) dirp->number_off_files && !thd->killed ; + idx++) + { + FILEINFO *file=dirp->dir_entry+idx; + char *extension, *revision; + DBUG_PRINT("info",("Examining: %s", file->name)); + + /* skiping . and .. */ + if (file->name[0] == '.' && (!file->name[1] || + (file->name[1] == '.' && !file->name[2]))) + continue; + + extension= fn_ext(file->name); + if (extension[0] != '.' 
|| + extension[1] != 'f' || extension[2] != 'r' || + extension[3] != 'm' || extension[4] != '-') { - my_error(ER_DB_DROP_RMDIR, MYF(0), path, errno); - DBUG_RETURN(-1); + found_other_files++; + continue; + } + revision= extension+5; + while (*revision && my_isdigit(system_charset_info, *revision)) + revision++; + if (*revision) + { + found_other_files++; + continue; + } + strxmov(filePath, org_path, "/", file->name, NullS); + if (my_delete_with_symlink(filePath,MYF(MY_WME))) + { + goto err; } } + if (thd->killed) + goto err; + + my_dirend(dirp); + + /* + If the directory is a symbolic link, remove the link first, then + remove the directory the symbolic link pointed at + */ + if (!found_other_files && + rm_dir_w_symlink(org_path, 0)) + DBUG_RETURN(-1); DBUG_RETURN(deleted); err: @@ -902,6 +1024,7 @@ bool mysql_change_db(THD *thd, const char *name) char *dbname=my_strdup((char*) name,MYF(MY_WME)); char path[FN_REFLEN]; HA_CREATE_INFO create; + bool schema_db= 0; #ifndef NO_EMBEDDED_ACCESS_CHECKS ulong db_access; #endif @@ -910,16 +1033,26 @@ bool mysql_change_db(THD *thd, const char *name) if (!dbname || !(db_length= strlen(dbname))) { x_free(dbname); /* purecov: inspected */ - send_error(thd,ER_NO_DB_ERROR); /* purecov: inspected */ + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), + MYF(0)); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } if (check_db_name(dbname)) { - net_printf(thd, ER_WRONG_DB_NAME, dbname); + my_error(ER_WRONG_DB_NAME, MYF(0), dbname); x_free(dbname); DBUG_RETURN(1); } DBUG_PRINT("info",("Use database: %s", dbname)); + if (!my_strcasecmp(system_charset_info, dbname, information_schema_name.str)) + { + schema_db= 1; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + db_access= SELECT_ACL; +#endif + goto end; + } + #ifndef NO_EMBEDDED_ACCESS_CHECKS if (test_all_bits(thd->master_access,DB_ACLS)) db_access=DB_ACLS; @@ -928,10 +1061,10 @@ bool mysql_change_db(THD *thd, const char *name) thd->master_access); if (!(db_access & DB_ACLS) && (!grant_option || check_grant_db(thd,dbname))) { - net_printf(thd,ER_DBACCESS_DENIED_ERROR, - thd->priv_user, - thd->priv_host, - dbname); + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + thd->priv_user, + thd->priv_host, + dbname); mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), thd->priv_user, thd->priv_host, @@ -946,10 +1079,11 @@ bool mysql_change_db(THD *thd, const char *name) path[length-1]=0; // remove ending '\' if (access(path,F_OK)) { - net_printf(thd,ER_BAD_DB_ERROR,dbname); + my_error(ER_BAD_DB_ERROR, MYF(0), dbname); my_free(dbname,MYF(0)); DBUG_RETURN(1); } +end: send_ok(thd); x_free(thd->db); thd->db=dbname; // THD::~THD will free this @@ -957,11 +1091,19 @@ bool mysql_change_db(THD *thd, const char *name) #ifndef NO_EMBEDDED_ACCESS_CHECKS thd->db_access=db_access; #endif - strmov(path+unpack_dirname(path,path), MY_DB_OPT_FILE); - load_db_opt(thd, path, &create); - thd->db_charset= create.default_table_charset ? - create.default_table_charset : - thd->variables.collation_server; - thd->variables.collation_database= thd->db_charset; + if (schema_db) + { + thd->db_charset= system_charset_info; + thd->variables.collation_database= system_charset_info; + } + else + { + strmov(path+unpack_dirname(path,path), MY_DB_OPT_FILE); + load_db_opt(thd, path, &create); + thd->db_charset= create.default_table_charset ? 
+ create.default_table_charset : + thd->variables.collation_server; + thd->variables.collation_database= thd->db_charset; + } DBUG_RETURN(0); } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 8b4a0f0f6d0..8edfc08fa82 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -26,9 +26,11 @@ #include "mysql_priv.h" #include "ha_innodb.h" #include "sql_select.h" +#include "sp_head.h" +#include "sql_trigger.h" -int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, - ha_rows limit, ulong options) +bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, + SQL_LIST *order, ha_rows limit, ulong options) { int error; TABLE *table; @@ -37,31 +39,45 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, bool using_limit=limit != HA_POS_ERROR; bool transactional_table, log_delayed, safe_update, const_cond; ha_rows deleted; + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_delete"); - if ((open_and_lock_tables(thd, table_list))) + if (open_and_lock_tables(thd, table_list)) + DBUG_RETURN(TRUE); + if (!(table= table_list->table)) + { + DBUG_ASSERT(table_list->view && + table_list->ancestor && table_list->ancestor->next_local); + my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), + table_list->view_db.str, table_list->view_name.str); DBUG_RETURN(-1); - table= table_list->table; + } table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); thd->proc_info="init"; table->map=1; - if ((error= mysql_prepare_delete(thd, table_list, &conds))) - DBUG_RETURN(error); + if (mysql_prepare_delete(thd, table_list, &conds)) + DBUG_RETURN(TRUE); const_cond= (!conds || conds->const_item()); safe_update=test(thd->options & OPTION_SAFE_UPDATES); if (safe_update && const_cond) { - send_error(thd,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); - DBUG_RETURN(1); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); + DBUG_RETURN(TRUE); } - thd->lex->select_lex.no_error= thd->lex->ignore; + select_lex->no_error= thd->lex->ignore; - /* Test if the user wants to delete all rows */ + /* + Test if the user wants to delete all rows and deletion doesn't have + any side-effects (because of triggers), so we can use optimized + handler::delete_all_rows() method. 
+ */ if (!using_limit && const_cond && (!conds || conds->val_int()) && - !(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE))) + !(specialflag & (SPECIAL_NO_NEW_FUNC | SPECIAL_SAFE_MODE)) && + !(table->triggers && table->triggers->has_delete_triggers())) { deleted= table->file->records; if (!(error=table->file->delete_all_rows())) @@ -82,11 +98,12 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, table->quick_keys.clear_all(); // Can't use 'only index' select=make_select(table,0,0,conds,&error); if (error) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); if ((select && select->check_quick(thd, safe_update, limit)) || !limit) { delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); + free_underlaid_joins(thd, select_lex); + thd->row_count_func= 0; send_ok(thd,0L); DBUG_RETURN(0); // Nothing to delete } @@ -98,9 +115,10 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, if (safe_update && !using_limit) { delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); - send_error(thd,ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE); - DBUG_RETURN(1); + free_underlaid_joins(thd, select_lex); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); + DBUG_RETURN(TRUE); } } if (options & OPTION_QUICK) @@ -120,8 +138,8 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE), MYF(MY_FAE | MY_ZEROFILL)); - if (thd->lex->select_lex.setup_ref_array(thd, order->elements) || - setup_order(thd, thd->lex->select_lex.ref_pointer_array, &tables, + if (select_lex->setup_ref_array(thd, order->elements) || + setup_order(thd, select_lex->ref_pointer_array, &tables, fields, all_fields, (ORDER*) order->first) || !(sortorder=make_unireg_sortorder((ORDER*) order->first, &length)) || (table->sort.found_records = filesort(thd, table, sortorder, length, @@ -130,8 +148,8 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, == HA_POS_ERROR) { delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); - DBUG_RETURN(-1); // This will force out message + free_underlaid_joins(thd, select_lex); + DBUG_RETURN(TRUE); } /* Filesort has already found and selected the rows we want to delete, @@ -141,9 +159,16 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, select= 0; } + /* If quick select is used, initialize it before retrieving rows. 
*/ + if (select && select->quick && select->quick->reset()) + { + delete select; + free_underlaid_joins(thd, select_lex); + DBUG_RETURN(TRUE); + } init_read_record(&info,thd,table,select,1,1); deleted=0L; - init_ftfuncs(thd, &thd->lex->select_lex, 1); + init_ftfuncs(thd, select_lex, 1); thd->proc_info="updating"; while (!(error=info.read_record(&info)) && !thd->killed && !thd->net.report_error) @@ -151,6 +176,11 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, // thd->net.report_error is tested to disallow delete row on error if (!(select && select->skip_record())&& !thd->net.report_error ) { + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_BEFORE); + if (!(error=table->file->delete_row(table->record[0]))) { deleted++; @@ -174,6 +204,10 @@ int mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds, SQL_LIST *order, error= 1; break; } + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_DELETE, + TRG_ACTION_AFTER); } else table->file->unlock_row(); // Row failed selection, release lock on it @@ -209,7 +243,6 @@ cleanup: */ if ((deleted || (error < 0)) && (error <= 0 || !transactional_table)) { - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) @@ -233,15 +266,14 @@ cleanup: mysql_unlock_tables(thd, thd->lock); thd->lock=0; } - free_underlaid_joins(thd, &thd->lex->select_lex); - if (error >= 0 || thd->net.report_error) - send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN: 0); - else + free_underlaid_joins(thd, select_lex); + if (error < 0) { + thd->row_count_func= deleted; send_ok(thd,deleted); DBUG_PRINT("info",("%d records deleted",deleted)); } - DBUG_RETURN(0); + DBUG_RETURN(error >= 0 || thd->net.report_error); } @@ -251,30 +283,35 @@ cleanup: SYNOPSIS mysql_prepare_delete() thd - thread handler - table_list - global table list + table_list - global/local table list conds - conditions RETURN VALUE - 0 - OK - 1 - error (message is sent to user) - -1 - error (message is not sent to user) + FALSE OK + TRUE error */ -int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) +bool mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) { - TABLE_LIST *delete_table_list= ((TABLE_LIST*) thd->lex-> - select_lex.table_list.first); + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_prepare_delete"); - if (setup_conds(thd, delete_table_list, conds) || - setup_ftfuncs(&thd->lex->select_lex)) - DBUG_RETURN(-1); - if (find_real_table_in_list(table_list->next, - table_list->db, table_list->real_name)) + if (setup_tables(thd, table_list, conds, &select_lex->leaf_tables, + FALSE, FALSE) || + setup_conds(thd, table_list, select_lex->leaf_tables, conds) || + setup_ftfuncs(select_lex)) + DBUG_RETURN(TRUE); + if (!table_list->updatable || check_key_in_view(thd, table_list)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "DELETE"); + DBUG_RETURN(TRUE); + } + if (unique_table(table_list, table_list->next_global)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + select_lex->fix_prepare_information(thd, conds); + DBUG_RETURN(FALSE); } @@ -284,12 +321,78 @@ int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds) #define MEM_STRIP_BUF_SIZE current_thd->variables.sortbuff_size -extern "C" int refposcmp2(void* arg, const void *a,const void *b) +extern "C" int refpos_order_cmp(void* arg, const void 
*a,const void *b) +{ + handler *file= (handler*)arg; + return file->cmp_ref((const byte*)a, (const byte*)b); +} + +/* + make delete specific preparation and checks after opening tables + + SYNOPSIS + mysql_multi_delete_prepare() + thd thread handler + + RETURN + FALSE OK + TRUE Error +*/ + +bool mysql_multi_delete_prepare(THD *thd) { - /* arg is a pointer to file->ref_length */ - return memcmp(a,b, *(int*) arg); + LEX *lex= thd->lex; + TABLE_LIST *aux_tables= (TABLE_LIST *)lex->auxilliary_table_list.first; + TABLE_LIST *target_tbl; + DBUG_ENTER("mysql_multi_delete_prepare"); + + /* + setup_tables() need for VIEWs. JOIN::prepare() will not do it second + time. + + lex->query_tables also point on local list of DELETE SELECT_LEX + */ + if (setup_tables(thd, lex->query_tables, &lex->select_lex.where, + &lex->select_lex.leaf_tables, FALSE, FALSE)) + DBUG_RETURN(TRUE); + + /* Fix tables-to-be-deleted-from list to point at opened tables */ + for (target_tbl= (TABLE_LIST*) aux_tables; + target_tbl; + target_tbl= target_tbl->next_local) + { + target_tbl->table= target_tbl->correspondent_table->table; + if (!target_tbl->correspondent_table->updatable || + check_key_in_view(thd, target_tbl->correspondent_table)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), + target_tbl->real_name, "DELETE"); + DBUG_RETURN(TRUE); + } + /* + Check are deleted table used somewhere inside subqueries. + + Multi-delete can't be constructed over-union => we always have + single SELECT on top and have to check underlying SELECTs of it + */ + for (SELECT_LEX_UNIT *un= lex->select_lex.first_inner_unit(); + un; + un= un->next_unit()) + { + if (un->first_select()->linkage != DERIVED_TABLE_TYPE && + un->check_updateable(target_tbl->correspondent_table->db, + target_tbl->correspondent_table->real_name)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), + target_tbl->correspondent_table->real_name); + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); } + multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt, uint num_of_tables_arg) : delete_tables(dt), thd(thd_arg), deleted(0), found(0), @@ -322,7 +425,7 @@ multi_delete::initialize_tables(JOIN *join) DBUG_RETURN(1); table_map tables_to_delete_from=0; - for (walk= delete_tables ; walk ; walk=walk->next) + for (walk= delete_tables; walk; walk= walk->next_local) tables_to_delete_from|= walk->table->map; walk= delete_tables; @@ -334,7 +437,7 @@ multi_delete::initialize_tables(JOIN *join) { /* We are going to delete from this table */ TABLE *tbl=walk->table=tab->table; - walk=walk->next; + walk= walk->next_local; /* Don't use KEYREAD optimization on this table */ tbl->no_keyread=1; /* Don't use record cache */ @@ -350,11 +453,11 @@ multi_delete::initialize_tables(JOIN *join) } walk= delete_tables; tempfiles_ptr= tempfiles; - for (walk=walk->next ; walk ; walk=walk->next) + for (walk= walk->next_local ;walk ;walk= walk->next_local) { TABLE *table=walk->table; - *tempfiles_ptr++= new Unique (refposcmp2, - (void *) &table->file->ref_length, + *tempfiles_ptr++= new Unique (refpos_order_cmp, + (void *) table->file, table->file->ref_length, MEM_STRIP_BUF_SIZE); } @@ -365,9 +468,9 @@ multi_delete::initialize_tables(JOIN *join) multi_delete::~multi_delete() { - for (table_being_deleted=delete_tables ; - table_being_deleted ; - table_being_deleted=table_being_deleted->next) + for (table_being_deleted= delete_tables; + table_being_deleted; + table_being_deleted= table_being_deleted->next_local) { TABLE *t=table_being_deleted->table; free_io_cache(t); // Alloced by unique @@ -387,9 +490,9 @@ 
bool multi_delete::send_data(List<Item> &values) int secure_counter= -1; DBUG_ENTER("multi_delete::send_data"); - for (table_being_deleted=delete_tables ; - table_being_deleted ; - table_being_deleted=table_being_deleted->next, secure_counter++) + for (table_being_deleted= delete_tables; + table_being_deleted; + table_being_deleted= table_being_deleted->next_local, secure_counter++) { TABLE *table=table_being_deleted->table; @@ -406,7 +509,8 @@ bool multi_delete::send_data(List<Item> &values) table->status|= STATUS_DELETED; if (!(error=table->file->delete_row(table->record[0]))) deleted++; - else if (!table_being_deleted->next || table_being_deleted->table->file->has_transactions()) + else if (!table_being_deleted->next_local || + table_being_deleted->table->file->has_transactions()) { table->file->print_error(error,MYF(0)); DBUG_RETURN(1); @@ -431,7 +535,7 @@ void multi_delete::send_error(uint errcode,const char *err) DBUG_ENTER("multi_delete::send_error"); /* First send error what ever it is ... */ - ::send_error(thd,errcode,err); + my_message(errcode, err, MYF(0)); /* If nothing deleted return */ if (!deleted) @@ -476,9 +580,9 @@ int multi_delete::do_deletes(bool from_send_error) if (from_send_error) { /* Found out table number for 'table_being_deleted*/ - for (TABLE_LIST *aux=delete_tables; + for (TABLE_LIST *aux= delete_tables; aux != table_being_deleted; - aux=aux->next) + aux= aux->next_local) counter++; } else @@ -487,9 +591,9 @@ int multi_delete::do_deletes(bool from_send_error) do_delete= 0; if (!found) DBUG_RETURN(0); - for (table_being_deleted=table_being_deleted->next; - table_being_deleted ; - table_being_deleted=table_being_deleted->next, counter++) + for (table_being_deleted= table_being_deleted->next_local; + table_being_deleted; + table_being_deleted= table_being_deleted->next_local, counter++) { TABLE *table = table_being_deleted->table; if (tempfiles[counter]->get(table)) @@ -546,7 +650,9 @@ bool multi_delete::send_eof() ha_autocommit_... */ if (deleted) + { query_cache_invalidate3(thd, delete_tables, 1); + } /* Write the SQL statement to the binlog if we deleted @@ -558,7 +664,6 @@ bool multi_delete::send_eof() */ if (deleted && (error <= 0 || normal_tables)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) @@ -576,10 +681,11 @@ bool multi_delete::send_eof() if (ha_autocommit_or_rollback(thd,local_error > 0)) local_error=1; - if (local_error) - ::send_error(thd); - else + if (!local_error) + { + thd->row_count_func= deleted; ::send_ok(thd, deleted); + } return 0; } @@ -600,12 +706,12 @@ bool multi_delete::send_eof() - If we want to have a name lock on the table on exit without errors. 
*/ -int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) +bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) { HA_CREATE_INFO create_info; char path[FN_REFLEN]; TABLE **table_ptr; - int error; + bool error; DBUG_ENTER("mysql_truncate"); bzero((char*) &create_info,sizeof(create_info)); @@ -641,9 +747,9 @@ int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) db_type table_type; if ((table_type=get_table_type(path)) == DB_TYPE_UNKNOWN) { - my_error(ER_NO_SUCH_TABLE, MYF(0), table_list->db, - table_list->real_name); - DBUG_RETURN(-1); + my_error(ER_NO_SUCH_TABLE, MYF(0), + table_list->db, table_list->real_name); + DBUG_RETURN(TRUE); } if (!ha_supports_generate(table_type)) { @@ -656,11 +762,11 @@ int mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) DBUG_RETURN(error); } if (lock_and_wait_for_table_name(thd, table_list)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } *fn_ext(path)=0; // Remove the .frm extension - error= ha_create_table(path,&create_info,1) ? -1 : 0; + error= ha_create_table(path,&create_info,1); query_cache_invalidate3(thd, table_list, 0); end: @@ -668,7 +774,6 @@ end: { if (!error) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -688,5 +793,5 @@ end: unlock_table_name(thd, table_list); VOID(pthread_mutex_unlock(&LOCK_open)); } - DBUG_RETURN(error ? -1 : 0); + DBUG_RETURN(error); } diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index 9475ec08c96..520393f8544 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -24,15 +24,15 @@ #include "mysql_priv.h" #include "sql_select.h" -static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *s, - TABLE_LIST *t); + /* - Resolve derived tables in all queries + call given derived table processor (preparing or filling tables) SYNOPSIS mysql_handle_derived() lex LEX for this thread + processor procedure of derived table processing RETURN 0 ok @@ -41,7 +41,7 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *s, */ int -mysql_handle_derived(LEX *lex) +mysql_handle_derived(LEX *lex, int (*processor)(THD*, LEX*, TABLE_LIST*)) { if (lex->derived_tables) { @@ -51,15 +51,11 @@ mysql_handle_derived(LEX *lex) { for (TABLE_LIST *cursor= sl->get_table_list(); cursor; - cursor= cursor->next) + cursor= cursor->next_local) { int res; - if (cursor->derived && (res=mysql_derived(lex->thd, lex, - cursor->derived, - cursor))) - { + if ((res= (*processor)(lex->thd, lex, cursor))) return res; - } } if (lex->describe) { @@ -77,20 +73,16 @@ mysql_handle_derived(LEX *lex) /* - Resolve derived tables in all queries + Create temporary table structure (but do not fill it) SYNOPSIS - mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, TABLE_LIST *t) + mysql_derived_prepare() thd Thread handle lex LEX for this thread - unit node that contains all SELECT's for derived tables - t TABLE_LIST for the upper SELECT + orig_table_list TABLE_LIST for the upper SELECT IMPLEMENTATION - Derived table is resolved with temporary table. It is created based on the - queries defined. After temporary table is created, if this is not EXPLAIN, - then the entire unit / node is deleted. unit is deleted if UNION is used - for derived table and node is deleted is it is a simple SELECT. + Derived table is resolved with temporary table. After table creation, the above TABLE_LIST is updated with a new table. 
@@ -104,59 +96,138 @@ mysql_handle_derived(LEX *lex) 0 ok 1 Error -1 Error and error message given -*/ - + */ -static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, - TABLE_LIST *org_table_list) +int mysql_derived_prepare(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) { - SELECT_LEX *first_select= unit->first_select(); - TABLE *table; - int res; - select_union *derived_result; - bool is_union= first_select->next_select() && - first_select->next_select()->linkage == UNION_TYPE; - SELECT_LEX *save_current_select= lex->current_select; - DBUG_ENTER("mysql_derived"); - - if (!(derived_result= new select_union(0))) - DBUG_RETURN(1); // out of memory - - // st_select_lex_unit::prepare correctly work for single select - if ((res= unit->prepare(thd, derived_result, 0))) - goto exit; - - - derived_result->tmp_table_param.init(); - derived_result->tmp_table_param.field_count= unit->types.elements; - /* - Temp table is created so that it hounours if UNION without ALL is to be - processed - */ - if (!(table= create_tmp_table(thd, &derived_result->tmp_table_param, - unit->types, (ORDER*) 0, - is_union && unit->union_distinct, 1, - (first_select->options | thd->options | - TMP_TABLE_ALL_COLUMNS), - HA_POS_ERROR, - org_table_list->alias))) + SELECT_LEX_UNIT *unit= orig_table_list->derived; + int res= 0; + DBUG_ENTER("mysql_derived_prepare"); + if (unit) { - res= -1; - goto exit; + SELECT_LEX *first_select= unit->first_select(); + TABLE *table= 0; + select_union *derived_result; + bool is_union= first_select->next_select() && + first_select->next_select()->linkage == UNION_TYPE; + + if (!(derived_result= new select_union(0))) + DBUG_RETURN(1); // out of memory + + // st_select_lex_unit::prepare correctly work for single select + if ((res= unit->prepare(thd, derived_result, 0))) + goto exit; + + + derived_result->tmp_table_param.init(); + derived_result->tmp_table_param.field_count= unit->types.elements; + /* + Temp table is created so that it hounours if UNION without ALL is to be + processed + */ + if (!(table= create_tmp_table(thd, &derived_result->tmp_table_param, + unit->types, (ORDER*) 0, + is_union && unit->union_distinct, 1, + (first_select->options | thd->options | + TMP_TABLE_ALL_COLUMNS), + HA_POS_ERROR, + orig_table_list->alias))) + { + res= -1; + goto exit; + } + derived_result->set_table(table); + +exit: + /* Hide "Unknown column" or "Unknown function" error */ + if (orig_table_list->view) + { + if (thd->net.last_errno == ER_BAD_FIELD_ERROR || + thd->net.last_errno == ER_SP_DOES_NOT_EXIST) + { + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), orig_table_list->db, + orig_table_list->real_name); + } + } + + /* + if it is preparation PS only or commands that need only VIEW structure + then we do not need real data and we can skip execution (and parameters + is not defined, too) + */ + if (res) + { + if (table) + free_tmp_table(thd, table); + delete derived_result; + } + else + { + if (!thd->fill_derived_tables()) + delete derived_result; + orig_table_list->derived_result= derived_result; + orig_table_list->table= table; + orig_table_list->real_name= table->real_name; + table->derived_select_number= first_select->select_number; + table->tmp_table= TMP_TABLE; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + table->grant.privilege= SELECT_ACL; +#endif + orig_table_list->db= (char *)""; + // Force read of table stats in the optimizer + table->file->info(HA_STATUS_VARIABLE); + /* Add new temporary table to list of open derived tables */ + table->next= thd->derived_tables; + 
thd->derived_tables= table; + } } - derived_result->set_table(table); + else if (orig_table_list->ancestor) + orig_table_list->set_ancestor(); + DBUG_RETURN(res); +} + + +/* + fill derived table - /* - if it is preparation PS only then we do not need real data and we - can skip execution (and parameters is not defined, too) + SYNOPSIS + mysql_derived_filling() + thd Thread handle + lex LEX for this thread + unit node that contains all SELECT's for derived tables + orig_table_list TABLE_LIST for the upper SELECT + + IMPLEMENTATION + Derived table is resolved with temporary table. It is created based on the + queries defined. After temporary table is filled, if this is not EXPLAIN, + then the entire unit / node is deleted. unit is deleted if UNION is used + for derived table and node is deleted is it is a simple SELECT. + + RETURN + 0 ok + 1 Error + -1 Error and error message given */ - if (! thd->current_arena->is_stmt_prepare()) + +int mysql_derived_filling(THD *thd, LEX *lex, TABLE_LIST *orig_table_list) +{ + TABLE *table= orig_table_list->table; + SELECT_LEX_UNIT *unit= orig_table_list->derived; + int res= 0; + + /*check that table creation pass without problem and it is derived table */ + if (table && unit) { + SELECT_LEX *first_select= unit->first_select(); + select_union *derived_result= orig_table_list->derived_result; + SELECT_LEX *save_current_select= lex->current_select; + bool is_union= first_select->next_select() && + first_select->next_select()->linkage == UNION_TYPE; if (is_union) { // execute union without clean up - if (!(res= unit->prepare(thd, derived_result, SELECT_NO_UNLOCK))) - res= unit->exec(); + res= unit->exec(); } else { @@ -169,7 +240,7 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, first_select->options&= ~OPTION_FOUND_ROWS; lex->current_select= first_select; - res= mysql_select(thd, &first_select->ref_pointer_array, + res= mysql_select(thd, &first_select->ref_pointer_array, (TABLE_LIST*) first_select->table_list.first, first_select->with_wild, first_select->item_list, first_select->where, @@ -182,54 +253,22 @@ static int mysql_derived(THD *thd, LEX *lex, SELECT_LEX_UNIT *unit, SELECT_NO_UNLOCK), derived_result, unit, first_select); } - } - if (!res) - { - /* - Here we entirely fix both TABLE_LIST and list of SELECT's as - there were no derived tables - */ - if (derived_result->flush()) - res= 1; - else + if (!res) { - org_table_list->real_name= table->real_name; - org_table_list->table= table; - if (org_table_list->table_list) - { - org_table_list->table_list->real_name= table->real_name; - org_table_list->table_list->table= table; - } - table->derived_select_number= first_select->select_number; - table->tmp_table= TMP_TABLE; -#ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.privilege= SELECT_ACL; -#endif - org_table_list->db= (char *)""; - // Force read of table stats in the optimizer - table->file->info(HA_STATUS_VARIABLE); - } + /* + Here we entirely fix both TABLE_LIST and list of SELECT's as + there were no derived tables + */ + if (derived_result->flush()) + res= 1; - if (!lex->describe) - unit->cleanup(); - if (res) - free_tmp_table(thd, table); - else - { - /* Add new temporary table to list of open derived tables */ - table->next= thd->derived_tables; - thd->derived_tables= table; + if (!lex->describe) + unit->cleanup(); } + else + unit->cleanup(); + lex->current_select= save_current_select; } - else - { - free_tmp_table(thd, table); - unit->cleanup(); - } - -exit: - delete derived_result; - lex->current_select= 
save_current_select; - DBUG_RETURN(res); + return res; } diff --git a/sql/sql_do.cc b/sql/sql_do.cc index af72632199f..e37f3e86dda 100644 --- a/sql/sql_do.cc +++ b/sql/sql_do.cc @@ -19,17 +19,17 @@ #include "mysql_priv.h" -int mysql_do(THD *thd, List<Item> &values) +bool mysql_do(THD *thd, List<Item> &values) { List_iterator<Item> li(values); Item *value; DBUG_ENTER("mysql_do"); if (setup_fields(thd, 0, 0, values, 0, 0, 0)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); while ((value = li++)) value->val_int(); free_underlaid_joins(thd, &thd->lex->select_lex); thd->clear_error(); // DO always is OK send_ok(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } diff --git a/sql/sql_error.cc b/sql/sql_error.cc index eab5ec890df..adae481d608 100644 --- a/sql/sql_error.cc +++ b/sql/sql_error.cc @@ -43,6 +43,7 @@ This file contains the implementation of error and warnings related ***********************************************************************/ #include "mysql_priv.h" +#include "sp_rcontext.h" /* Store a new message in an error object @@ -104,8 +105,27 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, { MYSQL_ERROR *err= 0; DBUG_ENTER("push_warning"); + if (thd->query_id != thd->warn_id) mysql_reset_errors(thd); + if (thd->spcont && + thd->spcont->find_handler(code, + ((int) level >= + (int) MYSQL_ERROR::WARN_LEVEL_WARN && + thd->really_abort_on_warning()) ? + MYSQL_ERROR::WARN_LEVEL_ERROR : level)) + { + DBUG_RETURN(NULL); + } + + /* Abort if we are using strict mode and we are not using IGNORE */ + if ((int) level >= (int) MYSQL_ERROR::WARN_LEVEL_WARN && + thd->really_abort_on_warning()) + { + thd->killed= THD::KILL_BAD_DATA; + my_message(code, msg, MYF(0)); + DBUG_RETURN(NULL); + } if (thd->warn_list.elements < thd->variables.max_error_count) { @@ -115,8 +135,7 @@ MYSQL_ERROR *push_warning(THD *thd, MYSQL_ERROR::enum_warning_level level, */ MEM_ROOT *old_root= thd->mem_root; thd->mem_root= &thd->warn_root; - err= new MYSQL_ERROR(thd, code, level, msg); - if (err) + if ((err= new MYSQL_ERROR(thd, code, level, msg))) thd->warn_list.push_back(err); thd->mem_root= old_root; } @@ -164,14 +183,14 @@ void push_warning_printf(THD *thd, MYSQL_ERROR::enum_warning_level level, Takes into account the current LIMIT RETURN VALUES - 0 ok - 1 Error sending data to client + FALSE ok + TRUE Error sending data to client */ static const char *warning_level_names[]= {"Note", "Warning", "Error", "?"}; static int warning_level_length[]= { 4, 7, 5, 1 }; -my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show) +bool mysqld_show_warnings(THD *thd, ulong levels_to_show) { List<Item> field_list; DBUG_ENTER("mysqld_show_warnings"); @@ -180,8 +199,9 @@ my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show) field_list.push_back(new Item_return_int("Code",4, MYSQL_TYPE_LONG)); field_list.push_back(new Item_empty_string("Message",MYSQL_ERRMSG_SIZE)); - if (thd->protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (thd->protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); MYSQL_ERROR *err; SELECT_LEX *sel= &thd->lex->select_lex; @@ -205,10 +225,10 @@ my_bool mysqld_show_warnings(THD *thd, ulong levels_to_show) protocol->store((uint32) err->code); protocol->store(err->msg, strlen(err->msg), system_charset_info); if (protocol->write()) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); if (!--limit) break; } - send_eof(thd); - DBUG_RETURN(0); + send_eof(thd); + DBUG_RETURN(FALSE); } diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 
f250a00eca1..2ee7734e4f3 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -64,7 +64,7 @@ #define HANDLER_TABLES_HASH_SIZE 120 static enum enum_ha_read_modes rkey_to_rnext[]= - { RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV, RPREV }; +{ RNEXT_SAME, RNEXT, RPREV, RNEXT, RPREV, RNEXT, RPREV, RPREV }; #define HANDLER_TABLES_HACK(thd) { \ TABLE *tmp=thd->open_tables; \ @@ -140,16 +140,16 @@ static void mysql_ha_hash_free(TABLE_LIST *tables) error messages. RETURN - 0 ok - != 0 error + FALSE OK + TRUE Error */ -int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) +bool mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) { TABLE_LIST *hash_tables; char *db, *name, *alias; uint dblen, namelen, aliaslen, counter; - int err; + int error; DBUG_ENTER("mysql_ha_open"); DBUG_PRINT("enter",("'%s'.'%s' as '%s' reopen: %d", tables->db, tables->real_name, tables->alias, @@ -173,8 +173,7 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) { DBUG_PRINT("info",("duplicate '%s'", tables->alias)); if (! reopen) - my_printf_error(ER_NONUNIQ_TABLE, ER(ER_NONUNIQ_TABLE), - MYF(0), tables->alias); + my_error(ER_NONUNIQ_TABLE, MYF(0), tables->alias); goto err; } } @@ -185,16 +184,20 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) */ DBUG_ASSERT(! tables->table); HANDLER_TABLES_HACK(thd); - err=open_tables(thd, tables, &counter); + + /* for now HANDLER can be used only for real TABLES */ + tables->required_type= FRMTYPE_TABLE; + error= open_tables(thd, tables, &counter); + HANDLER_TABLES_HACK(thd); - if (err) + if (error) goto err; /* There can be only one table in '*tables'. */ if (! (tables->table->file->table_flags() & HA_CAN_SQL_HANDLER)) { if (! reopen) - my_printf_error(ER_ILLEGAL_HA,ER(ER_ILLEGAL_HA),MYF(0), tables->alias); + my_error(ER_ILLEGAL_HA, MYF(0), tables->alias); mysql_ha_close(thd, tables); goto err; } @@ -232,11 +235,11 @@ int mysql_ha_open(THD *thd, TABLE_LIST *tables, bool reopen) if (! reopen) send_ok(thd); DBUG_PRINT("exit",("OK")); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: DBUG_PRINT("exit",("ERROR")); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } @@ -253,11 +256,11 @@ err: will be closed. Broadcasts a COND_refresh condition. 
RETURN - 0 ok - != 0 error + FALSE ok + TRUE error */ -int mysql_ha_close(THD *thd, TABLE_LIST *tables) +bool mysql_ha_close(THD *thd, TABLE_LIST *tables) { TABLE_LIST *hash_tables; TABLE **table_ptr; @@ -277,54 +280,32 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables) */ for (table_ptr= &(thd->handler_tables); *table_ptr && (*table_ptr != hash_tables->table); - table_ptr= &(*table_ptr)->next); + table_ptr= &(*table_ptr)->next) + ; -#if MYSQL_VERSION_ID < 40100 - if (*tables->db && strcmp(hash_tables->db, tables->db)) + if (*table_ptr) { - DBUG_PRINT("info",("wrong db")); - hash_tables= NULL; - } - else -#endif - { - if (*table_ptr) + (*table_ptr)->file->ha_index_or_rnd_end(); + VOID(pthread_mutex_lock(&LOCK_open)); + if (close_thread_table(thd, table_ptr)) { - (*table_ptr)->file->ha_index_or_rnd_end(); - VOID(pthread_mutex_lock(&LOCK_open)); - if (close_thread_table(thd, table_ptr)) - { - /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); - } - VOID(pthread_mutex_unlock(&LOCK_open)); + /* Tell threads waiting for refresh that something has happened */ + VOID(pthread_cond_broadcast(&COND_refresh)); } - - hash_delete(&thd->handler_tables_hash, (byte*) hash_tables); + VOID(pthread_mutex_unlock(&LOCK_open)); } + hash_delete(&thd->handler_tables_hash, (byte*) hash_tables); } - - if (! hash_tables) + else { -#if MYSQL_VERSION_ID < 40100 - char buff[MAX_DBKEY_LENGTH]; - if (*tables->db) - strxnmov(buff, sizeof(buff), tables->db, ".", tables->real_name, NullS); - else - strncpy(buff, tables->alias, sizeof(buff)); - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - buff, "HANDLER"); -#else - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - tables->alias, "HANDLER"); -#endif + my_error(ER_UNKNOWN_TABLE, MYF(0), tables->alias, "HANDLER"); DBUG_PRINT("exit",("ERROR")); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_ok(thd); DBUG_PRINT("exit", ("OK")); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -344,14 +325,15 @@ int mysql_ha_close(THD *thd, TABLE_LIST *tables) offset_limit RETURN - 0 ok - != 0 error + FALSE ok + TRUE error */ -int mysql_ha_read(THD *thd, TABLE_LIST *tables, - enum enum_ha_read_modes mode, char *keyname, List<Item> *key_expr, - enum ha_rkey_function ha_rkey_mode, Item *cond, - ha_rows select_limit,ha_rows offset_limit) +bool mysql_ha_read(THD *thd, TABLE_LIST *tables, + enum enum_ha_read_modes mode, char *keyname, + List<Item> *key_expr, + enum ha_rkey_function ha_rkey_mode, Item *cond, + ha_rows select_limit,ha_rows offset_limit) { TABLE_LIST *hash_tables; TABLE *table; @@ -360,7 +342,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, Protocol *protocol= thd->protocol; char buff[MAX_FIELD_WIDTH]; String buffer(buff, sizeof(buff), system_charset_info); - int err, keyno= -1; + int error, keyno= -1; uint num_rows; byte *key; uint key_len; @@ -419,11 +401,9 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, strxnmov(buff, sizeof(buff), tables->db, ".", tables->real_name, NullS); else strncpy(buff, tables->alias, sizeof(buff)); - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - buff, "HANDLER"); + my_error(ER_UNKNOWN_TABLE, MYF(0), buff, "HANDLER"); #else - my_printf_error(ER_UNKNOWN_TABLE, ER(ER_UNKNOWN_TABLE), MYF(0), - tables->alias, "HANDLER"); + my_error(ER_UNKNOWN_TABLE, MYF(0), tables->alias, "HANDLER"); #endif goto err0; } @@ -439,26 +419,25 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, { if ((keyno=find_type(keyname, &table->keynames, 1+2)-1)<0) { - 
my_printf_error(ER_KEY_DOES_NOT_EXITS,ER(ER_KEY_DOES_NOT_EXITS),MYF(0), - keyname,tables->alias); + my_error(ER_KEY_DOES_NOT_EXITS, MYF(0), keyname, tables->alias); goto err0; } table->file->ha_index_or_rnd_end(); table->file->ha_index_init(keyno); } - if (insert_fields(thd,tables,tables->db,tables->alias,&it)) + if (insert_fields(thd, tables, tables->db, tables->alias, &it, 0, 0)) goto err0; select_limit+=offset_limit; - protocol->send_fields(&list,1); + protocol->send_fields(&list, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); HANDLER_TABLES_HACK(thd); lock= mysql_lock_tables(thd, &tables->table, 1); HANDLER_TABLES_HACK(thd); if (!lock) - goto err0; // mysql_lock_tables() printed error message already + goto err0; // mysql_lock_tables() printed error message already /* In ::external_lock InnoDB resets the fields which tell it that @@ -473,33 +452,33 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, switch (mode) { case RFIRST: if (keyname) - err=table->file->index_first(table->record[0]); + error= table->file->index_first(table->record[0]); else { table->file->ha_index_or_rnd_end(); - if (!(err=table->file->ha_rnd_init(1))) - err=table->file->rnd_next(table->record[0]); + if (!(error= table->file->ha_rnd_init(1))) + error= table->file->rnd_next(table->record[0]); } mode=RNEXT; break; case RLAST: DBUG_ASSERT(keyname != 0); - err=table->file->index_last(table->record[0]); + error= table->file->index_last(table->record[0]); mode=RPREV; break; case RNEXT: - err=keyname ? - table->file->index_next(table->record[0]) : - table->file->rnd_next(table->record[0]); + error= (keyname ? + table->file->index_next(table->record[0]) : + table->file->rnd_next(table->record[0])); break; case RPREV: DBUG_ASSERT(keyname != 0); - err=table->file->index_prev(table->record[0]); + error= table->file->index_prev(table->record[0]); break; case RNEXT_SAME: /* Continue scan on "(keypart1,keypart2,...)=(c1, c2, ...) 
*/ DBUG_ASSERT(keyname != 0); - err= table->file->index_next_same(table->record[0], key, key_len); + error= table->file->index_next_same(table->record[0], key, key_len); break; case RKEY: { @@ -508,8 +487,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, KEY_PART_INFO *key_part=keyinfo->key_part; if (key_expr->elements > keyinfo->key_parts) { - my_printf_error(ER_TOO_MANY_KEY_PARTS,ER(ER_TOO_MANY_KEY_PARTS), - MYF(0),keyinfo->key_parts); + my_error(ER_TOO_MANY_KEY_PARTS, MYF(0), keyinfo->key_parts); goto err; } List_iterator<Item> it_ke(*key_expr); @@ -530,30 +508,27 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, key_len+=key_part->store_length; } if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len)))) - { - send_error(thd,ER_OUTOFMEMORY); goto err; - } - key_copy(key, table, keyno, key_len); - err=table->file->index_read(table->record[0], + key_copy(key, table->record[0], table->key_info + keyno, key_len); + error= table->file->index_read(table->record[0], key,key_len,ha_rkey_mode); mode=rkey_to_rnext[(int)ha_rkey_mode]; break; } default: - send_error(thd,ER_ILLEGAL_HA); + my_message(ER_ILLEGAL_HA, ER(ER_ILLEGAL_HA), MYF(0)); goto err; } - if (err == HA_ERR_RECORD_DELETED) - continue; - if (err) + if (error) { - if (err != HA_ERR_KEY_NOT_FOUND && err != HA_ERR_END_OF_FILE) + if (error == HA_ERR_RECORD_DELETED) + continue; + if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { sql_print_error("mysql_ha_read: Got error %d when reading table '%s'", - err, tables->real_name); - table->file->print_error(err,MYF(0)); + error, tables->real_name); + table->file->print_error(error,MYF(0)); goto err; } goto ok; @@ -570,7 +545,7 @@ int mysql_ha_read(THD *thd, TABLE_LIST *tables, if (item->send(thd->protocol, &buffer)) { protocol->free(); // Free used - my_error(ER_OUT_OF_RESOURCES,MYF(0)); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); goto err; } } @@ -582,13 +557,13 @@ ok: mysql_unlock_tables(thd,lock); send_eof(thd); DBUG_PRINT("exit",("OK")); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: mysql_unlock_tables(thd,lock); err0: DBUG_PRINT("exit",("ERROR")); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } @@ -631,7 +606,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags) if (tables) { /* Close all tables in the list. 
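Before the diff moves on to mysql_ha_flush(), it is worth spelling out the key handling in the RKEY branch above: key_copy() is now called with an explicit record buffer and KEY descriptor rather than a TABLE pointer and key number. A condensed sketch of the new call sequence, using only names that appear in the hunk (error handling trimmed; the explicit send_error() on allocation failure was dropped, presumably because the thread mem_root's error handler reports out-of-memory on its own):

  KEY *keyinfo= table->key_info + keyno;          /* index named in HANDLER ... READ */
  byte *key;
  if (!(key= (byte*) thd->calloc(ALIGN_SIZE(key_len))))
    goto err;                                     /* OOM already reported */
  key_copy(key, table->record[0], keyinfo, key_len);   /* new key_copy() signature */
  error= table->file->index_read(table->record[0], key, key_len, ha_rkey_mode);
  mode= rkey_to_rnext[(int) ha_rkey_mode];        /* scan direction for later READs */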
*/ - for (tmp_tables= tables ; tmp_tables; tmp_tables= tmp_tables->next) + for (tmp_tables= tables ; tmp_tables; tmp_tables= tmp_tables->next_local) { DBUG_PRINT("info-in-tables-list",("'%s'.'%s' as '%s'", tmp_tables->db, tmp_tables->real_name, diff --git a/sql/sql_help.cc b/sql/sql_help.cc index 0e0d32a922d..99273b42f2a 100644 --- a/sql/sql_help.cc +++ b/sql/sql_help.cc @@ -84,12 +84,11 @@ static bool init_fields(THD *thd, TABLE_LIST *tables, DBUG_ENTER("init_fields"); for (; count-- ; find_fields++) { - TABLE_LIST *not_used; /* We have to use 'new' here as field will be re_linked on free */ Item_field *field= new Item_field("mysql", find_fields->table_name, find_fields->field_name); if (!(find_fields->field= find_field_in_tables(thd, field, tables, - ¬_used, TRUE))) + 0, REPORT_ALL_ERRORS, 1))) DBUG_RETURN(1); } DBUG_RETURN(0); @@ -277,7 +276,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, (iindex_relations= find_type((char*) primary_key_name, &relations->keynames, 1+2)-1)<0) { - send_error(thd,ER_CORRUPT_HELP_DB); + my_message(ER_CORRUPT_HELP_DB, ER(ER_CORRUPT_HELP_DB), MYF(0)); DBUG_RETURN(-1); } rtopic_id= find_fields[help_relation_help_topic_id].field; @@ -287,8 +286,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, relations->file->ha_index_init(iindex_relations); rkey_id->store((longlong) key_id); - rkey_id->get_key_image(buff, rkey_id->pack_length(), rkey_id->charset(), - Field::itRAW); + rkey_id->get_key_image(buff, rkey_id->pack_length(), Field::itRAW); int key_res= relations->file->index_read(relations->record[0], (byte *)buff, rkey_id->pack_length(), HA_READ_KEY_EXACT); @@ -301,8 +299,7 @@ int get_topics_for_keyword(THD *thd, TABLE *topics, TABLE *relations, longlong topic_id= rtopic_id->val_int(); Field *field= find_fields[help_topic_help_topic_id].field; field->store((longlong) topic_id); - field->get_key_image(topic_id_buff, field->pack_length(), field->charset(), - Field::itRAW); + field->get_key_image(topic_id_buff, field->pack_length(), Field::itRAW); if (!topics->file->index_read(topics->record[0], (byte *)topic_id_buff, field->pack_length(), HA_READ_KEY_EXACT)) @@ -427,7 +424,8 @@ int send_answer_1(Protocol *protocol, String *s1, String *s2, String *s3) field_list.push_back(new Item_empty_string("description",1000)); field_list.push_back(new Item_empty_string("example",1000)); - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_RETURN(1); protocol->prepare_for_resend(); @@ -469,7 +467,8 @@ int send_header_2(Protocol *protocol, bool for_category) field_list.push_back(new Item_empty_string("source_category_name",64)); field_list.push_back(new Item_empty_string("name",64)); field_list.push_back(new Item_empty_string("is_it_category",1)); - DBUG_RETURN(protocol->send_fields(&field_list,1)); + DBUG_RETURN(protocol->send_fields(&field_list, Protocol::SEND_NUM_ROWS | + Protocol::SEND_EOF)); } /* @@ -607,32 +606,31 @@ SQL_SELECT *prepare_select_for_name(THD *thd, const char *mask, uint mlen, thd Thread handler RETURN VALUES - 0 Success - 1 Error and send_error already commited - -1 error && send_error should be issued (normal case) + FALSE Success + TRUE Error and send_error already commited */ -int mysqld_help(THD *thd, const char *mask) +bool mysqld_help(THD *thd, const char *mask) { Protocol *protocol= thd->protocol; SQL_SELECT *select; st_find_field used_fields[array_elements(init_used_fields)]; DBUG_ENTER("mysqld_help"); + TABLE_LIST 
*leaves= 0; TABLE_LIST tables[4]; bzero((gptr)tables,sizeof(tables)); tables[0].alias= tables[0].real_name= (char*) "help_topic"; tables[0].lock_type= TL_READ; - tables[0].next= &tables[1]; + tables[0].next_global= tables[0].next_local= &tables[1]; tables[1].alias= tables[1].real_name= (char*) "help_category"; tables[1].lock_type= TL_READ; - tables[1].next= &tables[2]; + tables[1].next_global= tables[1].next_local= &tables[2]; tables[2].alias= tables[2].real_name= (char*) "help_relation"; tables[2].lock_type= TL_READ; - tables[2].next= &tables[3]; + tables[2].next_global= tables[2].next_local= &tables[3]; tables[3].alias= tables[3].real_name= (char*) "help_keyword"; tables[3].lock_type= TL_READ; - tables[3].next= 0; tables[0].db= tables[1].db= tables[2].db= tables[3].db= (char*) "mysql"; List<String> topics_list, categories_list, subcategories_list; @@ -642,18 +640,16 @@ int mysqld_help(THD *thd, const char *mask) MEM_ROOT *mem_root= thd->mem_root; if (open_and_lock_tables(thd, tables)) - { - res= -1; - goto end; - } - /* Init tables and fields to be usable from items */ - setup_tables(tables); + goto error; + /* + Init tables and fields to be usable from items + + tables do not contain VIEWs => we can pass 0 as conds + */ + setup_tables(thd, tables, 0, &leaves, FALSE, FALSE); memcpy((char*) used_fields, (char*) init_used_fields, sizeof(used_fields)); if (init_fields(thd, tables, used_fields, array_elements(used_fields))) - { - res= -1; - goto end; - } + goto error; size_t i; for (i=0; i<sizeof(tables)/sizeof(TABLE_LIST); i++) tables[i].table->file->init_table_handle_for_HANDLER(); @@ -661,12 +657,8 @@ int mysqld_help(THD *thd, const char *mask) if (!(select= prepare_select_for_name(thd,mask,mlen,tables,tables[0].table, used_fields[help_topic_name].field,&error))) - { - res= -1; - goto end; - } + goto error; - res= 1; count_topics= search_topics(thd,tables[0].table,used_fields, select,&topics_list, &name, &description, &example); @@ -678,10 +670,8 @@ int mysqld_help(THD *thd, const char *mask) if (!(select= prepare_select_for_name(thd,mask,mlen,tables,tables[3].table, used_fields[help_keyword_name].field,&error))) - { - res= -1; - goto end; - } + goto error; + count_topics=search_keyword(thd,tables[3].table,used_fields,select,&key_id); delete select; count_topics= (count_topics != 1) ? 
0 : @@ -697,10 +687,7 @@ int mysqld_help(THD *thd, const char *mask) if (!(select= prepare_select_for_name(thd,mask,mlen,tables,tables[1].table, used_fields[help_category_name].field,&error))) - { - res= -1; - goto end; - } + goto error; count_categories= search_categories(thd, tables[1].table, used_fields, select, @@ -709,13 +696,13 @@ int mysqld_help(THD *thd, const char *mask) if (!count_categories) { if (send_header_2(protocol,FALSE)) - goto end; + goto error; } else if (count_categories > 1) { if (send_header_2(protocol,FALSE) || send_variant_2_list(mem_root,protocol,&categories_list,"Y",0)) - goto end; + goto error; } else { @@ -728,20 +715,14 @@ int mysqld_help(THD *thd, const char *mask) new Item_int((int32)category_id)); if (!(select= prepare_simple_select(thd,cond_topic_by_cat, tables,tables[0].table,&error))) - { - res= -1; - goto end; - } + goto error; get_all_items_for_category(thd,tables[0].table, used_fields[help_topic_name].field, select,&topics_list); delete select; if (!(select= prepare_simple_select(thd,cond_cat_by_cat,tables, tables[1].table,&error))) - { - res= -1; - goto end; - } + goto error; get_all_items_for_category(thd,tables[1].table, used_fields[help_category_name].field, select,&subcategories_list); @@ -750,39 +731,36 @@ int mysqld_help(THD *thd, const char *mask) if (send_header_2(protocol, TRUE) || send_variant_2_list(mem_root,protocol,&topics_list, "N",cat) || send_variant_2_list(mem_root,protocol,&subcategories_list,"Y",cat)) - goto end; + goto error; } } else if (count_topics == 1) { if (send_answer_1(protocol,&name,&description,&example)) - goto end; + goto error; } else { /* First send header and functions */ if (send_header_2(protocol, FALSE) || send_variant_2_list(mem_root,protocol, &topics_list, "N", 0)) - goto end; + goto error; if (!(select= prepare_select_for_name(thd,mask,mlen,tables,tables[1].table, used_fields[help_category_name].field,&error))) - { - res= -1; - goto end; - } + goto error; search_categories(thd, tables[1].table, used_fields, select,&categories_list, 0); delete select; /* Then send categories */ if (send_variant_2_list(mem_root,protocol, &categories_list, "Y", 0)) - goto end; + goto error; } - res= 0; - send_eof(thd); end: - DBUG_RETURN(res); + DBUG_RETURN(FALSE); +error: + DBUG_RETURN(TRUE); } diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 92c429bcfde..2a6e772db32 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -18,16 +18,20 @@ /* Insert of records */ #include "mysql_priv.h" +#include "sp_head.h" +#include "sql_trigger.h" +#include "sql_select.h" static int check_null_fields(THD *thd,TABLE *entry); #ifndef EMBEDDED_LIBRARY static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list); static int write_delayed(THD *thd,TABLE *table, enum_duplicates dup, bool ignore, - char *query, uint query_length, int log_on); + char *query, uint query_length, bool log_on); static void end_delayed_insert(THD *thd); extern "C" pthread_handler_decl(handle_delayed_insert,arg); static void unlink_blobs(register TABLE *table); #endif +static bool check_view_insertability(TABLE_LIST *view, ulong query_id); /* Define to force use of my_malloc() if the allocated memory block is big */ @@ -39,59 +43,93 @@ static void unlink_blobs(register TABLE *table); #define my_safe_afree(ptr, size, min_length) if (size > min_length) my_free(ptr,MYF(0)) #endif -#define DELAYED_LOG_UPDATE 1 -#define DELAYED_LOG_BIN 2 - /* Check if insert fields are correct. 
Sets table->timestamp_field_type to TIMESTAMP_NO_AUTO_SET or leaves it as is, depending on if timestamp should be updated or not. */ -int -check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, - List<Item> &values, ulong counter) +static int +check_insert_fields(THD *thd, TABLE_LIST *table_list, List<Item> &fields, + List<Item> &values, ulong counter, bool check_unique) { + TABLE *table= table_list->table; + + if (!table_list->updatable) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + return -1; + } + if (fields.elements == 0 && values.elements != 0) { + if (!table) + { + DBUG_ASSERT(table_list->view && + table_list->ancestor && table_list->ancestor->next_local); + my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0), + table_list->view_db.str, table_list->view_name.str); + return -1; + } if (values.elements != table->fields) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),counter); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); return -1; } #ifndef NO_EMBEDDED_ACCESS_CHECKS - if (grant_option && - check_grant_all_columns(thd,INSERT_ACL,table)) - return -1; + if (grant_option) + { + Field_iterator_table fields; + fields.set_table(table); + if (check_grant_all_columns(thd, INSERT_ACL, &table->grant, + table->table_cache_key, table->real_name, + &fields)) + return -1; + } #endif table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; } else { // Part field list + TABLE_LIST *save_next; + int res; if (fields.elements != values.elements) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),counter); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); return -1; } - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - table_list.db= table->table_cache_key; - table_list.real_name= table_list.alias= table->table_name; - table_list.table=table; - table_list.grant=table->grant; thd->dupp_field=0; - if (setup_tables(&table_list) || - setup_fields(thd, 0, &table_list,fields,1,0,0)) + thd->lex->select_lex.no_wrap_view_item= 1; + save_next= table_list->next_local; // fields only from first table + table_list->next_local= 0; + res= setup_fields(thd, 0, table_list, fields, 1, 0, 0); + table_list->next_local= save_next; + thd->lex->select_lex.no_wrap_view_item= 0; + if (res) return -1; + if (table == 0) + { + /* it is join view => we need to find table for update */ + List_iterator_fast<Item> it(fields); + Item *item; + TABLE_LIST *tbl= 0; + table_map map= 0; + + while ((item= it++)) + map|= item->used_tables(); + if (table_list->check_single_table(&tbl, map) || tbl == 0) + { + my_error(ER_VIEW_MULTIUPDATE, MYF(0), + table_list->view_db.str, table_list->view_name.str); + return -1; + } + table_list->table= table= tbl->table; + } - if (thd->dupp_field) + if (check_unique && thd->dupp_field) { - my_error(ER_FIELD_SPECIFIED_TWICE,MYF(0), thd->dupp_field->field_name); + my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dupp_field->field_name); return -1; } if (table->timestamp_field && // Don't set timestamp if used @@ -102,17 +140,26 @@ check_insert_fields(THD *thd,TABLE *table,List<Item> &fields, #ifndef NO_EMBEDDED_ACCESS_CHECKS table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); #endif + + if (check_key_in_view(thd, table_list) || + (table_list->view && + check_view_insertability(table_list, thd->query_id))) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "INSERT"); + return -1; + } + return 0; } -int mysql_insert(THD 
*thd,TABLE_LIST *table_list, - List<Item> &fields, - List<List_item> &values_list, - List<Item> &update_fields, - List<Item> &update_values, - enum_duplicates duplic, - bool ignore) +bool mysql_insert(THD *thd,TABLE_LIST *table_list, + List<Item> &fields, + List<List_item> &values_list, + List<Item> &update_fields, + List<Item> &update_values, + enum_duplicates duplic, + bool ignore) { int error, res; /* @@ -120,27 +167,22 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, By default, both logs are enabled (this won't cause problems if the server runs without --log-update or --log-bin). */ - int log_on= DELAYED_LOG_UPDATE | DELAYED_LOG_BIN ; + bool log_on= (thd->options & OPTION_BIN_LOG) || (!(thd->master_access & SUPER_ACL)); bool transactional_table, log_delayed; uint value_count; ulong counter = 1; ulonglong id; COPY_INFO info; - TABLE *table; + TABLE *table= 0; List_iterator_fast<List_item> its(values_list); List_item *values; #ifndef EMBEDDED_LIBRARY char *query= thd->query; #endif thr_lock_type lock_type = table_list->lock_type; - TABLE_LIST *insert_table_list= (TABLE_LIST*) - thd->lex->select_lex.table_list.first; + Item *unused_conds= 0; DBUG_ENTER("mysql_insert"); - if (!(thd->options & OPTION_UPDATE_LOG)) - log_on&= ~(int) DELAYED_LOG_UPDATE; - if (!(thd->options & OPTION_BIN_LOG)) - log_on&= ~(int) DELAYED_LOG_BIN; /* in safe mode or with skip-new change delayed insert to be regular if we are told to replace duplicates, the insert cannot be concurrent @@ -168,17 +210,22 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, table_list->db ? table_list->db : thd->db, table_list->real_name)) { - my_printf_error(ER_DELAYED_INSERT_TABLE_LOCKED, - ER(ER_DELAYED_INSERT_TABLE_LOCKED), - MYF(0), table_list->real_name); - DBUG_RETURN(-1); + my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0), + table_list->real_name); + DBUG_RETURN(TRUE); } } if ((table= delayed_get_table(thd,table_list)) && !thd->is_fatal_error) { res= 0; - if (table_list->next) /* if sub select */ - res= open_and_lock_tables(thd, table_list->next); + if (table_list->next_global) /* if sub select */ + res= open_and_lock_tables(thd, table_list->next_global); + /* + First is not processed by open_and_lock_tables() => we need set + updateability flags "by hands". 
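Returning to the check_insert_fields() hunk above: when the INSERT target is a multi-table (join) view, the single underlying table that will receive the rows is deduced from the insert field list. The branch restated with explanatory comments (identifiers exactly as in the hunk):

  /* table == 0 at this point means table_list is a join view. */
  List_iterator_fast<Item> it(fields);
  Item *item;
  TABLE_LIST *tbl= 0;
  table_map map= 0;
  while ((item= it++))
    map|= item->used_tables();            /* tables referenced by the listed fields */
  if (table_list->check_single_table(&tbl, map) || tbl == 0)
  {
    /* the fields span more than one underlying table: not insertable */
    my_error(ER_VIEW_MULTIUPDATE, MYF(0),
             table_list->view_db.str, table_list->view_name.str);
    return -1;
  }
  table_list->table= table= tbl->table;   /* all listed fields live in one table */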
+ */ + if (!table_list->derived && !table_list->view) + table_list->updatable= 1; // usual table } else { @@ -191,30 +238,31 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, #endif /* EMBEDDED_LIBRARY */ res= open_and_lock_tables(thd, table_list); if (res || thd->is_fatal_error) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); - table= table_list->table; thd->proc_info="init"; thd->used_tables=0; values= its++; - if (mysql_prepare_insert(thd, table_list, insert_table_list, table, - fields, values, update_fields, - update_values, duplic)) + if (mysql_prepare_insert(thd, table_list, table, fields, values, + update_fields, update_values, duplic, &unused_conds, + FALSE)) goto abort; + /* mysql_prepare_insert set table_list->table if it was not set */ + table= table_list->table; + + // is table which we are changing used somewhere in other parts of query value_count= values->elements; while ((values= its++)) { counter++; if (values->elements != value_count) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),counter); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); goto abort; } - if (setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) + if (setup_fields(thd, 0, table_list, *values, 0, 0, 0)) goto abort; } its.rewind (); @@ -227,12 +275,15 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, info.handle_duplicates=duplic; info.update_fields= &update_fields; info.update_values= &update_values; + info.view= (table_list->view ? table_list : 0); + /* Count warnings for all inserts. For single line insert, generate an error if try to set a NOT NULL field - to NULL + to NULL. */ - thd->count_cuted_fields= ((values_list.elements == 1) ? + thd->count_cuted_fields= ((values_list.elements == 1 && + duplic != DUP_IGNORE) ? CHECK_FIELD_ERROR_FOR_NULL : CHECK_FIELD_WARN); thd->cuted_fields = 0L; @@ -254,19 +305,35 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, if (lock_type != TL_WRITE_DELAYED) table->file->start_bulk_insert(values_list.elements); + thd->no_trans_update= 0; + thd->abort_on_warning= (duplic != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + + if (fields.elements && check_that_all_fields_are_given_values(thd, table)) + { + /* thd->net.report_error is now set, which will abort the next loop */ + error= 1; + } + while ((values= its++)) { if (fields.elements || !value_count) { restore_record(table,default_values); // Get empty record - if (fill_record(fields, *values, 0)|| thd->net.report_error || - check_null_fields(thd,table)) + if (fill_record(thd, fields, *values, 0)) { if (values_list.elements != 1 && !thd->net.report_error) { info.records++; continue; } + /* + TODO: set thd->abort_on_warning if values_list.elements == 1 + and check that all items return warning in case of problem with + storing field. + */ error=1; break; } @@ -277,7 +344,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, restore_record(table,default_values); // Get empty record else table->record[0][0]=table->default_values[0]; // Fix delete marker - if (fill_record(table->field,*values, 0) || thd->net.report_error) + if (fill_record(thd, table->field, *values, 0)) { if (values_list.elements != 1 && ! thd->net.report_error) { @@ -288,6 +355,26 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, break; } } + + /* + FIXME: Actually we should do this before + check_that_all_fields_are_given_values Or even go into write_record ? 
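Taken together, the hunks in this region reshape the per-row body of mysql_insert() into the sequence sketched below (a condensed outline only: the DELAYED branch, labels and some bookkeeping are omitted, and the IGNORE flag passed to the view check is simplified):

  restore_record(table, default_values);              /* start from column defaults */
  if (fill_record(thd, fields, *values, 0))           /* evaluate the VALUES list */
  { error= 1; break; }
  if (table->triggers)                                 /* BEFORE INSERT triggers */
    table->triggers->process_triggers(thd, TRG_EVENT_INSERT, TRG_ACTION_BEFORE);
  res= table_list->view_check_option(thd, ignore);     /* WITH CHECK OPTION */
  if (res == VIEW_CHECK_SKIP)
    continue;                                          /* row rejected, IGNORE in effect */
  if (res == VIEW_CHECK_ERROR)
  { error= 1; break; }
  error= write_record(thd, table, &info);              /* REPLACE / ON DUPLICATE KEY here */
  if (error)
    break;
  if (table->triggers)                                 /* AFTER INSERT triggers */
    table->triggers->process_triggers(thd, TRG_EVENT_INSERT, TRG_ACTION_AFTER);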
+ */ + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_INSERT, + TRG_ACTION_BEFORE); + + if ((res= table_list->view_check_option(thd, + (values_list.elements == 1 ? + 0 : + ignore))) == + VIEW_CHECK_SKIP) + continue; + else if (res == VIEW_CHECK_ERROR) + { + error= 1; + break; + } #ifndef EMBEDDED_LIBRARY if (lock_type == TL_WRITE_DELAYED) { @@ -296,9 +383,7 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, } else #endif - error=write_record(table,&info); - if (error) - break; + error=write_record(thd, table ,&info); /* If auto_increment values are used, save the first one for LAST_INSERT_ID() and for the update log. @@ -309,7 +394,12 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, { // Get auto increment value id= thd->last_insert_id; } + if (error) + break; thd->row_count++; + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_INSERT, TRG_ACTION_AFTER); } /* @@ -346,7 +436,9 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, before binlog writing and ha_autocommit_... */ if (info.copied || info.deleted || info.updated) + { query_cache_invalidate3(thd, table_list, 1); + } transactional_table= table->file->has_transactions(); @@ -354,7 +446,6 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, if ((info.copied || info.deleted || info.updated) && (error <= 0 || !transactional_table)) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) @@ -393,7 +484,10 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, goto abort; if (values_list.elements == 1 && (!(thd->options & OPTION_WARNINGS) || !thd->cuted_fields)) - send_ok(thd,info.copied+info.deleted+info.updated,id); + { + thd->row_count_func= info.copied+info.deleted+info.updated; + send_ok(thd, (ulong) thd->row_count_func, id); + } else { char buff[160]; @@ -404,11 +498,12 @@ int mysql_insert(THD *thd,TABLE_LIST *table_list, else sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); - ::send_ok(thd,info.copied+info.deleted+info.updated,(ulonglong)id,buff); + thd->row_count_func= info.copied+info.deleted+info.updated; + ::send_ok(thd, (ulong) thd->row_count_func, id, buff); } free_underlaid_joins(thd, &thd->lex->select_lex); - table->insert_values=0; - DBUG_RETURN(0); + thd->abort_on_warning= 0; + DBUG_RETURN(FALSE); abort: #ifndef EMBEDDED_LIBRARY @@ -416,8 +511,137 @@ abort: end_delayed_insert(thd); #endif free_underlaid_joins(thd, &thd->lex->select_lex); - table->insert_values=0; - DBUG_RETURN(-1); + thd->abort_on_warning= 0; + DBUG_RETURN(TRUE); +} + + +/* + Additional check for insertability for VIEW + + SYNOPSIS + check_view_insertability() + view - reference on VIEW + + IMPLEMENTATION + A view is insertable if the folloings are true: + - All columns in the view are columns from a table + - All not used columns in table have a default values + - All field in view are unique (not referring to the same column) + + RETURN + FALSE - OK + view->contain_auto_increment is 1 if and only if the view contains an + auto_increment field + + TRUE - can't be used for insert +*/ + +static bool check_view_insertability(TABLE_LIST *view, ulong query_id) +{ + uint num= view->view->select_lex.item_list.elements; + TABLE *table= view->table; + Field_translator *trans_start= view->field_translation, + *trans_end= trans_start + num; + Field_translator *trans; + Field **field_ptr= table->field; + ulong other_query_id= query_id - 1; + DBUG_ENTER("check_key_in_view"); + + 
DBUG_ASSERT(view->table != 0 && view->field_translation != 0); + + view->contain_auto_increment= 0; + /* check simplicity and prepare unique test of view */ + for (trans= trans_start; trans != trans_end; trans++) + { + Item_field *field; + /* simple SELECT list entry (field without expression) */ + if (!(field= trans->item->filed_for_view_update())) + DBUG_RETURN(TRUE); + if (field->field->unireg_check == Field::NEXT_NUMBER) + view->contain_auto_increment= 1; + /* prepare unique test */ + field->field->query_id= other_query_id; + /* + remove collation (or other transparent for update function) if we have + it + */ + trans->item= field; + } + /* unique test */ + for (trans= trans_start; trans != trans_end; trans++) + { + /* Thanks to test above, we know that all columns are of type Item_field */ + Item_field *field= (Item_field *)trans->item; + if (field->field->query_id == query_id) + DBUG_RETURN(TRUE); + field->field->query_id= query_id; + } + + /* VIEW contain all fields without default value */ + for (; *field_ptr; field_ptr++) + { + Field *field= *field_ptr; + /* field have not default value */ + if ((field->type() == FIELD_TYPE_BLOB) && + (table->timestamp_field != field || + field->unireg_check == Field::TIMESTAMP_UN_FIELD)) + { + for (trans= trans_start; ; trans++) + { + if (trans == trans_end) + DBUG_RETURN(TRUE); // Field was not part of view + if (((Item_field *)trans->item)->field == *field_ptr) + break; // ok + } + } + } + DBUG_RETURN(FALSE); +} + + +/* + Check if table can be updated + + SYNOPSIS + mysql_prepare_insert_check_table() + thd Thread handle + table_list Table list + fields List of fields to be updated + where Pointer to where clause + select_insert Check is making for SELECT ... INSERT + + RETURN + FALSE ok + TRUE ERROR +*/ + +static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list, + List<Item> &fields, COND **where, + bool select_insert) +{ + bool insert_into_view= (table_list->view != 0); + DBUG_ENTER("mysql_prepare_insert_check_table"); + + if (setup_tables(thd, table_list, where, &thd->lex->select_lex.leaf_tables, + FALSE, select_insert)) + DBUG_RETURN(TRUE); + + if (insert_into_view && !fields.elements) + { + thd->lex->empty_field_list_on_rset= 1; + if (!table_list->table) + { + DBUG_ASSERT(table_list->view && + table_list->ancestor && table_list->ancestor->next_local); + my_error(ER_VIEW_NO_INSERT_FIELD_LIST, MYF(0), + table_list->view_db.str, table_list->view_name.str); + DBUG_RETURN(TRUE); + } + DBUG_RETURN(insert_view_fields(&fields, table_list)); + } + + DBUG_RETURN(FALSE); } @@ -426,47 +650,66 @@ abort: SYNOPSIS mysql_prepare_insert() - thd - thread handler - table_list - global table list - insert_table_list - local table list of INSERT SELECT_LEX - values - values to insert. NULL for INSERT ... SELECT + thd Thread handler + table_list Global/local table list + table Table to insert into (can be NULL if table should be taken from + table_list->table) + where Where clause (for insert ... select) + select_insert TRUE if INSERT ... 
SELECT statement RETURN VALUE - 0 - OK - -1 - error (message is not sent to user) + FALSE OK + TRUE error */ -int mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *insert_table_list, TABLE *table, - List<Item> &fields, List_item *values, - List<Item> &update_fields, List<Item> &update_values, - enum_duplicates duplic) + +bool mysql_prepare_insert(THD *thd, TABLE_LIST *table_list, TABLE *table, + List<Item> &fields, List_item *values, + List<Item> &update_fields, List<Item> &update_values, + enum_duplicates duplic, + COND **where, bool select_insert) { + bool insert_into_view= (table_list->view != 0); + /* TODO: use this condition for 'WITH CHECK OPTION' */ + bool res; DBUG_ENTER("mysql_prepare_insert"); - if (duplic == DUP_UPDATE && !table->insert_values) + DBUG_PRINT("enter", ("table_list 0x%lx, table 0x%lx, view %d", + (ulong)table_list, (ulong)table, + (int)insert_into_view)); + + if (duplic == DUP_UPDATE) { /* it should be allocated before Item::fix_fields() */ - table->insert_values= - (byte *)alloc_root(thd->mem_root, table->rec_buff_length); - if (!table->insert_values) - DBUG_RETURN(-1); + if (table_list->set_insert_values(thd->mem_root)) + DBUG_RETURN(TRUE); } - if ((values && check_insert_fields(thd, table, fields, *values, 1)) || - setup_tables(insert_table_list) || - (values && setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) || + + if (mysql_prepare_insert_check_table(thd, table_list, fields, where, + select_insert)) + DBUG_RETURN(TRUE); + + if ((values && check_insert_fields(thd, table_list, fields, *values, 1, + !insert_into_view)) || + (values && setup_fields(thd, 0, table_list, *values, 0, 0, 0)) || (duplic == DUP_UPDATE && - (setup_fields(thd, 0, insert_table_list, update_fields, 1, 0, 0) || - setup_fields(thd, 0, insert_table_list, update_values, 1, 0, 0)))) - DBUG_RETURN(-1); - if (values && find_real_table_in_list(table_list->next, table_list->db, - table_list->real_name)) + ((thd->lex->select_lex.no_wrap_view_item= 1, + (res= setup_fields(thd, 0, table_list, update_fields, 1, 0, 0)), + thd->lex->select_lex.no_wrap_view_item= 0, + res) || + setup_fields(thd, 0, table_list, update_values, 1, 0, 0)))) + DBUG_RETURN(TRUE); + + if (!table) + table= table_list->table; + + if (!select_insert && unique_table(table_list, table_list->next_global)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } if (duplic == DUP_UPDATE || duplic == DUP_REPLACE) table->file->extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY); - - DBUG_RETURN(0); + thd->lex->select_lex.first_execution= 0; + DBUG_RETURN(FALSE); } @@ -483,10 +726,12 @@ static int last_uniq_key(TABLE *table,uint keynr) /* Write a record to table with optional deleting of conflicting records + + Sets thd->no_trans_update if table which is updated didn't have transactions */ -int write_record(TABLE *table,COPY_INFO *info) +int write_record(THD *thd, TABLE *table,COPY_INFO *info) { int error; char *key=0; @@ -498,9 +743,10 @@ int write_record(TABLE *table,COPY_INFO *info) { while ((error=table->file->write_row(table->record[0]))) { + uint key_nr; if (error != HA_WRITE_SKIP) goto err; - uint key_nr; + table->file->restore_auto_increment(); if ((int) (key_nr = table->file->get_dup_key(error)) < 0) { error=HA_WRITE_SKIP; /* Database can't find key */ @@ -538,7 +784,7 @@ int write_record(TABLE *table,COPY_INFO *info) goto err; } } - key_copy((byte*) key,table,key_nr,0); + key_copy((byte*) key,table->record[0],table->key_info+key_nr,0); if 
((error=(table->file->index_read_idx(table->record[1],key_nr, (byte*) key, table->key_info[key_nr]. @@ -548,6 +794,7 @@ int write_record(TABLE *table,COPY_INFO *info) } if (info->handle_duplicates == DUP_UPDATE) { + int res= 0; /* we don't check for other UNIQUE keys - the first row that matches, is updated. If update causes a conflict again, an error is returned @@ -555,9 +802,19 @@ int write_record(TABLE *table,COPY_INFO *info) DBUG_ASSERT(table->insert_values != NULL); store_record(table,insert_values); restore_record(table,record[1]); - DBUG_ASSERT(info->update_fields->elements==info->update_values->elements); - if (fill_record(*info->update_fields, *info->update_values, 0)) + DBUG_ASSERT(info->update_fields->elements == + info->update_values->elements); + if (fill_record(thd, *info->update_fields, *info->update_values, 0)) goto err; + + /* CHECK OPTION for VIEW ... ON DUPLICATE KEY UPDATE ... */ + if (info->view && + (res= info->view->view_check_option(current_thd, info->ignore)) == + VIEW_CHECK_SKIP) + break; + if (res == VIEW_CHECK_ERROR) + goto err; + if ((error=table->file->update_row(table->record[1],table->record[0]))) goto err; info->updated++; @@ -587,6 +844,8 @@ int write_record(TABLE *table,COPY_INFO *info) else if ((error=table->file->delete_row(table->record[1]))) goto err; info->deleted++; + if (!table->file->has_transactions()) + thd->no_trans_update= 1; } } info->copied++; @@ -596,11 +855,14 @@ int write_record(TABLE *table,COPY_INFO *info) if (!info->ignore || (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)) goto err; + table->file->restore_auto_increment(); } else info->copied++; if (key) my_safe_afree(key,table->max_unique_length,MAX_KEY_LENGTH); + if (!table->file->has_transactions()) + thd->no_trans_update= 1; DBUG_RETURN(0); err: @@ -614,26 +876,22 @@ err: /****************************************************************************** Check that all fields with arn't null_fields are used - If DONT_USE_DEFAULT_FIELDS isn't defined use default value for not set - fields. 
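The write_record() hunks above and below rework duplicate-key handling; in outline the function now proceeds as follows (condensed sketch, with key-buffer management and most error paths left out):

  while ((error= table->file->write_row(table->record[0])))
  {
    table->file->restore_auto_increment();     /* don't burn the auto_increment value */
    /* ... fetch the conflicting row into record[1] via the duplicate key ... */
    if (info->handle_duplicates == DUP_UPDATE)
    {
      /* apply the ON DUPLICATE KEY UPDATE assignments, then the view's
         WITH CHECK OPTION, then update the existing row */
      fill_record(thd, *info->update_fields, *info->update_values, 0);
      table->file->update_row(table->record[1], table->record[0]);
      info->updated++;
      break;
    }
    /* REPLACE: delete (or, for the last unique key, update) the old row
       and try the write again */
    table->file->delete_row(table->record[1]);
    info->deleted++;
  }
  info->copied++;
  if (!table->file->has_transactions())
    thd->no_trans_update= 1;                   /* this change can't be rolled back */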
******************************************************************************/ -static int check_null_fields(THD *thd __attribute__((unused)), - TABLE *entry __attribute__((unused))) +int check_that_all_fields_are_given_values(THD *thd, TABLE *entry) { -#ifdef DONT_USE_DEFAULT_FIELDS + if (!thd->abort_on_warning) // No check if not strict mode + return 0; + for (Field **field=entry->field ; *field ; field++) { - if ((*field)->query_id != thd->query_id && !(*field)->maybe_null() && - *field != entry->timestamp_field && - *field != entry->next_number_field) + if ((*field)->query_id != thd->query_id && + ((*field)->flags & NO_DEFAULT_VALUE_FLAG)) { - my_printf_error(ER_BAD_NULL_ERROR, ER(ER_BAD_NULL_ERROR),MYF(0), - (*field)->field_name); + my_error(ER_NO_DEFAULT_FOR_FIELD, MYF(0), (*field)->field_name); return 1; } } -#endif return 0; } @@ -649,14 +907,13 @@ public: char *record,*query; enum_duplicates dup; time_t start_time; - bool query_start_used,last_insert_id_used,insert_id_used, ignore; - int log_query; + bool query_start_used,last_insert_id_used,insert_id_used, ignore, log_query; ulonglong last_insert_id; timestamp_auto_set_type timestamp_field_type; uint query_length; - delayed_row(enum_duplicates dup_arg, bool ignore_arg, int log_query_arg) - :record(0),query(0),dup(dup_arg),ignore(ignore_arg),log_query(log_query_arg) {} + delayed_row(enum_duplicates dup_arg, bool ignore_arg, bool log_query_arg) + :record(0), query(0), dup(dup_arg), ignore(ignore_arg), log_query(log_query_arg) {} ~delayed_row() { x_free(record); @@ -807,7 +1064,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) { delete tmp; thd->fatal_error(); - my_error(ER_OUT_OF_RESOURCES,MYF(0)); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); pthread_mutex_unlock(&LOCK_delayed_create); DBUG_RETURN(0); } @@ -827,7 +1084,7 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) delete tmp; thd->fatal_error(); pthread_mutex_unlock(&LOCK_delayed_create); - net_printf(thd,ER_CANT_CREATE_THREAD,error); + my_error(ER_CANT_CREATE_THREAD, MYF(0), error); DBUG_RETURN(0); } @@ -969,7 +1226,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) /* Put a question in queue */ static int write_delayed(THD *thd,TABLE *table,enum_duplicates duplic, bool ignore, - char *query, uint query_length, int log_on) + char *query, uint query_length, bool log_on) { delayed_row *row=0; delayed_insert *di=thd->di; @@ -1049,7 +1306,7 @@ void kill_delayed_threads(void) { /* Ensure that the thread doesn't kill itself while we are looking at it */ pthread_mutex_lock(&tmp->mutex); - tmp->thd.killed=1; + tmp->thd.killed= THD::KILL_CONNECTION; if (tmp->thd.mysys_var) { pthread_mutex_lock(&tmp->thd.mysys_var->mutex); @@ -1088,7 +1345,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) thd->thread_id=thread_id++; thd->end_time(); threads.append(thd); - thd->killed=abort_loop; + thd->killed=abort_loop ? 
THD::KILL_CONNECTION : THD::NOT_KILLED; pthread_mutex_unlock(&LOCK_thread_count); pthread_mutex_lock(&di->mutex); @@ -1141,7 +1398,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) for (;;) { - if (thd->killed) + if (thd->killed == THD::KILL_CONNECTION) { uint lock_count; /* @@ -1189,7 +1446,7 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) break; if (error == ETIME || error == ETIMEDOUT) { - thd->killed=1; + thd->killed= THD::KILL_CONNECTION; break; } } @@ -1208,8 +1465,9 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) /* request for new delayed insert */ if (!(thd->lock=mysql_lock_tables(thd,&di->table,1))) { - di->dead= 1; // Some fatal error - thd->killed= 1; + /* Fatal error */ + di->dead= 1; + thd->killed= THD::KILL_CONNECTION; } pthread_cond_broadcast(&di->cond_client); } @@ -1217,8 +1475,9 @@ extern "C" pthread_handler_decl(handle_delayed_insert,arg) { if (di->handle_inserts()) { - di->dead= 1; // Some fatal error - thd->killed= 1; + /* Some fatal error */ + di->dead= 1; + thd->killed= THD::KILL_CONNECTION; } } di->status=0; @@ -1248,7 +1507,7 @@ end: close_thread_tables(thd); // Free the table di->table=0; di->dead= 1; // If error - thd->killed= 1; + thd->killed= THD::KILL_CONNECTION; // If error pthread_cond_broadcast(&di->cond_client); // Safety pthread_mutex_unlock(&di->mutex); @@ -1317,7 +1576,7 @@ bool delayed_insert::handle_inserts(void) max_rows=delayed_insert_limit; if (thd.killed || table->version != refresh_version) { - thd.killed=1; + thd.killed= THD::KILL_CONNECTION; max_rows= ~0; // Do as much as possible } @@ -1351,7 +1610,7 @@ bool delayed_insert::handle_inserts(void) using_ignore=1; } thd.clear_error(); // reset error for binlog - if (write_record(table,&info)) + if (write_record(&thd, table, &info)) { info.error_count++; // Ignore errors thread_safe_increment(delayed_insert_errors,&LOCK_delayed_status); @@ -1362,15 +1621,10 @@ bool delayed_insert::handle_inserts(void) using_ignore=0; table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY); } - if (row->query) + if (row->query && row->log_query && using_bin_log) { - if (row->log_query & DELAYED_LOG_UPDATE) - mysql_update_log.write(&thd,row->query, row->query_length); - if (row->log_query & DELAYED_LOG_BIN && using_bin_log) - { - Query_log_event qinfo(&thd, row->query, row->query_length,0, FALSE); - mysql_bin_log.write(&qinfo); - } + Query_log_event qinfo(&thd, row->query, row->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); } if (table->blob_fields) free_delayed_insert_blobs(table); @@ -1386,7 +1640,7 @@ bool delayed_insert::handle_inserts(void) on this table until all entries has been processed */ if (group_count++ >= max_rows && (row= rows.head()) && - (!(row->log_query & DELAYED_LOG_BIN && using_bin_log) || + (!(row->log_query & using_bin_log) || row->query)) { group_count=0; @@ -1450,14 +1704,103 @@ bool delayed_insert::handle_inserts(void) Store records in INSERT ... 
SELECT * ***************************************************************************/ + +/* + make insert specific preparation and checks after opening tables + + SYNOPSIS + mysql_insert_select_prepare() + thd thread handler + + RETURN + FALSE OK + TRUE Error +*/ + +bool mysql_insert_select_prepare(THD *thd) +{ + LEX *lex= thd->lex; + TABLE_LIST *first_select_table= + (TABLE_LIST*) lex->select_lex.table_list.first; + TABLE_LIST *first_select_leaf_table; + int res; + DBUG_ENTER("mysql_insert_select_prepare"); + /* + SELECT_LEX do not belong to INSERT statement, so we can't add WHERE + clause if table is VIEW + */ + lex->query_tables->no_where_clause= 1; + if (mysql_prepare_insert(thd, lex->query_tables, + lex->query_tables->table, lex->field_list, 0, + lex->update_list, lex->value_list, + lex->duplicates, + &lex->select_lex.where, TRUE)) + DBUG_RETURN(TRUE); + + /* + exclude first table from leaf tables list, because it belong to + INSERT + */ + DBUG_ASSERT(lex->select_lex.leaf_tables); + lex->leaf_tables_insert= lex->select_lex.leaf_tables; + /* skip all leaf tables belonged to view where we are insert */ + for (first_select_leaf_table= lex->select_lex.leaf_tables->next_leaf; + first_select_leaf_table && + first_select_leaf_table->belong_to_view && + first_select_leaf_table->belong_to_view == + lex->leaf_tables_insert->belong_to_view; + first_select_leaf_table= first_select_leaf_table->next_leaf) + {} + lex->select_lex.leaf_tables= first_select_leaf_table; + DBUG_RETURN(FALSE); +} + + +select_insert::select_insert(TABLE_LIST *table_list_par, TABLE *table_par, + List<Item> *fields_par, + List<Item> *update_fields, List<Item> *update_values, + enum_duplicates duplic, + bool ignore_check_option_errors) + :table_list(table_list_par), table(table_par), fields(fields_par), + last_insert_id(0), + insert_into_view(table_list_par && table_list_par->view != 0) +{ + bzero((char*) &info,sizeof(info)); + info.handle_duplicates= duplic; + info.ignore= ignore_check_option_errors; + info.update_fields= update_fields; + info.update_values= update_values; + if (table_list_par) + info.view= (table_list_par->view ? 
table_list_par : 0); +} + + int select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) { DBUG_ENTER("select_insert::prepare"); unit= u; - if (check_insert_fields(thd,table,*fields,values,1)) + if (check_insert_fields(thd, table_list, *fields, values, 1, + !insert_into_view)) DBUG_RETURN(1); + /* + if it is INSERT into join view then check_insert_fields already found + real table for insert + */ + table= table_list->table; + + /* + Is table which we are changing used somewhere in other parts of + query + */ + if (!(thd->lex->current_select->options & OPTION_BUFFER_RESULT) && + unique_table(table_list, table_list->next_global)) + { + /* Using same table for INSERT and SELECT */ + thd->lex->current_select->options|= OPTION_BUFFER_RESULT; + thd->lex->current_select->join->select_options|= OPTION_BUFFER_RESULT; + } restore_record(table,default_values); // Get empty record table->next_number_field=table->found_next_number_field; @@ -1466,7 +1809,13 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u) info.handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->start_bulk_insert((ha_rows) 0); - DBUG_RETURN(0); + thd->no_trans_update= 0; + thd->abort_on_warning= (info.handle_duplicates != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + DBUG_RETURN(fields->elements && + check_that_all_fields_are_given_values(thd, table)); } @@ -1478,12 +1827,15 @@ void select_insert::cleanup() select_insert::~select_insert() { + DBUG_ENTER("~select_insert"); if (table) { table->next_number_field=0; table->file->reset(); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; + thd->abort_on_warning= 0; + DBUG_VOID_RETURN; } @@ -1496,12 +1848,26 @@ bool select_insert::send_data(List<Item> &values) unit->offset_limit_cnt--; DBUG_RETURN(0); } - thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields + + thd->count_cuted_fields= CHECK_FIELD_WARN; // Calculate cuted fields store_values(values); - error=thd->net.report_error || write_record(table,&info); thd->count_cuted_fields= CHECK_FIELD_IGNORE; - if (!error && table->next_number_field) // Clear for next record + if (thd->net.report_error) + DBUG_RETURN(1); + if (table_list) // Not CREATE ... SELECT { + switch (table_list->view_check_option(thd, + thd->lex->duplicates == + DUP_IGNORE)) { + case VIEW_CHECK_SKIP: + DBUG_RETURN(0); + case VIEW_CHECK_ERROR: + DBUG_RETURN(1); + } + } + if (!(error= write_record(thd, table,&info)) && table->next_number_field) + { + /* Clear for next record */ table->next_number_field->reset(); if (! 
last_insert_id && thd->insert_id_used) last_insert_id=thd->insert_id(); @@ -1513,17 +1879,16 @@ bool select_insert::send_data(List<Item> &values) void select_insert::store_values(List<Item> &values) { if (fields->elements) - fill_record(*fields, values, 1); + fill_record(thd, *fields, values, 1); else - fill_record(table->field, values, 1); + fill_record(thd, table->field, values, 1); } void select_insert::send_error(uint errcode,const char *err) { DBUG_ENTER("select_insert::send_error"); - /* TODO error should be sent at the query processing end */ - ::send_error(thd,errcode,err); + my_message(errcode, err, MYF(0)); if (!table) { @@ -1545,7 +1910,6 @@ void select_insert::send_error(uint errcode,const char *err) { if (last_insert_id) thd->insert_id(last_insert_id); // For binary log - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, @@ -1556,7 +1920,9 @@ void select_insert::send_error(uint errcode,const char *err) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; } if (info.copied || info.deleted || info.updated) + { query_cache_invalidate3(thd, table, 1); + } ha_rollback_stmt(thd); DBUG_VOID_RETURN; } @@ -1585,7 +1951,6 @@ bool select_insert::send_eof() if (last_insert_id) thd->insert_id(last_insert_id); // For binary log /* Write to binlog before commiting transaction */ - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (!error) @@ -1599,8 +1964,6 @@ bool select_insert::send_eof() if (error) { table->file->print_error(error,MYF(0)); - //TODO error should be sent at the query processing end - ::send_error(thd); DBUG_RETURN(1); } char buff[160]; @@ -1610,7 +1973,8 @@ bool select_insert::send_eof() else sprintf(buff, ER(ER_INSERT_INFO), (ulong) info.records, (ulong) (info.deleted+info.updated), (ulong) thd->cuted_fields); - ::send_ok(thd,info.copied+info.deleted+info.updated,last_insert_id,buff); + thd->row_count_func= info.copied+info.deleted+info.updated; + ::send_ok(thd, (ulong) thd->row_count_func, last_insert_id, buff); DBUG_RETURN(0); } @@ -1625,16 +1989,14 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) DBUG_ENTER("select_create::prepare"); unit= u; - table= create_table_from_items(thd, create_info, db, name, + table= create_table_from_items(thd, create_info, create_table, extra_fields, keys, &values, &lock); if (!table) DBUG_RETURN(-1); // abort() deletes table if (table->fields < values.elements) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0),1); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), 1); DBUG_RETURN(-1); } @@ -1652,13 +2014,18 @@ select_create::prepare(List<Item> &values, SELECT_LEX_UNIT *u) info.handle_duplicates == DUP_REPLACE) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); table->file->start_bulk_insert((ha_rows) 0); - DBUG_RETURN(0); + thd->no_trans_update= 0; + thd->abort_on_warning= (info.handle_duplicates != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + DBUG_RETURN(check_that_all_fields_are_given_values(thd, table)); } void select_create::store_values(List<Item> &values) { - fill_record(field, values, 1); + fill_record(thd, field, values, 1); } @@ -1721,13 +2088,13 @@ void select_create::abort() ulong version= table->version; hash_delete(&open_cache,(byte*) table); if (!create_info->table_existed) - quick_rm_table(table_type, db, name); + quick_rm_table(table_type, create_table->db, create_table->real_name); /* 
Tell threads waiting for refresh that something has happened */ if (version != refresh_version) VOID(pthread_cond_broadcast(&COND_refresh)); } else if (!create_info->table_existed) - close_temporary_table(thd, db, name); + close_temporary_table(thd, create_table->db, create_table->real_name); table=0; } VOID(pthread_mutex_unlock(&LOCK_open)); diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 5730073bd35..5929ad5c14b 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -21,16 +21,14 @@ #include "item_create.h" #include <m_ctype.h> #include <hash.h> - +#include "sp.h" +#include "sp_head.h" /* - Fake table list object, pointer to which is used as special value for - st_lex::time_zone_tables_used indicating that we implicitly use time - zone tables in this statement but real table list was not yet created. - Pointer to it is also returned by my_tz_get_tables_list() as indication - of transient error; + We are using pointer to this variable for distinguishing between assignment + to NEW row field (when parsing trigger definition) and structured variable. */ -TABLE_LIST fake_time_zone_tables_list; +sys_var_long_ptr trg_new_row_fake_var(0, 0); /* Macros to look like lex */ @@ -119,12 +117,12 @@ void lex_start(THD *thd, uchar *buf,uint length) LEX *lex= thd->lex; lex->unit.init_query(); lex->unit.init_select(); - lex->thd= thd; - lex->unit.thd= thd; + lex->thd= lex->unit.thd= thd; lex->select_lex.init_query(); lex->value_list.empty(); lex->update_list.empty(); lex->param_list.empty(); + lex->view_list.empty(); lex->unit.next= lex->unit.master= lex->unit.link_next= lex->unit.return_to= 0; lex->unit.prev= lex->unit.link_prev= 0; @@ -135,15 +133,25 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->select_lex.link_next= lex->select_lex.slave= lex->select_lex.next= 0; lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list); lex->select_lex.options= 0; + lex->select_lex.init_order(); + lex->select_lex.group_list.empty(); lex->describe= 0; - lex->subqueries= lex->derived_tables= FALSE; + lex->subqueries= FALSE; + lex->view_prepare_mode= FALSE; + lex->derived_tables= 0; lex->lock_option= TL_READ; lex->found_colon= 0; lex->safe_to_cache_query= 1; lex->time_zone_tables_used= 0; + lex->leaf_tables_insert= lex->proc_table= lex->query_tables= 0; + lex->query_tables_last= &lex->query_tables; + lex->variables_used= 0; + lex->select_lex.parent_lex= lex; + lex->empty_field_list_on_rset= 0; lex->select_lex.select_number= 1; lex->next_state=MY_LEX_START; - lex->end_of_query=(lex->ptr=buf)+length; + lex->buf= lex->ptr= buf; + lex->end_of_query=buf+length; lex->yylineno = 1; lex->in_comment=0; lex->length=0; @@ -159,7 +167,12 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->sql_command=SQLCOM_END; lex->duplicates= DUP_ERROR; lex->ignore= 0; + lex->sphead= NULL; + lex->spcont= NULL; lex->proc_list.first= 0; + + if (lex->spfuns.records) + hash_reset(&lex->spfuns); } void lex_end(LEX *lex) @@ -180,29 +193,16 @@ static int find_keyword(LEX *lex, uint len, bool function) lex->yylval->symbol.symbol=symbol; lex->yylval->symbol.str= (char*) tok; lex->yylval->symbol.length=len; + + if ((symbol->tok == NOT_SYM) && + (lex->thd->variables.sql_mode & MODE_HIGH_NOT_PRECEDENCE)) + return NOT2_SYM; + if ((symbol->tok == OR_OR_SYM) && + !(lex->thd->variables.sql_mode & MODE_PIPES_AS_CONCAT)) + return OR2_SYM; + return symbol->tok; } -#ifdef HAVE_DLOPEN - udf_func *udf; - if (function && using_udf_functions && (udf=find_udf((char*) tok, len))) - { - lex->safe_to_cache_query=0; - lex->yylval->udf=udf; - 
switch (udf->returns) { - case STRING_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? UDF_CHAR_FUNC : UDA_CHAR_SUM; - case REAL_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? UDF_FLOAT_FUNC : UDA_FLOAT_SUM; - case INT_RESULT: - return (udf->type == UDFTYPE_FUNCTION) ? UDF_INT_FUNC : UDA_INT_SUM; - case ROW_RESULT: - default: - // This case should never be choosen - DBUG_ASSERT(0); - return 0; - } - } -#endif return 0; } @@ -287,7 +287,8 @@ static char *get_text(LEX *lex) continue; } #endif - if (c == '\\') + if (c == '\\' && + !(lex->thd->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES)) { // Escaped character found_escape=1; if (lex->ptr == lex->end_of_query) @@ -559,8 +560,12 @@ int yylex(void *arg, void *yythd) state= MY_LEX_HEX_NUMBER; break; } - /* Fall through */ - case MY_LEX_IDENT_OR_BIN: // TODO: Add binary string handling + case MY_LEX_IDENT_OR_BIN: + if (yyPeek() == '\'') + { // Found b'bin-number' + state= MY_LEX_BIN_NUMBER; + break; + } case MY_LEX_IDENT: uchar *start; #if defined(USE_MB) && defined(USE_MB_IDENT) @@ -681,6 +686,20 @@ int yylex(void *arg, void *yythd) } yyUnget(); } + else if (c == 'b' && (lex->ptr - lex->tok_start) == 2 && + lex->tok_start[0] == '0' ) + { // b'bin-number' + while (my_isxdigit(cs,(c = yyGet()))) ; + if ((lex->ptr - lex->tok_start) >= 4 && !ident_map[c]) + { + yylval->lex_str= get_token(lex, yyLength()); + yylval->lex_str.str+= 2; // Skip 0x + yylval->lex_str.length-= 2; + lex->yytoklen-= 2; + return (BIN_NUM); + } + yyUnget(); + } // fall through case MY_LEX_IDENT_START: // We come here after '.' result_state= IDENT; @@ -793,6 +812,19 @@ int yylex(void *arg, void *yythd) lex->yytoklen-=3; return (HEX_NUM); + case MY_LEX_BIN_NUMBER: // Found b'bin-string' + yyGet(); // Skip ' + while ((c= yyGet()) == '0' || c == '1'); + length= (lex->ptr - lex->tok_start); // Length of bin-num + 3 + if (c != '\'') + return(ABORT_SYM); // Illegal hex constant + yyGet(); // get_token makes an unget + yylval->lex_str= get_token(lex, length); + yylval->lex_str.str+= 2; // Skip b' + yylval->lex_str.length-= 3; // Don't count b' and last ' + lex->yytoklen-= 3; + return (BIN_NUM); + case MY_LEX_CMP_OP: // Incomplete comparison operator if (state_map[yyPeek()] == MY_LEX_CMP_OP || state_map[yyPeek()] == MY_LEX_LONG_CMP_OP) @@ -912,13 +944,12 @@ int yylex(void *arg, void *yythd) if ((thd->client_capabilities & CLIENT_MULTI_STATEMENTS) && (thd->command != COM_PREPARE)) { - lex->found_colon=(char*)lex->ptr; - thd->server_status |= SERVER_MORE_RESULTS_EXISTS; - lex->next_state=MY_LEX_END; - return(END_OF_INPUT); + lex->found_colon= (char*) lex->ptr; + thd->server_status|= SERVER_MORE_RESULTS_EXISTS; + lex->next_state= MY_LEX_END; + return (END_OF_INPUT); } - else - state=MY_LEX_CHAR; // Return ';' + state= MY_LEX_CHAR; // Return ';' break; } /* fall true */ @@ -1032,24 +1063,32 @@ void st_select_lex_unit::init_query() cleaned= 0; item_list.empty(); describe= 0; + found_rows_for_union= 0; } void st_select_lex::init_query() { st_select_lex_node::init_query(); table_list.empty(); + top_join_list.empty(); + join_list= &top_join_list; + embedding= leaf_tables= 0; item_list.empty(); join= 0; - where= 0; + where= prep_where= 0; olap= UNSPECIFIED_OLAP_TYPE; having_fix_field= 0; resolve_mode= NOMATTER_MODE; cond_count= with_wild= 0; + conds_processed_with_permanent_arena= 0; ref_pointer_array= 0; select_n_having_items= 0; - prep_where= 0; subquery_in_having= explicit_limit= 0; + first_execution= 1; + first_cond_optimization= 1; parsing_place= NO_MATTER; + no_wrap_view_item= 0; 
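On the lexer changes above: two spellings of binary-string literals are now recognized, and both are delivered to the parser as BIN_NUM with the prefix stripped from the token text (note that the "Skip 0x" comment on the 0b path looks like a leftover from the hex code it mirrors). A small, purely illustrative example of what the scanner is expected to produce under that reading:

  /* Hypothetical illustration, not part of the patch: input literals and
     the digits left in yylval->lex_str once the prefix/quotes are skipped. */
  struct bin_literal_example { const char *input; const char *token_text; };
  static const bin_literal_example bin_examples[]=
  {
    { "b'1010'", "1010" },   /* MY_LEX_IDENT_OR_BIN -> MY_LEX_BIN_NUMBER path  */
    { "0b1010",  "1010" },   /* '0','b' form handled alongside the 0x hex path */
  };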
+ link_next= 0; } void st_select_lex::init_select() @@ -1301,7 +1340,7 @@ bool st_select_lex::test_limit() if (select_limit != HA_POS_ERROR) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), - "LIMIT & IN/ALL/ANY/SOME subquery"); + "LIMIT & IN/ALL/ANY/SOME subquery"); return(1); } // We need only 1 row to determinate existence @@ -1311,147 +1350,6 @@ bool st_select_lex::test_limit() return(0); } -/* - Interface method of table list creation for query - - SYNOPSIS - st_select_lex_unit::create_total_list() - thd THD pointer - result pointer on result list of tables pointer - check_derived force derived table chacking (used for creating - table list for derived query) - DESCRIPTION - This is used for UNION & subselect to create a new table list of all used - tables. - The table_list->table entry in all used tables are set to point - to the entries in this list. - - RETURN - 0 - OK - !0 - error -*/ -bool st_select_lex_unit::create_total_list(THD *thd_arg, st_lex *lex, - TABLE_LIST **result_arg) -{ - *result_arg= 0; - if (!(res= create_total_list_n_last_return(thd_arg, lex, &result_arg))) - { - /* - If time zone tables were used implicitly in statement we should add - them to global table list. - */ - if (lex->time_zone_tables_used) - { - /* - Altough we are modifying lex data, it won't raise any problem in - case when this lex belongs to some prepared statement or stored - procedure: such modification does not change any invariants imposed - by requirement to reuse the same lex for multiple executions. - */ - if ((lex->time_zone_tables_used= my_tz_get_table_list(thd)) != - &fake_time_zone_tables_list) - { - *result_arg= lex->time_zone_tables_used; - } - else - { - send_error(thd, 0); - res= 1; - } - } - } - return res; -} - -/* - Table list creation for query - - SYNOPSIS - st_select_lex_unit::create_total_list() - thd THD pointer - lex pointer on LEX stricture - result pointer on pointer on result list of tables pointer - - DESCRIPTION - This is used for UNION & subselect to create a new table list of all used - tables. - The table_list->table_list in all tables of global list are set to point - to the local SELECT_LEX entries. - - RETURN - 0 - OK - !0 - error -*/ -bool st_select_lex_unit:: -create_total_list_n_last_return(THD *thd_arg, - st_lex *lex, - TABLE_LIST ***result_arg) -{ - TABLE_LIST *slave_list_first=0, **slave_list_last= &slave_list_first; - TABLE_LIST **new_table_list= *result_arg, *aux; - SELECT_LEX *sl= (SELECT_LEX*)slave; - - /* - iterate all inner selects + fake_select (if exists), - fake_select->next_select() always is 0 - */ - for (; - sl; - sl= (sl->next_select() ? - sl->next_select() : - (sl == fake_select_lex ? 
- 0 : - fake_select_lex))) - { - // check usage of ORDER BY in union - if (sl->order_list.first && sl->next_select() && !sl->braces && - sl->linkage != GLOBAL_OPTIONS_TYPE) - { - net_printf(thd_arg,ER_WRONG_USAGE,"UNION","ORDER BY"); - return 1; - } - - for (SELECT_LEX_UNIT *inner= sl->first_inner_unit(); - inner; - inner= inner->next_unit()) - { - if (inner->create_total_list_n_last_return(thd, lex, - &slave_list_last)) - return 1; - } - - if ((aux= (TABLE_LIST*) sl->table_list.first)) - { - TABLE_LIST *next_table; - for (; aux; aux= next_table) - { - TABLE_LIST *cursor; - next_table= aux->next; - /* Add to the total table list */ - if (!(cursor= (TABLE_LIST *) thd->memdup((char*) aux, - sizeof(*aux)))) - { - send_error(thd,0); - return 1; - } - *new_table_list= cursor; - cursor->table_list= aux; - new_table_list= &cursor->next; - *new_table_list= 0; // end result list - aux->table_list= cursor; - } - } - } - - if (slave_list_first) - { - *new_table_list= slave_list_first; - new_table_list= slave_list_last; - } - *result_arg= new_table_list; - return 0; -} - st_select_lex_unit* st_select_lex_unit::master_unit() { @@ -1473,7 +1371,9 @@ bool st_select_lex::add_order_to_list(THD *thd, Item *item, bool asc) bool st_select_lex::add_item_to_list(THD *thd, Item *item) { - return item_list.push_back(item); + DBUG_ENTER("st_select_lex::add_item_to_list"); + DBUG_PRINT("info", ("Item: %p", item)); + DBUG_RETURN(item_list.push_back(item)); } @@ -1561,10 +1461,10 @@ bool st_select_lex::setup_ref_array(THD *thd, uint order_group_num) */ Item_arena *arena= thd->current_arena; return (ref_pointer_array= - (Item **)arena->alloc(sizeof(Item*) * - (item_list.elements + - select_n_having_items + - order_group_num)* 5)) == 0; + (Item **)arena->alloc(sizeof(Item*) * + (item_list.elements + + select_n_having_items + + order_group_num)* 5)) == 0; } @@ -1606,7 +1506,7 @@ bool st_select_lex_unit::check_updateable(char *db, char *table) bool st_select_lex::check_updateable(char *db, char *table) { - if (find_real_table_in_list(get_table_list(), db, table)) + if (find_table_in_local_list(get_table_list(), db, table)) return 1; return check_updateable_in_subqueries(db, table); @@ -1673,7 +1573,14 @@ void st_select_lex::print_order(String *str, ORDER *order) { for (; order; order= order->next) { - (*order->item)->print(str); + if (order->counter_used) + { + char buffer[20]; + my_snprintf(buffer, 20, "%u", order->counter); + str->append(buffer); + } + else + (*order->item)->print(str); if (!order->asc) str->append(" desc", 5); if (order->next) @@ -1684,6 +1591,17 @@ void st_select_lex::print_order(String *str, ORDER *order) void st_select_lex::print_limit(THD *thd, String *str) { + SELECT_LEX_UNIT *unit= master_unit(); + Item_subselect *item= unit->item; + if (item && unit->global_parameters == this && + (item->substype() == Item_subselect::EXISTS_SUBS || + item->substype() == Item_subselect::IN_SUBS || + item->substype() == Item_subselect::ALL_SUBS)) + { + DBUG_ASSERT(select_limit == 1L && offset_limit == 0L); + return; + } + if (!thd) thd= current_thd; @@ -1705,84 +1623,364 @@ void st_select_lex::print_limit(THD *thd, String *str) } -st_lex::st_lex() - :result(0) -{} +/* + Check whether the merging algorithm can be used on this VIEW + + SYNOPSIS + st_lex::can_be_merged() + + DESCRIPTION + We can apply merge algorithm if it is single SELECT view with + subqueries only in WHERE clause (we do not count SELECTs of underlying + views, and second level subqueries) and we have not grpouping, ordering, + HAVING clause, 
aggregate functions, DISTINCT clause, LIMIT clause and + several underlying tables. + + RETURN + FALSE - only temporary table algorithm can be used + TRUE - merge algorithm can be used +*/ + +bool st_lex::can_be_merged() +{ + // TODO: do not forget implement case when select_lex.table_list.elements==0 + + /* find non VIEW subqueries/unions */ + bool selects_allow_merge= select_lex.next_select() == 0; + if (selects_allow_merge) + { + for (SELECT_LEX_UNIT *unit= select_lex.first_inner_unit(); + unit; + unit= unit->next_unit()) + { + if (unit->first_select()->parent_lex == this && + (unit->item == 0 || unit->item->place() != IN_WHERE)) + { + selects_allow_merge= 0; + break; + } + } + } + + return (selects_allow_merge && + select_lex.order_list.elements == 0 && + select_lex.group_list.elements == 0 && + select_lex.having == 0 && + select_lex.with_sum_func == 0 && + select_lex.table_list.elements >= 1 && + !(select_lex.options & SELECT_DISTINCT) && + select_lex.select_limit == HA_POS_ERROR); +} /* - Unlink first table from global table list and first table from outer select - list (lex->select_lex) + check if command can use VIEW with MERGE algorithm (for top VIEWs) + + SYNOPSIS + st_lex::can_use_merged() + + DESCRIPTION + Only listed here commands can use merge algorithm in top level + SELECT_LEX (for subqueries will be used merge algorithm if + st_lex::can_not_use_merged() is not TRUE). + + RETURN + FALSE - command can't use merged VIEWs + TRUE - VIEWs with MERGE algorithms can be used +*/ + +bool st_lex::can_use_merged() +{ + switch (sql_command) + { + case SQLCOM_SELECT: + case SQLCOM_CREATE_TABLE: + case SQLCOM_UPDATE: + case SQLCOM_UPDATE_MULTI: + case SQLCOM_DELETE: + case SQLCOM_DELETE_MULTI: + case SQLCOM_INSERT: + case SQLCOM_INSERT_SELECT: + case SQLCOM_REPLACE: + case SQLCOM_REPLACE_SELECT: + case SQLCOM_LOAD: + return TRUE; + default: + return FALSE; + } +} + +/* + Check if command can't use merged views in any part of command + + SYNOPSIS + st_lex::can_not_use_merged() + + DESCRIPTION + Temporary table algorithm will be used on all SELECT levels for queries + listed here (see also st_lex::can_use_merged()). 
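st_lex::can_be_merged() above boils down to a handful of structural checks on the view's single SELECT. Restated over a simplified, hypothetical summary structure (the struct and its field names are not the server's, they only mirror the conditions the function tests), the predicate is:

struct View_select_summary
{
  bool is_union;                 /* more than one SELECT in the unit */
  bool subquery_outside_where;   /* an inner unit not attached to WHERE */
  unsigned order_by_elements;
  unsigned group_by_elements;
  bool has_having;
  bool has_aggregates;           /* with_sum_func */
  bool is_distinct;
  bool has_limit;                /* select_limit != HA_POS_ERROR */
  unsigned table_count;
};

static bool can_use_merge_algorithm(const View_select_summary &v)
{
  return !v.is_union && !v.subquery_outside_where &&
         v.order_by_elements == 0 && v.group_by_elements == 0 &&
         !v.has_having && !v.has_aggregates &&
         v.table_count >= 1 && !v.is_distinct && !v.has_limit;
}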
+ + RETURN + FALSE - command can't use merged VIEWs + TRUE - VIEWs with MERGE algorithms can be used +*/ + +bool st_lex::can_not_use_merged() +{ + switch (sql_command) + { + case SQLCOM_CREATE_VIEW: + case SQLCOM_SHOW_CREATE: + return TRUE; + default: + return FALSE; + } +} + +/* + Detect that we need only table structure of derived table/view + + SYNOPSIS + only_view_structure() + + RETURN + TRUE yes, we need only structure + FALSE no, we need data +*/ + +bool st_lex::only_view_structure() +{ + switch(sql_command) + { + case SQLCOM_SHOW_CREATE: + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_FIELDS: + case SQLCOM_REVOKE_ALL: + case SQLCOM_REVOKE: + case SQLCOM_GRANT: + case SQLCOM_CREATE_VIEW: + return TRUE; + default: + return FALSE; + } +} + + +/* + Should Items_ident be printed correctly + + SYNOPSIS + need_correct_ident() + + RETURN + TRUE yes, we need only structure + FALSE no, we need data +*/ + + +bool st_lex::need_correct_ident() +{ + switch(sql_command) + { + case SQLCOM_SHOW_CREATE: + case SQLCOM_SHOW_TABLES: + case SQLCOM_CREATE_VIEW: + return TRUE; + default: + return FALSE; + } +} + + +/* + initialize limit counters + + SYNOPSIS + st_select_lex_unit::set_limit() + values - SELECT_LEX with initial values for counters + sl - SELECT_LEX for options set +*/ + +void st_select_lex_unit::set_limit(SELECT_LEX *values, + SELECT_LEX *sl) +{ + offset_limit_cnt= values->offset_limit; + select_limit_cnt= values->select_limit+values->offset_limit; + if (select_limit_cnt < values->select_limit) + select_limit_cnt= HA_POS_ERROR; // no limit + if (select_limit_cnt == HA_POS_ERROR) + sl->options&= ~OPTION_FOUND_ROWS; +} + + +/* + Unlink the first table from the global table list and the first table from + outer select (lex->select_lex) local list SYNOPSIS unlink_first_table() - tables Global table list - global_first Save first global table here - local_first Save first local table here + link_to_local Set to 1 if caller should link this table to local list - NORES - global_first & local_first are used to save result for link_first_table_back + NOTES + We assume that first tables in both lists is the same table or the local + list is empty. RETURN - global list without first table + 0 If 'query_tables' == 0 + unlinked table + In this case link_to_local is set. */ -TABLE_LIST *st_lex::unlink_first_table(TABLE_LIST *tables, - TABLE_LIST **global_first, - TABLE_LIST **local_first) +TABLE_LIST *st_lex::unlink_first_table(bool *link_to_local) { - *global_first= tables; - *local_first= (TABLE_LIST*)select_lex.table_list.first; - /* - Exclude from global table list - */ - tables= tables->next; - /* - and from local list if it is not the same - */ - select_lex.table_list.first= ((&select_lex != all_selects_list) ? - (byte*) (*local_first)->next : - (byte*) tables); - (*global_first)->next= 0; - return tables; + TABLE_LIST *first; + if ((first= query_tables)) + { + /* + Exclude from global table list + */ + if ((query_tables= query_tables->next_global)) + query_tables->prev_global= &query_tables; + first->next_global= 0; + + /* + and from local list if it is not empty + */ + if ((*link_to_local= test(select_lex.table_list.first))) + { + select_lex.table_list.first= (byte*) first->next_local; + select_lex.table_list.elements--; //safety + first->next_local= 0; + /* + Ensure that the global list has the same first table as the local + list. 
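st_select_lex_unit::set_limit() above folds both counters into one: the unit must read offset_limit + select_limit rows before it may stop, an overflow of that sum means "no limit", and when the result is the no-limit sentinel it also clears OPTION_FOUND_ROWS, since every row will be read anyway. The overflow-safe addition as a small sketch, with HA_POS_ERROR replaced by an illustrative sentinel:

#include <cstdint>

static const uint64_t NO_LIMIT= UINT64_MAX;    /* stands in for HA_POS_ERROR */

/* Rows the unit has to fetch for LIMIT select_limit OFFSET offset_limit. */
static uint64_t rows_to_fetch(uint64_t select_limit, uint64_t offset_limit)
{
  uint64_t n= select_limit + offset_limit;     /* may wrap around */
  if (n < select_limit)                        /* wrapped: treat as unlimited */
    n= NO_LIMIT;
  return n;
}

/* Example: LIMIT 10 OFFSET 5 requires 15 rows to be read before stopping;
   a select_limit already equal to NO_LIMIT stays NO_LIMIT for any offset. */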
+ */ + first_lists_tables_same(); + } + } + return first; } /* + Bring first local table of first most outer select to first place in global + table list + + SYNOPSYS + st_lex::first_lists_tables_same() + + NOTES + In many cases (for example, usual INSERT/DELETE/...) the first table of + main SELECT_LEX have special meaning => check that it is the first table + in global list and re-link to be first in the global list if it is + necessary. We need such re-linking only for queries with sub-queries in + the select list, as only in this case tables of sub-queries will go to + the global list first. +*/ + +void st_lex::first_lists_tables_same() +{ + TABLE_LIST *first_table= (TABLE_LIST*) select_lex.table_list.first; + if (query_tables != first_table && first_table != 0) + { + TABLE_LIST *next; + if (query_tables_last == &first_table->next_global) + query_tables_last= first_table->prev_global; + + if ((next= *first_table->prev_global= first_table->next_global)) + next->prev_global= first_table->prev_global; + /* include in new place */ + first_table->next_global= query_tables; + /* + We are sure that query_tables is not 0, because first_table was not + first table in the global list => we can use + query_tables->prev_global without check of query_tables + */ + query_tables->prev_global= &first_table->next_global; + first_table->prev_global= &query_tables; + query_tables= first_table; + } +} + + +/* + Add implicitly used time zone description tables to global table list + (if needed). + + SYNOPSYS + st_lex::add_time_zone_tables_to_query_tables() + thd - pointer to current thread context + + RETURN VALUE + TRUE - error + FALSE - success +*/ + +bool st_lex::add_time_zone_tables_to_query_tables(THD *thd) +{ + /* We should not add these tables twice */ + if (!time_zone_tables_used) + { + time_zone_tables_used= my_tz_get_table_list(thd, &query_tables_last); + if (time_zone_tables_used == &fake_time_zone_tables_list) + return TRUE; + } + return FALSE; +} + +/* Link table back that was unlinked with unlink_first_table() SYNOPSIS link_first_table_back() - tables Global table list - global_first Saved first global table - local_first Saved first local table + link_to_local do we need link this table to local RETURN global list */ -TABLE_LIST *st_lex::link_first_table_back(TABLE_LIST *tables, - TABLE_LIST *global_first, - TABLE_LIST *local_first) + +void st_lex::link_first_table_back(TABLE_LIST *first, + bool link_to_local) { - global_first->next= tables; - if (&select_lex != all_selects_list) + if (first) { - /* - we do not touch local table 'next' field => we need just - put the table in the list - */ - select_lex.table_list.first= (byte*) local_first; + if ((first->next_global= query_tables)) + query_tables->prev_global= &first->next_global; + query_tables= first; + + if (link_to_local) + { + first->next_local= (TABLE_LIST*) select_lex.table_list.first; + select_lex.table_list.first= (byte*) first; + select_lex.table_list.elements++; //safety + } + } +} + + +/* + fix some structures at the end of preparation + + SYNOPSIS + st_select_lex::fix_prepare_information + thd thread handler + conds pointer on conditions which will be used for execution statement +*/ + +void st_select_lex::fix_prepare_information(THD *thd, Item **conds) +{ + if (!thd->current_arena->is_conventional() && first_execution) + { + first_execution= 0; + prep_where= where; } - else - select_lex.table_list.first= (byte*) global_first; - return global_first; } /* - There are st_select_lex::add_table_to_list & + There are 
st_select_lex::add_table_to_list & st_select_lex::set_lock_for_tables are in sql_parse.cc - st_select_lex::print is in sql_select.h + st_select_lex::print is in sql_select.cc st_select_lex_unit::prepare, st_select_lex_unit::exec, st_select_lex_unit::cleanup, st_select_lex_unit::reinit_exec_mechanism, diff --git a/sql/sql_lex.h b/sql/sql_lex.h index e2e0bc61c23..6ed5fb247dc 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -21,6 +21,10 @@ class Table_ident; class sql_exchange; class LEX_COLUMN; +class sp_head; +class sp_name; +class sp_instr; +class sp_pcontext; /* The following hack is needed because mysql_yacc.cc does not define @@ -49,17 +53,17 @@ enum enum_sql_command { SQLCOM_SHOW_DATABASES, SQLCOM_SHOW_TABLES, SQLCOM_SHOW_FIELDS, SQLCOM_SHOW_KEYS, SQLCOM_SHOW_VARIABLES, SQLCOM_SHOW_LOGS, SQLCOM_SHOW_STATUS, - SQLCOM_SHOW_INNODB_STATUS, + SQLCOM_SHOW_INNODB_STATUS, SQLCOM_SHOW_MUTEX_STATUS, SQLCOM_SHOW_PROCESSLIST, SQLCOM_SHOW_MASTER_STAT, SQLCOM_SHOW_SLAVE_STAT, SQLCOM_SHOW_GRANTS, SQLCOM_SHOW_CREATE, SQLCOM_SHOW_CHARSETS, - SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, + SQLCOM_SHOW_COLLATIONS, SQLCOM_SHOW_CREATE_DB, SQLCOM_SHOW_TABLE_STATUS, SQLCOM_LOAD,SQLCOM_SET_OPTION,SQLCOM_LOCK_TABLES,SQLCOM_UNLOCK_TABLES, SQLCOM_GRANT, SQLCOM_CHANGE_DB, SQLCOM_CREATE_DB, SQLCOM_DROP_DB, SQLCOM_ALTER_DB, SQLCOM_REPAIR, SQLCOM_REPLACE, SQLCOM_REPLACE_SELECT, SQLCOM_CREATE_FUNCTION, SQLCOM_DROP_FUNCTION, - SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK, + SQLCOM_REVOKE,SQLCOM_OPTIMIZE, SQLCOM_CHECK, SQLCOM_ASSIGN_TO_KEYCACHE, SQLCOM_PRELOAD_KEYS, SQLCOM_FLUSH, SQLCOM_KILL, SQLCOM_ANALYZE, SQLCOM_ROLLBACK, SQLCOM_ROLLBACK_TO_SAVEPOINT, @@ -74,10 +78,17 @@ enum enum_sql_command { SQLCOM_SHOW_BINLOG_EVENTS, SQLCOM_SHOW_NEW_MASTER, SQLCOM_DO, SQLCOM_SHOW_WARNS, SQLCOM_EMPTY_QUERY, SQLCOM_SHOW_ERRORS, SQLCOM_SHOW_COLUMN_TYPES, SQLCOM_SHOW_STORAGE_ENGINES, SQLCOM_SHOW_PRIVILEGES, - SQLCOM_HELP, SQLCOM_DROP_USER, SQLCOM_REVOKE_ALL, SQLCOM_CHECKSUM, - + SQLCOM_HELP, SQLCOM_CREATE_USER, SQLCOM_DROP_USER, SQLCOM_RENAME_USER, + SQLCOM_REVOKE_ALL, SQLCOM_CHECKSUM, + SQLCOM_CREATE_PROCEDURE, SQLCOM_CREATE_SPFUNCTION, SQLCOM_CALL, + SQLCOM_DROP_PROCEDURE, SQLCOM_ALTER_PROCEDURE,SQLCOM_ALTER_FUNCTION, + SQLCOM_SHOW_CREATE_PROC, SQLCOM_SHOW_CREATE_FUNC, + SQLCOM_SHOW_STATUS_PROC, SQLCOM_SHOW_STATUS_FUNC, SQLCOM_PREPARE, SQLCOM_EXECUTE, SQLCOM_DEALLOCATE_PREPARE, + SQLCOM_CREATE_VIEW, SQLCOM_DROP_VIEW, + SQLCOM_CREATE_TRIGGER, SQLCOM_DROP_TRIGGER, /* This should be the last !!! 
*/ + SQLCOM_END }; @@ -85,6 +96,39 @@ enum enum_sql_command { #define DESCRIBE_NORMAL 1 #define DESCRIBE_EXTENDED 2 +enum enum_sp_suid_behaviour +{ + SP_IS_DEFAULT_SUID= 0, + SP_IS_NOT_SUID, + SP_IS_SUID +}; + +enum enum_sp_data_access +{ + SP_DEFAULT_ACCESS= 0, + SP_CONTAINS_SQL, + SP_NO_SQL, + SP_READS_SQL_DATA, + SP_MODIFIES_SQL_DATA +}; + + +#define DERIVED_SUBQUERY 1 +#define DERIVED_VIEW 2 + +enum enum_view_create_mode +{ + VIEW_CREATE_NEW, // check that there are not such VIEW/table + VIEW_ALTER, // check that VIEW .frm with such name exists + VIEW_CREATE_OR_REPLACE // check only that there are not such table +}; + +enum enum_drop_mode +{ + DROP_DEFAULT, // mode is not specified + DROP_CASCADE, // CASCADE option + DROP_RESTRICT // RESTRICT option +}; typedef List<Item> List_item; @@ -240,8 +284,9 @@ public: } static void *operator new(size_t size, MEM_ROOT *mem_root) { return (void*) alloc_root(mem_root, (uint) size); } - static void operator delete(void *ptr,size_t size) {} - static void operator delete(void *ptr,size_t size, MEM_ROOT *mem_root) {} + static void operator delete(void *ptr,size_t size) { TRASH(ptr, size); } + static void operator delete(void *ptr,size_t size, MEM_ROOT *mem_root) + { TRASH(ptr, size); } st_select_lex_node(): linkage(UNSPECIFIED_TYPE) {} virtual ~st_select_lex_node() {} inline st_select_lex_node* get_master() { return master; } @@ -276,6 +321,8 @@ public: friend class st_select_lex_unit; friend bool mysql_new_select(struct st_lex *lex, bool move_down); + friend my_bool mysql_make_view (File_parser *parser, + TABLE_LIST *table); private: void fast_exclude(); }; @@ -298,8 +345,8 @@ protected: TABLE *table; /* temporary table using for appending UNION results */ select_result *result; - int res; ulong found_rows_for_union; + bool res; bool prepared, // prepare phase already performed for UNION (unit) optimized, // optimize phase already performed for UNION (unit) executed, // already executed @@ -340,7 +387,6 @@ public: Procedure *last_procedure; /* Pointer to procedure, if such exists */ void init_query(); - bool create_total_list(THD *thd, st_lex *lex, TABLE_LIST **result); st_select_lex_unit* master_unit(); st_select_lex* outer_select(); st_select_lex* first_select() @@ -360,9 +406,9 @@ public: void exclude_tree(); /* UNION methods */ - int prepare(THD *thd, select_result *result, ulong additional_options); - int exec(); - int cleanup(); + bool prepare(THD *thd, select_result *result, ulong additional_options); + bool exec(); + bool cleanup(); inline void unclean() { cleaned= 0; } void reinit_exec_mechanism(); @@ -370,13 +416,11 @@ public: void print(String *str); ulong init_prepare_fake_select_lex(THD *thd); - int change_result(select_subselect *result, select_subselect *old_result); + bool change_result(select_subselect *result, select_subselect *old_result); + void set_limit(st_select_lex *values, st_select_lex *sl); friend void lex_start(THD *thd, uchar *buf, uint length); friend int subselect_union_engine::exec(); -private: - bool create_total_list_n_last_return(THD *thd, st_lex *lex, - TABLE_LIST ***result); }; typedef class st_select_lex_unit SELECT_LEX_UNIT; @@ -389,6 +433,8 @@ public: char *db, *db1, *table1, *db2, *table2; /* For outer join using .. 
*/ Item *where, *having; /* WHERE & HAVING clauses */ Item *prep_where; /* saved WHERE clause for prepared statement processing */ + /* point on lex in which it was created, used in view subquery detection */ + st_lex *parent_lex; enum olap_type olap; SQL_LIST table_list, group_list; /* FROM & GROUP BY clauses */ List<Item> item_list; /* list of fields & expressions */ @@ -401,7 +447,11 @@ public: List<Item_func_match> *ftfunc_list; List<Item_func_match> ftfunc_list_alloc; JOIN *join; /* after JOIN::prepare it is pointer to corresponding JOIN */ - const char *type; /* type of select for EXPLAIN */ + List<TABLE_LIST> top_join_list; /* join list of the top level */ + List<TABLE_LIST> *join_list; /* list for the currently parsed join */ + TABLE_LIST *embedding; /* table embedding to the above list */ + TABLE_LIST *leaf_tables; /* list of leaves in join table tree */ + const char *type; /* type of select for EXPLAIN */ SQL_LIST order_list; /* ORDER clause */ List<List_item> expr_list; @@ -420,6 +470,11 @@ public: uint cond_count; /* number of arguments of and/or/xor in where/having */ enum_parsing_place parsing_place; /* where we are parsing expression */ bool with_sum_func; /* sum function indicator */ + /* + PS or SP cond natural joins was alredy processed with permanent + arena and all additional items which we need alredy stored in it + */ + bool conds_processed_with_permanent_arena; ulong table_join_options; uint in_sum_expr; @@ -435,6 +490,10 @@ public: query processing end even if we use temporary table */ bool subquery_in_having; + bool first_execution; /* first execution in SP or PS */ + bool first_cond_optimization; + /* do not wrap view fields with Item_ref */ + bool no_wrap_view_item; /* SELECT for SELECT command st_select_lex. Used to privent scaning @@ -497,6 +556,12 @@ public: List<String> *ignore_index= 0, LEX_STRING *option= 0); TABLE_LIST* get_table_list(); + bool init_nested_join(THD *thd); + TABLE_LIST *end_nested_join(THD *thd); + TABLE_LIST *nest_last_join(THD *thd); + void save_names_for_using_list(TABLE_LIST *tab1, TABLE_LIST *tab2); + void add_joined_table(TABLE_LIST *table); + TABLE_LIST *convert_right_join(); List<Item>* get_item_list(); List<String>* get_use_index(); List<String>* get_ignore_index(); @@ -524,6 +589,7 @@ public: void print(THD *thd, String *str); static void print_order(String *str, ORDER *order); void print_limit(THD *thd, String *str); + void fix_prepare_information(THD *thd, Item **conds); }; typedef class st_select_lex SELECT_LEX; @@ -535,6 +601,9 @@ typedef class st_select_lex SELECT_LEX; #define ALTER_RENAME 32 #define ALTER_ORDER 64 #define ALTER_OPTIONS 128 +#define ALTER_CHANGE_COLUMN_DEFAULT 256 +#define ALTER_KEYS_ONOFF 512 +#define ALTER_CONVERT 1024 typedef struct st_alter_info { @@ -543,13 +612,29 @@ typedef struct st_alter_info uint flags; enum enum_enable_or_disable keys_onoff; enum tablespace_op_type tablespace_op; - bool is_simple; st_alter_info(){clear();} void clear(){keys_onoff= LEAVE_AS_IS;tablespace_op= NO_TABLESPACE_OP;} void reset(){drop_list.empty();alter_list.empty();clear();} } ALTER_INFO; +struct st_sp_chistics +{ + LEX_STRING comment; + enum enum_sp_suid_behaviour suid; + bool detistic; + enum enum_sp_data_access daccess; +}; + + +struct st_trg_chistics +{ + enum trg_action_time_type action_time; + enum trg_event_type event; +}; + +extern sys_var_long_ptr trg_new_row_fake_var; + /* The state of the lex parsing. 
This is saved in the THD struct */ typedef struct st_lex @@ -562,12 +647,12 @@ typedef struct st_lex SELECT_LEX *current_select; /* list of all SELECT_LEX */ SELECT_LEX *all_selects_list; + uchar *buf; /* The beginning of string, used by SPs */ uchar *ptr,*tok_start,*tok_end,*end_of_query; char *length,*dec,*change,*name; char *help_arg; char *backup_dir; /* For RESTORE/BACKUP */ char* to_log; /* For PURGE MASTER LOGS TO */ - time_t purge_time; /* For PURGE MASTER LOGS BEFORE */ char* x509_subject,*x509_issuer,*ssl_cipher; char* found_colon; /* For multi queries - next query */ String *wild; @@ -579,6 +664,16 @@ typedef struct st_lex gptr yacc_yyss,yacc_yyvs; THD *thd; CHARSET_INFO *charset; + TABLE_LIST *query_tables; /* global list of all tables in this query */ + /* + last element next_global of previous list (used only for list building + during parsing and VIEW processing. This pointer is not valid in + mysql_execute_command + */ + TABLE_LIST **query_tables_last; + TABLE_LIST *proc_table; /* refer to mysql.proc if it was opened by VIEW */ + /* store original leaf_tables for INSERT SELECT and PS/SP */ + TABLE_LIST *leaf_tables_insert; List<key_part_spec> col_list; List<key_part_spec> ref_list; @@ -591,6 +686,7 @@ typedef struct st_lex List<List_item> many_values; List<set_var_base> var_list; List<Item_param> param_list; + List<LEX_STRING> view_list; // view list (list of field names in view) SQL_LIST proc_list, auxilliary_table_list, save_list; create_field *last_field; char *savepoint_name; // Transaction savepoint id @@ -599,8 +695,8 @@ typedef struct st_lex HA_CREATE_INFO create_info; LEX_MASTER_INFO mi; // used by CHANGE MASTER USER_RESOURCES mqh; - ulong thread_id,type; - enum_sql_command sql_command; + ulong type; + enum_sql_command sql_command, orig_sql_command; thr_lock_type lock_option, multi_lock_option; enum SSL_type ssl_type; /* defined in violite.h */ enum my_lex_states next_state; @@ -609,16 +705,24 @@ typedef struct st_lex enum enum_ha_read_modes ha_read_mode; enum ha_rkey_function ha_rkey_mode; enum enum_var_type option_type; + enum enum_view_create_mode create_view_mode; + enum enum_drop_mode drop_mode; uint uint_geom_type; uint grant, grant_tot_col, which_columns; uint fk_delete_opt, fk_update_opt, fk_match_option; uint slave_thd_opt, start_transaction_opt; + uint table_count; /* used when usual update transformed in multiupdate */ uint8 describe; + uint8 derived_tables; + uint8 create_view_algorithm; + uint8 create_view_check; bool drop_if_exists, drop_temporary, local_file, one_shot_set; bool in_comment, ignore_space, verbose, no_write_to_binlog; - bool derived_tables; + /* special JOIN::prepare mode: changing of query is prohibited */ + bool view_prepare_mode; bool safe_to_cache_query; bool subqueries, ignore; + bool variables_used; ALTER_INFO alter_info; /* Prepared statements SQL syntax:*/ LEX_STRING prepared_stmt_name; /* Statement name (in all queries) */ @@ -632,12 +736,46 @@ typedef struct st_lex /* Names of user variables holding parameters (in EXECUTE) */ List<LEX_STRING> prepared_stmt_params; /* - If points to fake_time_zone_tables_list indicates that time zone - tables are implicitly used by statement, also is used for holding - list of those tables after they are opened. + Points to part of global table list which contains time zone tables + implicitly used by the statement. 
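The query_tables / query_tables_last pair added to st_lex above is a pointer-to-the-last-next-pointer idiom: keeping the address of the tail link gives O(1) appends while the parser builds the global table list, and the prev_global back-pointer lets unlink_first_table() detach the head without walking the list. A self-contained sketch of the same shape (types and names are illustrative, not the server's):

struct Table_node
{
  Table_node  *next_global;
  Table_node **prev_global;      /* address of the pointer that points at us */
};

struct Global_table_list
{
  Table_node  *first;            /* plays the role of LEX::query_tables */
  Table_node **last;             /* plays the role of LEX::query_tables_last */
  Global_table_list() : first(0), last(&first) {}

  void append(Table_node *t)     /* same shape as add_to_query_tables() */
  {
    t->next_global= 0;
    *(t->prev_global= last)= t;
    last= &t->next_global;
  }

  void unlink(Table_node *t)     /* assumes t was appended earlier */
  {
    if ((*t->prev_global= t->next_global))
      t->next_global->prev_global= t->prev_global;
    else
      last= t->prev_global;      /* removed the tail: move 'last' back */
    t->next_global= 0;
  }
};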
*/ TABLE_LIST *time_zone_tables_used; - st_lex(); + sp_head *sphead; + sp_name *spname; + bool sp_lex_in_use; /* Keep track on lex usage in SPs for error handling */ + bool all_privileges; + sp_pcontext *spcont; + HASH spfuns; /* Called functions */ + st_sp_chistics sp_chistics; + bool only_view; /* used for SHOW CREATE TABLE/VIEW */ + /* + field_list was created for view and should be removed before PS/SP + rexecuton + */ + bool empty_field_list_on_rset; + /* Characterstics of trigger being created */ + st_trg_chistics trg_chistics; + /* + List of all items (Item_trigger_field objects) representing fields in + old/new version of row in trigger. We use this list for checking whenever + all such fields are valid at trigger creation time and for binding these + fields to TABLE object at table open (altough for latter pointer to table + being opened is probably enough). + */ + SQL_LIST trg_table_fields; + + st_lex() :result(0), sql_command(SQLCOM_END) + { + extern byte *sp_lex_spfuns_key(const byte *ptr, uint *plen, my_bool first); + hash_init(&spfuns, system_charset_info, 0, 0, 0, sp_lex_spfuns_key, 0, 0); + } + + ~st_lex() + { + if (spfuns.array.buffer) + hash_free(&spfuns); + } + inline void uncacheable(uint8 cause) { safe_to_cache_query= 0; @@ -657,15 +795,36 @@ typedef struct st_lex un->uncacheable|= cause; } } - TABLE_LIST *unlink_first_table(TABLE_LIST *tables, - TABLE_LIST **global_first, - TABLE_LIST **local_first); - TABLE_LIST *link_first_table_back(TABLE_LIST *tables, - TABLE_LIST *global_first, - TABLE_LIST *local_first); + TABLE_LIST *unlink_first_table(bool *link_to_local); + void link_first_table_back(TABLE_LIST *first, bool link_to_local); + void first_lists_tables_same(); + inline void add_to_query_tables(TABLE_LIST *table) + { + *(table->prev_global= query_tables_last)= table; + query_tables_last= &table->next_global; + } + bool add_time_zone_tables_to_query_tables(THD *thd); + + bool can_be_merged(); + bool can_use_merged(); + bool can_not_use_merged(); + bool only_view_structure(); + bool need_correct_ident(); } LEX; -extern TABLE_LIST fake_time_zone_tables_list; +struct st_lex_local: public st_lex +{ + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } + static void *operator new(size_t size, MEM_ROOT *mem_root) + { + return (void*) alloc_root(mem_root, (uint) size); + } + static void operator delete(void *ptr,size_t size) + { TRASH(ptr, size); } +}; void lex_init(void); void lex_free(void); diff --git a/sql/sql_list.h b/sql/sql_list.h index 38c5af2a4f7..657943f1e69 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -21,12 +21,6 @@ /* mysql standard class memory allocator */ -#ifdef SAFEMALLOC -#define TRASH(XX,YY) bfill((XX), (YY), 0x8F) -#else -#define TRASH(XX,YY) /* no-op */ -#endif - class Sql_alloc { public: @@ -113,6 +107,16 @@ public: } return 1; } + inline bool push_back(void *info, MEM_ROOT *mem_root) + { + if (((*last)=new (mem_root) list_node(info, &end_of_list))) + { + last= &(*last)->next; + elements++; + return 0; + } + return 1; + } inline bool push_front(void *info) { list_node *node=new list_node(info,first); @@ -129,10 +133,12 @@ public: void remove(list_node **prev) { list_node *node=(*prev)->next; - delete *prev; - *prev=node; if (!--elements) last= &first; + else if (last == &(*prev)->next) + last= prev; + delete *prev; + *prev=node; } inline void *pop(void) { @@ -143,6 +149,39 @@ public: last= &first; return tmp->info; } + inline void concat(base_list *list) + { + if (!list->is_empty()) + { + *last= 
list->first; + last= list->last; + elements+= list->elements; + } + } + inline void disjoin(base_list *list) + { + list_node **prev= &first; + list_node *node= first; + list_node *list_first= list->first; + elements=0; + while (node && node != list_first) + { + prev= &node->next; + node= node->next; + elements++; + } + *prev= *last; + last= prev; + } + inline void prepand(base_list *list) + { + if (!list->is_empty()) + { + *list->last= first; + first= list->first; + elements+= list->elements; + } + } inline list_node* last_node() { return *last; } inline list_node* first_node() { return first;} inline void *head() { return first->info; } @@ -249,10 +288,15 @@ public: inline List() :base_list() {} inline List(const List<T> &tmp) :base_list(tmp) {} inline bool push_back(T *a) { return base_list::push_back(a); } + inline bool push_back(T *a, MEM_ROOT *mem_root) + { return base_list::push_back(a, mem_root); } inline bool push_front(T *a) { return base_list::push_front(a); } inline T* head() {return (T*) base_list::head(); } inline T** head_ref() {return (T**) base_list::head_ref(); } inline T* pop() {return (T*) base_list::pop(); } + inline void concat(List<T> *list) { base_list::concat(list); } + inline void disjoin(List<T> *list) { base_list::disjoin(list); } + inline void prepand(List<T> *list) { base_list::prepand(list); } void delete_elements(void) { list_node *element,*next; @@ -273,6 +317,8 @@ public: inline T* operator++(int) { return (T*) base_list_iterator::next(); } inline T *replace(T *a) { return (T*) base_list_iterator::replace(a); } inline T *replace(List<T> &a) { return (T*) base_list_iterator::replace(a); } + inline void rewind(void) { base_list_iterator::rewind(); } + inline void remove() { base_list_iterator::remove(); } inline void after(T *a) { base_list_iterator::after(a); } inline T** ref(void) { return (T**) base_list_iterator::ref(); } }; diff --git a/sql/sql_load.cc b/sql/sql_load.cc index c4f5b1427af..eed3aee791a 100644 --- a/sql/sql_load.cc +++ b/sql/sql_load.cc @@ -71,14 +71,16 @@ public: void set_io_cache_arg(void* arg) { cache.arg = arg; } }; -static int read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table, +static int read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, List<Item> &fields, READ_INFO &read_info, - ulong skip_lines); -static int read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, + ulong skip_lines, + bool ignore_check_option_errors); +static int read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, List<Item> &fields, READ_INFO &read_info, - String &enclosed, ulong skip_lines); + String &enclosed, ulong skip_lines, + bool ignore_check_option_errors); -int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, +bool mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, List<Item> &fields, enum enum_duplicates handle_duplicates, bool ignore, bool read_file_from_client,thr_lock_type lock_type) @@ -89,6 +91,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, int error; String *field_term=ex->field_term,*escaped=ex->escaped; String *enclosed=ex->enclosed; + Item *unused_conds= 0; bool is_fifo=0; #ifndef EMBEDDED_LIBRARY LOAD_FILE_INFO lf_info; @@ -100,8 +103,9 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, loaded is located */ char *tdb= thd->db ? 
thd->db : db; // Result is never null - bool transactional_table, log_delayed; ulong skip_lines= ex->skip_lines; + int res; + bool transactional_table, log_delayed; DBUG_ENTER("mysql_load"); #ifdef EMBEDDED_LIBRARY @@ -112,10 +116,22 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, { my_message(ER_WRONG_FIELD_TERMINATORS,ER(ER_WRONG_FIELD_TERMINATORS), MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } - if (!(table = open_ltable(thd,table_list,lock_type))) - DBUG_RETURN(-1); + table_list->lock_type= lock_type; + if (open_and_lock_tables(thd, table_list)) + DBUG_RETURN(TRUE); + if (setup_tables(thd, table_list, &unused_conds, + &thd->lex->select_lex.leaf_tables, FALSE, FALSE)) + DBUG_RETURN(-1); + if (!table_list->table || // do not suport join view + !table_list->updatable || // and derived tables + check_key_in_view(thd, table_list)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "LOAD"); + DBUG_RETURN(TRUE); + } + table= table_list->table; transactional_table= table->file->has_transactions(); log_delayed= (transactional_table || table->tmp_table); @@ -128,14 +144,18 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, else { // Part field list thd->dupp_field=0; - if (setup_tables(table_list) || - setup_fields(thd, 0, table_list, fields, 1, 0, 0)) - DBUG_RETURN(-1); + /* TODO: use this conds for 'WITH CHECK OPTIONS' */ + Item *unused_conds= 0; + TABLE_LIST *leaves= 0; + if (setup_fields(thd, 0, table_list, fields, 1, 0, 0)) + DBUG_RETURN(TRUE); if (thd->dupp_field) { my_error(ER_FIELD_SPECIFIED_TWICE, MYF(0), thd->dupp_field->field_name); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } + if (check_that_all_fields_are_given_values(thd, table)) + DBUG_RETURN(TRUE); } uint tot_length=0; @@ -161,7 +181,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, { my_message(ER_BLOBS_AND_NO_TERMINATED,ER(ER_BLOBS_AND_NO_TERMINATED), MYF(0)); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } /* We can't give an error in the middle when using LOCAL files */ @@ -193,7 +213,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, #if !defined(__WIN__) && !defined(OS2) && ! 
defined(__NETWARE__) MY_STAT stat_info; if (!my_stat(name,&stat_info,MYF(MY_WME))) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); // if we are not in slave thread, the file must be: if (!thd->slave_thread && @@ -204,15 +224,15 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, ((stat_info.st_mode & S_IFREG) == S_IFREG || (stat_info.st_mode & S_IFIFO) == S_IFIFO))) { - my_error(ER_TEXTFILE_NOT_READABLE,MYF(0),name); - DBUG_RETURN(-1); + my_error(ER_TEXTFILE_NOT_READABLE, MYF(0), name); + DBUG_RETURN(TRUE); } if ((stat_info.st_mode & S_IFIFO) == S_IFIFO) is_fifo = 1; #endif } if ((file=my_open(name,O_RDONLY,MYF(MY_WME))) < 0) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } COPY_INFO info; @@ -228,7 +248,7 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, { if (file >= 0) my_close(file,MYF(0)); // no files in net reading - DBUG_RETURN(-1); // Can't allocate buffers + DBUG_RETURN(TRUE); // Can't allocate buffers } #ifndef EMBEDDED_LIBRARY @@ -276,12 +296,20 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, ha_enable_transaction(thd, FALSE); table->file->start_bulk_insert((ha_rows) 0); table->copy_blobs=1; + + thd->no_trans_update= 0; + thd->abort_on_warning= (handle_duplicates != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + if (!field_term->length() && !enclosed->length()) - error=read_fixed_length(thd,info,table,fields,read_info, - skip_lines); + error= read_fixed_length(thd, info, table_list, fields,read_info, + skip_lines, ignore); else - error=read_sep_field(thd,info,table,fields,read_info,*enclosed, - skip_lines); + error= read_sep_field(thd, info, table_list, fields, read_info, + *enclosed, skip_lines, + ignore); if (table->file->end_bulk_insert()) error=1; /* purecov: inspected */ ha_enable_transaction(thd, TRUE); @@ -348,9 +376,6 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, sprintf(name, ER(ER_LOAD_INFO), (ulong) info.records, (ulong) info.deleted, (ulong) (info.records - info.copied), (ulong) thd->cuted_fields); send_ok(thd,info.copied+info.deleted,0L,name); - // on the slave thd->query is never initialized - if (!thd->slave_thread) - mysql_update_log.write(thd,thd->query,thd->query_length); if (!log_delayed) thd->options|=OPTION_STATUS_NO_TRANS_UPDATE; @@ -372,12 +397,14 @@ int mysql_load(THD *thd,sql_exchange *ex,TABLE_LIST *table_list, #endif /*!EMBEDDED_LIBRARY*/ if (transactional_table) error=ha_autocommit_or_rollback(thd,error); + err: if (thd->lock) { mysql_unlock_tables(thd, thd->lock); thd->lock=0; } + thd->abort_on_warning= 0; DBUG_RETURN(error); } @@ -386,12 +413,15 @@ err: ****************************************************************************/ static int -read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, - READ_INFO &read_info, ulong skip_lines) +read_fixed_length(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, + List<Item> &fields, READ_INFO &read_info, ulong skip_lines, + bool ignore_check_option_errors) { List_iterator_fast<Item> it(fields); Item_field *sql_field; + TABLE *table= table_list->table; ulonglong id; + bool no_trans_update; DBUG_ENTER("read_fixed_length"); id= 0; @@ -404,7 +434,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); DBUG_RETURN(1); } if (skip_lines) @@ -423,6 +453,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, #ifdef HAVE_purify 
read_info.row_end[0]=0; #endif + no_trans_update= !table->file->has_transactions(); while ((sql_field= (Item_field*) it++)) { Field *field= sql_field->field; @@ -442,7 +473,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, field->field_length) length=field->field_length; save_chr=pos[length]; pos[length]='\0'; // Safeguard aganst malloc - field->store((char*) pos,length,read_info.read_charset); + field->store((char*) pos,length,read_info.read_charset); pos[length]=save_chr; if ((pos+=length) > read_info.row_end) pos= read_info.row_end; /* Fills rest with space */ @@ -455,8 +486,20 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); } - if (write_record(table,&info)) + + switch (table_list->view_check_option(thd, + ignore_check_option_errors)) { + case VIEW_CHECK_SKIP: + read_info.next_line(); + goto continue_loop; + case VIEW_CHECK_ERROR: + DBUG_RETURN(-1); + } + + if (thd->killed || write_record(thd,table,&info)) DBUG_RETURN(1); + thd->no_trans_update= no_trans_update; + /* If auto_increment values are used, save the first one for LAST_INSERT_ID() and for the binary/update log. @@ -477,6 +520,7 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); } thd->row_count++; +continue_loop:; } if (id && !read_info.error) thd->insert_id(id); // For binary/update log @@ -486,24 +530,28 @@ read_fixed_length(THD *thd,COPY_INFO &info,TABLE *table,List<Item> &fields, static int -read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, +read_sep_field(THD *thd, COPY_INFO &info, TABLE_LIST *table_list, List<Item> &fields, READ_INFO &read_info, - String &enclosed, ulong skip_lines) + String &enclosed, ulong skip_lines, + bool ignore_check_option_errors) { List_iterator_fast<Item> it(fields); Item_field *sql_field; + TABLE *table= table_list->table; uint enclosed_length; ulonglong id; + bool no_trans_update; DBUG_ENTER("read_sep_field"); enclosed_length=enclosed.length(); id= 0; + no_trans_update= !table->file->has_transactions(); for (;;it.rewind()) { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); DBUG_RETURN(1); } while ((sql_field=(Item_field*) it++)) @@ -559,7 +607,18 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, ER(ER_WARN_TOO_FEW_RECORDS), thd->row_count); } } - if (write_record(table,&info)) + + switch (table_list->view_check_option(thd, + ignore_check_option_errors)) { + case VIEW_CHECK_SKIP: + read_info.next_line(); + goto continue_loop; + case VIEW_CHECK_ERROR: + DBUG_RETURN(-1); + } + + + if (thd->killed || write_record(thd, table, &info)) DBUG_RETURN(1); /* If auto_increment values are used, save the first one @@ -571,6 +630,7 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, id= thd->last_insert_id; if (table->next_number_field) table->next_number_field->reset(); // Clear for next record + thd->no_trans_update= no_trans_update; if (read_info.next_line()) // Skip to next line break; if (read_info.line_cuted) @@ -579,8 +639,11 @@ read_sep_field(THD *thd,COPY_INFO &info,TABLE *table, push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_TOO_MANY_RECORDS, ER(ER_WARN_TOO_MANY_RECORDS), thd->row_count); + if (thd->killed) + DBUG_RETURN(1); } thd->row_count++; +continue_loop:; } if (id && !read_info.error) thd->insert_id(id); // For binary/update log diff --git a/sql/sql_map.cc b/sql/sql_map.cc index e7e24f957c6..687e60b7c72 100644 --- 
a/sql/sql_map.cc +++ b/sql/sql_map.cc @@ -47,13 +47,12 @@ mapped_files::mapped_files(const my_string filename,byte *magic,uint magic_lengt 0L))) { error=errno; - my_printf_error(0,"Can't map file: %s, errno: %d",MYF(0), - (my_string) name,error); + my_error(ER_NO_FILE_MAPPING, MYF(0), (my_string) name, error); } } if (map && memcmp(map,magic,magic_length)) { - my_printf_error(0,"Wrong magic in %s",MYF(0),name); + my_error(ER_WRONG_MAGIC, MYF(0), name); VOID(munmap(map,size)); map=0; } @@ -112,8 +111,7 @@ mapped_files *map_file(const my_string name,byte *magic,uint magic_length) { map->use_count++; if (!map->map) - my_printf_error(0,"Can't map file: %s, error: %d",MYF(0),path, - map->error); + my_error(ER_NO_FILE_MAPPING, MYF(0), path, map->error); } VOID(pthread_mutex_unlock(&LOCK_mapped_file)); return map; diff --git a/sql/sql_olap.cc b/sql/sql_olap.cc index 46f1e6c156e..07271d40492 100644 --- a/sql/sql_olap.cc +++ b/sql/sql_olap.cc @@ -152,7 +152,9 @@ int handle_olaps(LEX *lex, SELECT_LEX *select_lex) List<Item> all_fields(select_lex->item_list); - if (setup_tables((TABLE_LIST *)select_lex->table_list.first) || + if (setup_tables(lex->thd, (TABLE_LIST *)select_lex->table_list.first + &select_lex->where, &select_lex->leaf_tables, + FALSE, FALSE) || setup_fields(lex->thd, 0, (TABLE_LIST *)select_lex->table_list.first, select_lex->item_list, 1, &all_fields,1) || setup_fields(lex->thd, 0, (TABLE_LIST *)select_lex->table_list.first, diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 6353622098c..442d398889f 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -25,6 +25,9 @@ #include "ha_innodb.h" #endif +#include "sp_head.h" +#include "sp.h" + #ifdef HAVE_OPENSSL /* Without SSL the handshake consists of one packet. This packet @@ -43,6 +46,15 @@ #define MIN_HANDSHAKE_SIZE 6 #endif /* HAVE_OPENSSL */ +/* Used in error handling only */ +#define SP_TYPE_STRING(LP) \ + ((LP)->sphead->m_type == TYPE_ENUM_FUNCTION ? "FUNCTION" : "PROCEDURE") +#define SP_COM_STRING(LP) \ + ((LP)->sql_command == SQLCOM_CREATE_SPFUNCTION || \ + (LP)->sql_command == SQLCOM_ALTER_FUNCTION || \ + (LP)->sql_command == SQLCOM_DROP_FUNCTION ? 
\ + "FUNCTION" : "PROCEDURE") + #ifdef SOLARIS extern "C" int gethostname(char *name, int namelen); #endif @@ -65,7 +77,7 @@ const char *command_name[]={ "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user", "Binlog Dump","Table Dump", "Connect Out", "Register Slave", "Prepare", "Prepare Execute", "Long Data", "Close stmt", - "Reset stmt", "Set option", + "Reset stmt", "Set option", "Fetch", "Error" // Last command number }; @@ -95,7 +107,7 @@ static void unlock_locked_tables(THD *thd) if (thd->locked_tables) { thd->lock=thd->locked_tables; - thd->locked_tables=0; // Will be automaticly closed + thd->locked_tables=0; // Will be automatically closed close_thread_tables(thd); // Free tables } } @@ -104,15 +116,20 @@ static void unlock_locked_tables(THD *thd) static bool end_active_trans(THD *thd) { int error=0; + DBUG_ENTER("end_active_trans"); if (thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN | OPTION_TABLE_LOCK)) { + DBUG_PRINT("info",("options: 0x%lx", (ulong) thd->options)); thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); + /* Safety if one did "drop table" on locked tables */ + if (!thd->locked_tables) + thd->options&= ~OPTION_TABLE_LOCK; thd->server_status&= ~SERVER_STATUS_IN_TRANS; if (ha_commit(thd)) error=1; } - return error; + DBUG_RETURN(error); } @@ -152,25 +169,21 @@ static int get_or_create_user_conn(THD *thd, const char *user, my_malloc(sizeof(struct user_conn) + temp_len+1, MYF(MY_WME))))) { - send_error(thd, 0, NullS); // Out of memory + net_send_error(thd, 0, NullS); // Out of memory return_val=1; goto end; } uc->user=(char*) (uc+1); memcpy(uc->user,temp_user,temp_len+1); - uc->user_len= user_len; - uc->host=uc->user + uc->user_len + 1; + uc->host= uc->user + user_len + 1; uc->len = temp_len; - uc->connections = 1; - uc->questions=uc->updates=uc->conn_per_hour=0; + uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0; uc->user_resources=*mqh; - if (max_user_connections && mqh->connections > max_user_connections) - uc->user_resources.connections = max_user_connections; uc->intime=thd->thr_create_time; if (my_hash_insert(&hash_user_connections, (byte*) uc)) { my_free((char*) uc,0); - send_error(thd, 0, NullS); // Out of memory + net_send_error(thd, 0, NullS); // Out of memory return_val=1; goto end; } @@ -192,13 +205,13 @@ end: command originator of the check: now check_user is called during connect and change user procedures; used for logging. - passwd scrambled password recieved from client + passwd scrambled password received from client passwd_len length of scrambled password db database name to connect to, may be NULL check_count dont know exactly Note, that host, user and passwd may point to communication buffer. - Current implementation does not depened on that, but future changes + Current implementation does not depend on that, but future changes should be done with this in mind; 'thd' is INOUT, all other params are 'IN'. @@ -245,7 +258,7 @@ int check_user(THD *thd, enum enum_server_command command, */ if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323) { - net_printf(thd, ER_NOT_SUPPORTED_AUTH_MODE); + net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); mysql_log.write(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); DBUG_RETURN(-1); } @@ -256,7 +269,7 @@ int check_user(THD *thd, enum enum_server_command command, /* Clear thd->db as it points to something, that will be freed when - connection is closed. We don't want to accidently free a wrong pointer + connection is closed. 
We don't want to accidentally free a wrong pointer if connect failed. Also in case of 'CHANGE USER' failure, current database will be switched to 'no database selected'. */ @@ -277,15 +290,16 @@ int check_user(THD *thd, enum enum_server_command command, NET *net= &thd->net; if (opt_secure_auth_local) { - net_printf(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE, - thd->user, thd->host_or_ip); + net_printf_error(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE, + thd->user, thd->host_or_ip); mysql_log.write(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE), thd->user, thd->host_or_ip); DBUG_RETURN(-1); } + /* We have to read very specific packet size */ if (send_old_password_request(thd) || - my_net_read(net) != SCRAMBLE_LENGTH_323 + 1) // We have to read very - { // specific packet size + my_net_read(net) != SCRAMBLE_LENGTH_323 + 1) + { inc_host_errors(&thd->remote.sin_addr); DBUG_RETURN(ER_HANDSHAKE_ERROR); } @@ -297,7 +311,7 @@ int check_user(THD *thd, enum enum_server_command command, /* here res is always >= 0 */ if (res == 0) { - if (!(thd->master_access & NO_ACCESS)) // authentification is OK + if (!(thd->master_access & NO_ACCESS)) // authentication is OK { DBUG_PRINT("info", ("Capabilities: %d packet_length: %ld Host: '%s' " @@ -316,7 +330,7 @@ int check_user(THD *thd, enum enum_server_command command, VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (!count_ok) { // too many connections - send_error(thd, ER_CON_COUNT_ERROR); + net_send_error(thd, ER_CON_COUNT_ERROR); DBUG_RETURN(-1); } } @@ -337,12 +351,16 @@ int check_user(THD *thd, enum enum_server_command command, thd->db_access=0; /* Don't allow user to connect if he has done too many queries */ - if ((ur.questions || ur.updates || ur.connections || + if ((ur.questions || ur.updates || ur.conn_per_hour || ur.user_conn || max_user_connections) && - get_or_create_user_conn(thd,thd->user,thd->host_or_ip,&ur)) + get_or_create_user_conn(thd, + opt_old_style_user_limits ? thd->user : thd->priv_user, + opt_old_style_user_limits ? thd->host_or_ip : thd->priv_host, + &ur)) DBUG_RETURN(-1); if (thd->user_connect && - (thd->user_connect->user_resources.connections || + (thd->user_connect->user_resources.conn_per_hour || + thd->user_connect->user_resources.user_conn || max_user_connections) && check_for_max_user_connections(thd, thd->user_connect)) DBUG_RETURN(-1); @@ -366,14 +384,14 @@ int check_user(THD *thd, enum enum_server_command command, } else if (res == 2) // client gave short hash, server has long hash { - net_printf(thd, ER_NOT_SUPPORTED_AUTH_MODE); + net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); mysql_log.write(thd,COM_CONNECT,ER(ER_NOT_SUPPORTED_AUTH_MODE)); DBUG_RETURN(-1); } - net_printf(thd, ER_ACCESS_DENIED_ERROR, - thd->user, - thd->host_or_ip, - passwd_len ? ER(ER_YES) : ER(ER_NO)); + net_printf_error(thd, ER_ACCESS_DENIED_ERROR, + thd->user, + thd->host_or_ip, + passwd_len ? 
ER(ER_YES) : ER(ER_NO)); mysql_log.write(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR), thd->user, thd->host_or_ip, @@ -433,19 +451,28 @@ static int check_for_max_user_connections(THD *thd, USER_CONN *uc) DBUG_ENTER("check_for_max_user_connections"); (void) pthread_mutex_lock(&LOCK_user_conn); - if (max_user_connections && + if (max_user_connections && !uc->user_resources.user_conn && max_user_connections < (uint) uc->connections) { - net_printf(thd,ER_TOO_MANY_USER_CONNECTIONS, uc->user); + net_printf_error(thd, ER_TOO_MANY_USER_CONNECTIONS, uc->user); error=1; goto end; } - if (uc->user_resources.connections && - uc->user_resources.connections <= uc->conn_per_hour) + if (uc->user_resources.user_conn && + uc->user_resources.user_conn < uc->connections) + { + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, + "max_user_connections", + (long) uc->user_resources.user_conn); + error= 1; + goto end; + } + if (uc->user_resources.conn_per_hour && + uc->user_resources.conn_per_hour <= uc->conn_per_hour) { - net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, - "max_connections", - (long) uc->user_resources.connections); + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, + "max_connections", + (long) uc->user_resources.conn_per_hour); error=1; goto end; } @@ -532,6 +559,8 @@ void init_update_queries(void) uc_update_queries[SQLCOM_DELETE_MULTI]=1; uc_update_queries[SQLCOM_DROP_INDEX]=1; uc_update_queries[SQLCOM_UPDATE_MULTI]=1; + uc_update_queries[SQLCOM_CREATE_VIEW]=1; + uc_update_queries[SQLCOM_DROP_VIEW]=1; } bool is_update_query(enum enum_sql_command command) @@ -575,8 +604,8 @@ static bool check_mqh(THD *thd, uint check_command) if (uc->user_resources.questions && uc->questions++ >= uc->user_resources.questions) { - net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, "max_questions", - (long) uc->user_resources.questions); + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_questions", + (long) uc->user_resources.questions); error=1; goto end; } @@ -586,8 +615,8 @@ static bool check_mqh(THD *thd, uint check_command) if (uc->user_resources.updates && uc_update_queries[check_command] && uc->updates++ >= uc->user_resources.updates) { - net_printf(thd, ER_USER_LIMIT_REACHED, uc->user, "max_updates", - (long) uc->user_resources.updates); + net_printf_error(thd, ER_USER_LIMIT_REACHED, uc->user, "max_updates", + (long) uc->user_resources.updates); error=1; goto end; } @@ -598,7 +627,7 @@ end: } -static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) +static void reset_mqh(LEX_USER *lu, bool get_them= 0) { #ifndef NO_EMBEDDED_ACCESS_CHECKS (void) pthread_mutex_lock(&LOCK_user_conn); @@ -620,8 +649,9 @@ static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) uc->conn_per_hour=0; } } - else // for FLUSH PRIVILEGES and FLUSH USER_RESOURCES + else { + /* for FLUSH PRIVILEGES and FLUSH USER_RESOURCES */ for (uint idx=0;idx < hash_user_connections.records; idx++) { USER_CONN *uc=(struct user_conn *) hash_element(&hash_user_connections, @@ -720,7 +750,7 @@ static int check_connection(THD *thd) #endif /* HAVE_COMPRESS */ #ifdef HAVE_OPENSSL if (ssl_acceptor_fd) - client_flags |= CLIENT_SSL; /* Wow, SSL is avalaible! */ + client_flags |= CLIENT_SSL; /* Wow, SSL is available! 
*/ #endif /* HAVE_OPENSSL */ end= strnmov(buff, server_version, SERVER_VERSION_LENGTH) + 1; @@ -947,7 +977,7 @@ pthread_handler_decl(handle_one_connection,arg) pthread_detach_this_thread(); #if !defined( __WIN__) && !defined(OS2) // Win32 calls this in pthread_create - // The following calls needs to be done before we call DBUG_ macros + /* The following calls needs to be done before we call DBUG_ macros */ if (!(test_flags & TEST_NO_THREADS) & my_thread_init()) { close_connection(thd, ER_OUT_OF_RESOURCES, 1); @@ -966,7 +996,7 @@ pthread_handler_decl(handle_one_connection,arg) */ DBUG_PRINT("info", ("handle_one_connection called by thread %d\n", thd->thread_id)); - // now that we've called my_thread_init(), it is safe to call DBUG_* + /* now that we've called my_thread_init(), it is safe to call DBUG_* */ #if defined(__WIN__) init_signals(); // IRENA; testing ? @@ -992,7 +1022,7 @@ pthread_handler_decl(handle_one_connection,arg) if ((error=check_connection(thd))) { // Wrong permissions if (error > 0) - net_printf(thd,error,thd->host_or_ip); + net_printf_error(thd, error, thd->host_or_ip); #ifdef __NT__ if (vio_type(net->vio) == VIO_TYPE_NAMEDPIPE) my_sleep(1000); /* must wait after eof() */ @@ -1010,32 +1040,37 @@ pthread_handler_decl(handle_one_connection,arg) thd->version= refresh_version; thd->proc_info= 0; + thd->command= COM_SLEEP; thd->set_time(); thd->init_for_queries(); + if (sys_init_connect.value_length && !(thd->master_access & SUPER_ACL)) { execute_init_command(thd, &sys_init_connect, &LOCK_sys_init_connect); if (thd->query_error) - thd->killed= 1; + thd->killed= THD::KILL_CONNECTION; } - while (!net->error && net->vio != 0 && !thd->killed) + + thd->proc_info=0; + thd->set_time(); + thd->init_for_queries(); + while (!net->error && net->vio != 0 && !(thd->killed == THD::KILL_CONNECTION)) { if (do_command(thd)) break; } if (thd->user_connect) decrease_user_connections(thd->user_connect); - free_root(thd->mem_root,MYF(0)); if (net->error && net->vio != 0 && net->report_error) { if (!thd->killed && thd->variables.log_warnings > 1) - sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), + sql_print_warning(ER(ER_NEW_ABORTING_CONNECTION), thd->thread_id,(thd->db ? thd->db : "unconnected"), thd->user ? thd->user : "unauthenticated", thd->host_or_ip, (net->last_errno ? ER(net->last_errno) : ER(ER_UNKNOWN_ERROR))); - send_error(thd,net->last_errno,NullS); + net_send_error(thd, net->last_errno, NullS); statistic_increment(aborted_threads,&LOCK_status); } else if (thd->killed) @@ -1101,25 +1136,26 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg) thd->init_for_queries(); while (fgets(buff, thd->net.max_packet, file)) { - ulong length= (ulong) strlen(buff); - while (buff[length-1] != '\n' && !feof(file)) - { - /* - We got only a part of the current string. Will try to increase - net buffer then read the rest of the current string. - */ - if (net_realloc(&(thd->net), 2 * thd->net.max_packet)) - { - send_error(thd, thd->net.last_errno, NullS); - thd->is_fatal_error= 1; - break; - } - buff= (char*) thd->net.buff; - fgets(buff + length, thd->net.max_packet - length, file); - length+= (ulong) strlen(buff + length); - } - if (thd->is_fatal_error) - break; + ulong length= (ulong) strlen(buff); + while (buff[length-1] != '\n' && !feof(file)) + { + /* + We got only a part of the current string. Will try to increase + net buffer then read the rest of the current string. 
+ */ + if (net_realloc(&(thd->net), 2 * thd->net.max_packet)) + { + net_send_error(thd, ER_NET_PACKET_TOO_LARGE, NullS); + thd->fatal_error(); + break; + } + buff= (char*) thd->net.buff; + fgets(buff + length, thd->net.max_packet - length, file); + length+= (ulong) strlen(buff + length); + } + if (thd->is_fatal_error) + break; + while (length && (my_isspace(thd->charset(), buff[length-1]) || buff[length-1] == ';')) length--; @@ -1127,6 +1163,10 @@ extern "C" pthread_handler_decl(handle_bootstrap,arg) thd->query_length=length; thd->query= thd->memdup_w_gap(buff, length+1, thd->db_length+1); thd->query[length] = '\0'; + /* + We don't need to obtain LOCK_thread_count here because in bootstrap + mode we have only one thread. + */ thd->query_id=query_id++; if (mqh_used && thd->user_connect && check_mqh(thd, SQLCOM_END)) { @@ -1160,8 +1200,14 @@ end: void free_items(Item *item) { - for (; item ; item=item->next) + Item *next; + DBUG_ENTER("free_items"); + for (; item ; item=next) + { + next=item->next; item->delete_self(); + } + DBUG_VOID_RETURN; } /* This works because items are allocated with sql_alloc() */ @@ -1181,14 +1227,14 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) db = (db && db[0]) ? db : thd->db; if (!(table_list = (TABLE_LIST*) thd->calloc(sizeof(TABLE_LIST)))) DBUG_RETURN(1); // out of memory - table_list->db = db; - table_list->real_name = table_list->alias = tbl_name; - table_list->lock_type = TL_READ_NO_INSERT; - table_list->next = 0; + table_list->db= db; + table_list->real_name= table_list->alias= tbl_name; + table_list->lock_type= TL_READ_NO_INSERT; + table_list->prev_global= &table_list; // can be removed after merge with 4.1 if (!db || check_db_name(db)) { - net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); + my_error(ER_WRONG_DB_NAME ,MYF(0), db ? db : "NULL"); goto err; } if (lower_case_table_names) @@ -1203,7 +1249,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) thd->free_list = 0; thd->query_length=(uint) strlen(tbl_name); thd->query = tbl_name; - if ((error = mysqld_dump_create_info(thd, table, -1))) + if ((error = mysqld_dump_create_info(thd, table_list, -1))) { my_error(ER_GET_ERRNO, MYF(0), my_errno); goto err; @@ -1248,7 +1294,7 @@ bool do_command(THD *thd) packet=0; old_timeout=net->read_timeout; - // Wait max for 8 hours + /* Wait max for 8 hours */ net->read_timeout=(uint) thd->variables.net_wait_timeout; thd->clear_error(); // Clear error message @@ -1264,12 +1310,15 @@ bool do_command(THD *thd) statistic_increment(aborted_threads,&LOCK_status); DBUG_RETURN(TRUE); // We have to close it. 
} - send_error(thd,net->last_errno,NullS); + net_send_error(thd, net->last_errno, NullS); net->error= 0; DBUG_RETURN(FALSE); } else { + if (thd->killed == THD::KILL_QUERY || thd->killed == THD::KILL_BAD_DATA) + thd->killed= THD::NOT_KILLED; + packet=(char*) net->read_pos; command = (enum enum_server_command) (uchar) packet[0]; if (command >= COM_END) @@ -1321,6 +1370,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, this so that they will not get logged to the slow query log */ thd->slow_command=FALSE; + thd->lex->sql_command= SQLCOM_END; /* to avoid confusing VIEW detectors */ thd->set_time(); VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->query_id=query_id; @@ -1336,7 +1386,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, case COM_INIT_DB: { LEX_STRING tmp; - statistic_increment(com_stat[SQLCOM_CHANGE_DB],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_CHANGE_DB], + &LOCK_status); thd->convert_string(&tmp, system_charset_info, packet, strlen(packet), thd->charset()); if (!mysql_change_db(thd, tmp.str)) @@ -1357,13 +1408,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd, uint db_len= *(uchar*) packet; uint tbl_len= *(uchar*) (packet + db_len + 1); - statistic_increment(com_other, &LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); thd->slow_command= TRUE; db= thd->alloc(db_len + tbl_len + 2); tbl_name= strmake(db, packet + 1, db_len)+1; strmake(tbl_name, packet + db_len + 2, tbl_len); - if (mysql_table_dump(thd, db, tbl_name, -1)) - send_error(thd); // dump to NET + mysql_table_dump(thd, db, tbl_name, -1); break; } case COM_CHANGE_USER: @@ -1371,7 +1421,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->change_user(); thd->clear_error(); // if errors from rollback - statistic_increment(com_other, &LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); char *user= (char*) packet; char *passwd= strend(user)+1; /* @@ -1385,10 +1435,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, *passwd++ : strlen(passwd); db+= passwd_len + 1; #ifndef EMBEDDED_LIBRARY - /* Small check for incomming packet */ + /* Small check for incoming packet */ if ((uint) ((uchar*) db - net->read_pos) > packet_length) { - send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } #endif @@ -1411,7 +1461,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (!(thd->user= my_strdup(user, MYF(0)))) { thd->user= save_user; - send_error(thd, ER_OUT_OF_RESOURCES); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); break; } @@ -1421,9 +1471,9 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (res) { - /* authentification failure, we shall restore old user */ + /* authentication failure, we shall restore old user */ if (res > 0) - send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); x_free(thd->user); thd->user= save_user; thd->priv_user= save_priv_user; @@ -1448,6 +1498,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd, mysql_stmt_execute(thd, packet, packet_length); break; } + case COM_FETCH: + { + mysql_stmt_fetch(thd, packet, packet_length); + break; + } case COM_LONG_DATA: { mysql_stmt_get_longdata(thd, packet, packet_length); @@ -1476,7 +1531,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, 
DBUG_PRINT("query",("%-.4096s",thd->query)); mysql_parse(thd,thd->query, thd->query_length); - while (!thd->killed && !thd->is_fatal_error && thd->lex->found_colon) + while (!thd->killed && thd->lex->found_colon && !thd->net.report_error) { char *packet= thd->lex->found_colon; /* @@ -1526,7 +1581,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } case COM_FIELD_LIST: // This isn't actually needed #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ break; #else { @@ -1534,20 +1590,29 @@ bool dispatch_command(enum enum_server_command command, THD *thd, TABLE_LIST table_list; LEX_STRING conv_name; - statistic_increment(com_stat[SQLCOM_SHOW_FIELDS],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS], + &LOCK_status); bzero((char*) &table_list,sizeof(table_list)); if (!(table_list.db=thd->db)) { - send_error(thd,ER_NO_DB_ERROR); + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); break; } - thd->free_list=0; pend= strend(packet); thd->convert_string(&conv_name, system_charset_info, packet, (uint) (pend-packet), thd->charset()); table_list.alias= table_list.real_name= conv_name.str; packet= pend+1; - // command not cachable => no gap for data base name + + if (!my_strcasecmp(system_charset_info, table_list.db, + information_schema_name.str)) + { + ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, table_list.alias); + if (schema_table) + table_list.schema_table= schema_table; + } + + /* command not cachable => no gap for data base name */ if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1))) break; mysql_log.write(thd,command,"%s %s",table_list.real_name,fields); @@ -1561,9 +1626,18 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (grant_option && check_grant(thd, SELECT_ACL, &table_list, 2, UINT_MAX, 0)) break; + /* init structures for VIEW processing */ + table_list.select_lex= &(thd->lex->select_lex); + mysql_init_query(thd, (uchar*)"", 0); + thd->lex-> + select_lex.table_list.link_in_list((byte*) &table_list, + (byte**) &table_list.next_local); + + /* switch on VIEW optimisation: do not fill temporary tables */ + thd->lex->sql_command= SQLCOM_SHOW_FIELDS; mysqld_list_fields(thd,&table_list,fields); - free_items(thd->free_list); - thd->free_list= 0; + thd->lex->unit.cleanup(); + thd->cleanup_after_query(); break; } #endif @@ -1579,43 +1653,44 @@ bool dispatch_command(enum enum_server_command command, THD *thd, char *db=thd->strdup(packet), *alias; HA_CREATE_INFO create_info; - statistic_increment(com_stat[SQLCOM_CREATE_DB],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_CREATE_DB], + &LOCK_status); // null test to handle EOM if (!db || !(alias= thd->strdup(db)) || check_db_name(db)) { - net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); + my_error(ER_WRONG_DB_NAME, MYF(0), db ? db : "NULL"); break; } if (check_access(thd,CREATE_ACL,db,0,1,0)) break; mysql_log.write(thd,command,packet); bzero(&create_info, sizeof(create_info)); - if (mysql_create_db(thd, (lower_case_table_names == 2 ? alias : db), - &create_info, 0) < 0) - send_error(thd, thd->killed ? ER_SERVER_SHUTDOWN : 0); + mysql_create_db(thd, (lower_case_table_names == 2 ? 
alias : db), + &create_info, 0); break; } case COM_DROP_DB: // QQ: To be removed { - statistic_increment(com_stat[SQLCOM_DROP_DB],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_DROP_DB], + &LOCK_status); char *db=thd->strdup(packet), *alias; - // null test to handle EOM + /* null test to handle EOM */ if (!db || !(alias= thd->strdup(db)) || check_db_name(db)) { - net_printf(thd,ER_WRONG_DB_NAME, db ? db : "NULL"); + my_error(ER_WRONG_DB_NAME, MYF(0), db ? db : "NULL"); break; } if (check_access(thd,DROP_ACL,db,0,1,0)) break; if (thd->locked_tables || thd->active_transaction()) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); break; } mysql_log.write(thd,command,db); - if (mysql_rm_db(thd, (lower_case_table_names == 2 ? alias : db), - 0, 0) < 0) - send_error(thd, thd->killed ? ER_SERVER_SHUTDOWN : 0); + mysql_rm_db(thd, (lower_case_table_names == 2 ? alias : db), + 0, 0); break; } #ifndef EMBEDDED_LIBRARY @@ -1625,7 +1700,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, ushort flags; uint32 slave_server_id; - statistic_increment(com_other,&LOCK_status); + statistic_increment(thd->status_var.com_other,&LOCK_status); thd->slow_command = TRUE; if (check_global_access(thd, REPL_SLAVE_ACL)) break; @@ -1642,7 +1717,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, (long) pos); mysql_binlog_send(thd, thd->strdup(packet + 10), (my_off_t) pos, flags); unregister_slave(thd,1,1); - // fake COM_QUIT -- if we get here, the thread needs to terminate + /* fake COM_QUIT -- if we get here, the thread needs to terminate */ error = TRUE; net->error = 0; break; @@ -1650,21 +1725,20 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif case COM_REFRESH: { - statistic_increment(com_stat[SQLCOM_FLUSH],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_FLUSH], + &LOCK_status); ulong options= (ulong) (uchar) packet[0]; if (check_global_access(thd,RELOAD_ACL)) break; mysql_log.write(thd,command,NullS); - if (reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, NULL)) - send_error(thd, 0); - else + if (!reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, NULL)) send_ok(thd); break; } #ifndef EMBEDDED_LIBRARY case COM_SHUTDOWN: { - statistic_increment(com_other,&LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); if (check_global_access(thd,SHUTDOWN_ACL)) break; /* purecov: inspected */ /* @@ -1681,7 +1755,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd, else if (level != SHUTDOWN_WAIT_ALL_BUFFERS) { my_error(ER_NOT_SUPPORTED_YET, MYF(0), "this shutdown level"); - send_error(thd); break; } DBUG_PRINT("quit",("Got shutdown command for level %u", level)); @@ -1695,8 +1768,6 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif close_connection(thd, 0, 1); close_thread_tables(thd); // Free before kill - free_root(thd->mem_root,MYF(0)); - free_root(&thd->transaction.mem_root,MYF(0)); kill_mysql(); error=TRUE; break; @@ -1705,7 +1776,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, case COM_STATISTICS: { mysql_log.write(thd,command,NullS); - statistic_increment(com_stat[SQLCOM_SHOW_STATUS],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS], + &LOCK_status); #ifndef EMBEDDED_LIBRARY char buff[200]; #else @@ -1715,8 +1787,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, sprintf((char*) 
buff, "Uptime: %ld Threads: %d Questions: %lu Slow queries: %ld Opens: %ld Flush tables: %ld Open tables: %u Queries per second avg: %.3f", uptime, - (int) thread_count,thd->query_id,long_query_count, - opened_tables,refresh_version, cached_tables(), + (int) thread_count,thd->query_id,thd->status_var.long_query_count, + thd->status_var.opened_tables,refresh_version, cached_tables(), uptime ? (float)thd->query_id/(float)uptime : 0); #ifdef SAFEMALLOC if (sf_malloc_cur_memory) // Using SAFEMALLOC @@ -1731,11 +1803,12 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; } case COM_PING: - statistic_increment(com_other,&LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); send_ok(thd); // Tell client we are alive break; case COM_PROCESS_INFO: - statistic_increment(com_stat[SQLCOM_SHOW_PROCESSLIST],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_PROCESSLIST], + &LOCK_status); if (!thd->priv_user[0] && check_global_access(thd,PROCESS_ACL)) break; mysql_log.write(thd,command,NullS); @@ -1745,14 +1818,15 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; case COM_PROCESS_KILL: { - statistic_increment(com_stat[SQLCOM_KILL],&LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_KILL], &LOCK_status); ulong id=(ulong) uint4korr(packet); - kill_one_thread(thd,id); + kill_one_thread(thd,id,false); break; } case COM_SET_OPTION: { - statistic_increment(com_stat[SQLCOM_SET_OPTION], &LOCK_status); + statistic_increment(thd->status_var.com_stat[SQLCOM_SET_OPTION], + &LOCK_status); enum_mysql_set_option command= (enum_mysql_set_option) uint2korr(packet); switch (command) { case MYSQL_OPTION_MULTI_STATEMENTS_ON: @@ -1764,13 +1838,13 @@ bool dispatch_command(enum enum_server_command command, THD *thd, send_eof(thd); break; default: - send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } break; } case COM_DEBUG: - statistic_increment(com_other,&LOCK_status); + statistic_increment(thd->status_var.com_other, &LOCK_status); if (check_global_access(thd, SUPER_ACL)) break; /* purecov: inspected */ mysql_print_status(thd); @@ -1783,7 +1857,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, case COM_DELAYED_INSERT: case COM_END: default: - send_error(thd, ER_UNKNOWN_COM_ERROR); + my_message(ER_UNKNOWN_COM_ERROR, ER(ER_UNKNOWN_COM_ERROR), MYF(0)); break; } if (thd->lock || thd->open_tables || thd->derived_tables) @@ -1792,8 +1866,11 @@ bool dispatch_command(enum enum_server_command command, THD *thd, close_thread_tables(thd); /* Free tables */ } - if (thd->is_fatal_error) - send_error(thd,0); // End of memory ? 
+ /* report error issued during command execution */ + if (thd->killed_errno() && !thd->net.report_error) + thd->send_kill_message(); + if (thd->net.report_error) + net_send_error(thd); time_t start_of_query=thd->start_time; thd->end_time(); // Set start time @@ -1809,7 +1886,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, (SERVER_QUERY_NO_INDEX_USED | SERVER_QUERY_NO_GOOD_INDEX_USED)) && (specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES))) { - long_query_count++; + thd->status_var.long_query_count++; mysql_slow_log.write(thd, thd->query, thd->query_length, start_of_query); } } @@ -1822,11 +1899,120 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thread_running--; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->packet.shrink(thd->variables.net_buffer_length); // Reclaim some memory + free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); DBUG_RETURN(error); } +int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, + enum enum_schema_tables schema_table_idx) +{ + DBUG_ENTER("prepare_schema_table"); + SELECT_LEX *sel= 0; + switch(schema_table_idx) { + case SCH_SCHEMATA: +#if defined(DONT_ALLOW_SHOW_COMMANDS) + my_message(ER_NOT_ALLOWED_COMMAND, + ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ + DBUG_RETURN(1); +#else + if ((specialflag & SPECIAL_SKIP_SHOW_DB) && + check_global_access(thd, SHOW_DB_ACL)) + DBUG_RETURN(1); + break; +#endif + case SCH_TABLE_NAMES: + case SCH_TABLES: + case SCH_VIEWS: +#ifdef DONT_ALLOW_SHOW_COMMANDS + my_message(ER_NOT_ALLOWED_COMMAND, + ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ + DBUG_RETURN(1); +#else + { + char *db= lex->select_lex.db ? lex->select_lex.db : thd->db; + if (!db) + { + my_message(ER_NO_DB_ERROR, + ER(ER_NO_DB_ERROR), MYF(0)); /* purecov: inspected */ + DBUG_RETURN(1); /* purecov: inspected */ + } + remove_escape(db); // Fix escaped '_' + if (check_db_name(db)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), db); + DBUG_RETURN(1); + } + if (check_access(thd,SELECT_ACL,db,&thd->col_access,0,0)) + DBUG_RETURN(1); /* purecov: inspected */ + if (!thd->col_access && check_grant_db(thd,db)) + { + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + thd->priv_user, thd->priv_host, db); + DBUG_RETURN(1); + } + lex->select_lex.db= db; + break; + } +#endif + case SCH_COLUMNS: + case SCH_STATISTICS: +#ifdef DONT_ALLOW_SHOW_COMMANDS + my_message(ER_NOT_ALLOWED_COMMAND, + ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); /* purecov: inspected */ + DBUG_RETURN(1); +#else + if (table_ident) + { + TABLE_LIST **query_tables_last= lex->query_tables_last; + sel= new SELECT_LEX(); + sel->init_query(); + if(!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ, + (List<String> *) 0, (List<String> *) 0)) + DBUG_RETURN(1); + lex->query_tables_last= query_tables_last; + TABLE_LIST *table_list= (TABLE_LIST*) sel->table_list.first; + char *db= table_list->db; + remove_escape(db); // Fix escaped '_' + remove_escape(table_list->real_name); + if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, + &table_list->grant.privilege, 0, 0)) + DBUG_RETURN(1); /* purecov: inspected */ + if (grant_option && check_grant(thd, SELECT_ACL, table_list, 2, + UINT_MAX, 0)) + DBUG_RETURN(1); + break; + } +#endif + case SCH_OPEN_TABLES: + case SCH_VARIABLES: + case SCH_STATUS: + case SCH_PROCEDURES: + case SCH_CHARSETS: + case SCH_COLLATIONS: + case SCH_COLLATION_CHARACTER_SET_APPLICABILITY: + case SCH_USER_PRIVILEGES: + case SCH_SCHEMA_PRIVILEGES: + case SCH_TABLE_PRIVILEGES: + case SCH_COLUMN_PRIVILEGES: + case 
SCH_TABLE_CONSTRAINTS:
+  case SCH_KEY_COLUMN_USAGE:
+  default:
+    break;
+  }
+
+  SELECT_LEX *select_lex= lex->current_select;
+  if (make_schema_select(thd, select_lex, schema_table_idx))
+  {
+    DBUG_RETURN(1);
+  }
+  TABLE_LIST *table_list= (TABLE_LIST*) select_lex->table_list.first;
+  table_list->schema_select_lex= sel;
+  DBUG_RETURN(0);
+}
+
+
 /*
   Read query from packet and store in thd->query
   Used in COM_QUERY and COM_PREPARE
@@ -1837,8 +2023,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd,
       query_length

     RETURN VALUES
-    0   ok
-    1   error;  In this case thd->fatal_error is set
+    FALSE ok
+    TRUE  error;  In this case thd->fatal_error is set
 */

 bool alloc_query(THD *thd, char *packet, ulong packet_length)
@@ -1863,7 +2049,7 @@ bool alloc_query(THD *thd, char *packet, ulong packet_length)
                                           packet_length,
                                           thd->db_length+ 1 +
                                           QUERY_CACHE_FLAGS_SIZE)))
-    return 1;
+    return TRUE;
   thd->query[packet_length]=0;
   thd->query_length= packet_length;

@@ -1873,7 +2059,7 @@ bool alloc_query(THD *thd, char *packet, ulong packet_length)
   if (!(specialflag & SPECIAL_NO_PRIOR))
     my_pthread_setprio(pthread_self(),QUERY_PRIOR);

-  return 0;
+  return FALSE;
 }

 /****************************************************************************
@@ -1881,23 +2067,55 @@ bool alloc_query(THD *thd, char *packet, ulong packet_length)
 ** Execute command saved in thd and current_lex->sql_command
 ****************************************************************************/

-void
+bool
 mysql_execute_command(THD *thd)
 {
-  int res= 0;
+  bool res= FALSE;
+  int  result= 0;
   LEX  *lex= thd->lex;
+  /* first SELECT_LEX (has a special meaning for many non-SELECT commands) */
   SELECT_LEX *select_lex= &lex->select_lex;
-  TABLE_LIST *tables= (TABLE_LIST*) select_lex->table_list.first;
+  /* first table of first SELECT_LEX */
+  TABLE_LIST *first_table= (TABLE_LIST*) select_lex->table_list.first;
+  /* list of all tables in query */
+  TABLE_LIST *all_tables;
+  /* most outer SELECT_LEX_UNIT of query */
   SELECT_LEX_UNIT *unit= &lex->unit;
   DBUG_ENTER("mysql_execute_command");

   /*
+    In many cases the first table of the main SELECT_LEX has a special
+    meaning => check that it is the first table in the global list and
+    relink it first in the query_tables list if necessary (we need such
+    relinking only for queries with subqueries in the select list; in that
+    case the tables of the subqueries go to the global list first).
+
+    all_tables will differ from first_table only if the outermost
+    SELECT_LEX does not contain tables.
+
+    Because of the above, in places where there should be at least one
+    table in the outermost SELECT_LEX we have the following check:
+    DBUG_ASSERT(first_table == all_tables);
+    DBUG_ASSERT(first_table == all_tables && first_table != 0);
+  */
+  lex->first_lists_tables_same();
+  /* should be assigned after making the first tables the same */
+  all_tables= lex->query_tables;
+
+  if (lex->sql_command != SQLCOM_CREATE_PROCEDURE &&
+      lex->sql_command != SQLCOM_CREATE_SPFUNCTION)
+  {
+    if (sp_cache_functions(thd, lex))
+      DBUG_RETURN(-1);
+  }
+
+  /*
     Reset warning count for each query that uses tables
     A better approach would be to reset this for any commands
     that is not a SHOW command or a select that only access local
    variables, but for now this is probably good enough.
*/ - if (tables || &lex->select_lex != lex->all_selects_list) + if (all_tables || &lex->select_lex != lex->all_selects_list) mysql_reset_errors(thd); #ifdef HAVE_REPLICATION @@ -1907,11 +2125,11 @@ mysql_execute_command(THD *thd) Skip if we are in the slave thread, some table rules have been given and the table list says the query should not be replicated */ - if (all_tables_not_ok(thd,tables)) + if (all_tables_not_ok(thd, all_tables)) { /* we warn the slave SQL thread */ - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); - DBUG_VOID_RETURN; + my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); + DBUG_RETURN(0); } #ifndef TO_BE_DELETED /* @@ -1927,10 +2145,6 @@ mysql_execute_command(THD *thd) #endif } #endif /* !HAVE_REPLICATION */ - if ((&lex->select_lex != lex->all_selects_list || - lex->time_zone_tables_used) && - lex->unit.create_total_list(thd, lex, &tables)) - DBUG_VOID_RETURN; /* When option readonly is set deny operations which change tables. @@ -1940,11 +2154,12 @@ mysql_execute_command(THD *thd) !(thd->slave_thread || (thd->master_access & SUPER_ACL)) && (uc_update_queries[lex->sql_command] > 0)) { - net_printf(thd, ER_OPTION_PREVENTS_STATEMENT, "--read-only"); - DBUG_VOID_RETURN; + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--read-only"); + DBUG_RETURN(-1); } - statistic_increment(com_stat[lex->sql_command],&LOCK_status); + statistic_increment(thd->status_var.com_stat[lex->sql_command], + &LOCK_status); switch (lex->sql_command) { case SQLCOM_SELECT: { @@ -1956,43 +2171,26 @@ mysql_execute_command(THD *thd) } select_result *result=lex->result; - if (tables) + if (all_tables) { - res=check_table_access(thd, - lex->exchange ? SELECT_ACL | FILE_ACL : - SELECT_ACL, - tables,0); + res= check_table_access(thd, + lex->exchange ? SELECT_ACL | FILE_ACL : + SELECT_ACL, + all_tables, 0); } else - res=check_access(thd, lex->exchange ? SELECT_ACL | FILE_ACL : SELECT_ACL, - any_db,0,0,0); + res= check_access(thd, + lex->exchange ? 
SELECT_ACL | FILE_ACL : SELECT_ACL, + any_db, 0, 0, 0); if (res) - { - res=0; - break; // Error message is given - } - /* - In case of single SELECT unit->global_parameters points on first SELECT - TODO: move counters to SELECT_LEX - */ - unit->offset_limit_cnt= (ha_rows) unit->global_parameters->offset_limit; - unit->select_limit_cnt= (ha_rows) (unit->global_parameters->select_limit+ - unit->global_parameters->offset_limit); - if (unit->select_limit_cnt < - (ha_rows) unit->global_parameters->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // no limit - if (unit->select_limit_cnt == HA_POS_ERROR && !select_lex->next_select()) - select_lex->options&= ~OPTION_FOUND_ROWS; + goto error; - if (!(res=open_and_lock_tables(thd,tables))) + if (!(res= open_and_lock_tables(thd, all_tables))) { if (lex->describe) { if (!(result= new select_send())) - { - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_VOID_RETURN; - } + goto error; else thd->send_explain_fields(result); res= mysql_explain_union(thd, &thd->lex->unit, result); @@ -2012,12 +2210,9 @@ mysql_execute_command(THD *thd) else { if (!result && !(result= new select_send())) - { - res= -1; - break; - } - query_cache_store_query(thd, tables); - res= handle_select(thd, lex, result); + goto error; + query_cache_store_query(thd, all_tables); + res= handle_select(thd, lex, result, 0); if (result != lex->result) delete result; } @@ -2056,10 +2251,7 @@ mysql_execute_command(THD *thd) */ DBUG_ASSERT(!is_var_null); if (!pstr) - { - res= -1; - break; // EOM (error should be reported by allocator) - } + goto error; } else { @@ -2077,11 +2269,8 @@ mysql_execute_command(THD *thd) query_len= need_conversion? (pstr->length() * to_cs->mbmaxlen) : pstr->length(); if (!(query_str= alloc_root(thd->mem_root, query_len+1))) - { - res= -1; - break; // EOM (error should be reported by allocator) - } - + goto error; + if (need_conversion) { uint dummy_errors; @@ -2103,8 +2292,8 @@ mysql_execute_command(THD *thd) query_len, query_str)); } thd->command= COM_PREPARE; - if (!mysql_stmt_prepare(thd, query_str, query_len + 1, - &lex->prepared_stmt_name)) + if (!(res= mysql_stmt_prepare(thd, query_str, query_len + 1, + &lex->prepared_stmt_name))) send_ok(thd, 0L, 0L, "Statement prepared"); break; } @@ -2130,21 +2319,21 @@ mysql_execute_command(THD *thd) } else { - res= -1; my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), - lex->prepared_stmt_name.length, lex->prepared_stmt_name.str, + lex->prepared_stmt_name.length, + lex->prepared_stmt_name.str, "DEALLOCATE PREPARE"); + goto error; } break; } case SQLCOM_DO: - if (tables && ((res= check_table_access(thd, SELECT_ACL, tables,0)) || - (res= open_and_lock_tables(thd,tables)))) - break; + if (all_tables && + (check_table_access(thd, SELECT_ACL, all_tables, 0) || + open_and_lock_tables(thd, all_tables))) + goto error; res= mysql_do(thd, *lex->insert_list); - if (thd->net.report_error) - res= -1; break; case SQLCOM_EMPTY_QUERY: @@ -2160,16 +2349,30 @@ mysql_execute_command(THD *thd) { if (check_global_access(thd, SUPER_ACL)) goto error; - // PURGE MASTER LOGS TO 'file' + /* PURGE MASTER LOGS TO 'file' */ res = purge_master_logs(thd, lex->to_log); break; } case SQLCOM_PURGE_BEFORE: { + Item *it; + if (check_global_access(thd, SUPER_ACL)) goto error; - // PURGE MASTER LOGS BEFORE 'data' - res = purge_master_logs_before_date(thd, lex->purge_time); + /* PURGE MASTER LOGS BEFORE 'data' */ + it= (Item *)lex->value_list.head(); + if (it->check_cols(1) || it->fix_fields(lex->thd, 0, &it)) + { + my_error(ER_WRONG_ARGUMENTS, MYF(0), "PURGE LOGS 
BEFORE"); + goto error; + } + it= new Item_func_unix_timestamp(it); + /* + it is OK only emulate fix_fieds, because we need only + value of constant + */ + it->quick_fix_field(); + res = purge_master_logs_before_date(thd, (ulong)it->val_int()); break; } #endif @@ -2194,12 +2397,12 @@ mysql_execute_command(THD *thd) goto error; /* This query don't work now. See comment in repl_failsafe.cc */ #ifndef WORKING_NEW_MASTER - net_printf(thd, ER_NOT_SUPPORTED_YET, "SHOW NEW MASTER"); - res= 1; + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "SHOW NEW MASTER"); + goto error; #else res = show_new_master(thd); -#endif break; +#endif } #ifdef HAVE_REPLICATION @@ -2221,41 +2424,45 @@ mysql_execute_command(THD *thd) case SQLCOM_BACKUP_TABLE: { - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL, tables,0) || + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL, all_tables, 0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ thd->slow_command=TRUE; - res = mysql_backup_table(thd, tables); + res = mysql_backup_table(thd, first_table); break; } case SQLCOM_RESTORE_TABLE: { - if (check_db_used(thd,tables) || - check_table_access(thd, INSERT_ACL, tables,0) || + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, INSERT_ACL, all_tables, 0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ thd->slow_command=TRUE; - res = mysql_restore_table(thd, tables); + res = mysql_restore_table(thd, first_table); break; } case SQLCOM_ASSIGN_TO_KEYCACHE: { - if (check_db_used(thd, tables) || - check_access(thd, INDEX_ACL, tables->db, - &tables->grant.privilege, 0, 0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_access(thd, INDEX_ACL, first_table->db, + &first_table->grant.privilege, 0, 0)) goto error; - res= mysql_assign_to_keycache(thd, tables, &lex->name_and_length); + res= mysql_assign_to_keycache(thd, first_table, &lex->name_and_length); break; } case SQLCOM_PRELOAD_KEYS: { - if (check_db_used(thd, tables) || - check_access(thd, INDEX_ACL, tables->db, - &tables->grant.privilege, 0, 0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_access(thd, INDEX_ACL, first_table->db, + &first_table->grant.privilege, 0, 0)) goto error; - res = mysql_preload_keys(thd, tables); + res = mysql_preload_keys(thd, first_table); break; } #ifdef HAVE_REPLICATION @@ -2291,7 +2498,7 @@ mysql_execute_command(THD *thd) if (check_global_access(thd, SUPER_ACL)) goto error; if (end_active_trans(thd)) - res= -1; + goto error; else res = load_master_data(thd); break; @@ -2304,23 +2511,32 @@ mysql_execute_command(THD *thd) res = innodb_show_status(thd); break; } + case SQLCOM_SHOW_MUTEX_STATUS: + { + if (check_global_access(thd, SUPER_ACL)) + goto error; + res = innodb_mutex_show_status(thd); + break; + } #endif #ifdef HAVE_REPLICATION case SQLCOM_LOAD_MASTER_TABLE: { - if (!tables->db) - tables->db=thd->db; - if (check_access(thd,CREATE_ACL,tables->db,&tables->grant.privilege,0,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (!first_table->db) + first_table->db= thd->db; + if (check_access(thd, CREATE_ACL, first_table->db, + &first_table->grant.privilege, 0, 0)) goto error; /* purecov: inspected */ if (grant_option) { /* Check that the first table has CREATE privilege */ - if 
(check_grant(thd, CREATE_ACL, tables, 0, 1, 0)) + if (check_grant(thd, CREATE_ACL, all_tables, 0, 1, 0)) goto error; } - if (strlen(tables->real_name) > NAME_LEN) + if (strlen(first_table->real_name) > NAME_LEN) { - net_printf(thd,ER_WRONG_TABLE_NAME, tables->real_name); + my_error(ER_WRONG_TABLE_NAME, MYF(0), first_table->real_name); break; } pthread_mutex_lock(&LOCK_active_mi); @@ -2328,7 +2544,7 @@ mysql_execute_command(THD *thd) fetch_master_table will send the error to the client on failure. Give error if the table already exists. */ - if (!fetch_master_table(thd, tables->db, tables->real_name, + if (!fetch_master_table(thd, first_table->db, first_table->real_name, active_mi, 0, 0)) { send_ok(thd); @@ -2340,13 +2556,14 @@ mysql_execute_command(THD *thd) case SQLCOM_CREATE_TABLE: { - /* Skip first table, which is the table we are creating */ - TABLE_LIST *create_table, *create_table_local; - tables= lex->unlink_first_table(tables, &create_table, - &create_table_local); + DBUG_ASSERT(first_table == all_tables && first_table != 0); + bool link_to_local; + // Skip first table, which is the table we are creating + TABLE_LIST *create_table= lex->unlink_first_table(&link_to_local); + TABLE_LIST *select_tables= lex->query_tables; - if ((res= create_table_precheck(thd, tables, create_table))) - goto unsent_create_error; + if ((res= create_table_precheck(thd, select_tables, create_table))) + goto create_error; #ifndef HAVE_READLINK lex->create_info.data_file_name=lex->create_info.index_file_name=0; @@ -2354,15 +2571,12 @@ mysql_execute_command(THD *thd) /* Fix names if symlinked tables */ if (append_file_to_dir(thd, &lex->create_info.data_file_name, create_table->real_name) || - append_file_to_dir(thd,&lex->create_info.index_file_name, + append_file_to_dir(thd, &lex->create_info.index_file_name, create_table->real_name)) - { - res=-1; - goto unsent_create_error; - } + goto create_error; #endif /* - If we are using SET CHARSET without DEFAULT, add an implicite + If we are using SET CHARSET without DEFAULT, add an implicit DEFAULT to not confuse old users. (This may change). 
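
    As a sketch of what the implicit DEFAULT amounts to (illustrative only,
    assuming the usual HA_CREATE_USED_CHARSET / HA_CREATE_USED_DEFAULT_CHARSET
    flags and the charset members of HA_CREATE_INFO; the helper name is
    hypothetical):

      // "CREATE TABLE t (...) CHARACTER SET cs" with no DEFAULT clause given
      static void promote_charset_to_default(HA_CREATE_INFO *create_info)
      {
        if ((create_info->used_fields &
             (HA_CREATE_USED_DEFAULT_CHARSET | HA_CREATE_USED_CHARSET)) ==
            HA_CREATE_USED_CHARSET)
        {
          // Treat it as if DEFAULT CHARACTER SET cs had been written.
          create_info->used_fields&= ~HA_CREATE_USED_CHARSET;
          create_info->used_fields|=  HA_CREATE_USED_DEFAULT_CHARSET;
          create_info->default_table_charset= create_info->table_charset;
          create_info->table_charset= 0;
        }
      }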
*/ if ((lex->create_info.used_fields & @@ -2379,73 +2593,91 @@ mysql_execute_command(THD *thd) select_result *result; select_lex->options|= SELECT_NO_UNLOCK; - unit->offset_limit_cnt= select_lex->offset_limit; - unit->select_limit_cnt= select_lex->select_limit+ - select_lex->offset_limit; - if (unit->select_limit_cnt < select_lex->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // No limit + unit->set_limit(select_lex, select_lex); - if (!(res=open_and_lock_tables(thd,tables))) + if (!(res= open_and_lock_tables(thd, select_tables))) { - res= -1; // If error - if ((result=new select_create(create_table->db, - create_table->real_name, - &lex->create_info, - lex->create_list, - lex->key_list, - select_lex->item_list, lex->duplicates, - lex->ignore))) + /* + Is table which we are changing used somewhere in other parts + of query + */ + if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && + unique_table(create_table, select_tables)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), create_table->real_name); + goto create_error; + } + /* If we create merge table, we have to test tables in merge, too */ + if (lex->create_info.used_fields & HA_CREATE_USED_UNION) + { + TABLE_LIST *tab; + for (tab= (TABLE_LIST*) lex->create_info.merge_list.first; + tab; + tab= tab->next_local) + { + if (unique_table(tab, select_tables)) + { + my_error(ER_UPDATE_TABLE_USED, MYF(0), tab->real_name); + goto create_error; + } + } + } + + if ((result= new select_create(create_table, + &lex->create_info, + lex->create_list, + lex->key_list, + select_lex->item_list, + lex->duplicates, + lex->ignore))) { /* CREATE from SELECT give its SELECT_LEX for SELECT, and item_list belong to SELECT */ select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; - res=handle_select(thd, lex, result); + res= handle_select(thd, lex, result, 0); select_lex->resolve_mode= SELECT_LEX::NOMATTER_MODE; + delete result; } - //reset for PS + /* reset for PS */ lex->create_list.empty(); lex->key_list.empty(); } } - else // regular create + else { + /* regular create */ if (lex->name) res= mysql_create_like_table(thd, create_table, &lex->create_info, (Table_ident *)lex->name); else { - res= mysql_create_table(thd,create_table->db, - create_table->real_name, &lex->create_info, - lex->create_list, - lex->key_list,0,0); + res= mysql_create_table(thd, create_table->db, + create_table->real_name, &lex->create_info, + lex->create_list, + lex->key_list, 0, 0); } if (!res) send_ok(thd); } - - // put tables back for PS rexecuting - tables= lex->link_first_table_back(tables, create_table, - create_table_local); + lex->link_first_table_back(create_table, link_to_local); break; create_error: - res= 1; //error reported -unsent_create_error: - // put tables back for PS rexecuting - tables= lex->link_first_table_back(tables, create_table, - create_table_local); - break; + /* put tables back for PS rexecuting */ + lex->link_first_table_back(create_table, link_to_local); + goto error; } case SQLCOM_CREATE_INDEX: - if (check_one_table_access(thd, INDEX_ACL, tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_one_table_access(thd, INDEX_ACL, all_tables)) goto error; /* purecov: inspected */ thd->slow_command=TRUE; if (end_active_trans(thd)) - res= -1; + goto error; else - res = mysql_create_index(thd, tables, lex->key_list); + res = mysql_create_index(thd, first_table, lex->key_list); break; #ifdef HAVE_REPLICATION @@ -2472,8 +2704,9 @@ unsent_create_error: */ if (thd->locked_tables || thd->active_transaction()) { - 
send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); - break; + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, ER(ER_LOCK_OR_ACTIVE_TRANSACTION), + MYF(0)); + goto error; } { pthread_mutex_lock(&LOCK_active_mi); @@ -2484,29 +2717,31 @@ unsent_create_error: #endif /* HAVE_REPLICATION */ case SQLCOM_ALTER_TABLE: + DBUG_ASSERT(first_table == all_tables && first_table != 0); #if defined(DONT_ALLOW_SHOW_COMMANDS) - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - break; + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ + goto error; #else { ulong priv=0; if (lex->name && (!lex->name[0] || strlen(lex->name) > NAME_LEN)) { - net_printf(thd, ER_WRONG_TABLE_NAME, lex->name); - res= 1; - break; + my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name); + goto error; } if (!select_lex->db) - select_lex->db=tables->db; - if (check_access(thd,ALTER_ACL,tables->db,&tables->grant.privilege,0,0) || + select_lex->db= first_table->db; + if (check_access(thd, ALTER_ACL, first_table->db, + &first_table->grant.privilege, 0, 0) || check_access(thd,INSERT_ACL | CREATE_ACL,select_lex->db,&priv,0,0)|| - check_merge_table_access(thd, tables->db, + check_merge_table_access(thd, first_table->db, (TABLE_LIST *) lex->create_info.merge_list.first)) goto error; /* purecov: inspected */ if (grant_option) { - if (check_grant(thd, ALTER_ACL, tables, 0, UINT_MAX, 0)) + if (check_grant(thd, ALTER_ACL, all_tables, 0, UINT_MAX, 0)) goto error; if (lex->name && !test_all_bits(priv,INSERT_ACL | CREATE_ACL)) { // Rename of table @@ -2524,13 +2759,13 @@ unsent_create_error: lex->create_info.data_file_name=lex->create_info.index_file_name=0; /* ALTER TABLE ends previous transaction */ if (end_active_trans(thd)) - res= -1; + goto error; else { thd->slow_command=TRUE; res= mysql_alter_table(thd, select_lex->db, lex->name, &lex->create_info, - tables, lex->create_list, + first_table, lex->create_list, lex->key_list, select_lex->order_list.elements, (ORDER *) select_lex->order_list.first, @@ -2541,46 +2776,44 @@ unsent_create_error: #endif /*DONT_ALLOW_SHOW_COMMANDS*/ case SQLCOM_RENAME_TABLE: { + DBUG_ASSERT(first_table == all_tables && first_table != 0); TABLE_LIST *table; - if (check_db_used(thd,tables)) + if (check_db_used(thd, all_tables)) goto error; - for (table=tables ; table ; table=table->next->next) + for (table= first_table; table; table= table->next_local->next_local) { if (check_access(thd, ALTER_ACL | DROP_ACL, table->db, &table->grant.privilege,0,0) || - check_access(thd, INSERT_ACL | CREATE_ACL, table->next->db, - &table->next->grant.privilege,0,0)) + check_access(thd, INSERT_ACL | CREATE_ACL, table->next_local->db, + &table->next_local->grant.privilege, 0, 0)) goto error; if (grant_option) { - TABLE_LIST old_list,new_list; + TABLE_LIST old_list, new_list; /* we do not need initialize old_list and new_list because we will come table[0] and table->next[0] there */ - old_list=table[0]; - new_list=table->next[0]; - old_list.next=new_list.next=0; - if (check_grant(thd, ALTER_ACL, &old_list, 0, UINT_MAX, 0) || - (!test_all_bits(table->next->grant.privilege, + old_list= table[0]; + new_list= table->next_local[0]; + if (check_grant(thd, ALTER_ACL, &old_list, 0, 1, 0) || + (!test_all_bits(table->next_local->grant.privilege, INSERT_ACL | CREATE_ACL) && - check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, - UINT_MAX, 0))) + check_grant(thd, INSERT_ACL | CREATE_ACL, &new_list, 0, 1, 0))) goto error; } } - query_cache_invalidate3(thd, tables, 0); - if (end_active_trans(thd)) - 
res= -1; - else if (mysql_rename_tables(thd,tables)) - res= -1; + query_cache_invalidate3(thd, first_table, 0); + if (end_active_trans(thd) || mysql_rename_tables(thd, first_table)) + goto error; break; } #ifndef EMBEDDED_LIBRARY case SQLCOM_SHOW_BINLOGS: #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ + goto error; #else { if (check_global_access(thd, SUPER_ACL)) @@ -2591,40 +2824,43 @@ unsent_create_error: #endif #endif /* EMBEDDED_LIBRARY */ case SQLCOM_SHOW_CREATE: + DBUG_ASSERT(first_table == all_tables && first_table != 0); #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ + goto error; #else { - if (check_db_used(thd, tables) || - check_access(thd, SELECT_ACL | EXTRA_ACL, tables->db, - &tables->grant.privilege,0,0)) + if (check_db_used(thd, all_tables) || + check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db, + &first_table->grant.privilege, 0, 0)) goto error; - if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) + if (grant_option && check_grant(thd, SELECT_ACL, all_tables, 2, UINT_MAX, 0)) goto error; - res= mysqld_show_create(thd, tables); + res= mysqld_show_create(thd, first_table); break; } #endif case SQLCOM_CHECKSUM: { - if (check_db_used(thd,tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL , tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0)) goto error; /* purecov: inspected */ - res = mysql_checksum_table(thd, tables, &lex->check_opt); + res = mysql_checksum_table(thd, first_table, &lex->check_opt); break; } case SQLCOM_REPAIR: { - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->slow_command=TRUE; - res = mysql_repair_table(thd, tables, &lex->check_opt); + res= mysql_repair_table(thd, first_table, &lex->check_opt); /* ! 
we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -2636,24 +2872,25 @@ unsent_create_error: } case SQLCOM_CHECK: { - if (check_db_used(thd,tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL , tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0)) goto error; /* purecov: inspected */ thd->slow_command=TRUE; - res = mysql_check_table(thd, tables, &lex->check_opt); + res = mysql_check_table(thd, first_table, &lex->check_opt); break; } case SQLCOM_ANALYZE: { - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->slow_command=TRUE; - res = mysql_analyze_table(thd, tables, &lex->check_opt); + res = mysql_analyze_table(thd, first_table, &lex->check_opt); /* ! we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -2666,17 +2903,17 @@ unsent_create_error: case SQLCOM_OPTIMIZE: { - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL | INSERT_ACL, tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->slow_command=TRUE; res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ? - mysql_recreate_table(thd, tables, 1) : - mysql_optimize_table(thd, tables, &lex->check_opt); + mysql_recreate_table(thd, first_table, 1) : + mysql_optimize_table(thd, first_table, &lex->check_opt); /* ! 
we write after unlocking the table */ if (!res && !lex->no_write_to_binlog) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); // No binlog error generated @@ -2687,101 +2924,104 @@ unsent_create_error: break; } case SQLCOM_UPDATE: - if (update_precheck(thd, tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (update_precheck(thd, all_tables)) break; - res= mysql_update(thd,tables, - select_lex->item_list, - lex->value_list, - select_lex->where, - select_lex->order_list.elements, - (ORDER *) select_lex->order_list.first, - select_lex->select_limit, - lex->duplicates, lex->ignore); - if (thd->net.report_error) - res= -1; - break; - case SQLCOM_UPDATE_MULTI: - { - if ((res= multi_update_precheck(thd, tables))) + res= (result= mysql_update(thd, all_tables, + select_lex->item_list, + lex->value_list, + select_lex->where, + select_lex->order_list.elements, + (ORDER *) select_lex->order_list.first, + select_lex->select_limit, + lex->duplicates, lex->ignore)); + /* mysql_update return 2 if we need to switch to multi-update */ + if (result != 2) break; - res= mysql_multi_update(thd,tables, - &select_lex->item_list, - &lex->value_list, - select_lex->where, - select_lex->options, - lex->duplicates, lex->ignore, unit, select_lex); + case SQLCOM_UPDATE_MULTI: + { + DBUG_ASSERT(first_table == all_tables && first_table != 0); + /* if we switched from normal update, rights are checked */ + if (result != 2) + { + if ((res= multi_update_precheck(thd, all_tables))) + break; + } + else + res= 0; + + res= mysql_multi_update(thd, all_tables, + &select_lex->item_list, + &lex->value_list, + select_lex->where, + select_lex->options, + lex->duplicates, lex->ignore, unit, select_lex); break; } case SQLCOM_REPLACE: case SQLCOM_INSERT: { - if ((res= insert_precheck(thd, tables))) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if ((res= insert_precheck(thd, all_tables))) break; - res= mysql_insert(thd,tables,lex->field_list,lex->many_values, - lex->update_list, lex->value_list, + res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values, + lex->update_list, lex->value_list, lex->duplicates, lex->ignore); - if (thd->net.report_error) - res= -1; + if (first_table->view && !first_table->contain_auto_increment) + thd->last_insert_id= 0; // do not show last insert ID if VIEW have not it break; } case SQLCOM_REPLACE_SELECT: case SQLCOM_INSERT_SELECT: { - TABLE_LIST *first_local_table= (TABLE_LIST *) select_lex->table_list.first; - if ((res= insert_precheck(thd, tables))) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if ((res= insert_precheck(thd, all_tables))) break; /* Fix lock for first table */ - if (tables->lock_type == TL_WRITE_DELAYED) - tables->lock_type= TL_WRITE; + if (first_table->lock_type == TL_WRITE_DELAYED) + first_table->lock_type= TL_WRITE; /* Don't unlock tables until command is written to binary log */ select_lex->options|= SELECT_NO_UNLOCK; select_result *result; - unit->offset_limit_cnt= select_lex->offset_limit; - unit->select_limit_cnt= select_lex->select_limit+select_lex->offset_limit; - if (unit->select_limit_cnt < select_lex->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // No limit - - if (find_real_table_in_list(tables->next, tables->db, tables->real_name)) - { - /* Using same table for INSERT and SELECT */ - select_lex->options |= OPTION_BUFFER_RESULT; - } + unit->set_limit(select_lex, select_lex); - if (!(res= open_and_lock_tables(thd, tables)) && - !(res= 
mysql_prepare_insert(thd, tables, first_local_table, - tables->table, lex->field_list, 0, - lex->update_list, lex->value_list, - lex->duplicates)) && - (result= new select_insert(tables->table, &lex->field_list, - &lex->update_list, &lex->value_list, - lex->duplicates, lex->ignore))) + if (!(res= open_and_lock_tables(thd, all_tables))) { - TABLE *table= tables->table; /* Skip first table, which is the table we are inserting in */ - lex->select_lex.table_list.first= (byte*) first_local_table->next; - /* - insert/replace from SELECT give its SELECT_LEX for SELECT, - and item_list belong to SELECT - */ - lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; - res= handle_select(thd, lex, result); + lex->select_lex.table_list.first= (byte*)first_table->next_local; + + res= mysql_insert_select_prepare(thd); + if (!res && (result= new select_insert(first_table, first_table->table, + &lex->field_list, + &lex->update_list, &lex->value_list, + lex->duplicates, lex->ignore))) + { + /* + insert/replace from SELECT give its SELECT_LEX for SELECT, + and item_list belong to SELECT + */ + lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; + res= handle_select(thd, lex, result, OPTION_SETUP_TABLES_DONE); + lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; + delete result; + } /* revert changes for SP */ - lex->select_lex.table_list.first= (byte*) first_local_table; - lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; - delete result; - table->insert_values= 0; - if (thd->net.report_error) - res= -1; + lex->select_lex.table_list.first= (byte*) first_table; } else - res= -1; + res= TRUE; + + if (first_table->view && !first_table->contain_auto_increment) + thd->last_insert_id= 0; // do not show last insert ID if VIEW have not it + break; } case SQLCOM_TRUNCATE: - if (check_one_table_access(thd, DELETE_ACL, tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_one_table_access(thd, DELETE_ACL, all_tables)) goto error; /* Don't allow this within a transaction because we want to use @@ -2789,66 +3029,57 @@ unsent_create_error: */ if (thd->locked_tables || thd->active_transaction()) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION,NullS); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); goto error; } - res=mysql_truncate(thd, tables, 0); + + res= mysql_truncate(thd, first_table, 0); break; case SQLCOM_DELETE: { - if ((res= delete_precheck(thd, tables))) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if ((res= delete_precheck(thd, all_tables))) break; - res = mysql_delete(thd,tables, select_lex->where, + res = mysql_delete(thd, all_tables, select_lex->where, &select_lex->order_list, select_lex->select_limit, select_lex->options); - if (thd->net.report_error) - res= -1; break; } case SQLCOM_DELETE_MULTI: { + DBUG_ASSERT(first_table == all_tables && first_table != 0); TABLE_LIST *aux_tables= (TABLE_LIST *)thd->lex->auxilliary_table_list.first; - TABLE_LIST *target_tbl; uint table_count; multi_delete *result; - if ((res= multi_delete_precheck(thd, tables, &table_count))) + if ((res= multi_delete_precheck(thd, all_tables, &table_count))) break; /* condition will be TRUE on SP re-excuting */ if (select_lex->item_list.elements != 0) select_lex->item_list.empty(); if (add_item_to_list(thd, new Item_null())) - { - res= -1; - break; - } + goto error; thd->proc_info="init"; - if ((res=open_and_lock_tables(thd,tables))) + if ((res= open_and_lock_tables(thd, all_tables))) break; - /* Fix tables-to-be-deleted-from list to 
point at opened tables */ - for (target_tbl= (TABLE_LIST*) aux_tables; - target_tbl; - target_tbl= target_tbl->next) + + if (!first_table->table) { - TABLE_LIST *orig= target_tbl->table_list; - target_tbl->table= orig->table; - /* - Multi-delete can't be constructed over-union => we always have - single SELECT on top and have to check underlying SELECTs of it - */ - if (lex->select_lex.check_updateable_in_subqueries(orig->db, - orig->real_name)) - { - my_error(ER_UPDATE_TABLE_USED, MYF(0), - orig->real_name); - res= -1; - break; - } + DBUG_ASSERT(first_table->view && + first_table->ancestor && first_table->ancestor->next_local); + my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), + first_table->view_db.str, first_table->view_name.str); + res= -1; + break; } + if ((res= mysql_multi_delete_prepare(thd))) + goto error; + if (!thd->is_fatal_error && (result= new multi_delete(thd,aux_tables, table_count))) { @@ -2860,28 +3091,25 @@ unsent_create_error: 0, (ORDER *)NULL, (ORDER *)NULL, (Item *)NULL, (ORDER *)NULL, select_lex->options | thd->options | - SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK, + SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK | + OPTION_SETUP_TABLES_DONE, result, unit, select_lex); - if (thd->net.report_error) - res= -1; delete result; } else - res= -1; // Error is not sent + res= TRUE; close_thread_tables(thd); break; } case SQLCOM_DROP_TABLE: { + DBUG_ASSERT(first_table == all_tables && first_table != 0); if (!lex->drop_temporary) { - if (check_table_access(thd,DROP_ACL,tables,0)) + if (check_table_access(thd, DROP_ACL, all_tables, 0)) goto error; /* purecov: inspected */ if (end_active_trans(thd)) - { - res= -1; - break; - } + goto error; } else { @@ -2896,28 +3124,19 @@ unsent_create_error: if (thd->slave_thread) lex->drop_if_exists= 1; } - res= mysql_rm_table(thd,tables,lex->drop_if_exists, lex->drop_temporary); + res= mysql_rm_table(thd, first_table, lex->drop_if_exists, + lex->drop_temporary); } break; case SQLCOM_DROP_INDEX: - if (check_one_table_access(thd, INDEX_ACL, tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_one_table_access(thd, INDEX_ACL, all_tables)) goto error; /* purecov: inspected */ if (end_active_trans(thd)) - res= -1; - else - res = mysql_drop_index(thd, tables, &lex->alter_info); - break; - case SQLCOM_SHOW_DATABASES: -#if defined(DONT_ALLOW_SHOW_COMMANDS) - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; -#else - if ((specialflag & SPECIAL_SKIP_SHOW_DB) && - check_global_access(thd, SHOW_DB_ACL)) goto error; - res= mysqld_show_dbs(thd, (lex->wild ? lex->wild->ptr() : NullS)); + else + res = mysql_drop_index(thd, first_table, &lex->alter_info); break; -#endif case SQLCOM_SHOW_PROCESSLIST: if (!thd->priv_user[0] && check_global_access(thd,PROCESS_ACL)) break; @@ -2934,19 +3153,11 @@ unsent_create_error: case SQLCOM_SHOW_COLUMN_TYPES: res= mysqld_show_column_types(thd); break; - case SQLCOM_SHOW_STATUS: - res= mysqld_show(thd,(lex->wild ? lex->wild->ptr() : NullS),status_vars, - OPT_GLOBAL, &LOCK_status); - break; - case SQLCOM_SHOW_VARIABLES: - res= mysqld_show(thd, (lex->wild ? 
lex->wild->ptr() : NullS), - init_vars, lex->option_type, - &LOCK_global_system_variables); - break; case SQLCOM_SHOW_LOGS: #ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), + MYF(0)); /* purecov: inspected */ + goto error; #else { if (grant_option && check_access(thd, FILE_ACL, any_db,0,0,0)) @@ -2955,104 +3166,19 @@ unsent_create_error: break; } #endif - case SQLCOM_SHOW_TABLES: - /* FALL THROUGH */ -#ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; -#else - { - char *db=select_lex->db ? select_lex->db : thd->db; - if (!db) - { - send_error(thd,ER_NO_DB_ERROR); /* purecov: inspected */ - goto error; /* purecov: inspected */ - } - remove_escape(db); // Fix escaped '_' - if (check_db_name(db)) - { - net_printf(thd,ER_WRONG_DB_NAME, db); - goto error; - } - if (check_access(thd,SELECT_ACL,db,&thd->col_access,0,0)) - goto error; /* purecov: inspected */ - if (!thd->col_access && check_grant_db(thd,db)) - { - net_printf(thd, ER_DBACCESS_DENIED_ERROR, - thd->priv_user, - thd->priv_host, - db); - goto error; - } - /* grant is checked in mysqld_show_tables */ - if (lex->describe) - res= mysqld_extend_show_tables(thd,db, - (lex->wild ? lex->wild->ptr() : NullS)); - else - res= mysqld_show_tables(thd,db, - (lex->wild ? lex->wild->ptr() : NullS)); - break; - } -#endif - case SQLCOM_SHOW_OPEN_TABLES: - res= mysqld_show_open_tables(thd,(lex->wild ? lex->wild->ptr() : NullS)); - break; - case SQLCOM_SHOW_CHARSETS: - res= mysqld_show_charsets(thd,(lex->wild ? lex->wild->ptr() : NullS)); - break; - case SQLCOM_SHOW_COLLATIONS: - res= mysqld_show_collations(thd,(lex->wild ? lex->wild->ptr() : NullS)); - break; - case SQLCOM_SHOW_FIELDS: -#ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; -#else - { - char *db=tables->db; - remove_escape(db); // Fix escaped '_' - remove_escape(tables->real_name); - if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, - &tables->grant.privilege, 0, 0)) - goto error; /* purecov: inspected */ - if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) - goto error; - res= mysqld_show_fields(thd,tables, - (lex->wild ? lex->wild->ptr() : NullS), - lex->verbose); - break; - } -#endif - case SQLCOM_SHOW_KEYS: -#ifdef DONT_ALLOW_SHOW_COMMANDS - send_error(thd,ER_NOT_ALLOWED_COMMAND); /* purecov: inspected */ - DBUG_VOID_RETURN; -#else - { - char *db=tables->db; - remove_escape(db); // Fix escaped '_' - remove_escape(tables->real_name); - if (check_access(thd,SELECT_ACL | EXTRA_ACL,db, - &tables->grant.privilege, 0, 0)) - goto error; /* purecov: inspected */ - if (grant_option && check_grant(thd, SELECT_ACL, tables, 2, UINT_MAX, 0)) - goto error; - res= mysqld_show_keys(thd,tables); - break; - } -#endif case SQLCOM_CHANGE_DB: mysql_change_db(thd,select_lex->db); break; case SQLCOM_LOAD: { + DBUG_ASSERT(first_table == all_tables && first_table != 0); uint privilege= (lex->duplicates == DUP_REPLACE ? INSERT_ACL | DELETE_ACL : INSERT_ACL); if (!lex->local_file) { - if (check_access(thd,privilege | FILE_ACL,tables->db,0,0,0)) + if (check_access(thd, privilege | FILE_ACL, first_table->db, 0, 0, 0)) goto error; } else @@ -3060,29 +3186,29 @@ unsent_create_error: if (!(thd->client_capabilities & CLIENT_LOCAL_FILES) || ! 
opt_local_infile) { - send_error(thd,ER_NOT_ALLOWED_COMMAND); + my_message(ER_NOT_ALLOWED_COMMAND, ER(ER_NOT_ALLOWED_COMMAND), MYF(0)); goto error; } - if (check_one_table_access(thd, privilege, tables)) + if (check_one_table_access(thd, privilege, all_tables)) goto error; } - res=mysql_load(thd, lex->exchange, tables, lex->field_list, - lex->duplicates, lex->ignore, (bool) lex->local_file, lex->lock_option); + res= mysql_load(thd, lex->exchange, first_table, lex->field_list, + lex->duplicates, lex->ignore, (bool) lex->local_file, + lex->lock_option); break; } case SQLCOM_SET_OPTION: { List<set_var_base> *lex_var_list= &lex->var_list; - if (tables && ((res= check_table_access(thd, SELECT_ACL, tables,0)) || - (res= open_and_lock_tables(thd,tables)))) - break; + if (all_tables && + (check_table_access(thd, SELECT_ACL, all_tables, 0) || + open_and_lock_tables(thd, all_tables))) + goto error; if (lex->one_shot_set && not_all_support_one_shot(lex_var_list)) { - my_printf_error(0, "The SET ONE_SHOT syntax is reserved for \ -purposes internal to the MySQL server", MYF(0)); - res= -1; - break; + my_error(ER_RESERVED_SYNTAX, MYF(0), "SET ONE_SHOT"); + goto error; } if (!(res= sql_set_variables(thd, lex_var_list))) { @@ -3093,8 +3219,6 @@ purposes internal to the MySQL server", MYF(0)); thd->one_shot_set|= lex->one_shot_set; send_ok(thd); } - if (thd->net.report_error) - res= -1; break; } @@ -3117,17 +3241,18 @@ purposes internal to the MySQL server", MYF(0)); break; case SQLCOM_LOCK_TABLES: unlock_locked_tables(thd); - if (check_db_used(thd,tables) || end_active_trans(thd)) + if (check_db_used(thd, all_tables) || end_active_trans(thd)) goto error; - if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, tables,0)) + if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables, 0)) goto error; thd->in_lock_tables=1; thd->options|= OPTION_TABLE_LOCK; - if (!(res= open_and_lock_tables(thd, tables))) + + if (!(res= open_and_lock_tables(thd, all_tables))) { #ifdef HAVE_QUERY_CACHE if (thd->variables.query_cache_wlock_invalidate) - query_cache.invalidate_locked_for_write(tables); + query_cache.invalidate_locked_for_write(first_table); #endif /*HAVE_QUERY_CACHE*/ thd->locked_tables=thd->lock; thd->lock=0; @@ -3142,22 +3267,22 @@ purposes internal to the MySQL server", MYF(0)); char *alias; if (!(alias=thd->strdup(lex->name)) || check_db_name(lex->name)) { - net_printf(thd,ER_WRONG_DB_NAME, lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); break; } /* If in a slave thread : CREATE DATABASE DB was certainly not preceded by USE DB. - For that reason, db_ok() in sql/slave.cc did not check the + For that reason, db_ok() in sql/slave.cc did not check the do_db/ignore_db. And as this query involves no tables, tables_ok() above was not called. So we have to check rules again here. 
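
    For illustration only (the patch simply calls db_ok() and
    db_ok_with_wild_table() inline, as below); a hypothetical predicate
    bundling the same filter checks might look like:

      // Should the slave SQL thread apply a statement that only names a
      // database (CREATE/DROP/ALTER DATABASE), given the replication filters?
      static bool slave_may_apply_db_statement(THD *thd, const char *db)
      {
        if (!thd->slave_thread)
          return true;                     // not running as the slave SQL thread
        // --replicate-do-db / --replicate-ignore-db rules
        if (!db_ok(db, replicate_do_db, replicate_ignore_db))
          return false;
        // --replicate-wild-do-table / --replicate-wild-ignore-table rules
        return db_ok_with_wild_table(db) != 0;
      }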
*/ #ifdef HAVE_REPLICATION - if (thd->slave_thread && + if (thd->slave_thread && (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(lex->name))) { - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif @@ -3172,7 +3297,7 @@ purposes internal to the MySQL server", MYF(0)); char *alias; if (!(alias=thd->strdup(lex->name)) || check_db_name(lex->name)) { - net_printf(thd, ER_WRONG_DB_NAME, lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); break; } /* @@ -3187,7 +3312,7 @@ purposes internal to the MySQL server", MYF(0)); (!db_ok(lex->name, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(lex->name))) { - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif @@ -3195,7 +3320,8 @@ purposes internal to the MySQL server", MYF(0)); break; if (thd->locked_tables || thd->active_transaction()) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); goto error; } res=mysql_rm_db(thd, (lower_case_table_names == 2 ? alias : lex->name), @@ -3207,12 +3333,12 @@ purposes internal to the MySQL server", MYF(0)); char *db= lex->name ? lex->name : thd->db; if (!db) { - send_error(thd, ER_NO_DB_ERROR); - goto error; + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + break; } if (!strip_sp(db) || check_db_name(db)) { - net_printf(thd, ER_WRONG_DB_NAME, db); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); break; } /* @@ -3227,7 +3353,7 @@ purposes internal to the MySQL server", MYF(0)); (!db_ok(db, replicate_do_db, replicate_ignore_db) || !db_ok_with_wild_table(db))) { - my_error(ER_SLAVE_IGNORED_TABLE, MYF(0)); + my_message(ER_SLAVE_IGNORED_TABLE, ER(ER_SLAVE_IGNORED_TABLE), MYF(0)); break; } #endif @@ -3235,7 +3361,8 @@ purposes internal to the MySQL server", MYF(0)); break; if (thd->locked_tables || thd->active_transaction()) { - send_error(thd,ER_LOCK_OR_ACTIVE_TRANSACTION); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); goto error; } res= mysql_alter_db(thd, db, &lex->create_info); @@ -3245,7 +3372,7 @@ purposes internal to the MySQL server", MYF(0)); { if (!strip_sp(lex->name) || check_db_name(lex->name)) { - net_printf(thd,ER_WRONG_DB_NAME, lex->name); + my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); break; } if (check_access(thd,SELECT_ACL,lex->name,0,1,0)) @@ -3253,38 +3380,64 @@ purposes internal to the MySQL server", MYF(0)); res=mysqld_show_create_db(thd,lex->name,&lex->create_info); break; } - case SQLCOM_CREATE_FUNCTION: + case SQLCOM_CREATE_FUNCTION: // UDF function + { if (check_access(thd,INSERT_ACL,"mysql",0,1,0)) break; #ifdef HAVE_DLOPEN - if (!(res = mysql_create_function(thd,&lex->udf))) + if (sp_find_function(thd, lex->spname)) + { + my_error(ER_UDF_EXISTS, MYF(0), lex->spname->m_name.str); + goto error; + } + if (!(res = mysql_create_function(thd, &lex->udf))) send_ok(thd); #else - res= -1; + res= TRUE; #endif break; - case SQLCOM_DROP_FUNCTION: - if (check_access(thd,DELETE_ACL,"mysql",0,1,0)) + } +#ifndef NO_EMBEDDED_ACCESS_CHECKS + case SQLCOM_CREATE_USER: + { + if (check_access(thd, GRANT_ACL,"mysql",0,1,0)) break; -#ifdef HAVE_DLOPEN - if (!(res = mysql_drop_function(thd,&lex->udf.name))) + if (!(res= mysql_create_user(thd, lex->users_list))) + { + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 
0, FALSE); + mysql_bin_log.write(&qinfo); + } send_ok(thd); -#else - res= -1; -#endif + } break; -#ifndef NO_EMBEDDED_ACCESS_CHECKS + } case SQLCOM_DROP_USER: { if (check_access(thd, GRANT_ACL,"mysql",0,1,0)) break; if (!(res= mysql_drop_user(thd, lex->users_list))) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); - mysql_bin_log.write(&qinfo); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); + } + send_ok(thd); + } + break; + } + case SQLCOM_RENAME_USER: + { + if (check_access(thd, GRANT_ACL,"mysql",0,1,0)) + break; + if (!(res= mysql_rename_user(thd, lex->users_list))) + { + if (mysql_bin_log.is_open()) + { + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); } send_ok(thd); } @@ -3296,7 +3449,6 @@ purposes internal to the MySQL server", MYF(0)); break; if (!(res = mysql_revoke_all(thd, lex->users_list))) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); @@ -3310,34 +3462,12 @@ purposes internal to the MySQL server", MYF(0)); case SQLCOM_GRANT: { if (check_access(thd, lex->grant | lex->grant_tot_col | GRANT_ACL, - tables && tables->db ? tables->db : select_lex->db, - tables ? &tables->grant.privilege : 0, - tables ? 0 : 1,0)) + ((first_table && first_table->db) ? + first_table->db : select_lex->db), + first_table ? &first_table->grant.privilege : 0, + first_table ? 0 : 1, 0)) goto error; - /* - Check that the user isn't trying to change a password for another - user if he doesn't have UPDATE privilege to the MySQL database - */ - - if (thd->user) // If not replication - { - LEX_USER *user; - List_iterator <LEX_USER> user_list(lex->users_list); - while ((user=user_list++)) - { - if (user->password.str && - (strcmp(thd->user,user->user.str) || - user->host.str && - my_strcasecmp(&my_charset_latin1, - user->host.str, thd->host_or_ip))) - { - if (check_access(thd, UPDATE_ACL, "mysql",0,1,0)) - goto error; - break; // We are allowed to do changes - } - } - } if (specialflag & SPECIAL_NO_RESOLVE) { LEX_USER *user; @@ -3351,51 +3481,63 @@ purposes internal to the MySQL server", MYF(0)); user->host.str); } } - if (tables) + if (first_table) { - if (grant_option && check_grant(thd, - (lex->grant | lex->grant_tot_col | - GRANT_ACL), - tables, 0, UINT_MAX, 0)) - goto error; - if (!(res = mysql_table_grant(thd,tables,lex->users_list, lex->columns, - lex->grant, - lex->sql_command == SQLCOM_REVOKE))) + if (!lex->columns.elements && + sp_exists_routine(thd, all_tables, 1, 1)) { - mysql_update_log.write(thd, thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); - mysql_bin_log.write(&qinfo); - } + uint grants= lex->all_privileges + ? 
(PROC_ACLS & ~GRANT_ACL) | (lex->grant & GRANT_ACL) + : lex->grant; + if (grant_option && + check_grant_procedure(thd, grants | GRANT_ACL, all_tables, 0)) + goto error; + res= mysql_procedure_grant(thd, all_tables, lex->users_list, + grants, lex->sql_command == SQLCOM_REVOKE,0); + } + else + { + if (grant_option && check_grant(thd, + (lex->grant | lex->grant_tot_col | + GRANT_ACL), + all_tables, 0, UINT_MAX, 0)) + goto error; + res= mysql_table_grant(thd, all_tables, lex->users_list, + lex->columns, lex->grant, + lex->sql_command == SQLCOM_REVOKE); + } + if (!res && mysql_bin_log.is_open()) + { + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); + mysql_bin_log.write(&qinfo); } } else { if (lex->columns.elements) { - send_error(thd,ER_ILLEGAL_GRANT_FOR_TABLE); - res=1; + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, ER(ER_ILLEGAL_GRANT_FOR_TABLE), + MYF(0)); + goto error; } else res = mysql_grant(thd, select_lex->db, lex->users_list, lex->grant, lex->sql_command == SQLCOM_REVOKE); if (!res) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); mysql_bin_log.write(&qinfo); } - if (mqh_used && lex->sql_command == SQLCOM_GRANT) + if (lex->sql_command == SQLCOM_GRANT) { List_iterator <LEX_USER> str_list(lex->users_list); LEX_USER *user; while ((user=str_list++)) - reset_mqh(thd,user); + reset_mqh(user); } } } @@ -3410,16 +3552,14 @@ purposes internal to the MySQL server", MYF(0)); lex->no_write_to_binlog= 1; case SQLCOM_FLUSH: { - if (check_global_access(thd,RELOAD_ACL) || check_db_used(thd, tables)) + if (check_global_access(thd,RELOAD_ACL) || check_db_used(thd, all_tables)) goto error; /* reload_acl_and_cache() will tell us if we are allowed to write to the binlog or not. */ bool write_to_binlog; - if (reload_acl_and_cache(thd, lex->type, tables, &write_to_binlog)) - send_error(thd, 0); - else + if (!reload_acl_and_cache(thd, lex->type, first_table, &write_to_binlog)) { /* We WANT to write and we CAN write. 
@@ -3427,7 +3567,6 @@ purposes internal to the MySQL server", MYF(0)); */ if (!lex->no_write_to_binlog && write_to_binlog) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); @@ -3439,11 +3578,20 @@ purposes internal to the MySQL server", MYF(0)); break; } case SQLCOM_KILL: - kill_one_thread(thd,lex->thread_id); + { + Item *it= (Item *)lex->value_list.head(); + + if (it->fix_fields(lex->thd, 0, &it) || it->check_cols(1)) + { + my_message(ER_SET_CONSTANTS_ONLY, ER(ER_SET_CONSTANTS_ONLY), + MYF(0)); + goto error; + } + kill_one_thread(thd, (ulong)it->val_int(), lex->type & ONLY_KILL_QUERY); break; + } #ifndef NO_EMBEDDED_ACCESS_CHECKS case SQLCOM_SHOW_GRANTS: - res=0; if ((thd->priv_user && !strcmp(thd->priv_user,lex->grant_user->user.str)) || !check_access(thd, SELECT_ACL, "mysql",0,1,0)) @@ -3453,40 +3601,41 @@ purposes internal to the MySQL server", MYF(0)); break; #endif case SQLCOM_HA_OPEN: - if (check_db_used(thd,tables) || - check_table_access(thd,SELECT_ACL, tables,0)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables) || + check_table_access(thd, SELECT_ACL, all_tables, 0)) goto error; - res = mysql_ha_open(thd, tables); + res= mysql_ha_open(thd, first_table); break; case SQLCOM_HA_CLOSE: - if (check_db_used(thd,tables)) + DBUG_ASSERT(first_table == all_tables && first_table != 0); + if (check_db_used(thd, all_tables)) goto error; - res = mysql_ha_close(thd, tables); + res= mysql_ha_close(thd, first_table); break; case SQLCOM_HA_READ: + DBUG_ASSERT(first_table == all_tables && first_table != 0); /* There is no need to check for table permissions here, because if a user has no permissions to read a table, he won't be able to open it (with SQLCOM_HA_OPEN) in the first place. */ - if (check_db_used(thd,tables)) + if (check_db_used(thd, all_tables)) goto error; - res = mysql_ha_read(thd, tables, lex->ha_read_mode, lex->backup_dir, - lex->insert_list, lex->ha_rkey_mode, select_lex->where, - select_lex->select_limit, select_lex->offset_limit); + res= mysql_ha_read(thd, first_table, lex->ha_read_mode, lex->backup_dir, + lex->insert_list, lex->ha_rkey_mode, select_lex->where, + select_lex->select_limit, select_lex->offset_limit); break; case SQLCOM_BEGIN: if (thd->locked_tables) { thd->lock=thd->locked_tables; - thd->locked_tables=0; // Will be automaticly closed + thd->locked_tables=0; // Will be automatically closed close_thread_tables(thd); // Free tables } if (end_active_trans(thd)) - { - res= -1; - } + goto error; else { thd->options= ((thd->options & (ulong) ~(OPTION_STATUS_NO_TRANS_UPDATE)) | @@ -3511,7 +3660,7 @@ purposes internal to the MySQL server", MYF(0)); send_ok(thd); } else - res= -1; + goto error; break; } case SQLCOM_ROLLBACK: @@ -3528,36 +3677,413 @@ purposes internal to the MySQL server", MYF(0)); message in the error log, so we don't send it. 
*/ if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && !thd->slave_thread) - send_warning(thd,ER_WARNING_NOT_COMPLETE_ROLLBACK,0); - else - send_ok(thd); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARNING_NOT_COMPLETE_ROLLBACK, + ER(ER_WARNING_NOT_COMPLETE_ROLLBACK)); + send_ok(thd); } else - res= -1; + res= TRUE; thd->options&= ~(ulong) (OPTION_BEGIN | OPTION_STATUS_NO_TRANS_UPDATE); break; case SQLCOM_ROLLBACK_TO_SAVEPOINT: if (!ha_rollback_to_savepoint(thd, lex->savepoint_name)) { if ((thd->options & OPTION_STATUS_NO_TRANS_UPDATE) && !thd->slave_thread) - send_warning(thd, ER_WARNING_NOT_COMPLETE_ROLLBACK, 0); - else - send_ok(thd); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_WARNING_NOT_COMPLETE_ROLLBACK, + ER(ER_WARNING_NOT_COMPLETE_ROLLBACK)); + send_ok(thd); } else - res= -1; + goto error; break; case SQLCOM_SAVEPOINT: if (!ha_savepoint(thd, lex->savepoint_name)) send_ok(thd); else - res= -1; + goto error; break; + case SQLCOM_CREATE_PROCEDURE: + case SQLCOM_CREATE_SPFUNCTION: + { + uint namelen; + char *name, *db; + int result; + + DBUG_ASSERT(lex->sphead); + + if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0)) + { + delete lex->sphead; + lex->sphead= 0; + goto error; + } + + if (!lex->sphead->m_db.str || !lex->sphead->m_db.str[0]) + { + lex->sphead->m_db.length= strlen(thd->db); + lex->sphead->m_db.str= strmake_root(thd->mem_root, thd->db, + lex->sphead->m_db.length); + } + + name= lex->sphead->name(&namelen); +#ifdef HAVE_DLOPEN + if (lex->sphead->m_type == TYPE_ENUM_FUNCTION) + { + udf_func *udf = find_udf(name, namelen); + + if (udf) + { + my_error(ER_UDF_EXISTS, MYF(0), name); + delete lex->sphead; + lex->sphead= 0; + goto error; + } + } +#endif + if (lex->sphead->m_type == TYPE_ENUM_FUNCTION && + !lex->sphead->m_has_return) + { + my_error(ER_SP_NORETURN, MYF(0), name); + delete lex->sphead; + lex->sphead= 0; + goto error; + } + + name= thd->strdup(name); + db= thd->strmake(lex->sphead->m_db.str, lex->sphead->m_db.length); + res= (result= lex->sphead->create(thd)); + switch (result) { + case SP_OK: + lex->unit.cleanup(); + delete lex->sphead; + lex->sphead= 0; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* only add privileges if really neccessary */ + if (sp_automatic_privileges && + check_procedure_access(thd, DEFAULT_CREATE_PROC_ACLS, + db, name, 1)) + { + close_thread_tables(thd); + if (sp_grant_privileges(thd, db, name)) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_PROC_AUTO_GRANT_FAIL, + ER(ER_PROC_AUTO_GRANT_FAIL)); + } +#endif + send_ok(thd); + break; + case SP_WRITE_ROW_FAILED: + my_error(ER_SP_ALREADY_EXISTS, MYF(0), SP_TYPE_STRING(lex), name); + lex->unit.cleanup(); + delete lex->sphead; + lex->sphead= 0; + goto error; + case SP_NO_DB_ERROR: + my_error(ER_BAD_DB_ERROR, MYF(0), lex->sphead->m_db.str); + lex->unit.cleanup(); + delete lex->sphead; + lex->sphead= 0; + goto error; + default: + my_error(ER_SP_STORE_FAILED, MYF(0), SP_TYPE_STRING(lex), name); + lex->unit.cleanup(); + delete lex->sphead; + lex->sphead= 0; + goto error; + } + break; + } + case SQLCOM_CALL: + { + sp_head *sp; + + if (!(sp= sp_find_procedure(thd, lex->spname))) + { + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PROCEDURE", + lex->spname->m_qname.str); + goto error; + } + else + { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + st_sp_security_context save_ctx; +#endif + ha_rows select_limit; + /* bits that should be cleared in thd->server_status */ + uint bits_to_be_cleared= 0; + + /* In case the arguments are subselects... 
*/ + if (all_tables && + (check_table_access(thd, SELECT_ACL, all_tables, 0) || + open_and_lock_tables(thd, all_tables))) + goto error; + +#ifndef EMBEDDED_LIBRARY + my_bool nsok= thd->net.no_send_ok; + thd->net.no_send_ok= TRUE; +#endif + if (sp->m_multi_results) + { + if (! (thd->client_capabilities & CLIENT_MULTI_RESULTS)) + { + my_message(ER_SP_BADSELECT, ER(ER_SP_BADSELECT), MYF(0)); +#ifndef EMBEDDED_LIBRARY + thd->net.no_send_ok= nsok; +#endif + goto error; + } + /* + If SERVER_MORE_RESULTS_EXISTS is not set, + then remember that it should be cleared + */ + bits_to_be_cleared= (~thd->server_status & + SERVER_MORE_RESULTS_EXISTS); + thd->server_status|= SERVER_MORE_RESULTS_EXISTS; + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (check_procedure_access(thd, EXECUTE_ACL, + sp->m_db.str, sp->m_name.str, 0)) + { +#ifndef EMBEDDED_LIBRARY + thd->net.no_send_ok= nsok; +#endif + goto error; + } + sp_change_security_context(thd, sp, &save_ctx); + if (save_ctx.changed && + check_procedure_access(thd, EXECUTE_ACL, + sp->m_db.str, sp->m_name.str, 0)) + { +#ifndef EMBEDDED_LIBRARY + thd->net.no_send_ok= nsok; +#endif + sp_restore_security_context(thd, sp, &save_ctx); + goto error; + } + +#endif + select_limit= thd->variables.select_limit; + thd->variables.select_limit= HA_POS_ERROR; + + thd->row_count_func= 0; + res= sp->execute_procedure(thd, &lex->value_list); + + thd->variables.select_limit= select_limit; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + sp_restore_security_context(thd, sp, &save_ctx); +#endif + +#ifndef EMBEDDED_LIBRARY + thd->net.no_send_ok= nsok; +#endif + thd->server_status&= ~bits_to_be_cleared; + + if (!res) + send_ok(thd, (ulong) (thd->row_count_func < 0 ? 0 : + thd->row_count_func)); + else + goto error; // Substatement should already have sent error + } + break; + } + case SQLCOM_ALTER_PROCEDURE: + case SQLCOM_ALTER_FUNCTION: + { + int result; + sp_head *sp; + st_sp_chistics chistics; + + memcpy(&chistics, &lex->sp_chistics, sizeof(chistics)); + if (lex->sql_command == SQLCOM_ALTER_PROCEDURE) + sp= sp_find_procedure(thd, lex->spname); + else + sp= sp_find_function(thd, lex->spname); + mysql_reset_errors(thd); + if (! 
sp) + result= SP_KEY_NOT_FOUND; + else + { + if (check_procedure_access(thd, ALTER_PROC_ACL, sp->m_db.str, + sp->m_name.str, 0)) + goto error; + memcpy(&lex->sp_chistics, &chistics, sizeof(lex->sp_chistics)); + if (lex->sql_command == SQLCOM_ALTER_PROCEDURE) + result= sp_update_procedure(thd, lex->spname, &lex->sp_chistics); + else + result= sp_update_function(thd, lex->spname, &lex->sp_chistics); + } + switch (result) + { + case SP_OK: + send_ok(thd); + break; + case SP_KEY_NOT_FOUND: + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_qname.str); + goto error; + default: + my_error(ER_SP_CANT_ALTER, MYF(0), + SP_COM_STRING(lex), lex->spname->m_qname.str); + goto error; + } + break; + } + case SQLCOM_DROP_PROCEDURE: + case SQLCOM_DROP_FUNCTION: + { + sp_head *sp; + int result; + char *db, *name; + + if (lex->sql_command == SQLCOM_DROP_PROCEDURE) + sp= sp_find_procedure(thd, lex->spname); + else + sp= sp_find_function(thd, lex->spname); + mysql_reset_errors(thd); + if (sp) + { + db= thd->strdup(sp->m_db.str); + name= thd->strdup(sp->m_name.str); + if (check_procedure_access(thd, ALTER_PROC_ACL, db, name, 0)) + goto error; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (sp_automatic_privileges && + sp_revoke_privileges(thd, db, name)) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + ER_PROC_AUTO_REVOKE_FAIL, + ER(ER_PROC_AUTO_REVOKE_FAIL)); + } +#endif + if (lex->sql_command == SQLCOM_DROP_PROCEDURE) + result= sp_drop_procedure(thd, lex->spname); + else + result= sp_drop_function(thd, lex->spname); + } + else + { +#ifdef HAVE_DLOPEN + if (lex->sql_command == SQLCOM_DROP_FUNCTION) + { + udf_func *udf = find_udf(lex->spname->m_name.str, + lex->spname->m_name.length); + if (udf) + { + if (check_access(thd, DELETE_ACL, "mysql", 0, 1, 0)) + goto error; + if (!(res = mysql_drop_function(thd, &lex->spname->m_name))) + { + send_ok(thd); + break; + } + } + } +#endif + result= SP_KEY_NOT_FOUND; + } + res= result; + switch (result) + { + case SP_OK: + send_ok(thd); + break; + case SP_KEY_NOT_FOUND: + if (lex->drop_if_exists) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_SP_DOES_NOT_EXIST, ER(ER_SP_DOES_NOT_EXIST), + SP_COM_STRING(lex), lex->spname->m_name.str); + res= FALSE; + send_ok(thd); + break; + } + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_qname.str); + goto error; + default: + my_error(ER_SP_DROP_FAILED, MYF(0), + SP_COM_STRING(lex), lex->spname->m_qname.str); + goto error; + } + break; + } + case SQLCOM_SHOW_CREATE_PROC: + { + if (lex->spname->m_name.length > NAME_LEN) + { + my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); + goto error; + } + if (sp_show_create_procedure(thd, lex->spname) != SP_OK) + { /* We don't distinguish between errors for now */ + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_name.str); + goto error; + } + break; + } + case SQLCOM_SHOW_CREATE_FUNC: + { + if (lex->spname->m_name.length > NAME_LEN) + { + my_error(ER_TOO_LONG_IDENT, MYF(0), lex->spname->m_name.str); + goto error; + } + if (sp_show_create_function(thd, lex->spname) != SP_OK) + { /* We don't distinguish between errors for now */ + my_error(ER_SP_DOES_NOT_EXIST, MYF(0), + SP_COM_STRING(lex), lex->spname->m_name.str); + goto error; + } + break; + } + case SQLCOM_SHOW_STATUS_PROC: + { + res= sp_show_status_procedure(thd, (lex->wild ? + lex->wild->ptr() : NullS)); + break; + } + case SQLCOM_SHOW_STATUS_FUNC: + { + res= sp_show_status_function(thd, (lex->wild ? 
+ lex->wild->ptr() : NullS)); + break; + } + case SQLCOM_CREATE_VIEW: + { + res= mysql_create_view(thd, thd->lex->create_view_mode); + break; + } + case SQLCOM_DROP_VIEW: + { + if (check_table_access(thd, DROP_ACL, all_tables, 0) || + end_active_trans(thd)) + goto error; + res= mysql_drop_view(thd, first_table, thd->lex->drop_mode); + break; + } + case SQLCOM_CREATE_TRIGGER: + { + res= mysql_create_or_drop_trigger(thd, all_tables, 1); + + /* We don't care about trigger body after this point */ + delete lex->sphead; + lex->sphead= 0; + break; + } + case SQLCOM_DROP_TRIGGER: + { + res= mysql_create_or_drop_trigger(thd, all_tables, 0); + break; + } default: /* Impossible */ send_ok(thd); break; } - thd->proc_info="query end"; // QQ + thd->proc_info="query end"; if (thd->one_shot_set) { /* @@ -3582,11 +4108,31 @@ purposes internal to the MySQL server", MYF(0)); thd->one_shot_set= 0; } } - if (res < 0) - send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); + + /* + The return value for ROW_COUNT() is "implementation dependent" if + the statement is not DELETE, INSERT or UPDATE (or a CALL executing + such a statement), but -1 is what JDBC and ODBC wants. + */ + switch (lex->sql_command) { + case SQLCOM_UPDATE: + case SQLCOM_UPDATE_MULTI: + case SQLCOM_REPLACE: + case SQLCOM_INSERT: + case SQLCOM_REPLACE_SELECT: + case SQLCOM_INSERT_SELECT: + case SQLCOM_DELETE: + case SQLCOM_DELETE_MULTI: + case SQLCOM_CALL: + break; + default: + thd->row_count_func= -1; + } + + DBUG_RETURN(res || thd->net.report_error); error: - DBUG_VOID_RETURN; + DBUG_RETURN(TRUE); } @@ -3597,28 +4143,29 @@ error: SYNOPSIS check_one_table_access() thd Thread handler - privilege requested privelage - tables table list of command + privilege requested privilege + all_tables global table list of query RETURN 0 - OK 1 - access denied, error is sent to client */ -int check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *tables) +bool check_one_table_access(THD *thd, ulong privilege, TABLE_LIST *all_tables) { - if (check_access(thd, privilege, tables->db, &tables->grant.privilege,0,0)) + if (check_access(thd, privilege, all_tables->db, + &all_tables->grant.privilege, 0, 0)) return 1; /* Show only 1 table for check_grant */ - if (grant_option && check_grant(thd, privilege, tables, 0, 1, 0)) + if (grant_option && check_grant(thd, privilege, all_tables, 0, 1, 0)) return 1; /* Check rights on tables of subselects and implictly opened tables */ TABLE_LIST *subselects_tables; - if ((subselects_tables= tables->next)) + if ((subselects_tables= all_tables->next_global)) { - if ((check_table_access(thd, SELECT_ACL, subselects_tables,0))) + if ((check_table_access(thd, SELECT_ACL, subselects_tables, 0))) return 1; } return 0; @@ -3648,13 +4195,13 @@ bool check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, bool dont_check_global_grants, bool no_errors) { - DBUG_ENTER("check_access"); - DBUG_PRINT("enter",("db: '%s' want_access: %lu master_access: %lu", - db ? db : "", want_access, thd->master_access)); #ifndef NO_EMBEDDED_ACCESS_CHECKS ulong db_access; #endif ulong dummy; + DBUG_ENTER("check_access"); + DBUG_PRINT("enter",("db: %s want_access: %lu master_access: %lu", + db ? 
db : "", want_access, thd->master_access)); if (save_priv) *save_priv=0; else @@ -3662,8 +4209,10 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, if ((!db || !db[0]) && !thd->db && !dont_check_global_grants) { + DBUG_PRINT("error",("No database")); if (!no_errors) - send_error(thd,ER_NO_DB_ERROR); /* purecov: tested */ + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), + MYF(0)); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ } @@ -3688,11 +4237,14 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, if (((want_access & ~thd->master_access) & ~(DB_ACLS | EXTRA_ACL)) || ! db && dont_check_global_grants) { // We can never grant this + DBUG_PRINT("error",("No possible access")); if (!no_errors) - net_printf(thd,ER_ACCESS_DENIED_ERROR, - thd->priv_user, - thd->priv_host, - thd->password ? ER(ER_YES) : ER(ER_NO));/* purecov: tested */ + my_error(ER_ACCESS_DENIED_ERROR, MYF(0), + thd->priv_user, + thd->priv_host, + (thd->password ? + ER(ER_YES) : + ER(ER_NO))); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ } @@ -3707,18 +4259,24 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, DBUG_PRINT("info",("db_access: %lu", db_access)); /* Remove SHOW attribute and access rights we already have */ want_access &= ~(thd->master_access | EXTRA_ACL); + DBUG_PRINT("info",("db_access: %lu want_access: %lu", + db_access, want_access)); db_access= ((*save_priv=(db_access | thd->master_access)) & want_access); /* grant_option is set if there exists a single table or column grant */ if (db_access == want_access || - ((grant_option && !dont_check_global_grants) && - !(want_access & ~(db_access | TABLE_ACLS)))) + (grant_option && !dont_check_global_grants && + !(want_access & ~(db_access | TABLE_ACLS | PROC_ACLS)))) DBUG_RETURN(FALSE); /* Ok */ + + DBUG_PRINT("error",("Access denied")); if (!no_errors) - net_printf(thd,ER_DBACCESS_DENIED_ERROR, - thd->priv_user, - thd->priv_host, - db ? db : thd->db ? thd->db : "unknown"); /* purecov: tested */ + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + thd->priv_user, + thd->priv_host, + (db ? db : (thd->db ? + thd->db : + "unknown"))); /* purecov: tested */ DBUG_RETURN(TRUE); /* purecov: tested */ #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } @@ -3733,7 +4291,7 @@ check_access(THD *thd, ulong want_access, const char *db, ulong *save_priv, want_access Use should have any of these global rights WARNING - One gets access rigth if one has ANY of the rights in want_access + One gets access right if one has ANY of the rights in want_access This is useful as one in most cases only need one global right, but in some case we want to check if the user has SUPER or REPL_CLIENT_ACL rights. 
@@ -3752,8 +4310,7 @@ bool check_global_access(THD *thd, ulong want_access) if ((thd->master_access & want_access)) return 0; get_privilege_desc(command, sizeof(command), want_access); - net_printf(thd,ER_SPECIFIC_ACCESS_DENIED_ERROR, - command); + my_error(ER_SPECIFIC_ACCESS_DENIED_ERROR, MYF(0), command); return 1; #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } @@ -3771,9 +4328,9 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, uint found=0; ulong found_access=0; TABLE_LIST *org_tables=tables; - for (; tables ; tables=tables->next) + for (; tables; tables= tables->next_global) { - if (tables->derived || + if (tables->derived || tables->schema_table || (tables->table && (int)tables->table->tmp_table) || my_tz_check_n_skip_implicit_tables(&tables, thd->lex->time_zone_tables_used)) @@ -3804,6 +4361,66 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, return FALSE; } + +bool +check_procedure_access(THD *thd, ulong want_access,char *db, char *name, + bool no_errors) +{ + TABLE_LIST tables[1]; + + bzero((char *)tables, sizeof(TABLE_LIST)); + tables->db= db; + tables->real_name= tables->alias= name; + + if ((thd->master_access & want_access) == want_access && !thd->db) + tables->grant.privilege= want_access; + else if (check_access(thd,want_access,db,&tables->grant.privilege, + 0, no_errors)) + return TRUE; + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (grant_option) + return check_grant_procedure(thd, want_access, tables, no_errors); +#endif + + return FALSE; +} + +/* + Check if the given table has any of the asked privileges + + SYNOPSIS + check_some_access() + thd Thread handler + want_access Bitmap of possible privileges to check for + + RETURN + 0 ok + 1 error +*/ + + +bool check_some_access(THD *thd, ulong want_access, TABLE_LIST *table) +{ + ulong access; + DBUG_ENTER("check_some_access"); + + /* This loop will work as long as we have less than 32 privileges */ + for (access= 1; access < want_access ; access<<= 1) + { + if (access & want_access) + { + if (!check_access(thd, access, table->db, + &table->grant.privilege, 0, 1) && + !grant_option || !check_grant(thd, access, table, 0, 1, 1)) + DBUG_RETURN(0); + } + } + DBUG_PRINT("exit",("no matching access rights")); + DBUG_RETURN(1); +} + + bool check_merge_table_access(THD *thd, char *db, TABLE_LIST *table_list) { @@ -3812,7 +4429,7 @@ bool check_merge_table_access(THD *thd, char *db, { /* Check that all tables use the current database */ TABLE_LIST *tmp; - for (tmp=table_list; tmp ; tmp=tmp->next) + for (tmp= table_list; tmp; tmp= tmp->next_local) { if (!tmp->db || !tmp->db[0]) tmp->db=db; @@ -3826,13 +4443,14 @@ bool check_merge_table_access(THD *thd, char *db, static bool check_db_used(THD *thd,TABLE_LIST *tables) { - for (; tables ; tables=tables->next) + for (; tables; tables= tables->next_global) { if (!tables->db) { if (!(tables->db=thd->db)) { - send_error(thd,ER_NO_DB_ERROR); /* purecov: tested */ + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), + MYF(0)); /* purecov: tested */ return TRUE; /* purecov: tested */ } } @@ -3840,6 +4458,7 @@ static bool check_db_used(THD *thd,TABLE_LIST *tables) return FALSE; } + /**************************************************************************** Check stack size; Send error if there isn't enough stack to continue ****************************************************************************/ @@ -3904,6 +4523,7 @@ bool my_yyoverflow(short **yyss, YYSTYPE **yyvs, ulong *yystacksize) return 0; } + 
/**************************************************************************** Initialize global thd variables needed for query ****************************************************************************/ @@ -3936,13 +4556,13 @@ void mysql_reset_thd_for_next_command(THD *thd) DBUG_ENTER("mysql_reset_thd_for_next_command"); thd->free_list= 0; thd->select_number= 1; - thd->total_warn_count= 0; // Warnings for this query + thd->total_warn_count=0; // Warnings for this query thd->last_insert_id_used= thd->query_start_used= thd->insert_id_used=0; thd->sent_row_count= thd->examined_row_count= 0; thd->is_fatal_error= thd->rand_used= thd->time_zone_used= 0; thd->server_status&= ~ (SERVER_MORE_RESULTS_EXISTS | - SERVER_QUERY_NO_INDEX_USED | - SERVER_QUERY_NO_GOOD_INDEX_USED); + SERVER_QUERY_NO_INDEX_USED | + SERVER_QUERY_NO_GOOD_INDEX_USED); thd->tmp_table_used= 0; if (opt_bin_log) reset_dynamic(&thd->user_var_events); @@ -3957,6 +4577,8 @@ mysql_init_select(LEX *lex) SELECT_LEX *select_lex= lex->current_select; select_lex->init_select(); select_lex->select_limit= HA_POS_ERROR; + lex->orig_sql_command= SQLCOM_END; + lex->wild= 0; if (select_lex == &lex->select_lex) { DBUG_ASSERT(lex->result == 0); @@ -3974,6 +4596,7 @@ mysql_new_select(LEX *lex, bool move_down) select_lex->select_number= ++lex->thd->select_number; select_lex->init_query(); select_lex->init_select(); + select_lex->parent_lex= lex; if (move_down) { lex->subqueries= TRUE; @@ -3990,10 +4613,15 @@ mysql_new_select(LEX *lex, bool move_down) unit->link_prev= 0; unit->return_to= lex->current_select; select_lex->include_down(unit); - // TODO: assign resolve_mode for fake subquery after merging with new tree + /* TODO: assign resolve_mode for fake subquery after merging with new tree */ } else { + if (lex->current_select->order_list.first && !lex->current_select->braces) + { + my_error(ER_WRONG_USAGE, MYF(0), "UNION", "ORDER BY"); + return 1; + } select_lex->include_neighbour(lex->current_select); SELECT_LEX_UNIT *unit= select_lex->master_unit(); SELECT_LEX *fake= unit->fake_select_lex; @@ -4061,6 +4689,8 @@ void mysql_init_multi_delete(LEX *lex) lex->select_lex.select_limit= lex->unit.select_limit_cnt= HA_POS_ERROR; lex->select_lex.table_list.save_and_clear(&lex->auxilliary_table_list); + lex->query_tables= 0; + lex->query_tables_last= &lex->query_tables; } @@ -4089,22 +4719,41 @@ void mysql_parse(THD *thd, char *inBuf, uint length) #endif { if (thd->net.report_error) - send_error(thd, 0, NullS); + { + if (thd->lex->sphead) + { + if (lex != thd->lex) + thd->lex->sphead->restore_lex(thd); + delete thd->lex->sphead; + thd->lex->sphead= NULL; + } + } else { mysql_execute_command(thd); query_cache_end_of_result(thd); } } + lex->unit.cleanup(); } else { DBUG_PRINT("info",("Command aborted. Fatal_error: %d", thd->is_fatal_error)); query_cache_abort(&thd->net); + lex->unit.cleanup(); + if (thd->lex->sphead) + { + /* Clean up after failed stored procedure/function */ + if (lex != thd->lex) + thd->lex->sphead->restore_lex(thd); + delete thd->lex->sphead; + thd->lex->sphead= NULL; + } } thd->proc_info="freeing items"; thd->end_statement(); + thd->cleanup_after_query(); DBUG_ASSERT(thd->change_list.is_empty()); } DBUG_VOID_RETURN; @@ -4125,17 +4774,20 @@ bool mysql_test_parse_for_slave(THD *thd, char *inBuf, uint length) { LEX *lex= thd->lex; bool error= 0; + DBUG_ENTER("mysql_test_parse_for_slave"); mysql_init_query(thd, (uchar*) inBuf, length); if (!yyparse((void*) thd) && ! 
thd->is_fatal_error && all_tables_not_ok(thd,(TABLE_LIST*) lex->select_lex.table_list.first)) - error= 1; /* Ignore question */ + error= 1; /* Ignore question */ thd->end_statement(); - return error; + thd->cleanup_after_query(); + DBUG_RETURN(error); } #endif + /***************************************************************************** ** Store field definition for create ** Return 0 if ok @@ -4153,12 +4805,13 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, register create_field *new_field; LEX *lex= thd->lex; uint allowed_type_modifier=0; - char warn_buff[MYSQL_ERRMSG_SIZE]; + uint sign_len; + ulong max_field_charlength= MAX_FIELD_CHARLENGTH; DBUG_ENTER("add_field_to_list"); if (strlen(field_name) > NAME_LEN) { - net_printf(thd, ER_TOO_LONG_IDENT, field_name); /* purecov: inspected */ + my_error(ER_TOO_LONG_IDENT, MYF(0), field_name); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } if (type_modifier & PRI_KEY_FLAG) @@ -4189,7 +4842,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, !(((Item_func*)default_value)->functype() == Item_func::NOW_FUNC && type == FIELD_TYPE_TIMESTAMP)) { - net_printf(thd, ER_INVALID_DEFAULT, field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name); DBUG_RETURN(1); } else if (default_value->type() == Item::NULL_ITEM) @@ -4198,20 +4851,20 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, if ((type_modifier & (NOT_NULL_FLAG | AUTO_INCREMENT_FLAG)) == NOT_NULL_FLAG) { - net_printf(thd,ER_INVALID_DEFAULT,field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name); DBUG_RETURN(1); } } else if (type_modifier & AUTO_INCREMENT_FLAG) { - net_printf(thd, ER_INVALID_DEFAULT, field_name); + my_error(ER_INVALID_DEFAULT, MYF(0), field_name); DBUG_RETURN(1); } } if (on_update_value && type != FIELD_TYPE_TIMESTAMP) { - net_printf(thd, ER_INVALID_ON_UPDATE, field_name); + my_error(ER_INVALID_ON_UPDATE, MYF(0), field_name); DBUG_RETURN(1); } @@ -4229,7 +4882,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, new_field->length=0; new_field->change=change; new_field->interval=0; - new_field->pack_length=0; + new_field->pack_length= new_field->key_length= 0; new_field->charset=cs; new_field->geom_type= (Field::geometry_type) uint_geom_type; @@ -4244,9 +4897,19 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, new_field->comment.str= (char*) comment->str; new_field->comment.length=comment->length; } + /* + Set flag if this field doesn't have a default value + Enum values has always the first value as a default (set in + make_empty_rec(). + */ + if (!default_value && !(type_modifier & AUTO_INCREMENT_FLAG) && + (type_modifier & NOT_NULL_FLAG) && type != FIELD_TYPE_TIMESTAMP && + type != FIELD_TYPE_ENUM) + new_field->flags|= NO_DEFAULT_VALUE_FLAG; + if (length && !(new_field->length= (uint) atoi(length))) length=0; /* purecov: inspected */ - uint sign_len=type_modifier & UNSIGNED_FLAG ? 0 : 1; + sign_len=type_modifier & UNSIGNED_FLAG ? 
0 : 1; if (new_field->length && new_field->decimals && new_field->length < new_field->decimals+1 && @@ -4291,43 +4954,28 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, new_field->length++; } break; - case FIELD_TYPE_STRING: - case FIELD_TYPE_VAR_STRING: - if (new_field->length <= MAX_FIELD_CHARLENGTH || default_value) - break; - /* Convert long CHAR() and VARCHAR columns to TEXT or BLOB */ - new_field->sql_type= FIELD_TYPE_BLOB; - sprintf(warn_buff, ER(ER_AUTO_CONVERT), field_name, "CHAR", - (cs == &my_charset_bin) ? "BLOB" : "TEXT"); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_AUTO_CONVERT, - warn_buff); - /* fall through */ + case MYSQL_TYPE_VARCHAR: + /* + Long VARCHAR's are automaticly converted to blobs in mysql_prepare_table + if they don't have a default value + */ + max_field_charlength= MAX_FIELD_VARCHARLENGTH; + break; + case MYSQL_TYPE_STRING: + break; case FIELD_TYPE_BLOB: case FIELD_TYPE_TINY_BLOB: case FIELD_TYPE_LONG_BLOB: case FIELD_TYPE_MEDIUM_BLOB: case FIELD_TYPE_GEOMETRY: - if (new_field->length) - { - /* The user has given a length to the blob column */ - if (new_field->length < 256) - type= FIELD_TYPE_TINY_BLOB; - if (new_field->length < 65536) - type= FIELD_TYPE_BLOB; - else if (new_field->length < 256L*256L*256L) - type= FIELD_TYPE_MEDIUM_BLOB; - else - type= FIELD_TYPE_LONG_BLOB; - new_field->length= 0; - } - new_field->sql_type= type; if (default_value) // Allow empty as default value { String str,*res; res=default_value->val_str(&str); if (res->length()) { - net_printf(thd,ER_BLOB_CANT_HAVE_DEFAULT,field_name); /* purecov: inspected */ + my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0), + field_name); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } new_field->def=0; @@ -4347,7 +4995,7 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, uint tmp_length=new_field->length; if (tmp_length > PRECISION_FOR_DOUBLE) { - net_printf(thd,ER_WRONG_FIELD_SPEC,field_name); + my_error(ER_WRONG_FIELD_SPEC, MYF(0), field_name); DBUG_RETURN(1); } else if (tmp_length > PRECISION_FOR_FLOAT) @@ -4420,11 +5068,11 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, If we have TIMESTAMP NULL column without explicit DEFAULT value we treat it as having DEFAULT NULL attribute. */ - new_field->unireg_check= on_update_value ? - Field::TIMESTAMP_UN_FIELD : - (new_field->flags & NOT_NULL_FLAG ? - Field::TIMESTAMP_OLD_FIELD: - Field::NONE); + new_field->unireg_check= (on_update_value ? + Field::TIMESTAMP_UN_FIELD : + (new_field->flags & NOT_NULL_FLAG ? 
+ Field::TIMESTAMP_OLD_FIELD: + Field::NONE)); } break; case FIELD_TYPE_DATE: // Old date type @@ -4444,8 +5092,8 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, { if (interval_list->elements > sizeof(longlong)*8) { - net_printf(thd,ER_TOO_BIG_SET,field_name); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ + my_error(ER_TOO_BIG_SET, MYF(0), field_name); /* purecov: inspected */ + DBUG_RETURN(1); /* purecov: inspected */ } new_field->pack_length= (interval_list->elements + 7) / 8; if (new_field->pack_length > 4) @@ -4461,8 +5109,8 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, when we know the character set of the column */ new_field->length= 1; + break; } - break; case FIELD_TYPE_ENUM: { // Should be safe @@ -4473,37 +5121,50 @@ bool add_field_to_list(THD *thd, char *field_name, enum_field_types type, while ((tmp= it++)) new_field->interval_list.push_back(tmp); new_field->length= 1; // See comment for FIELD_TYPE_SET above. - } + break; + } + case MYSQL_TYPE_VAR_STRING: + DBUG_ASSERT(0); // Impossible break; + case MYSQL_TYPE_BIT: + { + if (!length) + new_field->length= 1; + if (new_field->length > MAX_BIT_FIELD_LENGTH) + { + my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), field_name, + MAX_BIT_FIELD_LENGTH); + DBUG_RETURN(1); + } + new_field->pack_length= (new_field->length + 7) / 8; + break; + } } - if ((new_field->length > MAX_FIELD_CHARLENGTH && type != FIELD_TYPE_SET && - type != FIELD_TYPE_ENUM) || - (!new_field->length && !(new_field->flags & BLOB_FLAG) && - type != FIELD_TYPE_STRING && - type != FIELD_TYPE_VAR_STRING && type != FIELD_TYPE_GEOMETRY)) + if (!(new_field->flags & BLOB_FLAG) && + ((new_field->length > max_field_charlength && type != FIELD_TYPE_SET && + type != FIELD_TYPE_ENUM && + (type != MYSQL_TYPE_VARCHAR || default_value)) || + (!new_field->length && + type != MYSQL_TYPE_STRING && + type != MYSQL_TYPE_VARCHAR && type != FIELD_TYPE_GEOMETRY))) { - net_printf(thd,ER_TOO_BIG_FIELDLENGTH,field_name, - MAX_FIELD_CHARLENGTH); /* purecov: inspected */ + my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), + field_name, max_field_charlength); /* purecov: inspected */ DBUG_RETURN(1); /* purecov: inspected */ } type_modifier&= AUTO_INCREMENT_FLAG; if ((~allowed_type_modifier) & type_modifier) { - net_printf(thd,ER_WRONG_FIELD_SPEC,field_name); + my_error(ER_WRONG_FIELD_SPEC, MYF(0), field_name); DBUG_RETURN(1); } - if (!new_field->pack_length) - new_field->pack_length=calc_pack_length(new_field->sql_type == - FIELD_TYPE_VAR_STRING ? - FIELD_TYPE_STRING : - new_field->sql_type, - new_field->length); lex->create_list.push_back(new_field); lex->last_field=new_field; DBUG_RETURN(0); } + /* Store position for column in ALTER TABLE .. 
ADD column */ void store_position_for_column(const char *name) @@ -4542,7 +5203,6 @@ static void remove_escape(char *name) { #ifdef USE_MB int l; -/* if ((l = ismbchar(name, name+MBMAXLEN))) { Wei He: I think it's wrong */ if (use_mb(system_charset_info) && (l = my_ismbchar(system_charset_info, name, strend))) { @@ -4575,6 +5235,7 @@ bool add_to_list(THD *thd, SQL_LIST &list,Item *item,bool asc) order->asc = asc; order->free_me=0; order->used=0; + order->counter_used= 0; list.link_in_list((byte*) order,(byte**) &order->next); DBUG_RETURN(0); } @@ -4610,6 +5271,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, { register TABLE_LIST *ptr; char *alias_str; + LEX *lex= thd->lex; DBUG_ENTER("add_table_to_list"); if (!table) @@ -4618,7 +5280,7 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, if (check_table_name(table->table.str,table->table.length) || table->db.str && check_db_name(table->db.str)) { - net_printf(thd, ER_WRONG_TABLE_NAME, table->table.str); + my_error(ER_WRONG_TABLE_NAME, MYF(0), table->table.str); DBUG_RETURN(0); } @@ -4626,7 +5288,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, { if (table->sel) { - net_printf(thd,ER_DERIVED_MUST_HAVE_ALIAS); + my_message(ER_DERIVED_MUST_HAVE_ALIAS, + ER(ER_DERIVED_MUST_HAVE_ALIAS), MYF(0)); DBUG_RETURN(0); } if (!(alias_str=thd->memdup(alias_str,table->table.length+1))) @@ -4663,6 +5326,21 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->force_index= test(table_options & TL_OPTION_FORCE_INDEX); ptr->ignore_leaves= test(table_options & TL_OPTION_IGNORE_LEAVES); ptr->derived= table->sel; + if (!my_strcasecmp(system_charset_info, ptr->db, + information_schema_name.str)) + { + ST_SCHEMA_TABLE *schema_table= find_schema_table(thd, ptr->real_name); + if (!schema_table || + (schema_table->hidden && + lex->orig_sql_command == SQLCOM_END)) // not a 'show' command + { + my_error(ER_UNKNOWN_TABLE, MYF(0), + ptr->real_name, information_schema_name.str); + DBUG_RETURN(0); + } + ptr->schema_table= schema_table; + } + ptr->select_lex= lex->current_select; ptr->cacheable_table= 1; if (use_index_arg) ptr->use_index=(List<String> *) thd->memdup((gptr) use_index_arg, @@ -4676,22 +5354,256 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, { for (TABLE_LIST *tables=(TABLE_LIST*) table_list.first ; tables ; - tables=tables->next) + tables=tables->next_local) { if (!my_strcasecmp(table_alias_charset, alias_str, tables->alias) && !strcmp(ptr->db, tables->db)) { - net_printf(thd,ER_NONUNIQ_TABLE,alias_str); /* purecov: tested */ + my_error(ER_NONUNIQ_TABLE, MYF(0), alias_str); /* purecov: tested */ DBUG_RETURN(0); /* purecov: tested */ } } } - table_list.link_in_list((byte*) ptr, (byte**) &ptr->next); + /* Link table in local list (list for current select) */ + table_list.link_in_list((byte*) ptr, (byte**) &ptr->next_local); + /* Link table in global list (all used tables) */ + lex->add_to_query_tables(ptr); DBUG_RETURN(ptr); } /* + Initialize a new table list for a nested join + + SYNOPSIS + init_table_list() + thd current thread + + DESCRIPTION + The function initializes a structure of the TABLE_LIST type + for a nested join. It sets up its nested join list as empty. + The created structure is added to the front of the current + join list in the st_select_lex object. Then the function + changes the current nest level for joins to refer to the newly + created empty list after having saved the info on the old level + in the initialized structure. 
+ + RETURN VALUE + 0, if success + 1, otherwise +*/ + +bool st_select_lex::init_nested_join(THD *thd) +{ + TABLE_LIST *ptr; + NESTED_JOIN *nested_join; + DBUG_ENTER("init_nested_join"); + + if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))) || + !(nested_join= ptr->nested_join= + (NESTED_JOIN *) thd->calloc(sizeof(NESTED_JOIN)))) + DBUG_RETURN(1); + join_list->push_front(ptr); + ptr->embedding= embedding; + ptr->join_list= join_list; + embedding= ptr; + join_list= &nested_join->join_list; + join_list->empty(); + DBUG_RETURN(0); +} + + +/* + End a nested join table list + + SYNOPSIS + end_nested_join() + thd current thread + + DESCRIPTION + The function returns to the previous join nest level. + If the current level contains only one member, the function + moves it one level up, eliminating the nest. + + RETURN VALUE + Pointer to TABLE_LIST element added to the total table list, if success + 0, otherwise +*/ + +TABLE_LIST *st_select_lex::end_nested_join(THD *thd) +{ + TABLE_LIST *ptr; + DBUG_ENTER("end_nested_join"); + ptr= embedding; + join_list= ptr->join_list; + embedding= ptr->embedding; + NESTED_JOIN *nested_join= ptr->nested_join; + if (nested_join->join_list.elements == 1) + { + TABLE_LIST *embedded= nested_join->join_list.head(); + join_list->pop(); + embedded->join_list= join_list; + embedded->embedding= embedding; + join_list->push_front(embedded); + ptr= embedded; + } + DBUG_RETURN(ptr); +} + + +/* + Nest last join operation + + SYNOPSIS + nest_last_join() + thd current thread + + DESCRIPTION + The function nest last join operation as if it was enclosed in braces. + + RETURN VALUE + Pointer to TABLE_LIST element created for the new nested join, if success + 0, otherwise +*/ + +TABLE_LIST *st_select_lex::nest_last_join(THD *thd) +{ + TABLE_LIST *ptr; + NESTED_JOIN *nested_join; + DBUG_ENTER("nest_last_join"); + + if (!(ptr = (TABLE_LIST *) thd->calloc(sizeof(TABLE_LIST))) || + !(nested_join= ptr->nested_join= + (NESTED_JOIN *) thd->calloc(sizeof(NESTED_JOIN)))) + DBUG_RETURN(0); + ptr->embedding= embedding; + ptr->join_list= join_list; + List<TABLE_LIST> *embedded_list= &nested_join->join_list; + embedded_list->empty(); + for (int i=0; i < 2; i++) + { + TABLE_LIST *table= join_list->pop(); + table->join_list= embedded_list; + table->embedding= ptr; + embedded_list->push_back(table); + } + join_list->push_front(ptr); + nested_join->used_tables= nested_join->not_null_tables= (table_map) 0; + DBUG_RETURN(ptr); +} + + +/* + Save names for a join with using clause + + SYNOPSIS + save_names_for_using_list + tab1 left table in join + tab2 right table in join + + DESCRIPTION + The function saves the full names of the tables in st_select_lex + to be able to build later an on expression to replace the using clause. + + RETURN VALUE + None +*/ + +void st_select_lex::save_names_for_using_list(TABLE_LIST *tab1, + TABLE_LIST *tab2) +{ + while (tab1->nested_join) + { + tab1= tab1->nested_join->join_list.head(); + } + db1= tab1->db; + table1= tab1->alias; + while (tab2->nested_join) + { + TABLE_LIST *next; + List_iterator_fast<TABLE_LIST> it(tab2->nested_join->join_list); + tab2= it++; + while ((next= it++)) + tab2= next; + } + db2= tab2->db; + table2= tab2->alias; +} + + +/* + Add a table to the current join list + + SYNOPSIS + add_joined_table() + table the table to add + + DESCRIPTION + The function puts a table in front of the current join list + of st_select_lex object. + Thus, joined tables are put into this list in the reverse order + (the most outer join operation follows first). 
+ + RETURN VALUE + None +*/ + +void st_select_lex::add_joined_table(TABLE_LIST *table) +{ + DBUG_ENTER("add_joined_table"); + join_list->push_front(table); + table->join_list= join_list; + table->embedding= embedding; + DBUG_VOID_RETURN; +} + + +/* + Convert a right join into equivalent left join + + SYNOPSIS + convert_right_join() + thd current thread + + DESCRIPTION + The function takes the current join list t[0],t[1] ... and + effectively converts it into the list t[1],t[0] ... + Although the outer_join flag for the new nested table contains + JOIN_TYPE_RIGHT, it will be handled as the inner table of a left join + operation. + + EXAMPLES + SELECT * FROM t1 RIGHT JOIN t2 ON on_expr => + SELECT * FROM t2 LEFT JOIN t1 ON on_expr + + SELECT * FROM t1,t2 RIGHT JOIN t3 ON on_expr => + SELECT * FROM t1,t3 LEFT JOIN t2 ON on_expr + + SELECT * FROM t1,t2 RIGHT JOIN (t3,t4) ON on_expr => + SELECT * FROM t1,(t3,t4) LEFT JOIN t2 ON on_expr + + SELECT * FROM t1 LEFT JOIN t2 ON on_expr1 RIGHT JOIN t3 ON on_expr2 => + SELECT * FROM t3 LEFT JOIN (t1 LEFT JOIN t2 ON on_expr2) ON on_expr1 + + RETURN + Pointer to the table representing the inner table, if success + 0, otherwise +*/ + +TABLE_LIST *st_select_lex::convert_right_join() +{ + TABLE_LIST *tab2= join_list->pop(); + TABLE_LIST *tab1= join_list->pop(); + DBUG_ENTER("convert_right_join"); + + join_list->push_front(tab2); + join_list->push_front(tab1); + tab1->outer_join|= JOIN_TYPE_RIGHT; + + DBUG_RETURN(tab1); +} + +/* Set lock for all tables in current select level SYNOPSIS: @@ -4711,9 +5623,9 @@ void st_select_lex::set_lock_for_tables(thr_lock_type lock_type) DBUG_PRINT("enter", ("lock_type: %d for_update: %d", lock_type, for_update)); - for (TABLE_LIST *tables= (TABLE_LIST*) table_list.first ; - tables ; - tables=tables->next) + for (TABLE_LIST *tables= (TABLE_LIST*) table_list.first; + tables; + tables= tables->next_local) { tables->lock_type= lock_type; tables->updating= for_update; @@ -4730,7 +5642,7 @@ void add_join_on(TABLE_LIST *b,Item *expr) b->on_expr=expr; else { - // This only happens if you have both a right and left join + /* This only happens if you have both a right and left join */ b->on_expr=new Item_cond_and(b->on_expr,expr); } b->on_expr->top_level_item(); @@ -4745,7 +5657,7 @@ void add_join_on(TABLE_LIST *b,Item *expr) add_join_natural() a Table to do normal join with b Do normal join with this table - + IMPLEMENTATION This function just marks that table b should be joined with a. 
The function setup_cond() will create in b->on_expr a list @@ -4792,8 +5704,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, { acl_reload(thd); grant_reload(thd); - if (mqh_used) - reset_mqh(thd,(LEX_USER *) NULL,TRUE); + reset_mqh((LEX_USER *)NULL, TRUE); } #endif if (options & REFRESH_LOG) @@ -4810,7 +5721,6 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, */ tmp_write_to_binlog= 0; mysql_log.new_file(1); - mysql_update_log.new_file(1); mysql_bin_log.new_file(1); mysql_slow_log.new_file(1); #ifdef HAVE_REPLICATION @@ -4833,7 +5743,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, if (options & REFRESH_QUERY_CACHE_FREE) { query_cache.pack(); // FLUSH QUERY CACHE - options &= ~REFRESH_QUERY_CACHE; //don't flush all cache, just free memory + options &= ~REFRESH_QUERY_CACHE; // Don't flush cache, just free memory } if (options & (REFRESH_TABLES | REFRESH_QUERY_CACHE)) { @@ -4857,7 +5767,12 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, return 1; result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, tables); - make_global_read_lock_block_commit(thd); + if (make_global_read_lock_block_commit(thd)) + { + /* Don't leave things in a half-locked state */ + unlock_global_read_lock(thd); + return 1; + } } else result=close_cached_tables(thd,(options & REFRESH_FAST) ? 0 : 1, tables); @@ -4865,7 +5780,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, } if (options & REFRESH_HOSTS) hostname_cache_refresh(); - if (options & REFRESH_STATUS) + if (thd && (options & REFRESH_STATUS)) refresh_status(); if (options & REFRESH_THREADS) flush_thread_cache(); @@ -4895,7 +5810,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, } #endif if (options & REFRESH_USER_RESOURCES) - reset_mqh(thd,(LEX_USER *) NULL); + reset_mqh((LEX_USER *) NULL); if (write_to_binlog) *write_to_binlog= tmp_write_to_binlog; return result; @@ -4913,7 +5828,7 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, This is written such that we have a short lock on LOCK_thread_count */ -void kill_one_thread(THD *thd, ulong id) +void kill_one_thread(THD *thd, ulong id, bool only_kill_query) { THD *tmp; uint error=ER_NO_SUCH_THREAD; @@ -4933,7 +5848,7 @@ void kill_one_thread(THD *thd, ulong id) if ((thd->master_access & SUPER_ACL) || !strcmp(thd->user,tmp->user)) { - tmp->awake(1 /*prepare to die*/); + tmp->awake(only_kill_query ? 
THD::KILL_QUERY : THD::KILL_CONNECTION); error=0; } else @@ -4944,7 +5859,7 @@ void kill_one_thread(THD *thd, ulong id) if (!error) send_ok(thd); else - net_printf(thd,error,id); + my_error(error, MYF(0), id); } /* Clear most status variables */ @@ -4967,6 +5882,13 @@ static void refresh_status(void) (char*) &dflt_key_cache_var)); *(ulong*) value= 0; } + else if (ptr->type == SHOW_LONG_STATUS) + { + THD *thd= current_thd; + /* We must update the global status before cleaning up the thread */ + add_to_status(&global_status_var, &thd->status_var); + bzero((char*) &thd->status_var, sizeof(thd->status_var)); + } } pthread_mutex_unlock(&LOCK_status); } @@ -5013,12 +5935,13 @@ static bool append_file_to_dir(THD *thd, const char **filename_ptr, bool check_simple_select() { THD *thd= current_thd; - if (thd->lex->current_select != &thd->lex->select_lex) + LEX *lex= thd->lex; + if (lex->current_select != &lex->select_lex) { char command[80]; - strmake(command, thd->lex->yylval->symbol.str, - min(thd->lex->yylval->symbol.length, sizeof(command)-1)); - net_printf(thd, ER_CANT_USE_OPTION_HERE, command); + strmake(command, lex->yylval->symbol.str, + min(lex->yylval->symbol.length, sizeof(command)-1)); + my_error(ER_CANT_USE_OPTION_HERE, MYF(0), command); return 1; } return 0; @@ -5106,12 +6029,11 @@ Item * all_any_subquery_creator(Item *left_expr, One should normally create all indexes with CREATE TABLE or ALTER TABLE. */ -int mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys) +bool mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys) { List<create_field> fields; ALTER_INFO alter_info; alter_info.flags= ALTER_ADD_INDEX; - alter_info.is_simple= 0; HA_CREATE_INFO create_info; DBUG_ENTER("mysql_create_index"); bzero((char*) &create_info,sizeof(create_info)); @@ -5124,7 +6046,7 @@ int mysql_create_index(THD *thd, TABLE_LIST *table_list, List<Key> &keys) } -int mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info) +bool mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info) { List<create_field> fields; List<Key> keys; @@ -5135,7 +6057,6 @@ int mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info) create_info.default_table_charset= thd->variables.collation_database; alter_info->clear(); alter_info->flags= ALTER_DROP_INDEX; - alter_info->is_simple= 0; DBUG_RETURN(mysql_alter_table(thd,table_list->db,table_list->real_name, &create_info, table_list, fields, keys, 0, (ORDER*)0, @@ -5149,33 +6070,31 @@ int mysql_drop_index(THD *thd, TABLE_LIST *table_list, ALTER_INFO *alter_info) SYNOPSIS multi_update_precheck() thd Thread handler - tables Global table list + tables Global/local table list (have to be the same) RETURN VALUE - 0 OK - 1 Error (message is sent to user) - -1 Error (message is not sent to user) + FALSE OK + TRUE Error */ -int multi_update_precheck(THD *thd, TABLE_LIST *tables) +bool multi_update_precheck(THD *thd, TABLE_LIST *tables) { - DBUG_ENTER("multi_update_precheck"); const char *msg= 0; TABLE_LIST *table; LEX *lex= thd->lex; SELECT_LEX *select_lex= &lex->select_lex; - TABLE_LIST *update_list= (TABLE_LIST*)select_lex->table_list.first; + DBUG_ENTER("multi_update_precheck"); if (select_lex->item_list.elements != lex->value_list.elements) { - my_error(ER_WRONG_VALUE_COUNT, MYF(0)); - DBUG_RETURN(-1); + my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); + DBUG_RETURN(TRUE); } /* Ensure that we have UPDATE or SELECT privilege for each table The exact privilege is checked in 
mysql_multi_update() */ - for (table= update_list; table; table= table->next) + for (table= tables; table; table= table->next_local) { if (table->derived) table->grant.privilege= SELECT_ACL; @@ -5183,17 +6102,12 @@ int multi_update_precheck(THD *thd, TABLE_LIST *tables) &table->grant.privilege, 0, 1) || grant_option && check_grant(thd, UPDATE_ACL, table, 0, 1, 1)) && - (check_access(thd, SELECT_ACL, table->db, - &table->grant.privilege, 0, 0) || - grant_option && check_grant(thd, SELECT_ACL, table, 0, 1, 0))) - DBUG_RETURN(1); + (check_access(thd, SELECT_ACL, table->db, + &table->grant.privilege, 0, 0) || + grant_option && check_grant(thd, SELECT_ACL, table, 0, 1, 0))) + DBUG_RETURN(TRUE); - /* - We assign following flag only to copy of table, because it will - be checked only if query contains subqueries i.e. only if copy exists - */ - if (table->table_list) - table->table_list->table_in_update_from_clause= 1; + table->table_in_first_from_clause= 1; } /* Is there tables of subqueries? @@ -5201,27 +6115,16 @@ int multi_update_precheck(THD *thd, TABLE_LIST *tables) if (&lex->select_lex != lex->all_selects_list) { DBUG_PRINT("info",("Checking sub query list")); - for (table= tables; table; table= table->next) + for (table= tables; table; table= table->next_global) { - if (my_tz_check_n_skip_implicit_tables(&table, - lex->time_zone_tables_used)) - continue; - else if (table->table_in_update_from_clause) - { - /* - If we check table by local TABLE_LIST copy then we should copy - grants to global table list, because it will be used for table - opening. - */ - if (table->table_list) - table->grant= table->table_list->grant; - } - else if (!table->derived) + if (!my_tz_check_n_skip_implicit_tables(&table, + lex->time_zone_tables_used) && + !table->table_in_first_from_clause) { if (check_access(thd, SELECT_ACL, table->db, &table->grant.privilege, 0, 0) || grant_option && check_grant(thd, SELECT_ACL, table, 0, 1, 0)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } } @@ -5234,9 +6137,9 @@ int multi_update_precheck(THD *thd, TABLE_LIST *tables) if (msg) { my_error(ER_WRONG_USAGE, MYF(0), "UPDATE", msg); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } /* @@ -5245,22 +6148,21 @@ int multi_update_precheck(THD *thd, TABLE_LIST *tables) SYNOPSIS multi_delete_precheck() thd Thread handler - tables Global table list + tables Global/local table list table_count Pointer to table counter RETURN VALUE - 0 OK - 1 error (message is sent to user) - -1 error (message is not sent to user) + FALSE OK + TRUE error */ -int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count) + +bool multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count) { - DBUG_ENTER("multi_delete_precheck"); SELECT_LEX *select_lex= &thd->lex->select_lex; TABLE_LIST *aux_tables= (TABLE_LIST *)thd->lex->auxilliary_table_list.first; - TABLE_LIST *delete_tables= (TABLE_LIST *)select_lex->table_list.first; TABLE_LIST *target_tbl; + DBUG_ENTER("multi_delete_precheck"); *table_count= 0; @@ -5269,18 +6171,19 @@ int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count) if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) || check_table_access(thd,SELECT_ACL, tables,0) || check_table_access(thd,DELETE_ACL, aux_tables,0)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where) { - my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, MYF(0)); - DBUG_RETURN(-1); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + 
ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); + DBUG_RETURN(TRUE); } - for (target_tbl= aux_tables; target_tbl; target_tbl= target_tbl->next) + for (target_tbl= aux_tables; target_tbl; target_tbl= target_tbl->next_local) { (*table_count)++; /* All tables in aux_tables must be found in FROM PART */ TABLE_LIST *walk; - for (walk= delete_tables; walk; walk= walk->next) + for (walk= tables; walk; walk= walk->next_local) { if (!my_strcasecmp(table_alias_charset, target_tbl->alias, walk->alias) && @@ -5289,20 +6192,14 @@ int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count) } if (!walk) { - my_error(ER_UNKNOWN_TABLE, MYF(0), target_tbl->real_name, - "MULTI DELETE"); - DBUG_RETURN(-1); - } - if (walk->derived) - { - my_error(ER_NON_UPDATABLE_TABLE, MYF(0), target_tbl->real_name, - "DELETE"); - DBUG_RETURN(-1); + my_error(ER_UNKNOWN_TABLE, MYF(0), + target_tbl->real_name, "MULTI DELETE"); + DBUG_RETURN(TRUE); } walk->lock_type= target_tbl->lock_type; - target_tbl->table_list= walk; // Remember corresponding table + target_tbl->correspondent_table= walk; // Remember corresponding table } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -5315,21 +6212,20 @@ int multi_delete_precheck(THD *thd, TABLE_LIST *tables, uint *table_count) tables Global table list RETURN VALUE - 0 OK - 1 Error (message is sent to user) - -1 Error (message is not sent to user) + FALSE OK + TRUE Error */ -int update_precheck(THD *thd, TABLE_LIST *tables) +bool update_precheck(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("update_precheck"); if (thd->lex->select_lex.item_list.elements != thd->lex->value_list.elements) { - my_error(ER_WRONG_VALUE_COUNT, MYF(0)); - DBUG_RETURN(-1); + my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); + DBUG_RETURN(TRUE); } - DBUG_RETURN((check_db_used(thd, tables) || - check_one_table_access(thd, UPDATE_ACL, tables)) ? 1 : 0); + DBUG_RETURN(check_db_used(thd, tables) || + check_one_table_access(thd, UPDATE_ACL, tables)); } @@ -5342,19 +6238,18 @@ int update_precheck(THD *thd, TABLE_LIST *tables) tables Global table list RETURN VALUE - 0 OK - 1 error (message is sent to user) - -1 error (message is not sent to user) + FALSE OK + TRUE error */ -int delete_precheck(THD *thd, TABLE_LIST *tables) +bool delete_precheck(THD *thd, TABLE_LIST *tables) { DBUG_ENTER("delete_precheck"); if (check_one_table_access(thd, DELETE_ACL, tables)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); /* Set privilege for the WHERE clause */ tables->grant.want_privilege=(SELECT_ACL & ~tables->grant.privilege); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -5367,12 +6262,11 @@ int delete_precheck(THD *thd, TABLE_LIST *tables) tables Global table list RETURN VALUE - 0 OK - 1 error (message is sent to user) - -1 error (message is not sent to user) + FALSE OK + TRUE error */ -int insert_precheck(THD *thd, TABLE_LIST *tables) +bool insert_precheck(THD *thd, TABLE_LIST *tables) { LEX *lex= thd->lex; DBUG_ENTER("insert_precheck"); @@ -5381,19 +6275,19 @@ int insert_precheck(THD *thd, TABLE_LIST *tables) Check that we have modify privileges for the first table and select privileges for the rest */ - ulong privilege= INSERT_ACL | - (lex->duplicates == DUP_REPLACE ? DELETE_ACL : 0) | - (lex->duplicates == DUP_UPDATE ? UPDATE_ACL : 0); + ulong privilege= (INSERT_ACL | + (lex->duplicates == DUP_REPLACE ? DELETE_ACL : 0) | + (lex->value_list.elements ? 
UPDATE_ACL : 0)); if (check_one_table_access(thd, privilege, tables)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); if (lex->update_list.elements != lex->value_list.elements) { - my_error(ER_WRONG_VALUE_COUNT, MYF(0)); - DBUG_RETURN(-1); + my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -5407,17 +6301,17 @@ int insert_precheck(THD *thd, TABLE_LIST *tables) create_table Table which will be created RETURN VALUE - 0 OK - 1 Error (message is sent to user) + FALSE OK + TRUE Error */ -int create_table_precheck(THD *thd, TABLE_LIST *tables, - TABLE_LIST *create_table) +bool create_table_precheck(THD *thd, TABLE_LIST *tables, + TABLE_LIST *create_table) { LEX *lex= thd->lex; SELECT_LEX *select_lex= &lex->select_lex; ulong want_priv; - int error= 1; // Error message is given + bool error= TRUE; // Error message is given DBUG_ENTER("create_table_precheck"); want_priv= ((lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) ? @@ -5438,37 +6332,27 @@ int create_table_precheck(THD *thd, TABLE_LIST *tables, /* Check permissions for used tables in CREATE TABLE ... SELECT */ /* - For temporary tables or PREPARED STATEMETNS we don't have to check - if the created table exists + Only do the check for PS, becasue we on execute we have to check that + against the opened tables to ensure we don't use a table that is part + of the view (which can only be done after the table has been opened). */ - if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && - ! thd->current_arena->is_stmt_prepare() && - find_real_table_in_list(tables, create_table->db, - create_table->real_name)) - { - net_printf(thd,ER_UPDATE_TABLE_USED, create_table->real_name); - - goto err; - } - if (lex->create_info.used_fields & HA_CREATE_USED_UNION) + if (thd->current_arena->is_stmt_prepare()) { - TABLE_LIST *tab; - for (tab= tables; tab; tab= tab->next) + /* + For temporary tables we don't have to check if the created table exists + */ + if (!(lex->create_info.options & HA_LEX_CREATE_TMP_TABLE) && + find_table_in_global_list(tables, create_table->db, + create_table->real_name)) { - if (find_real_table_in_list((TABLE_LIST*) lex->create_info. 
- merge_list.first, - tables->db, tab->real_name)) - { - net_printf(thd, ER_UPDATE_TABLE_USED, tab->real_name); - goto err; - } - } - } - + error= FALSE; + goto err; + } + } if (tables && check_table_access(thd, SELECT_ACL, tables,0)) goto err; } - error= 0; + error= FALSE; err: DBUG_RETURN(error); @@ -5480,7 +6364,7 @@ err: SYNOPSIS negate_expression() - thd therad handler + thd thread handler expr expression for negation RETURN diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 20ebc23e240..9a47a3b8953 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -70,9 +70,13 @@ Long data handling: #include "mysql_priv.h" #include "sql_select.h" // for JOIN #include <m_ctype.h> // for isspace() +#include "sp_head.h" +#include "sp.h" #ifdef EMBEDDED_LIBRARY /* include MYSQL_BIND headers */ #include <mysql.h> +#else +#include <mysql_com.h> #endif /****************************************************************************** @@ -104,7 +108,7 @@ public: }; static void execute_stmt(THD *thd, Prepared_statement *stmt, - String *expanded_query, bool set_context); + String *expanded_query); /****************************************************************************** Implementation @@ -118,16 +122,13 @@ inline bool is_param_null(const uchar *pos, ulong param_no) enum { STMT_QUERY_LOG_LENGTH= 8192 }; -enum enum_send_error { DONT_SEND_ERROR= 0, SEND_ERROR }; - /* Seek prepared statement in statement map by id: returns zero if statement was not found, pointer otherwise. */ static Prepared_statement * -find_prepared_statement(THD *thd, ulong id, const char *where, - enum enum_send_error se) +find_prepared_statement(THD *thd, ulong id, const char *where) { Statement *stmt= thd->stmt_map.find(id); @@ -135,8 +136,6 @@ find_prepared_statement(THD *thd, ulong id, const char *where, { char llbuf[22]; my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), 22, llstr(id, llbuf), where); - if (se == SEND_ERROR) - send_error(thd); return 0; } return (Prepared_statement *) stmt; @@ -166,7 +165,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, uint columns) (stmt->param_count && stmt->thd->protocol_simple.send_fields((List<Item> *) &stmt->lex->param_list, - 0))); + Protocol::SEND_EOF))); } #else static bool send_prep_stmt(Prepared_statement *stmt, @@ -176,7 +175,7 @@ static bool send_prep_stmt(Prepared_statement *stmt, thd->client_stmt_id= stmt->id; thd->client_param_count= stmt->param_count; - thd->net.last_errno= 0; + thd->clear_error(); return 0; } @@ -399,6 +398,7 @@ static void set_param_datetime(Item_param *param, uchar **pos, ulong len) *pos+= length; } + static void set_param_date(Item_param *param, uchar **pos, ulong len) { MYSQL_TIME tm; @@ -867,28 +867,26 @@ static bool insert_params_from_vars_with_log(Prepared_statement *stmt, SYNOPSIS mysql_test_insert() stmt prepared statemen handler - tables list of tables queries + tables global/local table list RETURN VALUE - 0 ok - 1 error, sent to the client - -1 error, not sent to client + FALSE OK + TRUE error */ -static int mysql_test_insert(Prepared_statement *stmt, - TABLE_LIST *table_list, - List<Item> &fields, - List<List_item> &values_list, - List<Item> &update_fields, - List<Item> &update_values, - enum_duplicates duplic) + +static bool mysql_test_insert(Prepared_statement *stmt, + TABLE_LIST *table_list, + List<Item> &fields, + List<List_item> &values_list, + List<Item> &update_fields, + List<Item> &update_values, + enum_duplicates duplic) { THD *thd= stmt->thd; LEX *lex= stmt->lex; List_iterator_fast<List_item> its(values_list); List_item 
*values; - int res= -1; - TABLE_LIST *insert_table_list= - (TABLE_LIST*) lex->select_lex.table_list.first; + bool res; DBUG_ENTER("mysql_test_insert"); if ((res= insert_precheck(thd, table_list))) @@ -900,18 +898,25 @@ static int mysql_test_insert(Prepared_statement *stmt, */ if (open_and_lock_tables(thd, table_list)) { - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } if ((values= its++)) { uint value_count; ulong counter= 0; + Item *unused_conds= 0; + + if (table_list->table) + { + // don't allocate insert_values + table_list->table->insert_values=(byte *)1; + } - table_list->table->insert_values=(byte *)1; // don't allocate insert_values - if ((res= mysql_prepare_insert(thd, table_list, insert_table_list, - table_list->table, fields, values, - update_fields, update_values, duplic))) + if ((res= mysql_prepare_insert(thd, table_list, table_list->table, + fields, values, update_fields, + update_values, duplic, + &unused_conds, FALSE))) goto error; value_count= values->elements; @@ -922,12 +927,10 @@ static int mysql_test_insert(Prepared_statement *stmt, counter++; if (values->elements != value_count) { - my_printf_error(ER_WRONG_VALUE_COUNT_ON_ROW, - ER(ER_WRONG_VALUE_COUNT_ON_ROW), - MYF(0), counter); + my_error(ER_WRONG_VALUE_COUNT_ON_ROW, MYF(0), counter); goto error; } - if (setup_fields(thd, 0, insert_table_list, *values, 0, 0, 0)) + if (setup_fields(thd, 0, table_list, *values, 0, 0, 0)) goto error; } } @@ -935,7 +938,7 @@ static int mysql_test_insert(Prepared_statement *stmt, res= 0; error: lex->unit.cleanup(); - table_list->table->insert_values=0; + /* insert_values is cleared in open_table */ DBUG_RETURN(res); } @@ -950,39 +953,67 @@ error: RETURN VALUE 0 success - 1 error, sent to client - -1 error, not sent to client + 2 convert to multi_update + 1 error */ static int mysql_test_update(Prepared_statement *stmt, - TABLE_LIST *table_list) + TABLE_LIST *table_list) { int res; THD *thd= stmt->thd; + uint table_count= 0; SELECT_LEX *select= &stmt->lex->select_lex; DBUG_ENTER("mysql_test_update"); - if ((res= update_precheck(thd, table_list))) - DBUG_RETURN(res); + if (update_precheck(thd, table_list)) + DBUG_RETURN(1); - if (open_and_lock_tables(thd, table_list)) - res= -1; - else + if (!open_tables(thd, table_list, &table_count)) { - TABLE_LIST *update_table_list= (TABLE_LIST *)select->table_list.first; + if (table_list->ancestor && table_list->ancestor->next_local) + { + DBUG_ASSERT(table_list->view); + DBUG_PRINT("info", ("Switch to multi-update")); + /* pass counter value */ + thd->lex->table_count= table_count; + /* + give correct value to multi_lock_option, because it will be used + in multiupdate + */ + thd->lex->multi_lock_option= table_list->lock_type; + /* convert to multiupdate */ + return 2; + } + + if (lock_tables(thd, table_list, table_count) || + mysql_handle_derived(thd->lex, &mysql_derived_prepare) || + (thd->fill_derived_tables() && + mysql_handle_derived(thd->lex, &mysql_derived_filling))) + DBUG_RETURN(1); + if (!(res= mysql_prepare_update(thd, table_list, - update_table_list, &select->where, select->order_list.elements, (ORDER *) select->order_list.first))) { - if (setup_fields(thd, 0, update_table_list, - select->item_list, 1, 0, 0) || - setup_fields(thd, 0, update_table_list, - stmt->lex->value_list, 0, 0, 0)) - res= -1; + thd->lex->select_lex.no_wrap_view_item= 1; + if (setup_fields(thd, 0, table_list, select->item_list, 1, 0, 0)) + { + res= 1; + thd->lex->select_lex.no_wrap_view_item= 0; + } + else + { + thd->lex->select_lex.no_wrap_view_item= 0; + if 
(setup_fields(thd, 0, table_list, + stmt->lex->value_list, 0, 0, 0)) + res= 1; + } } stmt->lex->unit.cleanup(); } + else + res= 1; /* TODO: here we should send types of placeholders to the client. */ DBUG_RETURN(res); } @@ -997,30 +1028,36 @@ static int mysql_test_update(Prepared_statement *stmt, tables list of tables queries RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error */ static int mysql_test_delete(Prepared_statement *stmt, TABLE_LIST *table_list) { - int res; THD *thd= stmt->thd; LEX *lex= stmt->lex; DBUG_ENTER("mysql_test_delete"); - if ((res= delete_precheck(thd, table_list))) - DBUG_RETURN(res); + if (delete_precheck(thd, table_list)) + DBUG_RETURN(TRUE); - if (open_and_lock_tables(thd, table_list)) - res= -1; - else + if (!open_and_lock_tables(thd, table_list)) { - res= mysql_prepare_delete(thd, table_list, &lex->select_lex.where); + if (!table_list->table) + { + DBUG_ASSERT(table_list->view && + table_list->ancestor && table_list->ancestor->next_local); + my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), + table_list->view_db.str, table_list->view_name.str); + DBUG_RETURN(-1); + } + + mysql_prepare_delete(thd, table_list, &lex->select_lex.where); lex->unit.cleanup(); + DBUG_RETURN(FALSE); } /* TODO: here we should send types of placeholders to the client. */ - DBUG_RETURN(res); + DBUG_RETURN(TRUE); } @@ -1035,9 +1072,8 @@ static int mysql_test_delete(Prepared_statement *stmt, tables list of tables queries RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, sent to client */ static int mysql_test_select(Prepared_statement *stmt, @@ -1046,7 +1082,7 @@ static int mysql_test_select(Prepared_statement *stmt, THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX_UNIT *unit= &lex->unit; - int result= 1; + bool result; DBUG_ENTER("mysql_test_select"); #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -1054,30 +1090,29 @@ static int mysql_test_select(Prepared_statement *stmt, if (tables) { if (check_table_access(thd, privilege, tables,0)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } else if (check_access(thd, privilege, any_db,0,0,0)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); #endif + result= TRUE; if (!lex->result && !(lex->result= new (stmt->mem_root) select_send)) - { - send_error(thd); goto err; - } if (open_and_lock_tables(thd, tables)) - { - send_error(thd); goto err; - } + thd->used_tables= 0; // Updated by setup_fields - // JOIN::prepare calls + /* + JOIN::prepare calls + It is not SELECT COMMAND for sure, so setup_tables will be called as + usual, and we pass 0 as setup_tables_done_option + */ if (unit->prepare(thd, 0, 0)) { - send_error(thd); goto err_prep; } if (!text_protocol) @@ -1102,12 +1137,12 @@ static int mysql_test_select(Prepared_statement *stmt, prepared in unit->prepare call above. 
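The precheck and mysql_test_* hunks above all make the same interface change: the old tri-state int return (0 = ok, 1 = error already sent, -1 = error not yet sent) becomes a plain bool, with the diagnostic always reported via my_error()/my_message() at the point of failure. A minimal stand-alone sketch of the two conventions, using invented names (report_error, check_count_*) rather than the server's functions:

#include <cstdio>

// Hypothetical error reporter standing in for the server's my_error()/my_message().
static void report_error(const char *msg)
{
  std::fprintf(stderr, "ERROR: %s\n", msg);
}

// Old convention: 0 = ok, 1 = error already reported, -1 = error not yet reported.
// The caller had to inspect the sign to know whether it still owed a message.
static int check_count_old(int columns, int values)
{
  return (columns != values) ? -1 : 0;     // error, but message NOT yet sent
}

// New convention from the patch: report at the point of failure, return bool,
// and callers simply propagate the flag.
static bool check_count_new(int columns, int values)
{
  if (columns != values)
  {
    report_error("column count does not match value count");
    return true;                           // error, message already sent
  }
  return false;                            // ok
}

int main()
{
  if (check_count_old(2, 3) < 0)           // old style: caller must remember to report
    report_error("column count does not match value count");
  return check_count_new(3, 3) ? 1 : 0;    // new style: nothing left for the caller to do
}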
*/ if (send_prep_stmt(stmt, lex->result->field_count(fields)) || - lex->result->send_fields(fields, 0) || + lex->result->send_fields(fields, Protocol::SEND_EOF) || thd->protocol->flush()) goto err_prep; } } - result= 0; // ok + result= FALSE; // ok err_prep: unit->cleanup(); @@ -1126,30 +1161,27 @@ err: values list of expressions RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error, sent to client */ -static int mysql_test_do_fields(Prepared_statement *stmt, +static bool mysql_test_do_fields(Prepared_statement *stmt, TABLE_LIST *tables, List<Item> *values) { DBUG_ENTER("mysql_test_do_fields"); THD *thd= stmt->thd; - int res= 0; - if (tables && (res= check_table_access(thd, SELECT_ACL, tables, 0))) - DBUG_RETURN(res); + bool res; + if (tables && check_table_access(thd, SELECT_ACL, tables, 0)) + DBUG_RETURN(TRUE); - if (tables && (res= open_and_lock_tables(thd, tables))) + if (tables && open_and_lock_tables(thd, tables)) { - DBUG_RETURN(res); + DBUG_RETURN(TRUE); } res= setup_fields(thd, 0, 0, *values, 0, 0, 0); stmt->lex->unit.cleanup(); - if (res) - DBUG_RETURN(-1); - DBUG_RETURN(0); + DBUG_RETURN(res); } @@ -1163,22 +1195,21 @@ static int mysql_test_do_fields(Prepared_statement *stmt, values list of expressions RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error */ -static int mysql_test_set_fields(Prepared_statement *stmt, - TABLE_LIST *tables, - List<set_var_base> *var_list) +static bool mysql_test_set_fields(Prepared_statement *stmt, + TABLE_LIST *tables, + List<set_var_base> *var_list) { DBUG_ENTER("mysql_test_set_fields"); List_iterator_fast<set_var_base> it(*var_list); THD *thd= stmt->thd; set_var_base *var; - int res= 0; + bool res= 0; - if (tables && (res= check_table_access(thd, SELECT_ACL, tables, 0))) - DBUG_RETURN(res); + if (tables && check_table_access(thd, SELECT_ACL, tables, 0)) + DBUG_RETURN(TRUE); if (tables && (res= open_and_lock_tables(thd, tables))) goto error; @@ -1187,7 +1218,7 @@ static int mysql_test_set_fields(Prepared_statement *stmt, if (var->light_check(thd)) { stmt->lex->unit.cleanup(); - res= -1; + res= TRUE; goto error; } } @@ -1202,31 +1233,39 @@ error: SYNOPSIS select_like_statement_test() - stmt - prepared table handler - tables - global list of tables + stmt - prepared table handler + tables - global list of tables + specific_prepare - function of command specific prepare RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error */ -static int select_like_statement_test(Prepared_statement *stmt, - TABLE_LIST *tables) +static bool select_like_statement_test(Prepared_statement *stmt, + TABLE_LIST *tables, + bool (*specific_prepare)(THD *thd), + ulong setup_tables_done_option) { DBUG_ENTER("select_like_statement_test"); THD *thd= stmt->thd; LEX *lex= stmt->lex; - int res= 0; + bool res= 0; - if (tables && (res= open_and_lock_tables(thd, tables))) + /* check that tables was not opened during conversion from usual update */ + if (tables && + (!tables->table && !tables->view) && + (res= open_and_lock_tables(thd, tables))) + goto end; + + if (specific_prepare && (res= (*specific_prepare)(thd))) goto end; thd->used_tables= 0; // Updated by setup_fields // JOIN::prepare calls - if (lex->unit.prepare(thd, 0, 0)) + if (lex->unit.prepare(thd, 0, setup_tables_done_option)) { - res= thd->net.report_error ? 
-1 : 1; + res= TRUE; } end: lex->unit.cleanup(); @@ -1247,31 +1286,29 @@ end: 1 error, sent to client -1 error, not sent to client */ -static int mysql_test_create_table(Prepared_statement *stmt, - TABLE_LIST *tables) + +static int mysql_test_create_table(Prepared_statement *stmt) { DBUG_ENTER("mysql_test_create_table"); THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX *select_lex= &lex->select_lex; int res= 0; - /* Skip first table, which is the table we are creating */ - TABLE_LIST *create_table, *create_table_local; - tables= lex->unlink_first_table(tables, &create_table, - &create_table_local); + bool link_to_local; + TABLE_LIST *create_table= lex->unlink_first_table(&link_to_local); + TABLE_LIST *tables= lex->query_tables; if (!(res= create_table_precheck(thd, tables, create_table)) && select_lex->item_list.elements) { select_lex->resolve_mode= SELECT_LEX::SELECT_MODE; - res= select_like_statement_test(stmt, tables); + res= select_like_statement_test(stmt, tables, 0, 0); select_lex->resolve_mode= SELECT_LEX::NOMATTER_MODE; } /* put tables back for PS rexecuting */ - tables= lex->link_first_table_back(tables, create_table, - create_table_local); + lex->link_first_table_back(create_table, link_to_local); DBUG_RETURN(res); } @@ -1283,19 +1320,26 @@ static int mysql_test_create_table(Prepared_statement *stmt, mysql_test_multiupdate() stmt prepared statemen handler tables list of tables queries + converted converted to multi-update from usual update RETURN VALUE - 0 success - 1 error, sent to client - -1 error, not sent to client + FALSE success + TRUE error */ -static int mysql_test_multiupdate(Prepared_statement *stmt, - TABLE_LIST *tables) + +static bool mysql_test_multiupdate(Prepared_statement *stmt, + TABLE_LIST *tables, + bool converted) { - int res; - if ((res= multi_update_precheck(stmt->thd, tables))) - return res; - return select_like_statement_test(stmt, tables); + /* if we switched from normal update, rights are checked */ + if (!converted && multi_update_precheck(stmt->thd, tables)) + return TRUE; + /* + here we do not pass tables for opening, tables will be opened and locked + by mysql_multi_update_prepare + */ + return select_like_statement_test(stmt, 0, &mysql_multi_update_prepare, + OPTION_SETUP_TABLES_DONE); } @@ -1323,7 +1367,20 @@ static int mysql_test_multidelete(Prepared_statement *stmt, uint fake_counter; if ((res= multi_delete_precheck(stmt->thd, tables, &fake_counter))) return res; - return select_like_statement_test(stmt, tables); + if ((res= select_like_statement_test(stmt, tables, + &mysql_multi_delete_prepare, + OPTION_SETUP_TABLES_DONE))) + return res; + if (!tables->table) + { + DBUG_ASSERT(tables->view && + tables->ancestor && tables->ancestor->next_local); + my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0), + tables->view_db.str, tables->view_name.str); + return -1; + } + return 0; + } @@ -1340,23 +1397,33 @@ static int mysql_test_multidelete(Prepared_statement *stmt, 1 error, sent to client -1 error, not sent to client */ + static int mysql_test_insert_select(Prepared_statement *stmt, TABLE_LIST *tables) { int res; LEX *lex= stmt->lex; + TABLE_LIST *first_local_table; + if ((res= insert_precheck(stmt->thd, tables))) return res; - TABLE_LIST *first_local_table= - (TABLE_LIST *)lex->select_lex.table_list.first; + first_local_table= (TABLE_LIST *)lex->select_lex.table_list.first; + DBUG_ASSERT(first_local_table != 0); /* Skip first table, which is the table we are inserting in */ - lex->select_lex.table_list.first= (byte*) first_local_table->next; + 
lex->select_lex.table_list.first= (byte*) first_local_table->next_local; + if (tables->table) + { + // don't allocate insert_values + tables->table->insert_values=(byte *)1; + } + /* insert/replace from SELECT give its SELECT_LEX for SELECT, and item_list belong to SELECT */ lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; - res= select_like_statement_test(stmt, tables); + res= select_like_statement_test(stmt, tables, &mysql_insert_select_prepare, + OPTION_SETUP_TABLES_DONE); /* revert changes*/ lex->select_lex.table_list.first= (byte*) first_local_table; lex->select_lex.resolve_mode= SELECT_LEX::INSERT_MODE; @@ -1365,30 +1432,53 @@ static int mysql_test_insert_select(Prepared_statement *stmt, /* - Send the prepare query results back to client + Perform semantic analysis of the parsed tree and send a response packet + to the client. + SYNOPSIS - send_prepare_results() - stmt prepared statement + check_prepared_statement() + stmt prepared statement + + DESCRIPTION + This function + - opens all tables and checks access rights + - validates semantics of statement columns and SQL functions + by calling fix_fields. + RETURN VALUE 0 success 1 error, sent to client */ -static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) -{ + +static int check_prepared_statement(Prepared_statement *stmt, + bool text_protocol) +{ THD *thd= stmt->thd; LEX *lex= stmt->lex; SELECT_LEX *select_lex= &lex->select_lex; - TABLE_LIST *tables=(TABLE_LIST*) select_lex->table_list.first; + TABLE_LIST *tables; enum enum_sql_command sql_command= lex->sql_command; int res= 0; - DBUG_ENTER("send_prepare_results"); + DBUG_ENTER("check_prepared_statement"); DBUG_PRINT("enter",("command: %d, param_count: %ld", sql_command, stmt->param_count)); - if ((&lex->select_lex != lex->all_selects_list || - lex->time_zone_tables_used) && - lex->unit.create_total_list(thd, lex, &tables)) - DBUG_RETURN(1); + lex->first_lists_tables_same(); + tables= lex->query_tables; + + /* + Preopen 'proc' system table and cache all functions used in this + statement. We must do that before we open ordinary tables to avoid + deadlocks. We can't open and lock any table once query tables were + opened. 
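The comment just above states a lock-ordering rule: the 'proc' system table has to be opened (and its stored functions cached) before any ordinary query table, because taking it afterwards could deadlock. The generic sketch below illustrates that discipline with plain std::mutex objects and invented names; it is not the server's table-locking code:

#include <mutex>

std::mutex proc_table_lock;    // stands in for the 'proc' system table
std::mutex query_table_lock;   // stands in for an ordinary query table

// Every code path takes the locks in one global order: 'proc' first, ordinary
// tables second.  If some path locked a query table first and only then needed
// 'proc', two sessions could block each other forever -- the deadlock the
// comment in the patch avoids by pre-opening 'proc'.
void prepare_statement()
{
  std::lock_guard<std::mutex> proc_guard(proc_table_lock);
  // ... cache the stored functions used by the statement ...
  std::lock_guard<std::mutex> tables_guard(query_table_lock);
  // ... open and lock the query tables, resolve columns ...
}

int main()
{
  prepare_statement();
  return 0;
}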
+ */ + if (lex->sql_command != SQLCOM_CREATE_PROCEDURE && + lex->sql_command != SQLCOM_CREATE_SPFUNCTION) + { + /* the error is print inside */ + if (sp_cache_functions(thd, lex)) + DBUG_RETURN(1); + } switch (sql_command) { case SQLCOM_REPLACE: @@ -1401,6 +1491,12 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) case SQLCOM_UPDATE: res= mysql_test_update(stmt, tables); + /* mysql_test_update return 2 if we need to switch to multi-update */ + if (res != 2) + break; + + case SQLCOM_UPDATE_MULTI: + res= mysql_test_multiupdate(stmt, tables, res == 2); break; case SQLCOM_DELETE: @@ -1414,7 +1510,7 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) DBUG_RETURN(0); case SQLCOM_CREATE_TABLE: - res= mysql_test_create_table(stmt, tables); + res= mysql_test_create_table(stmt); break; case SQLCOM_DO: @@ -1428,10 +1524,6 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) case SQLCOM_DELETE_MULTI: res= mysql_test_multidelete(stmt, tables); break; - - case SQLCOM_UPDATE_MULTI: - res= mysql_test_multiupdate(stmt, tables); - break; case SQLCOM_INSERT_SELECT: case SQLCOM_REPLACE_SELECT: @@ -1456,6 +1548,12 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) case SQLCOM_SHOW_GRANTS: case SQLCOM_DROP_TABLE: case SQLCOM_RENAME_TABLE: + case SQLCOM_ALTER_TABLE: + case SQLCOM_COMMIT: + case SQLCOM_CREATE_INDEX: + case SQLCOM_DROP_INDEX: + case SQLCOM_ROLLBACK: + case SQLCOM_TRUNCATE: break; default: @@ -1463,15 +1561,13 @@ static int send_prepare_results(Prepared_statement *stmt, bool text_protocol) All other is not supported yet */ res= -1; - my_error(ER_UNSUPPORTED_PS, MYF(0)); + my_message(ER_UNSUPPORTED_PS, ER(ER_UNSUPPORTED_PS), MYF(0)); goto error; } if (res == 0) DBUG_RETURN(text_protocol? 0 : (send_prep_stmt(stmt, 0) || thd->protocol->flush())); error: - if (res < 0) - send_error(thd, thd->killed ? ER_SERVER_SHUTDOWN : 0); DBUG_RETURN(1); } @@ -1490,9 +1586,8 @@ static bool init_param_array(Prepared_statement *stmt) if (stmt->param_count > (uint) UINT_MAX16) { /* Error code to be defined in 5.0 */ - send_error(thd, ER_UNKNOWN_ERROR, - "Prepared statement contains too many placeholders."); - return 1; + my_message(ER_PS_MANY_PARAM, ER(ER_PS_MANY_PARAM), MYF(0)); + return TRUE; } Item_param **to; List_iterator<Item_param> param_iterator(lex->param_list); @@ -1501,10 +1596,7 @@ static bool init_param_array(Prepared_statement *stmt) alloc_root(stmt->thd->mem_root, sizeof(Item_param*) * stmt->param_count); if (!stmt->param_array) - { - send_error(thd, ER_OUT_OF_RESOURCES); - return 1; - } + return TRUE; for (to= stmt->param_array; to < stmt->param_array + stmt->param_count; ++to) @@ -1512,7 +1604,7 @@ static bool init_param_array(Prepared_statement *stmt) *to= param_iterator++; } } - return 0; + return FALSE; } /* @@ -1527,10 +1619,10 @@ static bool init_param_array(Prepared_statement *stmt) name NULL or statement name. For unnamed statements binary PS protocol is used, for named statements text protocol is used. 
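init_param_array() in the hunk above refuses more placeholders than fit in 16 bits and then flattens the parsed parameter list into an array so each '?' can be bound by index. A stand-alone sketch of that shape, using std::vector instead of the server's mem_root allocation and an invented Param type:

#include <cstdint>
#include <list>
#include <vector>

struct Param { unsigned position; };       // stand-in for the server's Item_param

// true = error (mirroring the bool convention used throughout the patch).
bool init_param_array(const std::list<Param*> &parsed_params,
                      std::vector<Param*> &param_array)
{
  if (parsed_params.size() > UINT16_MAX)   // the wire protocol carries a 16-bit count
    return true;                           // "too many placeholders"
  param_array.assign(parsed_params.begin(), parsed_params.end());
  return false;                            // param_array[i] now binds value i
}

int main()
{
  Param p0{0}, p1{1};
  std::list<Param*> parsed{&p0, &p1};
  std::vector<Param*> flat;
  return init_param_array(parsed, flat) ? 1 : 0;
}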
- RETURN - 0 OK, statement prepared successfully - other Error - + RETURN + FALSE OK, statement prepared successfully + TRUE Error + NOTES This function parses the query and sends the total number of parameters and resultset metadata information back to client (if any), without @@ -1544,21 +1636,18 @@ static bool init_param_array(Prepared_statement *stmt) */ -int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, - LEX_STRING *name) +bool mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, + LEX_STRING *name) { LEX *lex; Prepared_statement *stmt= new Prepared_statement(thd); - int error; + bool error; DBUG_ENTER("mysql_stmt_prepare"); DBUG_PRINT("prep_query", ("%s", packet)); if (stmt == 0) - { - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_RETURN(1); - } + DBUG_RETURN(TRUE); if (name) { @@ -1567,16 +1656,14 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, name->length))) { delete stmt; - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } if (thd->stmt_map.insert(stmt)) { delete stmt; - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } thd->set_n_backup_statement(stmt, &thd->stmt_backup); @@ -1588,8 +1675,7 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, thd->restore_backup_item_arena(stmt, &thd->stmt_backup); /* Statement map deletes statement on erase */ thd->stmt_map.erase(stmt); - send_error(thd, ER_OUT_OF_RESOURCES); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } mysql_log.write(thd, COM_PREPARE, "%s", packet); @@ -1604,8 +1690,8 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, error= yyparse((void *)thd) || thd->is_fatal_error || thd->net.report_error || init_param_array(stmt); /* - While doing context analysis of the query (in send_prepare_results) we - allocate a lot of additional memory: for open tables, JOINs, derived + While doing context analysis of the query (in check_prepared_statement) + we allocate a lot of additional memory: for open tables, JOINs, derived tables, etc. Let's save a snapshot of current parse tree to the statement and restore original THD. In cases when some tree transformation can be reused on execute, we set again thd->mem_root from @@ -1614,18 +1700,24 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, thd->restore_backup_item_arena(stmt, &thd->stmt_backup); if (!error) - error= send_prepare_results(stmt, test(name)); + error= check_prepared_statement(stmt, test(name)); /* restore to WAIT_PRIOR: QUERY_PRIOR is set inside alloc_query */ if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(),WAIT_PRIOR); + if (error && thd->lex->sphead) + { + if (lex != thd->lex) + thd->lex->sphead->restore_lex(thd); + delete thd->lex->sphead; + thd->lex->sphead= NULL; + } lex_end(lex); thd->restore_backup_statement(stmt, &thd->stmt_backup); cleanup_items(stmt->free_list); close_thread_tables(thd); - free_items(thd->free_list); thd->rollback_item_tree_changes(); - thd->free_list= 0; + thd->cleanup_after_query(); thd->current_arena= thd; if (error) @@ -1633,9 +1725,6 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, /* Statement map deletes statement on erase */ thd->stmt_map.erase(stmt); stmt= NULL; - if (thd->net.report_error) - send_error(thd); - /* otherwise the error is sent inside yyparse/send_prepare_results */ } else { @@ -1646,61 +1735,49 @@ int mysql_stmt_prepare(THD *thd, char *packet, uint packet_length, optimisation. 
*/ for (; sl; sl= sl->next_select_in_list()) - { sl->prep_where= sl->where; - } stmt->state= Item_arena::PREPARED; } - DBUG_RETURN(!stmt); } + /* Reinit statement before execution */ -static void reset_stmt_for_execute(Prepared_statement *stmt) +void reset_stmt_for_execute(THD *thd, LEX *lex) { - THD *thd= stmt->thd; - LEX *lex= stmt->lex; SELECT_LEX *sl= lex->all_selects_list; + DBUG_ENTER("reset_stmt_for_execute"); + if (lex->empty_field_list_on_rset) + { + lex->empty_field_list_on_rset= 0; + lex->field_list.empty(); + } for (; sl; sl= sl->next_select_in_list()) { - /* remove option which was put by mysql_explain_union() */ - sl->options&= ~SELECT_DESCRIBE; - /* - Copy WHERE clause pointers to avoid damaging they by optimisation - */ - if (sl->prep_where) + if (!sl->first_execution) { - sl->where= sl->prep_where->copy_andor_structure(thd); - sl->where->cleanup(); - } - DBUG_ASSERT(sl->join == 0); - ORDER *order; - /* Fix GROUP list */ - for (order= (ORDER *)sl->group_list.first; order; order= order->next) - order->item= &order->item_ptr; - /* Fix ORDER list */ - for (order= (ORDER *)sl->order_list.first; order; order= order->next) - order->item= &order->item_ptr; + /* remove option which was put by mysql_explain_union() */ + sl->options&= ~SELECT_DESCRIBE; - /* - TODO: When the new table structure is ready, then have a status bit - to indicate the table is altered, and re-do the setup_* - and open the tables back. - */ - for (TABLE_LIST *tables= (TABLE_LIST*) sl->table_list.first; - tables; - tables= tables->next) - { /* - Reset old pointers to TABLEs: they are not valid since the tables - were closed in the end of previous prepare or execute call. + Copy WHERE clause pointers to avoid damaging they by optimisation */ - tables->table= 0; - tables->table_list= 0; + if (sl->prep_where) + { + sl->where= sl->prep_where->copy_andor_structure(thd); + sl->where->cleanup(); + } + DBUG_ASSERT(sl->join == 0); + ORDER *order; + /* Fix GROUP list */ + for (order= (ORDER *)sl->group_list.first; order; order= order->next) + order->item= &order->item_ptr; + /* Fix ORDER list */ + for (order= (ORDER *)sl->order_list.first; order; order= order->next) + order->item= &order->item_ptr; } - { SELECT_LEX_UNIT *unit= sl->master_unit(); unit->unclean(); @@ -1709,9 +1786,34 @@ static void reset_stmt_for_execute(Prepared_statement *stmt) unit->reinit_exec_mechanism(); } } + + /* + TODO: When the new table structure is ready, then have a status bit + to indicate the table is altered, and re-do the setup_* + and open the tables back. + */ + for (TABLE_LIST *tables= lex->query_tables; + tables; + tables= tables->next_global) + { + /* + Reset old pointers to TABLEs: they are not valid since the tables + were closed in the end of previous prepare or execute call. + */ + tables->table= 0; + if (tables->nested_join) + tables->nested_join->counter= 0; + } lex->current_select= &lex->select_lex; + + /* restore original list used in INSERT ... SELECT */ + if (lex->leaf_tables_insert) + lex->select_lex.leaf_tables= lex->leaf_tables_insert; + if (lex->result) lex->result->cleanup(); + + DBUG_VOID_RETURN; } @@ -1746,6 +1848,7 @@ static void reset_stmt_params(Prepared_statement *stmt) void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) { ulong stmt_id= uint4korr(packet); + ulong flags= (ulong) ((uchar) packet[4]); /* Query text for binary log, or empty string if the query is not put into binary log. 
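As the lines just above show, mysql_stmt_execute() starts by pulling a 4-byte little-endian statement id and a one-byte flags value (the cursor type) out of the client packet before skipping the 9-byte header. A minimal stand-alone decoder for that little-endian layout, written with an explicit helper instead of the server's uint4korr() macro; the sample bytes and the read_u32_le name are only illustrative:

#include <cstdint>
#include <cstdio>

// Read a 32-bit little-endian integer, byte by byte.
static uint32_t read_u32_le(const unsigned char *p)
{
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main()
{
  // Illustrative packet: statement id 5, then a flags byte (0x01 here, assumed
  // to stand for a read-only-cursor request), then the rest of the 9-byte
  // header that the server skips before the parameter data.
  const unsigned char packet[9] = {5, 0, 0, 0, 0x01, 0, 0, 0, 0};

  uint32_t stmt_id = read_u32_le(packet);    // bytes 0..3
  unsigned flags   = packet[4];              // byte 4
  const unsigned char *params = packet + 9;  // parameter block would start here
  (void)params;

  std::printf("stmt_id=%u flags=0x%02x\n", (unsigned)stmt_id, flags);
  return 0;
}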
@@ -1759,8 +1862,7 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) packet+= 9; /* stmt_id + 5 bytes of flags */ - if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_execute", - SEND_ERROR))) + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_execute"))) DBUG_VOID_RETURN; DBUG_PRINT("exec_query:", ("%s", stmt->query)); @@ -1768,12 +1870,33 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) /* Check if we got an error when sending long data */ if (stmt->state == Item_arena::ERROR) { - send_error(thd, stmt->last_errno, stmt->last_error); + my_message(stmt->last_errno, stmt->last_error, MYF(0)); DBUG_VOID_RETURN; } DBUG_ASSERT(thd->free_list == NULL); mysql_reset_thd_for_next_command(thd); + if (flags & (ulong) CURSOR_TYPE_READ_ONLY) + { + if (!stmt->lex->result || !stmt->lex->result->simple_select()) + { + DBUG_PRINT("info",("Cursor asked for not SELECT stmt")); + /* + If lex->result is set in the parser, this is not a SELECT + statement: we can't open a cursor for it. + */ + flags= 0; + } + else + { + DBUG_PRINT("info",("Using READ_ONLY cursor")); + if (!stmt->cursor && + !(stmt->cursor= new (&stmt->main_mem_root) Cursor())) + DBUG_VOID_RETURN; + /* If lex->result is set, mysql_execute_command will use it */ + stmt->lex->result= &stmt->cursor->result; + } + } #ifndef EMBEDDED_LIBRARY if (stmt->param_count) { @@ -1792,15 +1915,51 @@ void mysql_stmt_execute(THD *thd, char *packet, uint packet_length) if (stmt->param_count && stmt->set_params_data(stmt, &expanded_query)) goto set_params_data_err; #endif + thd->stmt_backup.set_statement(thd); + thd->set_statement(stmt); + thd->current_arena= stmt; + reset_stmt_for_execute(thd, stmt->lex); + /* From now cursors assume that thd->mem_root is clean */ + if (expanded_query.length() && + alloc_query(thd, (char *)expanded_query.ptr(), + expanded_query.length()+1)) + { + my_error(ER_OUTOFMEMORY, 0, expanded_query.length()); + goto err; + } + thd->protocol= &thd->protocol_prep; // Switch to binary protocol - execute_stmt(thd, stmt, &expanded_query, TRUE); + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(),QUERY_PRIOR); + mysql_execute_command(thd); + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(), WAIT_PRIOR); thd->protocol= &thd->protocol_simple; // Use normal protocol + + if (flags & (ulong) CURSOR_TYPE_READ_ONLY) + { + if (stmt->cursor->is_open()) + stmt->cursor->init_from_thd(thd); + stmt->cursor->state= stmt->state; + } + else + { + thd->lex->unit.cleanup(); + cleanup_items(stmt->free_list); + reset_stmt_params(stmt); + close_thread_tables(thd); /* to close derived tables */ + thd->rollback_item_tree_changes(); + thd->cleanup_after_query(); + } + + thd->set_statement(&thd->stmt_backup); + thd->current_arena= thd; DBUG_VOID_RETURN; set_params_data_err: reset_stmt_params(stmt); my_error(ER_WRONG_ARGUMENTS, MYF(0), "mysql_stmt_execute"); - send_error(thd); +err: DBUG_VOID_RETURN; } @@ -1820,33 +1979,32 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name) String expanded_query; DBUG_ENTER("mysql_sql_stmt_execute"); + DBUG_ASSERT(thd->free_list == NULL); + if (!(stmt= (Prepared_statement*)thd->stmt_map.find_by_name(stmt_name))) { my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), stmt_name->length, stmt_name->str, "EXECUTE"); - send_error(thd); DBUG_VOID_RETURN; } if (stmt->param_count != thd->lex->prepared_stmt_params.elements) { my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE"); - send_error(thd); DBUG_VOID_RETURN; } - 
DBUG_ASSERT(thd->free_list == NULL); /* Must go before setting variables, as it clears thd->user_var_events */ mysql_reset_thd_for_next_command(thd); thd->set_n_backup_statement(stmt, &thd->stmt_backup); + thd->set_statement(stmt); if (stmt->set_params_from_vars(stmt, thd->stmt_backup.lex->prepared_stmt_params, &expanded_query)) { my_error(ER_WRONG_ARGUMENTS, MYF(0), "EXECUTE"); - send_error(thd); } - execute_stmt(thd, stmt, &expanded_query, FALSE); + execute_stmt(thd, stmt, &expanded_query); DBUG_VOID_RETURN; } @@ -1861,23 +2019,21 @@ void mysql_sql_stmt_execute(THD *thd, LEX_STRING *stmt_name) placeholders replaced with actual values. Otherwise empty string. NOTES - Caller must set parameter values and thd::protocol. - thd->free_list is assumed to be garbage. + Caller must set parameter values and thd::protocol. */ static void execute_stmt(THD *thd, Prepared_statement *stmt, - String *expanded_query, bool set_context) + String *expanded_query) { DBUG_ENTER("execute_stmt"); - if (set_context) - thd->set_n_backup_statement(stmt, &thd->stmt_backup); - reset_stmt_for_execute(stmt); + + reset_stmt_for_execute(thd, stmt->lex); if (expanded_query->length() && alloc_query(thd, (char *)expanded_query->ptr(), expanded_query->length()+1)) { - my_error(ER_OUTOFMEMORY, 0, expanded_query->length()); + my_error(ER_OUTOFMEMORY, MYF(0), expanded_query->length()); DBUG_VOID_RETURN; } /* @@ -1890,21 +2046,69 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(),QUERY_PRIOR); mysql_execute_command(thd); - thd->lex->unit.cleanup(); if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(), WAIT_PRIOR); - /* Free Items that were created during this execution of the PS. */ - free_items(thd->free_list); - thd->free_list= 0; - if (stmt->state == Item_arena::PREPARED) - stmt->state= Item_arena::EXECUTED; + thd->lex->unit.cleanup(); thd->current_arena= thd; cleanup_items(stmt->free_list); thd->rollback_item_tree_changes(); reset_stmt_params(stmt); close_thread_tables(thd); // to close derived tables thd->set_statement(&thd->stmt_backup); + thd->cleanup_after_query(); + + if (stmt->state == Item_arena::PREPARED) + stmt->state= Item_arena::EXECUTED; + DBUG_VOID_RETURN; +} + + +/* + COM_FETCH handler: fetches requested amount of rows from cursor + + SYNOPSIS + mysql_stmt_fetch() + thd Thread handler + packet Packet from client (with stmt_id & num_rows) + packet_length Length of packet +*/ + +void mysql_stmt_fetch(THD *thd, char *packet, uint packet_length) +{ + /* assume there is always place for 8-16 bytes */ + ulong stmt_id= uint4korr(packet); + ulong num_rows= uint4korr(packet+=4); + Statement *stmt; + int error; + DBUG_ENTER("mysql_stmt_fetch"); + + if (!(stmt= thd->stmt_map.find(stmt_id)) || + !stmt->cursor || + !stmt->cursor->is_open()) + { + my_error(ER_UNKNOWN_STMT_HANDLER, MYF(0), stmt_id, "fetch"); + DBUG_VOID_RETURN; + } + thd->current_arena= stmt; + thd->set_n_backup_statement(stmt, &thd->stmt_backup); + stmt->cursor->init_thd(thd); + + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(), QUERY_PRIOR); + + thd->protocol= &thd->protocol_prep; // Switch to binary protocol + error= stmt->cursor->fetch(num_rows); + thd->protocol= &thd->protocol_simple; // Use normal protocol + + if (!(specialflag & SPECIAL_NO_PRIOR)) + my_pthread_setprio(pthread_self(), WAIT_PRIOR); + + /* Restore THD state */ + stmt->cursor->reset_thd(thd); + thd->restore_backup_statement(stmt, &thd->stmt_backup); + 
thd->current_arena= thd; + DBUG_VOID_RETURN; } @@ -1914,7 +2118,7 @@ static void execute_stmt(THD *thd, Prepared_statement *stmt, SYNOPSIS mysql_stmt_reset() thd Thread handle - packet Packet with stmt id + packet Packet with stmt id DESCRIPTION This function resets statement to the state it was right after prepare. @@ -1934,8 +2138,7 @@ void mysql_stmt_reset(THD *thd, char *packet) DBUG_ENTER("mysql_stmt_reset"); - if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_reset", - SEND_ERROR))) + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_reset"))) DBUG_VOID_RETURN; stmt->state= Item_arena::PREPARED; @@ -1966,8 +2169,7 @@ void mysql_stmt_free(THD *thd, char *packet) DBUG_ENTER("mysql_stmt_free"); - if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_close", - DONT_SEND_ERROR))) + if (!(stmt= find_prepared_statement(thd, stmt_id, "mysql_stmt_close"))) DBUG_VOID_RETURN; /* Statement map deletes statement on erase */ @@ -2017,8 +2219,8 @@ void mysql_stmt_get_longdata(THD *thd, char *packet, ulong packet_length) stmt_id= uint4korr(packet); packet+= 4; - if (!(stmt=find_prepared_statement(thd, stmt_id, "mysql_stmt_send_long_data", - DONT_SEND_ERROR))) + if (!(stmt=find_prepared_statement(thd, stmt_id, + "mysql_stmt_send_long_data"))) DBUG_VOID_RETURN; param_number= uint2korr(packet); @@ -2084,8 +2286,11 @@ void Prepared_statement::setup_set_params() } } + Prepared_statement::~Prepared_statement() { + if (cursor) + cursor->Cursor::~Cursor(); free_items(free_list); delete lex->result; } @@ -2095,4 +2300,3 @@ Item_arena::Type Prepared_statement::type() const { return PREPARED_STATEMENT; } - diff --git a/sql/sql_rename.cc b/sql/sql_rename.cc index 388034e0f1a..1640fc1a634 100644 --- a/sql/sql_rename.cc +++ b/sql/sql_rename.cc @@ -44,7 +44,8 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) if (thd->locked_tables || thd->active_transaction()) { - my_error(ER_LOCK_OR_ACTIVE_TRANSACTION,MYF(0)); + my_message(ER_LOCK_OR_ACTIVE_TRANSACTION, + ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); DBUG_RETURN(1); } @@ -64,10 +65,10 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) table_list= reverse_table_list(table_list); /* Find the last renamed table */ - for (table=table_list ; - table->next != ren_table ; - table=table->next->next) ; - table=table->next->next; // Skip error table + for (table= table_list; + table->next_local != ren_table ; + table= table->next_local->next_local) ; + table= table->next_local->next_local; // Skip error table /* Revert to old names */ rename_tables(thd, table, 1); @@ -80,7 +81,6 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) /* Lets hope this doesn't fail as the result will be messy */ if (!error) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -90,7 +90,7 @@ bool mysql_rename_tables(THD *thd, TABLE_LIST *table_list) send_ok(thd); } - unlock_table_names(thd,table_list); + unlock_table_names(thd, table_list); err: pthread_mutex_unlock(&LOCK_open); @@ -115,8 +115,8 @@ static TABLE_LIST *reverse_table_list(TABLE_LIST *table_list) while (table_list) { - TABLE_LIST *next= table_list->next; - table_list->next= prev; + TABLE_LIST *next= table_list->next_local; + table_list->next_local= prev; prev= table_list; table_list= next; } @@ -135,13 +135,13 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error) TABLE_LIST *ren_table,*new_table; DBUG_ENTER("rename_tables"); - for (ren_table=table_list ; ren_table ; ren_table=new_table->next) + 
for (ren_table= table_list; ren_table; ren_table= new_table->next_local) { db_type table_type; char name[FN_REFLEN]; const char *new_alias, *old_alias; - new_table=ren_table->next; + new_table= ren_table->next_local; if (lower_case_table_names == 2) { old_alias= ren_table->alias; @@ -157,7 +157,7 @@ rename_tables(THD *thd, TABLE_LIST *table_list, bool skip_error) unpack_filename(name, name); if (!access(name,F_OK)) { - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_alias); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias); DBUG_RETURN(ren_table); // This can't be skipped } sprintf(name,"%s/%s/%s%s",mysql_data_home, diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index fd165ad1fa5..8ba92015535 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -45,16 +45,34 @@ int check_binlog_magic(IO_CACHE* log, const char** errmsg) return 0; } + /* + fake_rotate_event() builds a fake (=which does not exist physically in any + binlog) Rotate event, which contains the name of the binlog we are going to + send to the slave (because the slave may not know it if it just asked for + MASTER_LOG_FILE='', MASTER_LOG_POS=4). + < 4.0.14, fake_rotate_event() was called only if the requested pos was + 4. After this version we always call it, so that a 3.23.58 slave can rely on + it to detect if the master is 4.0 (and stop) (the _fake_ Rotate event has + zeros in the good positions which, by chance, make it possible for the 3.23 + slave to detect that this event is unexpected) (this is luck which happens + because the master and slave disagree on the size of the header of + Log_event). + + Relying on the event length of the Rotate event instead of these well-placed + zeros was not possible as Rotate events have a variable-length part. +*/ + static int fake_rotate_event(NET* net, String* packet, char* log_file_name, - ulonglong position, const char**errmsg) + ulonglong position, const char** errmsg) { + DBUG_ENTER("fake_rotate_event"); char header[LOG_EVENT_HEADER_LEN], buf[ROTATE_HEADER_LEN]; - memset(header, 0, 4); // when does not matter + memset(header, 0, 4); // 'when' (the timestamp) does not matter, is set to 0 header[EVENT_TYPE_OFFSET] = ROTATE_EVENT; char* p = log_file_name+dirname_length(log_file_name); uint ident_len = (uint) strlen(p); - ulong event_len = ident_len + ROTATE_EVENT_OVERHEAD; + ulong event_len = ident_len + LOG_EVENT_HEADER_LEN + ROTATE_HEADER_LEN; int4store(header + SERVER_ID_OFFSET, server_id); int4store(header + EVENT_LEN_OFFSET, event_len); int2store(header + FLAGS_OFFSET, 0); @@ -69,9 +87,9 @@ static int fake_rotate_event(NET* net, String* packet, char* log_file_name, if (my_net_write(net, (char*)packet->ptr(), packet->length())) { *errmsg = "failed on my_net_write()"; - return -1; + DBUG_RETURN(-1); } - return 0; + DBUG_RETURN(0); } static int send_file(THD *thd) @@ -257,41 +275,39 @@ bool log_in_use(const char* log_name) return result; } -int purge_error_message(THD* thd, int res) +bool purge_error_message(THD* thd, int res) { - const char *errmsg= 0; + uint errmsg= 0; switch (res) { case 0: break; - case LOG_INFO_EOF: errmsg= "Target log not found in binlog index"; break; - case LOG_INFO_IO: errmsg= "I/O error reading log index file"; break; - case LOG_INFO_INVALID: - errmsg= "Server configuration does not permit binlog purge"; break; - case LOG_INFO_SEEK: errmsg= "Failed on fseek()"; break; - case LOG_INFO_MEM: errmsg= "Out of memory"; break; - case LOG_INFO_FATAL: errmsg= "Fatal error during purge"; break; - case LOG_INFO_IN_USE: errmsg= "A purgeable log is in use, will not purge"; - 
break; - default: errmsg= "Unknown error during purge"; break; + case LOG_INFO_EOF: errmsg= ER_UNKNOWN_TARGET_BINLOG; break; + case LOG_INFO_IO: errmsg= ER_IO_ERR_LOG_INDEX_READ; break; + case LOG_INFO_INVALID:errmsg= ER_BINLOG_PURGE_PROHIBITED; break; + case LOG_INFO_SEEK: errmsg= ER_FSEEK_FAIL; break; + case LOG_INFO_MEM: errmsg= ER_OUT_OF_RESOURCES; break; + case LOG_INFO_FATAL: errmsg= ER_BINLOG_PURGE_FATAL_ERR; break; + case LOG_INFO_IN_USE: errmsg= ER_LOG_IN_USE; break; + default: errmsg= ER_LOG_PURGE_UNKNOWN_ERR; break; } if (errmsg) { - send_error(thd, 0, errmsg); - return 1; + my_message(errmsg, ER(errmsg), MYF(0)); + return TRUE; } send_ok(thd); - return 0; + return FALSE; } -int purge_master_logs(THD* thd, const char* to_log) +bool purge_master_logs(THD* thd, const char* to_log) { char search_file_name[FN_REFLEN]; if (!mysql_bin_log.is_open()) { send_ok(thd); - return 0; + return FALSE; } mysql_bin_log.make_log_name(search_file_name, to_log); @@ -301,7 +317,7 @@ int purge_master_logs(THD* thd, const char* to_log) } -int purge_master_logs_before_date(THD* thd, time_t purge_time) +bool purge_master_logs_before_date(THD* thd, time_t purge_time) { if (!mysql_bin_log.is_open()) { @@ -312,6 +328,36 @@ int purge_master_logs_before_date(THD* thd, time_t purge_time) mysql_bin_log.purge_logs_before_date(purge_time)); } +int test_for_non_eof_log_read_errors(int error, const char **errmsg) +{ + if (error == LOG_READ_EOF) + return 0; + my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; + switch (error) { + case LOG_READ_BOGUS: + *errmsg = "bogus data in log event"; + break; + case LOG_READ_TOO_LARGE: + *errmsg = "log event entry exceeded max_allowed_packet; \ +Increase max_allowed_packet on master"; + break; + case LOG_READ_IO: + *errmsg = "I/O error reading log event"; + break; + case LOG_READ_MEM: + *errmsg = "memory allocation failed reading log event"; + break; + case LOG_READ_TRUNC: + *errmsg = "binlog truncated in the middle of event"; + break; + default: + *errmsg = "unknown error reading log event on the master"; + break; + } + return error; +} + + /* TODO: Clean up loop to only have one call to send_file() */ @@ -328,6 +374,7 @@ void mysql_binlog_send(THD* thd, char* log_ident, my_off_t pos, int error; const char *errmsg = "Unknown error"; NET* net = &thd->net; + pthread_mutex_t *log_lock; #ifndef DBUG_OFF int left_events = max_binlog_dump_events; #endif @@ -387,18 +434,25 @@ impossible position"; goto err; } - my_b_seek(&log, pos); // Seek will done on next read /* We need to start a packet with something other than 255 - to distiquish it from error + to distinguish it from error */ - packet->set("\0", 1, &my_charset_bin); + packet->set("\0", 1, &my_charset_bin); /* This is the start of a new packet */ /* + Tell the client about the log name with a fake Rotate event; + this is needed even if we also send a Format_description_log_event just + after, because that event does not contain the binlog's name. + Note that as this Rotate event is sent before Format_description_log_event, + the slave cannot have any info to understand this event's format, so the + header len of Rotate_log_event is FROZEN + (so in 5.0 it will have a header shorter than other events except + FORMAT_DESCRIPTION_EVENT). Before 4.0.14 we called fake_rotate_event below only if (pos == BIN_LOG_HEADER_SIZE), because if this is false then the slave already knows the binlog's name. 
- Now we always call fake_rotate_event; if the slave already knew the log's + Since, we always call fake_rotate_event; if the slave already knew the log's name (ex: CHANGE MASTER TO MASTER_LOG_FILE=...) this is useless but does not harm much. It is nice for 3.23 (>=.58) slaves which test Rotate events to see if the master is 4.0 (then they choose to stop because they can't @@ -415,15 +469,72 @@ impossible position"; */ if (fake_rotate_event(net, packet, log_file_name, pos, &errmsg)) { + /* + This error code is not perfect, as fake_rotate_event() does not read + anything from the binlog; if it fails it's because of an error in + my_net_write(), fortunately it will say it in errmsg. + */ my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; goto err; } packet->set("\0", 1, &my_charset_bin); + /* + We can set log_lock now, it does not move (it's a member of mysql_bin_log, + and it's already inited, and it will be destroyed only at shutdown). + */ + log_lock = mysql_bin_log.get_log_lock(); + if (pos > BIN_LOG_HEADER_SIZE) + { + /* Try to find a Format_description_log_event at the beginning of the binlog */ + if (!(error = Log_event::read_log_event(&log, packet, log_lock))) + { + /* + The packet has offsets equal to the normal offsets in a binlog event + +1 (the first character is \0). + */ + DBUG_PRINT("info", + ("Looked for a Format_description_log_event, found event type %d", + (*packet)[EVENT_TYPE_OFFSET+1])); + if ((*packet)[EVENT_TYPE_OFFSET+1] == FORMAT_DESCRIPTION_EVENT) + { + /* + mark that this event with "log_pos=0", so the slave + should not increment master's binlog position + (rli->group_master_log_pos) + */ + int4store(packet->c_ptr() +LOG_POS_OFFSET+1,0); + /* send it */ + if (my_net_write(net, (char*)packet->ptr(), packet->length())) + { + errmsg = "Failed on my_net_write()"; + my_errno= ER_UNKNOWN_ERROR; + goto err; + } + /* + No need to save this event. We are only doing simple reads (no real + parsing of the events) so we don't need it. And so we don't need the + artificial Format_description_log_event of 3.23&4.x. + */ + } + } + else + if (test_for_non_eof_log_read_errors(error, &errmsg)) + goto err; + /* + else: it's EOF, nothing to do, go on reading next events, the + Format_description_log_event will be found naturally if it is written. + */ + /* reset the packet as we wrote to it in any case */ + packet->set("\0", 1, &my_charset_bin); + } /* end of if (pos > BIN_LOG_HEADER_SIZE); if false, the Format_description_log_event + event will be found naturally. */ + + /* seek to the requested position, to start the requested dump */ + my_b_seek(&log, pos); // Seek will done on next read + while (!net->error && net->vio != 0 && !thd->killed) { - pthread_mutex_t *log_lock = mysql_bin_log.get_log_lock(); - while (!(error = Log_event::read_log_event(&log, packet, log_lock))) { #ifndef DBUG_OFF @@ -435,7 +546,7 @@ impossible position"; goto err; } #endif - if (my_net_write(net, (char*)packet->ptr(), packet->length()) ) + if (my_net_write(net, (char*)packet->ptr(), packet->length())) { errmsg = "Failed on my_net_write()"; my_errno= ER_UNKNOWN_ERROR; @@ -456,34 +567,14 @@ impossible position"; } /* TODO: now that we are logging the offset, check to make sure - the recorded offset and the actual match + the recorded offset and the actual match. + Guilhem 2003-06: this is not true if this master is a slave <4.0.15 + running with --log-slave-updates, because then log_pos may be the offset + in the-master-of-this-master's binlog. 
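The dump loop above keeps reading events and, through test_for_non_eof_log_read_errors(), treats end-of-file as "nothing more to send for now" while any other read error aborts the dump with a message. A generic sketch of that loop shape, with invented read_event()/ReadResult/classify() names standing in for the server's routines:

#include <cstdio>

enum class ReadResult { Ok, Eof, Corrupt, IoError };   // invented result codes

// Stand-in for reading one event from the log; here it simply runs out of data.
static ReadResult read_event(int &events_left)
{
  return (events_left-- > 0) ? ReadResult::Ok : ReadResult::Eof;
}

// 0 means "EOF, just wait for more data"; anything else aborts with a message.
static int classify(ReadResult r, const char **errmsg)
{
  switch (r)
  {
  case ReadResult::Ok:
  case ReadResult::Eof:     return 0;
  case ReadResult::Corrupt: *errmsg = "bogus data in log event";     return 1;
  case ReadResult::IoError: *errmsg = "I/O error reading log event"; return 2;
  }
  return 3;
}

int main()
{
  int events_left = 3;
  const char *errmsg = "unknown error";
  ReadResult r;
  while ((r = read_event(events_left)) == ReadResult::Ok)
    std::puts("sent one event");                 // my_net_write() in the server
  if (classify(r, &errmsg))
  {
    std::fprintf(stderr, "dump aborted: %s\n", errmsg);
    return 1;
  }
  std::puts("EOF reached: wait for the binlog to grow");
  return 0;
}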
*/ - if (error != LOG_READ_EOF) - { - my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; - switch (error) { - case LOG_READ_BOGUS: - errmsg = "bogus data in log event"; - break; - case LOG_READ_TOO_LARGE: - errmsg = "log event entry exceeded max_allowed_packet; \ -Increase max_allowed_packet on master"; - break; - case LOG_READ_IO: - errmsg = "I/O error reading log event"; - break; - case LOG_READ_MEM: - errmsg = "memory allocation failed reading log event"; - break; - case LOG_READ_TRUNC: - errmsg = "binlog truncated in the middle of event"; - break; - default: - errmsg = "unknown error reading log event on the master"; - break; - } + + if (test_for_non_eof_log_read_errors(error, &errmsg)) goto err; - } if (!(flags & BINLOG_DUMP_NON_BLOCK) && mysql_bin_log.is_active(log_file_name)) @@ -617,8 +708,13 @@ Increase max_allowed_packet on master"; (void) my_close(file, MYF(MY_WME)); /* - Even if the previous log contained a Rotate_log_event, we still fake - one. + Call fake_rotate_event() in case the previous log (the one which we have + just finished reading) did not contain a Rotate event (for example (I + don't know any other example) the previous log was the last one before + the master was shutdown & restarted). + This way we tell the slave about the new log's name and position. + If the binlog is 5.0, the next event we are going to read and send is + Format_description_log_event. */ if ((file=open_binlog(&log, log_file_name, &errmsg)) < 0 || fake_rotate_event(net, packet, log_file_name, BIN_LOG_HEADER_SIZE, &errmsg)) @@ -657,7 +753,7 @@ err: pthread_mutex_unlock(&LOCK_thread_count); if (file >= 0) (void) my_close(file, MYF(MY_WME)); - send_error(thd, my_errno, errmsg); + my_message(my_errno, errmsg, MYF(0)); DBUG_VOID_RETURN; } @@ -774,7 +870,7 @@ int start_slave(THD* thd , MASTER_INFO* mi, bool net_report) if (slave_errno) { if (net_report) - send_error(thd, slave_errno); + my_message(slave_errno, ER(slave_errno), MYF(0)); DBUG_RETURN(1); } else if (net_report) @@ -824,7 +920,7 @@ int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report ) if (slave_errno) { if (net_report) - send_error(thd, slave_errno); + my_message(slave_errno, ER(slave_errno), MYF(0)); return 1; } else if (net_report) @@ -905,7 +1001,7 @@ int reset_slave(THD *thd, MASTER_INFO* mi) err: unlock_slave_threads(mi); - if (error) + if (error) my_error(sql_errno, MYF(0), errmsg); DBUG_RETURN(error); } @@ -953,13 +1049,13 @@ void kill_zombie_dump_threads(uint32 slave_server_id) it will be slow because it will iterate through the list again. We just to do kill the thread ourselves. 
*/ - tmp->awake(1/*prepare to die*/); + tmp->awake(THD::KILL_QUERY); pthread_mutex_unlock(&tmp->LOCK_delete); } } -int change_master(THD* thd, MASTER_INFO* mi) +bool change_master(THD* thd, MASTER_INFO* mi) { int thread_mask; const char* errmsg= 0; @@ -970,9 +1066,9 @@ int change_master(THD* thd, MASTER_INFO* mi) init_thread_mask(&thread_mask,mi,0 /*not inverse*/); if (thread_mask) // We refuse if any slave thread is running { - net_printf(thd,ER_SLAVE_MUST_STOP); + my_message(ER_SLAVE_MUST_STOP, ER(ER_SLAVE_MUST_STOP), MYF(0)); unlock_slave_threads(mi); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } thd->proc_info = "Changing master"; @@ -981,9 +1077,9 @@ int change_master(THD* thd, MASTER_INFO* mi) if (init_master_info(mi, master_info_file, relay_log_info_file, 0, thread_mask)) { - send_error(thd, ER_MASTER_INFO); + my_message(ER_MASTER_INFO, ER(ER_MASTER_INFO), MYF(0)); unlock_slave_threads(mi); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } /* @@ -1100,9 +1196,9 @@ int change_master(THD* thd, MASTER_INFO* mi) 0 /* not only reset, but also reinit */, &errmsg)) { - net_printf(thd, 0, "Failed purging old relay logs: %s",errmsg); + my_error(ER_RELAY_LOG_FAIL, MYF(0), errmsg); unlock_slave_threads(mi); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } else @@ -1114,11 +1210,11 @@ int change_master(THD* thd, MASTER_INFO* mi) mi->rli.group_relay_log_name, mi->rli.group_relay_log_pos, 0 /*no data lock*/, - &msg)) + &msg, 0)) { - net_printf(thd,0,"Failed initializing relay log position: %s",msg); + my_error(ER_RELAY_LOG_INIT, MYF(0), msg); unlock_slave_threads(mi); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } mi->rli.group_master_log_pos = mi->master_log_pos; @@ -1160,14 +1256,15 @@ int change_master(THD* thd, MASTER_INFO* mi) unlock_slave_threads(mi); thd->proc_info = 0; send_ok(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } int reset_master(THD* thd) { if (!mysql_bin_log.is_open()) { - my_error(ER_FLUSH_MASTER_BINLOG_CLOSED, MYF(ME_BELL+ME_WAITTANG)); + my_message(ER_FLUSH_MASTER_BINLOG_CLOSED, + ER(ER_FLUSH_MASTER_BINLOG_CLOSED), MYF(ME_BELL+ME_WAITTANG)); return 1; } return mysql_bin_log.reset_logs(thd); @@ -1191,7 +1288,7 @@ int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1, } -int show_binlog_events(THD* thd) +bool show_binlog_events(THD* thd) { Protocol *protocol= thd->protocol; DBUG_ENTER("show_binlog_events"); @@ -1199,10 +1296,13 @@ int show_binlog_events(THD* thd) const char *errmsg = 0; IO_CACHE log; File file = -1; + Format_description_log_event *description_event= new + Format_description_log_event(3); /* MySQL 4.0 by default */ Log_event::init_show_field_list(&field_list); - if (protocol-> send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); if (mysql_bin_log.is_open()) { @@ -1237,10 +1337,38 @@ int show_binlog_events(THD* thd) goto err; pthread_mutex_lock(log_lock); + + /* + open_binlog() sought to position 4. + Read the first event in case it's a Format_description_log_event, to know the + format. If there's no such event, we are 3.23 or 4.x. This code, like + before, can't read 3.23 binlogs. + This code will fail on a mixed relay log (one which has Format_desc then + Rotate then Format_desc). 
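
A toy sketch of the format-detection step SHOW BINLOG EVENTS performs above: probe the first event of the log, adopt its Format_description if one is present, otherwise keep the 4.0-style (version 3) default and then seek back to the requested position. The Event struct and detect_format() below are hypothetical stand-ins for Log_event::read_log_event() and Format_description_log_event; only the decision logic is illustrated.

#include <cstdio>
#include <vector>

enum EventType { QUERY_EVENT = 2, FORMAT_DESCRIPTION_EVENT = 15 };

struct Event { EventType type; int version; };

/* Probe the first event: if it is a Format_description event, adopt its
   format; otherwise assume the pre-5.0 default ("MySQL 4.0 by default"). */
static int detect_format(const std::vector<Event> &log)
{
  int description_version = 3;
  if (!log.empty() && log[0].type == FORMAT_DESCRIPTION_EVENT)
    description_version = log[0].version;
  return description_version;   /* caller then re-reads from the requested pos */
}

int main()
{
  std::vector<Event> old_log = { { QUERY_EVENT, 0 } };
  std::vector<Event> new_log = { { FORMAT_DESCRIPTION_EVENT, 4 },
                                 { QUERY_EVENT, 0 } };
  printf("old log parsed as v%d, new log as v%d\n",
         detect_format(old_log), detect_format(new_log));
  return 0;
}
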
+ */ + + ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event); + if (ev) + { + if (ev->get_type_code() == FORMAT_DESCRIPTION_EVENT) + { + delete description_event; + description_event= (Format_description_log_event*) ev; + } + else + delete ev; + } + my_b_seek(&log, pos); + if (!description_event->is_valid()) + { + errmsg="Invalid Format_description event; could be out of memory"; + goto err; + } + for (event_count = 0; - (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,0)); ) + (ev = Log_event::read_log_event(&log,(pthread_mutex_t*)0,description_event)); ) { if (event_count >= limit_start && ev->net_send(protocol, linfo.log_file_name, pos)) @@ -1269,6 +1397,7 @@ int show_binlog_events(THD* thd) } err: + delete description_event; if (file >= 0) { end_io_cache(&log); @@ -1278,19 +1407,19 @@ err: if (errmsg) { my_error(ER_ERROR_WHEN_EXECUTING_COMMAND, MYF(0), - "SHOW BINLOG EVENTS", errmsg); - DBUG_RETURN(-1); + "SHOW BINLOG EVENTS", errmsg); + DBUG_RETURN(TRUE); } send_eof(thd); pthread_mutex_lock(&LOCK_thread_count); thd->current_linfo = 0; pthread_mutex_unlock(&LOCK_thread_count); - DBUG_RETURN(0); + DBUG_RETURN(TRUE); } -int show_binlog_info(THD* thd) +bool show_binlog_info(THD* thd) { Protocol *protocol= thd->protocol; DBUG_ENTER("show_binlog_info"); @@ -1301,8 +1430,9 @@ int show_binlog_info(THD* thd) field_list.push_back(new Item_empty_string("Binlog_Do_DB",255)); field_list.push_back(new Item_empty_string("Binlog_Ignore_DB",255)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); protocol->prepare_for_resend(); if (mysql_bin_log.is_open()) @@ -1315,10 +1445,10 @@ int show_binlog_info(THD* thd) protocol->store(&binlog_do_db); protocol->store(&binlog_ignore_db); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -1330,11 +1460,11 @@ int show_binlog_info(THD* thd) thd Thread specific variable RETURN VALUES - 0 ok - 1 error (Error message sent to client) + FALSE OK + TRUE error */ -int show_binlogs(THD* thd) +bool show_binlogs(THD* thd) { IO_CACHE *index_file; char fname[FN_REFLEN]; @@ -1345,14 +1475,14 @@ int show_binlogs(THD* thd) if (!mysql_bin_log.is_open()) { - //TODO: Replace with ER() error message - send_error(thd, 0, "You are not using binary logging"); + my_message(ER_NO_BINARY_LOGGING, ER(ER_NO_BINARY_LOGGING), MYF(0)); return 1; } field_list.push_back(new Item_empty_string("Log_name", 255)); - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); mysql_bin_log.lock_index(); index_file=mysql_bin_log.get_index_file(); @@ -1370,11 +1500,11 @@ int show_binlogs(THD* thd) } mysql_bin_log.unlock_index(); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: mysql_bin_log.unlock_index(); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } diff --git a/sql/sql_repl.h b/sql/sql_repl.h index 21b3d2955f7..e8497fee343 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -36,24 +36,24 @@ extern I_List<i_string> binlog_do_db, binlog_ignore_db; extern int max_binlog_dump_events; extern my_bool opt_sporadic_binlog_dump_fail; -#define KICK_SLAVE(thd) { pthread_mutex_lock(&(thd)->LOCK_delete); (thd)->awake(0 /* do not prepare to die*/); pthread_mutex_unlock(&(thd)->LOCK_delete); } +#define KICK_SLAVE(thd) { pthread_mutex_lock(&(thd)->LOCK_delete); 
(thd)->awake(THD::NOT_KILLED); pthread_mutex_unlock(&(thd)->LOCK_delete); } File open_binlog(IO_CACHE *log, const char *log_file_name, const char **errmsg); int start_slave(THD* thd, MASTER_INFO* mi, bool net_report); int stop_slave(THD* thd, MASTER_INFO* mi, bool net_report); -int change_master(THD* thd, MASTER_INFO* mi); -int show_binlog_events(THD* thd); +bool change_master(THD* thd, MASTER_INFO* mi); +bool show_binlog_events(THD* thd); int cmp_master_pos(const char* log_file_name1, ulonglong log_pos1, const char* log_file_name2, ulonglong log_pos2); int reset_slave(THD *thd, MASTER_INFO* mi); int reset_master(THD* thd); -int purge_master_logs(THD* thd, const char* to_log); -int purge_master_logs_before_date(THD* thd, time_t purge_time); +bool purge_master_logs(THD* thd, const char* to_log); +bool purge_master_logs_before_date(THD* thd, time_t purge_time); bool log_in_use(const char* log_name); void adjust_linfo_offsets(my_off_t purge_offset); -int show_binlogs(THD* thd); +bool show_binlogs(THD* thd); extern int init_master_info(MASTER_INFO* mi); void kill_zombie_dump_threads(uint32 slave_server_id); int check_binlog_magic(IO_CACHE* log, const char** errmsg); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 03e322d28ee..dce7c754f03 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -30,24 +30,45 @@ const char *join_type_str[]={ "UNKNOWN","system","const","eq_ref","ref", "MAYBE_REF","ALL","range","index","fulltext", - "ref_or_null","unique_subquery","index_subquery" + "ref_or_null","unique_subquery","index_subquery", + "index_merge" }; const key_map key_map_empty(0); const key_map key_map_full(~0); static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array); -static bool make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, +static bool make_join_statistics(JOIN *join, TABLE_LIST *leaves, COND *conds, DYNAMIC_ARRAY *keyuse); static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse, JOIN_TAB *join_tab, uint tables, COND *conds, + COND_EQUAL *cond_equal, table_map table_map, SELECT_LEX *select_lex); static int sort_keyuse(KEYUSE *a,KEYUSE *b); static void set_position(JOIN *join,uint index,JOIN_TAB *table,KEYUSE *key); static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, table_map used_tables); -static void find_best_combination(JOIN *join,table_map rest_tables); +static void choose_plan(JOIN *join,table_map join_tables); + +static void best_access_path(JOIN *join, JOIN_TAB *s, THD *thd, + table_map remaining_tables, uint idx, + double record_count, double read_time); +static void optimize_straight_join(JOIN *join, table_map join_tables); +static void greedy_search(JOIN *join, table_map remaining_tables, + uint depth, uint prune_level); +static void best_extension_by_limited_search(JOIN *join, + table_map remaining_tables, + uint idx, double record_count, + double read_time, uint depth, + uint prune_level); +static uint determine_search_depth(JOIN* join); +static int join_tab_cmp(const void* ptr1, const void* ptr2); +static int join_tab_cmp_straight(const void* ptr1, const void* ptr2); +/* + TODO: 'find_best' is here only temporarily until 'greedy_search' is + tested and approved. 
+*/ static void find_best(JOIN *join,table_map rest_tables,uint index, double record_count,double read_time); static uint cache_record_length(JOIN *join,uint index); @@ -58,6 +79,7 @@ static store_key *get_store_key(THD *thd, KEY_PART_INFO *key_part, char *key_buff, uint maybe_null); static bool make_simple_join(JOIN *join,TABLE *tmp_table); +static void make_outerjoin_info(JOIN *join); static bool make_join_select(JOIN *join,SQL_SELECT *select,COND *item); static void make_join_readinfo(JOIN *join,uint options); static bool only_eq_ref_tables(JOIN *join, ORDER *order, table_map tables); @@ -70,8 +92,19 @@ static int return_zero_rows(JOIN *join, select_result *res,TABLE_LIST *tables, uint select_options, const char *info, Item *having, Procedure *proc, SELECT_LEX_UNIT *unit); -static COND *optimize_cond(THD *thd, COND *conds, +static COND *build_equal_items(THD *thd, COND *cond, + COND_EQUAL *inherited, + List<TABLE_LIST> *join_list, + COND_EQUAL **cond_equal_ref); +static COND* substitute_for_best_equal_field(COND *cond, + COND_EQUAL *cond_equal, + void *table_join_idx); +static COND *simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, + COND *conds, bool top); +static COND *optimize_cond(JOIN *join, COND *conds, + List<TABLE_LIST> *join_list, Item::cond_result *cond_value); +static bool resolve_nested_join (TABLE_LIST *table); static COND *remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value); static bool const_expression_in_where(COND *conds,Item *item, Item **comp_item); @@ -116,7 +149,7 @@ static int join_read_next_same_or_null(READ_RECORD *info); static COND *make_cond_for_table(COND *cond,table_map table, table_map used_table); static Item* part_of_refkey(TABLE *form,Field *field); -static uint find_shortest_key(TABLE *table, const key_map *usable_keys); +uint find_shortest_key(TABLE *table, const key_map *usable_keys); static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order, ha_rows select_limit, bool no_changes); static int create_sort_index(THD *thd, JOIN *join, ORDER *order, @@ -127,6 +160,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *entry, Field **field, ulong offset,Item *having); static int remove_dup_with_hash_index(THD *thd,TABLE *table, uint field_count, Field **first_field, + ulong key_length,Item *having); static int join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count); static ulong used_blob_length(CACHE_FIELD **ptr); @@ -159,26 +193,37 @@ static void init_tmptable_sum_functions(Item_sum **func); static void update_tmptable_sum_func(Item_sum **func,TABLE *tmp_table); static void copy_sum_funcs(Item_sum **func_ptr); static bool add_ref_to_table_cond(THD *thd, JOIN_TAB *join_tab); +static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr); static bool init_sum_functions(Item_sum **func, Item_sum **end); static bool update_sum_func(Item_sum **func); static void select_describe(JOIN *join, bool need_tmp_table,bool need_order, bool distinct, const char *message=NullS); static Item *remove_additional_cond(Item* conds); +static void add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab); /* This handles SELECT with and without UNION */ -int handle_select(THD *thd, LEX *lex, select_result *result) +bool handle_select(THD *thd, LEX *lex, select_result *result, + ulong setup_tables_done_option) { - int res; + bool res; register SELECT_LEX *select_lex = &lex->select_lex; DBUG_ENTER("handle_select"); if (select_lex->next_select()) - res=mysql_union(thd, lex, result, &lex->unit); + res= mysql_union(thd, lex, result, &lex->unit, 
setup_tables_done_option); else + { + SELECT_LEX_UNIT *unit= &lex->unit; + unit->set_limit(unit->global_parameters, select_lex); + /* + 'options' of mysql_select will be set in JOIN, as far as JOIN for + every PS/SP execution new, we will not need reset this flag if + setup_tables_done_option changed for next rexecution + */ res= mysql_select(thd, &select_lex->ref_pointer_array, (TABLE_LIST*) select_lex->table_list.first, select_lex->with_wild, select_lex->item_list, @@ -189,17 +234,18 @@ int handle_select(THD *thd, LEX *lex, select_result *result) (ORDER*) select_lex->group_list.first, select_lex->having, (ORDER*) lex->proc_list.first, - select_lex->options | thd->options, - result, &(lex->unit), &(lex->select_lex)); - - /* Don't set res if it's -1 as we may want this later */ + select_lex->options | thd->options | + setup_tables_done_option, + result, unit, select_lex); + } DBUG_PRINT("info",("res: %d report_error: %d", res, thd->net.report_error)); - if (thd->net.report_error || res<0) + res|= thd->net.report_error; + if (unlikely(res)) { - result->send_error(0, NullS); + /* If we had a another error reported earlier then this will be ignored */ + result->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR)); result->abort(); - res= 1; // Error sent to client } DBUG_RETURN(res); } @@ -210,6 +256,7 @@ int handle_select(THD *thd, LEX *lex, select_result *result) */ inline int setup_without_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + TABLE_LIST *leaves, List<Item> &fields, List<Item> &all_fields, COND **conds, @@ -222,11 +269,13 @@ inline int setup_without_group(THD *thd, Item **ref_pointer_array, save_allow_sum_func= thd->allow_sum_func; thd->allow_sum_func= 0; - res= (setup_conds(thd, tables, conds) || - setup_order(thd, ref_pointer_array, tables, fields, all_fields, - order) || - setup_group(thd, ref_pointer_array, tables, fields, all_fields, - group, hidden_group_fields)); + res= setup_conds(thd, tables, leaves, conds); + thd->allow_sum_func= save_allow_sum_func; + res= res || setup_order(thd, ref_pointer_array, tables, fields, all_fields, + order); + thd->allow_sum_func= 0; + res= res || setup_group(thd, ref_pointer_array, tables, fields, all_fields, + group, hidden_group_fields); thd->allow_sum_func= save_allow_sum_func; DBUG_RETURN(res); } @@ -264,17 +313,21 @@ JOIN::prepare(Item ***rref_pointer_array, tables_list= tables_init; select_lex= select_lex_arg; select_lex->join= this; + join_list= &select_lex->top_join_list; union_part= (unit_arg->first_select()->next_select() != 0); /* Check that all tables, fields, conds and order are ok */ - if (setup_tables(tables_list) || + if ((!(select_options & OPTION_SETUP_TABLES_DONE) && + setup_tables(thd, tables_list, &conds, &select_lex->leaf_tables, + FALSE, FALSE)) || setup_wild(thd, tables_list, fields_list, &all_fields, wild_num) || select_lex->setup_ref_array(thd, og_num) || setup_fields(thd, (*rref_pointer_array), tables_list, fields_list, 1, &all_fields, 1) || - setup_without_group(thd, (*rref_pointer_array), tables_list, fields_list, - all_fields, &conds, order, group_list, + setup_without_group(thd, (*rref_pointer_array), tables_list, + select_lex->leaf_tables, fields_list, + all_fields, &conds, order, group_list, &hidden_group_fields)) DBUG_RETURN(-1); /* purecov: inspected */ @@ -295,15 +348,19 @@ JOIN::prepare(Item ***rref_pointer_array, having->split_sum_func(thd, ref_pointer_array, all_fields); } - // Is it subselect + if (!thd->lex->view_prepare_mode) { Item_subselect *subselect; + /* Is it subselect? 
*/ if ((subselect= select_lex->master_unit()->item)) { Item_subselect::trans_res res; if ((res= subselect->select_transformer(this)) != Item_subselect::RES_OK) + { + select_lex->fix_prepare_information(thd, &conds); DBUG_RETURN((res == Item_subselect::RES_ERROR)); + } } } @@ -332,12 +389,15 @@ JOIN::prepare(Item ***rref_pointer_array, } if (flag == 3) { - my_error(ER_MIX_OF_GROUP_FUNC_AND_FIELDS,MYF(0)); + my_message(ER_MIX_OF_GROUP_FUNC_AND_FIELDS, + ER(ER_MIX_OF_GROUP_FUNC_AND_FIELDS), MYF(0)); DBUG_RETURN(-1); } } TABLE_LIST *table_ptr; - for (table_ptr= tables_list ; table_ptr ; table_ptr= table_ptr->next) + for (table_ptr= select_lex->leaf_tables; + table_ptr; + table_ptr= table_ptr->next_leaf) tables++; } { @@ -359,21 +419,22 @@ JOIN::prepare(Item ***rref_pointer_array, { if (!test_if_subpart(procedure->group,group_list)) { /* purecov: inspected */ - my_message(0,"Can't handle procedures with differents groups yet", - MYF(0)); /* purecov: inspected */ + my_message(ER_DIFF_GROUPS_PROC, ER(ER_DIFF_GROUPS_PROC), + MYF(0)); /* purecov: inspected */ goto err; /* purecov: inspected */ } } #ifdef NOT_NEEDED else if (!group_list && procedure->flags & PROC_GROUP) { - my_message(0,"Select must have a group with this procedure",MYF(0)); + my_message(ER_NO_GROUP_FOR_PROC, MYF(0)); goto err; } #endif if (order && (procedure->flags & PROC_NO_SORT)) { /* purecov: inspected */ - my_message(0,"Can't use order with this procedure",MYF(0)); /* purecov: inspected */ + my_message(ER_ORDER_WITH_PROC, ER(ER_ORDER_WITH_PROC), + MYF(0)); /* purecov: inspected */ goto err; /* purecov: inspected */ } } @@ -406,6 +467,7 @@ JOIN::prepare(Item ***rref_pointer_array, if (alloc_func_list()) goto err; + select_lex->fix_prepare_information(thd, &conds); DBUG_RETURN(0); // All OK err: @@ -451,7 +513,6 @@ bool JOIN::test_in_subselect(Item **where) return 0; } - /* global select optimisation. return 0 - success @@ -488,8 +549,32 @@ JOIN::optimize() } } #endif + SELECT_LEX *sel= thd->lex->current_select; + if (sel->first_cond_optimization) + { + /* + The following code will allocate the new items in a permanent + MEMROOT for prepared statements and stored procedures. + */ + + Item_arena *arena= thd->current_arena, backup; + if (arena->is_conventional()) + arena= 0; // For easier test + else + thd->set_n_backup_item_arena(arena, &backup); + + sel->first_cond_optimization= 0; - conds= optimize_cond(thd, conds, &cond_value); + /* Convert all outer joins to inner joins if possible */ + conds= simplify_joins(this, join_list, conds, TRUE); + + sel->prep_where= conds ? conds->copy_andor_structure(thd) : 0; + + if (arena) + thd->restore_backup_item_arena(arena, &backup); + } + + conds= optimize_cond(this, conds, join_list, &cond_value); if (thd->net.report_error) { error= 1; @@ -500,6 +585,7 @@ JOIN::optimize() if (cond_value == Item::COND_FALSE || (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS))) { /* Impossible cond */ + DBUG_PRINT("info", ("Impossible WHERE")); zero_result_cause= "Impossible WHERE"; error= 0; DBUG_RETURN(0); @@ -513,33 +599,37 @@ JOIN::optimize() opt_sum_query() returns -1 if no rows match to the WHERE conditions, or 1 if all items were resolved, or 0, or an error number HA_ERR_... 
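
A minimal sketch of the arena-switching pattern used for first_cond_optimization above: transformations that must survive re-execution (simplify_joins(), the prep_where copy) are allocated on the statement-lifetime root only once, while everything else goes back to the per-execution root. Arena, Session and alloc_item() are illustrative stand-ins, not the Item_arena/THD API.

#include <cstdio>
#include <string>
#include <vector>

/* Toy arena: just records which pool an allocation went to. */
struct Arena { const char *name; std::vector<std::string> objs; };

struct Session
{
  Arena *current;    /* where new items are allocated right now */
  Arena  runtime;    /* freed after every execution */
  Arena  permanent;  /* lives as long as the prepared statement */
};

static void alloc_item(Session &s, const std::string &what)
{ s.current->objs.push_back(what); }

static void optimize(Session &s, bool &first_cond_optimization)
{
  if (first_cond_optimization)
  {
    Arena *backup = s.current;     /* backup the current arena */
    s.current = &s.permanent;      /* permanent transformations go here */
    alloc_item(s, "simplified join nest");
    alloc_item(s, "prep_where copy");
    first_cond_optimization = false;
    s.current = backup;            /* restore: later items are per-execution */
  }
  alloc_item(s, "per-execution condition");
}

int main()
{
  Session s = { 0, { "runtime", {} }, { "permanent", {} } };
  s.current = &s.runtime;
  bool first = true;
  optimize(s, first);   /* first execution: permanent + runtime allocations */
  optimize(s, first);   /* re-execution: runtime allocations only */
  printf("permanent=%zu runtime=%zu\n",
         s.permanent.objs.size(), s.runtime.objs.size());
  return 0;
}
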
*/ - if ((res=opt_sum_query(tables_list, all_fields, conds))) + if ((res=opt_sum_query(select_lex->leaf_tables, all_fields, conds))) { if (res > 1) { + DBUG_PRINT("error",("Error from opt_sum_query")); DBUG_RETURN(1); } if (res < 0) { + DBUG_PRINT("info",("No matching min/max row")); zero_result_cause= "No matching min/max row"; error=0; DBUG_RETURN(0); } + DBUG_PRINT("info",("Select tables optimized away")); zero_result_cause= "Select tables optimized away"; tables_list= 0; // All tables resolved } } if (!tables_list) { + DBUG_PRINT("info",("No tables")); error= 0; DBUG_RETURN(0); } error= -1; // Error is sent to client - sort_by_table= get_sort_by_table(order, group_list, tables_list); + sort_by_table= get_sort_by_table(order, group_list, select_lex->leaf_tables); /* Calculate how to do the join */ thd->proc_info= "statistics"; - if (make_join_statistics(this, tables_list, conds, &keyuse) || + if (make_join_statistics(this, select_lex->leaf_tables, conds, &keyuse) || thd->is_fatal_error) { DBUG_PRINT("error",("Error: make_join_statistics() failed")); @@ -576,20 +666,49 @@ JOIN::optimize() if (const_tables && !thd->locked_tables && !(select_options & SELECT_NO_UNLOCK)) mysql_unlock_some_tables(thd, table, const_tables); - if (!conds && outer_join) { /* Handle the case where we have an OUTER JOIN without a WHERE */ conds=new Item_int((longlong) 1,1); // Always true } select=make_select(*table, const_table_map, - const_table_map, conds, &error); + const_table_map, conds, &error, true); if (error) { /* purecov: inspected */ error= -1; /* purecov: inspected */ DBUG_PRINT("error",("Error: make_select() failed")); DBUG_RETURN(1); } + + make_outerjoin_info(this); + + /* + Among the equal fields belonging to the same multiple equality + choose the one that is to be retrieved first and substitute + all references to these in where condition for a reference for + the selected field. + */ + if (conds) + { + conds= substitute_for_best_equal_field(conds, cond_equal, map2table); + conds->update_used_tables(); + DBUG_EXECUTE("where", print_where(conds, "after substitute_best_equal");); + } + /* + Permorm the the optimization on fields evaluation mentioned above + for all on expressions. + */ + for (JOIN_TAB *tab= join_tab + const_tables; tab < join_tab + tables ; tab++) + { + if (*tab->on_expr_ref) + { + *tab->on_expr_ref= substitute_for_best_equal_field(*tab->on_expr_ref, + tab->cond_equal, + map2table); + (*tab->on_expr_ref)->update_used_tables(); + } + } + if (make_join_select(this, select, conds)) { zero_result_cause= @@ -891,13 +1010,15 @@ JOIN::optimize() if (create_sort_index(thd, this, group_list, HA_POS_ERROR, HA_POS_ERROR) || alloc_group_fields(this, group_list) || - make_sum_func_list(all_fields, fields_list, 1)) + make_sum_func_list(all_fields, fields_list, 1) || + setup_sum_funcs(thd, sum_funcs)) DBUG_RETURN(1); group_list=0; } else { - if (make_sum_func_list(all_fields, fields_list, 0)) + if (make_sum_func_list(all_fields, fields_list, 0) || + setup_sum_funcs(thd, sum_funcs)) DBUG_RETURN(1); if (!group_list && ! 
exec_tmp_table1->distinct && order && simple_order) { @@ -964,16 +1085,16 @@ JOIN::reinit() { DBUG_ENTER("JOIN::reinit"); /* TODO move to unit reinit */ - unit->offset_limit_cnt =select_lex->offset_limit; - unit->select_limit_cnt =select_lex->select_limit+select_lex->offset_limit; - if (unit->select_limit_cnt < select_lex->select_limit) - unit->select_limit_cnt= HA_POS_ERROR; // no limit - if (unit->select_limit_cnt == HA_POS_ERROR) - select_lex->options&= ~OPTION_FOUND_ROWS; - - if (setup_tables(tables_list)) - DBUG_RETURN(1); - + unit->set_limit(select_lex, select_lex); + + /* conds should not be used here, it is added just for safety */ + if (tables_list) + { + if (setup_tables(thd, tables_list, &conds, &select_lex->leaf_tables, + TRUE, FALSE)) + DBUG_RETURN(1); + } + /* Reset of sum functions */ first_record= 0; @@ -1051,7 +1172,8 @@ JOIN::exec() (zero_result_cause?zero_result_cause:"No tables used")); else { - result->send_fields(fields_list,1); + result->send_fields(fields_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); if (!having || having->val_int()) { if (do_send_rows && (procedure ? (procedure->send_row(fields_list) || @@ -1074,7 +1196,7 @@ JOIN::exec() if (zero_result_cause) { - (void) return_zero_rows(this, result, tables_list, fields_list, + (void) return_zero_rows(this, result, select_lex->leaf_tables, fields_list, send_row_on_empty_set(), select_options, zero_result_cause, @@ -1118,6 +1240,12 @@ JOIN::exec() List<Item> *curr_fields_list= &fields_list; TABLE *curr_tmp_table= 0; + if ((curr_join->select_lex->options & OPTION_SCHEMA_TABLE) && + get_schema_tables_result(curr_join)) + { + DBUG_VOID_RETURN; + } + /* Create a tmp table if distinct or if the sort is too complicated */ if (need_tmp) { @@ -1127,7 +1255,7 @@ JOIN::exec() /* Copy data to the temporary table */ thd->proc_info= "Copying to tmp table"; - + DBUG_PRINT("info", ("%s", thd->proc_info)); if ((tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) { error= tmp_error; @@ -1255,6 +1383,7 @@ JOIN::exec() } thd->proc_info="Copying to group table"; + DBUG_PRINT("info", ("%s", thd->proc_info)); tmp_error= -1; if (curr_join != this) { @@ -1271,7 +1400,8 @@ JOIN::exec() } } if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, - 1) || + 1, TRUE) || + setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) || (tmp_error= do_select(curr_join, (List<Item> *) 0, curr_tmp_table, 0))) { @@ -1359,7 +1489,9 @@ JOIN::exec() set_items_ref_array(items3); if (curr_join->make_sum_func_list(*curr_all_fields, *curr_fields_list, - 1) || thd->is_fatal_error) + 1, TRUE) || + setup_sum_funcs(curr_join->thd, curr_join->sum_funcs) || + thd->is_fatal_error) DBUG_VOID_RETURN; } if (curr_join->group_list || curr_join->order) @@ -1424,11 +1556,9 @@ JOIN::exec() /* table->keyuse is set in the case there was an original WHERE clause on the table that was optimized away. - table->on_expr tells us that it was a LEFT JOIN and there will be - at least one row generated from the table. */ if (curr_table->select_cond || - (curr_table->keyuse && !curr_table->on_expr)) + (curr_table->keyuse && !curr_table->first_inner)) { /* We have to sort all rows */ curr_join->select_limit= HA_POS_ERROR; @@ -1458,12 +1588,46 @@ JOIN::exec() DBUG_VOID_RETURN; } } + /* XXX: When can we have here thd->net.report_error not zero? */ + if (thd->net.report_error) + { + error= thd->net.report_error; + DBUG_VOID_RETURN; + } curr_join->having= curr_join->tmp_having; - thd->proc_info="Sending data"; - error= thd->net.report_error ? 
-1 : - do_select(curr_join, curr_fields_list, NULL, procedure); - thd->limit_found_rows= curr_join->send_records; - thd->examined_row_count= curr_join->examined_rows; + curr_join->fields= curr_fields_list; + curr_join->procedure= procedure; + + if (unit == &thd->lex->unit && + (unit->fake_select_lex == 0 || select_lex == unit->fake_select_lex) && + thd->cursor && tables != const_tables) + { + /* + We are here if this is JOIN::exec for the last select of the main unit + and the client requested to open a cursor. + We check that not all tables are constant because this case is not + handled by do_select() separately, and this case is not implemented + for cursors yet. + */ + DBUG_ASSERT(error == 0); + /* + curr_join is used only for reusable joins - that is, + to perform SELECT for each outer row (like in subselects). + This join is main, so we know for sure that curr_join == join. + */ + DBUG_ASSERT(curr_join == this); + /* Open cursor for the last join sweep */ + error= thd->cursor->open(this); + } + else + { + thd->proc_info="Sending data"; + DBUG_PRINT("info", ("%s", thd->proc_info)); + error= do_select(curr_join, curr_fields_list, NULL, procedure); + thd->limit_found_rows= curr_join->send_records; + thd->examined_row_count= curr_join->examined_rows; + } + DBUG_VOID_RETURN; } @@ -1492,6 +1656,7 @@ JOIN::cleanup() tmp_table_param.copy_field=0; DBUG_RETURN(tmp_join->cleanup()); } + cond_equal= 0; lock=0; // It's faster to unlock later join_free(1); @@ -1512,7 +1677,299 @@ JOIN::cleanup() } +/************************* Cursor ******************************************/ + +void +Cursor::init_from_thd(THD *thd) +{ + /* + We need to save and reset thd->mem_root, otherwise it'll be freed + later in mysql_parse. + + We can't just change the thd->mem_root here as we want to keep the things + that is already allocated in thd->mem_root for Cursor::fetch() + */ + main_mem_root= *thd->mem_root; + /* Allocate new memory root for thd */ + init_sql_alloc(thd->mem_root, + thd->variables.query_alloc_block_size, + thd->variables.query_prealloc_size); + + /* + The same is true for open tables and lock: save tables and zero THD + pointers to prevent table close in close_thread_tables (This is a part + of the temporary solution to make cursors work with minimal changes to + the current source base). + */ + derived_tables= thd->derived_tables; + open_tables= thd->open_tables; + lock= thd->lock; + query_id= thd->query_id; + free_list= thd->free_list; + reset_thd(thd); + /* + XXX: thd->locked_tables is not changed. + What problems can we have with it if cursor is open? + */ + /* + TODO: grab thd->free_list here? + */ +} + + +void +Cursor::init_thd(THD *thd) +{ + DBUG_ASSERT(thd->derived_tables == 0); + thd->derived_tables= derived_tables; + + DBUG_ASSERT(thd->open_tables == 0); + thd->open_tables= open_tables; + + DBUG_ASSERT(thd->lock== 0); + thd->lock= lock; + thd->query_id= query_id; +} + + +void +Cursor::reset_thd(THD *thd) +{ + thd->derived_tables= 0; + thd->open_tables= 0; + thd->lock= 0; + thd->free_list= 0; +} + + +int +Cursor::open(JOIN *join_arg) +{ + join= join_arg; + THD *thd= join->thd; + /* First non-constant table */ + JOIN_TAB *join_tab= join->join_tab + join->const_tables; + DBUG_ENTER("Cursor::open"); + + /* + Send fields description to the client; server_status is sent + in 'EOF' packet, which ends send_fields(). 
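
A minimal sketch of the state hand-over Cursor::init_from_thd()/init_thd()/reset_thd() perform above, with simplified stand-in types: at open time the cursor takes the open tables, derived tables and lock away from the session so close_thread_tables() leaves them alone, lends them back for each fetch, and detaches them again afterwards. The member and function names below are hypothetical.

#include <cassert>
#include <cstdio>

struct Table {};
struct Lock  {};

struct Session { Table *open_tables; Table *derived_tables; Lock *lock; };

struct Cursor
{
  Table *open_tables, *derived_tables;
  Lock  *lock;

  /* Take ownership of the per-statement state and zero it in the session. */
  void init_from_session(Session &s)
  {
    open_tables = s.open_tables; derived_tables = s.derived_tables; lock = s.lock;
    s.open_tables = 0; s.derived_tables = 0; s.lock = 0;
  }
  /* Give the state back for the duration of a FETCH. */
  void init_session(Session &s)
  {
    assert(s.open_tables == 0 && s.derived_tables == 0 && s.lock == 0);
    s.open_tables = open_tables; s.derived_tables = derived_tables; s.lock = lock;
  }
  /* And take it away again when the fetch is done. */
  void reset_session(Session &s)
  { s.open_tables = 0; s.derived_tables = 0; s.lock = 0; }
};

int main()
{
  Table t, d; Lock l;
  Session s = { &t, &d, &l };
  Cursor c;
  c.init_from_session(s);   /* at cursor open */
  c.init_session(s);        /* before serving a FETCH */
  c.reset_session(s);       /* after the FETCH */
  printf("session tables detached: %s\n", s.open_tables == 0 ? "yes" : "no");
  return 0;
}
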
+ */ + thd->server_status|= SERVER_STATUS_CURSOR_EXISTS; + join->result->send_fields(*join->fields, Protocol::SEND_NUM_ROWS); + ::send_eof(thd); + thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS; + + /* Prepare JOIN for reading rows. */ + + Next_select_func end_select= join->sort_and_group || join->procedure && + join->procedure->flags & PROC_GROUP ? + end_send_group : end_send; + + join->join_tab[join->tables-1].next_select= end_select; + join->send_records= 0; + join->fetch_limit= join->unit->offset_limit_cnt; + + /* Disable JOIN CACHE as it is not working with cursors yet */ + for (JOIN_TAB *tab= join_tab; + tab != join->join_tab + join->tables - 1; + tab++) + { + if (tab->next_select == sub_select_cache) + tab->next_select= sub_select; + } + + DBUG_ASSERT(join_tab->table->reginfo.not_exists_optimize == 0); + DBUG_ASSERT(join_tab->not_used_in_distinct == 0); + /* + null_row is set only if row not found and it's outer join: should never + happen for the first table in join_tab list + */ + DBUG_ASSERT(join_tab->table->null_row == 0); + + DBUG_RETURN(join_tab->read_first_record(join_tab)); +} + + +/* + DESCRIPTION + Fetch next num_rows rows from the cursor and sent them to the client + PRECONDITION: + Cursor is open + RETURN VALUES: + -4 there are more rows, send_eof sent to the client + 0 no more rows, send_eof was sent to the client, cursor is closed + other fatal fetch error, cursor is closed (error is not reported) +*/ + int +Cursor::fetch(ulong num_rows) +{ + THD *thd= join->thd; + JOIN_TAB *join_tab= join->join_tab + join->const_tables; + COND *on_expr= *join_tab->on_expr_ref; + COND *select_cond= join_tab->select_cond; + READ_RECORD *info= &join_tab->read_record; + int error= 0; + + /* save references to memory, allocated during fetch */ + thd->set_n_backup_item_arena(this, &thd->stmt_backup); + + join->fetch_limit+= num_rows; + + /* + Run while there are new rows in the first table; + For each row, satisfying ON and WHERE clauses (those parts of them which + can be evaluated early), call next_select. + */ + do + { + int no_more_rows; + + join->examined_rows++; + + if (thd->killed) /* Aborted by user */ + { + my_message(ER_SERVER_SHUTDOWN, ER(ER_SERVER_SHUTDOWN), MYF(0)); + return -1; + } + + if (on_expr == 0 || on_expr->val_int()) + { + if (select_cond == 0 || select_cond->val_int()) + { + /* + TODO: call table->unlock_row() to unlock row failed selection, + when this feature will be used. + */ + error= join_tab->next_select(join, join_tab + 1, 0); + DBUG_ASSERT(error <= 0); + if (error) + { + /* real error or LIMIT/FETCH LIMIT worked */ + if (error == -4) + { + /* + FETCH LIMIT, read ahead one row, and close cursor + if there is no more rows XXX: to be fixed to support + non-equi-joins! + */ + if ((no_more_rows= info->read_record(info))) + error= no_more_rows > 0 ? 
-1: 0; + } + break; + } + } + } + /* read next row; break loop if there was an error */ + if ((no_more_rows= info->read_record(info))) + { + if (no_more_rows > 0) + error= -1; + else + { + enum { END_OF_RECORDS= 1 }; + error= join_tab->next_select(join, join_tab+1, (int) END_OF_RECORDS); + } + break; + } + } + while (thd->net.report_error == 0); + + if (thd->net.report_error) + error= -1; + + if (error == -3) /* LIMIT clause worked */ + error= 0; + +#ifdef USING_TRANSACTIONS + ha_release_temporary_latches(thd); +#endif + + thd->restore_backup_item_arena(this, &thd->stmt_backup); + if (error == -4) + { + /* Fetch limit worked, possibly more rows are there */ + thd->server_status|= SERVER_STATUS_CURSOR_EXISTS; + ::send_eof(thd); + thd->server_status&= ~SERVER_STATUS_CURSOR_EXISTS; + } + else + { + close(); + if (error == 0) + { + thd->server_status|= SERVER_STATUS_LAST_ROW_SENT; + ::send_eof(thd); + thd->server_status&= ~SERVER_STATUS_LAST_ROW_SENT; + } + else + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); + /* free cursor memory */ + free_items(free_list); + free_list= 0; + free_root(&main_mem_root, MYF(0)); + } + return error; +} + + +void +Cursor::close() +{ + THD *thd= join->thd; + DBUG_ENTER("Cursor::close"); + + join->join_free(0); + if (unit) + { + /* In case of UNIONs JOIN is freed inside unit->cleanup() */ + unit->cleanup(); + } + else + { + join->cleanup(); + delete join; + } + /* XXX: Another hack: closing tables used in the cursor */ + { + DBUG_ASSERT(lock || open_tables || derived_tables); + + TABLE *tmp_open_tables= thd->open_tables; + TABLE *tmp_derived_tables= thd->derived_tables; + MYSQL_LOCK *tmp_lock= thd->lock; + + thd->open_tables= open_tables; + thd->derived_tables= derived_tables; + thd->lock= lock; + close_thread_tables(thd); + + thd->open_tables= tmp_derived_tables; + thd->derived_tables= tmp_derived_tables; + thd->lock= tmp_lock; + } + join= 0; + unit= 0; + DBUG_VOID_RETURN; +} + + +Cursor::~Cursor() +{ + if (is_open()) + close(); + free_items(free_list); + /* + Must be last, as some memory might be allocated for free purposes, + like in free_tmp_table() (TODO: fix this issue) + */ + free_root(&main_mem_root, MYF(0)); +} + +/*********************************************************************/ + + +bool mysql_select(THD *thd, Item ***rref_pointer_array, TABLE_LIST *tables, uint wild_num, List<Item> &fields, COND *conds, uint og_num, ORDER *order, ORDER *group, @@ -1520,7 +1977,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array, select_result *result, SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) { - int err; + bool err; bool free_join= 1; DBUG_ENTER("mysql_select"); @@ -1528,7 +1985,10 @@ mysql_select(THD *thd, Item ***rref_pointer_array, if (select_lex->join != 0) { join= select_lex->join; - // is it single SELECT in derived table, called in derived table creation + /* + is it single SELECT in derived table, called in derived table + creation + */ if (select_lex->linkage != DERIVED_TABLE_TYPE || (select_options & SELECT_DESCRIBE)) { @@ -1537,7 +1997,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array, //here is EXPLAIN of subselect or derived table if (join->change_result(result)) { - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } else @@ -1556,7 +2016,7 @@ mysql_select(THD *thd, Item ***rref_pointer_array, else { if (!(join= new JOIN(thd, fields, select_options, result))) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); thd->proc_info="init"; thd->used_tables=0; // Updated by setup_fields if (join->prepare(rref_pointer_array, tables, wild_num, @@ 
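
A toy model of the return-code protocol described in the Cursor::fetch() comment above: -4 means the fetch limit was hit but at least one more row exists (the cursor stays open), 0 means all rows were sent and the cursor can be closed. The real code drives join_tab->next_select() and reads one row ahead to make that decision; here a plain vector stands in for the join.

#include <cstdio>
#include <vector>

/* Send up to num_rows rows starting at pos; return -4 or 0 as above. */
static int fetch(const std::vector<int> &rows, size_t &pos,
                 unsigned long num_rows)
{
  size_t limit = pos + num_rows;
  while (pos < rows.size() && pos < limit)
    printf("row %d\n", rows[pos++]);   /* "send" a row */

  if (pos < rows.size())
    return -4;   /* read-ahead: more rows exist, keep the cursor open */
  return 0;      /* end of data: last row sent, close the cursor */
}

int main()
{
  std::vector<int> rows = { 1, 2, 3, 4, 5 };
  size_t pos = 0;
  int rc;
  while ((rc = fetch(rows, pos, 2)) == -4)
    printf("-- more rows, cursor stays open\n");
  printf("rc=%d, cursor closed\n", rc);
  return 0;
}
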
-1583,6 +2043,16 @@ mysql_select(THD *thd, Item ***rref_pointer_array, join->exec(); + if (thd->cursor && thd->cursor->is_open()) + { + /* + A cursor was opened for the last sweep in exec(). + We are here only if this is mysql_select for top-level SELECT_LEX_UNIT + and there were no error. + */ + free_join= 0; + } + if (thd->lex->describe & DESCRIBE_EXTENDED) { select_lex->where= join->conds_history; @@ -1594,10 +2064,8 @@ err: { thd->proc_info="end"; err= join->cleanup(); - if (thd->net.report_error) - err= -1; delete join; - DBUG_RETURN(err); + DBUG_RETURN(err || thd->net.report_error); } DBUG_RETURN(join->error); } @@ -1640,10 +2108,11 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select, */ static bool -make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, +make_join_statistics(JOIN *join, TABLE_LIST *tables, COND *conds, DYNAMIC_ARRAY *keyuse_array) { int error; + TABLE *table; uint i,table_count,const_count,key; table_map found_const_table_map, all_table_map, found_ref, refs; key_map const_ref, eq_part; @@ -1667,15 +2136,18 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, found_const_table_map= all_table_map=0; const_count=0; - for (s=stat,i=0 ; tables ; s++,tables=tables->next,i++) + for (s= stat, i= 0; + tables; + s++, tables= tables->next_leaf, i++) { - TABLE *table; + TABLE_LIST *embedding= tables->embedding; stat_vector[i]=s; s->keys.init(); s->const_keys.init(); s->checked_keys.init(); s->needed_reg.init(); table_vector[i]=s->table=table=tables->table; + table->pos_in_table_list= tables; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);// record count table->quick_keys.clear_all(); table->reginfo.join_tab=s; @@ -1684,29 +2156,39 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, all_table_map|= table->map; s->join=join; s->info=0; // For describe - if ((s->on_expr=tables->on_expr)) + + s->dependent= tables->dep_tables; + s->key_dependent= 0; + if (tables->schema_table) + table->file->records= 2; + + s->on_expr_ref= &tables->on_expr; + if (*s->on_expr_ref) { - /* Left join */ + /* s is the only inner table of an outer join */ if (!table->file->records) { // Empty table - s->key_dependent=s->dependent=0; // Ignore LEFT JOIN depend. + s->dependent= 0; // Ignore LEFT JOIN depend. set_position(join,const_count++,s,(KEYUSE*) 0); continue; } - s->key_dependent=s->dependent= - s->on_expr->used_tables() & ~(table->map); - if (table->outer_join & JOIN_TYPE_LEFT) - s->dependent|=stat_vector[i-1]->dependent | table_vector[i-1]->map; - if (tables->outer_join & JOIN_TYPE_RIGHT) - s->dependent|=tables->next->table->map; - outer_join|=table->map; + outer_join|= table->map; continue; } - if (tables->straight) // We don't have to move this - s->dependent= table_vector[i-1]->map | stat_vector[i-1]->dependent; - else - s->dependent=(table_map) 0; - s->key_dependent=(table_map) 0; + if (embedding) + { + /* s belongs to a nested join, maybe to several embedded joins */ + do + { + NESTED_JOIN *nested_join= embedding->nested_join; + s->dependent|= embedding->dep_tables; + embedding= embedding->embedding; + outer_join|= nested_join->used_tables; + } + while (embedding); + continue; + } + if ((table->system || table->file->records <= 1) && ! 
s->dependent && !(table->file->table_flags() & HA_NOT_EXACT_COUNT) && !table->fulltext_searched) @@ -1717,45 +2199,42 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, stat_vector[i]=0; join->outer_join=outer_join; - /* - If outer join: Re-arrange tables in stat_vector so that outer join - tables are after all tables it is dependent of. - For example: SELECT * from A LEFT JOIN B ON B.c=C.c, C WHERE A.C=C.C - Will shift table B after table C. - */ - if (outer_join) + if (join->outer_join) { - table_map used_tables=0L; - for (i=0 ; i < join->tables-1 ; i++) + /* + Build transitive closure for relation 'to be dependent on'. + This will speed up the plan search for many cases with outer joins, + as well as allow us to catch illegal cross references/ + Warshall's algorithm is used to build the transitive closure. + As we use bitmaps to represent the relation the complexity + of the algorithm is O((number of tables)^2). + */ + for (i= 0, s= stat ; i < table_count ; i++, s++) { - if (stat_vector[i]->dependent & ~used_tables) + for (uint j= 0 ; j < table_count ; j++) { - JOIN_TAB *save= stat_vector[i]; - uint j; - for (j=i+1; - j < join->tables && stat_vector[j]->dependent & ~used_tables; - j++) - { - JOIN_TAB *tmp=stat_vector[j]; // Move element up - stat_vector[j]=save; - save=tmp; - } - if (j == join->tables) - { - join->tables=0; // Don't use join->table - my_error(ER_WRONG_OUTER_JOIN,MYF(0)); - DBUG_RETURN(1); - } - stat_vector[i]=stat_vector[j]; - stat_vector[j]=save; + table= stat[j].table; + if (s->dependent & table->map) + s->dependent |= table->reginfo.join_tab->dependent; + } + } + /* Catch illegal cross references for outer joins */ + for (i= 0, s= stat ; i < table_count ; i++, s++) + { + if (s->dependent & s->table->map) + { + join->tables=0; // Don't use join->table + my_message(ER_WRONG_OUTER_JOIN, ER(ER_WRONG_OUTER_JOIN), MYF(0)); + DBUG_RETURN(1); } - used_tables|= stat_vector[i]->table->map; + s->key_dependent= s->dependent; } } if (conds || outer_join) if (update_ref_and_keys(join->thd, keyuse_array, stat, join->tables, - conds, ~outer_join, join->select_lex)) + conds, join->cond_equal, + ~outer_join, join->select_lex)) DBUG_RETURN(1); /* Read tables with 0 or 1 rows (system tables) */ @@ -1792,14 +2271,15 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, for (JOIN_TAB **pos=stat_vector+const_count ; (s= *pos) ; pos++) { - TABLE *table=s->table; + table=s->table; if (s->dependent) // If dependent on some table { // All dep. must be constants if (s->dependent & ~(found_const_table_map)) continue; if (table->file->records <= 1L && - !(table->file->table_flags() & HA_NOT_EXACT_COUNT)) + !(table->file->table_flags() & HA_NOT_EXACT_COUNT) && + !table->pos_in_table_list->embedding) { // system table int tmp= 0; s->type=JT_SYSTEM; @@ -1898,14 +2378,23 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, if (s->worst_seeks < 2.0) // Fix for small tables s->worst_seeks=2.0; - if (! s->const_keys.is_clear_all()) + /* + Add to stat->const_keys those indexes for which all group fields or + all select distinct fields participate in one index. + */ + add_group_and_distinct_keys(join, s); + + if (!s->const_keys.is_clear_all() && + !s->table->pos_in_table_list->embedding) { ha_rows records; SQL_SELECT *select; select= make_select(s->table, found_const_table_map, found_const_table_map, - s->on_expr ? s->on_expr : conds, - &error); + *s->on_expr_ref ? 
*s->on_expr_ref : conds, + &error, true); + if (!select) + DBUG_RETURN(1); records= get_quick_record_count(join->thd, select, s->table, &s->const_keys, join->row_limit); s->quick=select->quick; @@ -1922,7 +2411,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, join->const_table_map|= s->table->map; set_position(join,const_count++,s,(KEYUSE*) 0); s->type= JT_CONST; - if (s->on_expr) + if (*s->on_expr_ref) { /* Generate empty row */ s->info= "Impossible ON condition"; @@ -1940,17 +2429,17 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, } } - /* Find best combination and return it */ join->join_tab=stat; join->map2table=stat_ref; join->table= join->all_tables=table_vector; join->const_tables=const_count; join->found_const_table_map=found_const_table_map; + /* Find an optimal join order of the non-constant tables. */ if (join->const_tables != join->tables) { optimize_keyuse(join, keyuse_array); - find_best_combination(join,all_table_map & ~join->const_table_map); + choose_plan(join, all_table_map & ~join->const_table_map); } else { @@ -1958,6 +2447,7 @@ make_join_statistics(JOIN *join,TABLE_LIST *tables,COND *conds, sizeof(POSITION)*join->const_tables); join->best_read=1.0; } + /* Generate an execution plan from the found optimal join order. */ DBUG_RETURN(join->thd->killed || get_best_combination(join)); } @@ -2085,6 +2575,7 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, add_key_field() key_fields Pointer to add key, if usable and_level And level, to be stored in KEY_FIELD + cond Condition predicate field Field used in comparision eq_func True if we used =, <=> or IS NULL value Value used for comparison with field @@ -2100,8 +2591,8 @@ merge_key_fields(KEY_FIELD *start,KEY_FIELD *new_fields,KEY_FIELD *end, */ static void -add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, - Field *field,bool eq_func,Item **value, uint num_values, +add_key_field(KEY_FIELD **key_fields, uint and_level, COND *cond, + Field *field, bool eq_func, Item **value, uint num_values, table_map usable_tables) { uint exists_optimize= 0; @@ -2177,7 +2668,7 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, We can't use indexes if the effective collation of the operation differ from the field collation. - We can also not used index on a text column, as the column may + We also cannot use index on a text column, as the column may contain 'x' 'x\t' 'x ' and 'read_next_same' will stop after 'x' when searching for WHERE col='x ' */ @@ -2206,6 +2697,57 @@ add_key_field(KEY_FIELD **key_fields,uint and_level, COND *cond, } +/* + Add possible keys to array of possible keys originated from a simple predicate + + SYNPOSIS + add_key_equal_fields() + key_fields Pointer to add key, if usable + and_level And level, to be stored in KEY_FIELD + cond Condition predicate + field Field used in comparision + eq_func True if we used =, <=> or IS NULL + value Value used for comparison with field + Is NULL for BETWEEN and IN + usable_tables Tables which can be used for key optimization + + NOTES + If field items f1 and f2 belong to the same multiple equality and + a key is added for f1, the the same key is added for f2. 
+ + RETURN + *key_fields is incremented if we stored a key in the array +*/ + +static void +add_key_equal_fields(KEY_FIELD **key_fields, uint and_level, + COND *cond, Item_field *field_item, + bool eq_func, Item **val, + uint num_values, table_map usable_tables) +{ + Field *field= field_item->field; + add_key_field(key_fields, and_level, cond, field, + eq_func, val, num_values, usable_tables); + Item_equal *item_equal= field_item->item_equal; + if (item_equal) + { + /* + Add to the set of possible key values every substitution of + the field for an equal field included into item_equal + */ + Item_equal_iterator it(*item_equal); + Item_field *item; + while ((item= it++)) + { + if (!field->eq(item->field)) + { + add_key_field(key_fields, and_level, cond, item->field, + eq_func, val, num_values, usable_tables); + } + } + } +} + static void add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, COND *cond, table_map usable_tables) @@ -2248,16 +2790,26 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, case Item_func::OPTIMIZE_NONE: break; case Item_func::OPTIMIZE_KEY: - // BETWEEN, IN, NOT + { + // BETWEEN, IN, NE if (cond_func->key_item()->real_item()->type() == Item::FIELD_ITEM && !(cond_func->used_tables() & OUTER_REF_TABLE_BIT)) - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*)(cond_func->key_item()->real_item()))->field, - cond_func->argument_count() == 2 && - cond_func->functype() == Item_func::IN_FUNC, - cond_func->arguments()+1, cond_func->argument_count()-1, - usable_tables); + { + Item **values= cond_func->arguments()+1; + if (cond_func->functype() == Item_func::NE_FUNC && + cond_func->arguments()[1]->real_item()->type() == Item::FIELD_ITEM && + !(cond_func->arguments()[0]->used_tables() & OUTER_REF_TABLE_BIT)) + values--; + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->key_item()->real_item()), + cond_func->argument_count() == 2 && + cond_func->functype() == Item_func::IN_FUNC, + values, + cond_func->argument_count()-1, + usable_tables); + } break; + } case Item_func::OPTIMIZE_OP: { bool equal_func=(cond_func->functype() == Item_func::EQ_FUNC || @@ -2266,21 +2818,19 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, if (cond_func->arguments()[0]->real_item()->type() == Item::FIELD_ITEM && !(cond_func->arguments()[0]->used_tables() & OUTER_REF_TABLE_BIT)) { - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*) (cond_func->arguments()[0])->real_item()) - ->field, - equal_func, - cond_func->arguments()+1, 1, usable_tables); + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->arguments()[0])->real_item(), + equal_func, + cond_func->arguments()+1, 1, usable_tables); } if (cond_func->arguments()[1]->real_item()->type() == Item::FIELD_ITEM && cond_func->functype() != Item_func::LIKE_FUNC && !(cond_func->arguments()[1]->used_tables() & OUTER_REF_TABLE_BIT)) { - add_key_field(key_fields,*and_level,cond_func, - ((Item_field*) (cond_func->arguments()[1])->real_item()) - ->field, - equal_func, - cond_func->arguments(),1,usable_tables); + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->arguments()[1])->real_item(), + equal_func, + cond_func->arguments(),1,usable_tables); } break; } @@ -2292,15 +2842,55 @@ add_key_fields(JOIN_TAB *stat,KEY_FIELD **key_fields,uint *and_level, Item *tmp=new Item_null; if (unlikely(!tmp)) // Should never be true return; - add_key_field(key_fields,*and_level,cond_func, - 
((Item_field*) (cond_func->arguments()[0])->real_item()) - ->field, + add_key_equal_fields(key_fields, *and_level, cond_func, + (Item_field*) (cond_func->arguments()[0])->real_item(), cond_func->functype() == Item_func::ISNULL_FUNC, &tmp, 1, usable_tables); } break; + case Item_func::OPTIMIZE_EQUAL: + Item_equal *item_equal= (Item_equal *) cond; + Item *const_item= item_equal->get_const(); + Item_equal_iterator it(*item_equal); + Item_field *item; + if (const_item) + { + /* + For each field field1 from item_equal consider the equality + field1=const_item as a condition allowing an index access of the table + with field1 by the keys value of field1. + */ + while ((item= it++)) + { + add_key_field(key_fields, *and_level, cond, item->field, + TRUE, &const_item, 1, usable_tables); + } + } + else + { + /* + Consider all pairs of different fields included into item_equal. + For each of them (field1, field1) consider the equality + field1=field2 as a condition allowing an index access of the table + with field1 by the keys value of field2. + */ + Item_equal_iterator fi(*item_equal); + while ((item= fi++)) + { + Field *field= item->field; + while ((item= it++)) + { + if (!field->eq(item->field)) + { + add_key_field(key_fields, *and_level, cond, field, + TRUE, (Item **) &item, 1, usable_tables); + } + } + it.rewind(); + } + } + break; } - return; } /* @@ -2374,14 +2964,14 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array, Item_func *arg0=(Item_func *)(func->arguments()[0]), *arg1=(Item_func *)(func->arguments()[1]); if (arg1->const_item() && - ((functype == Item_func::GE_FUNC && arg1->val()> 0) || - (functype == Item_func::GT_FUNC && arg1->val()>=0)) && + ((functype == Item_func::GE_FUNC && arg1->val_real() > 0) || + (functype == Item_func::GT_FUNC && arg1->val_real() >=0)) && arg0->type() == Item::FUNC_ITEM && arg0->functype() == Item_func::FT_FUNC) cond_func=(Item_func_match *) arg0; else if (arg0->const_item() && - ((functype == Item_func::LE_FUNC && arg0->val()> 0) || - (functype == Item_func::LT_FUNC && arg0->val()>=0)) && + ((functype == Item_func::LE_FUNC && arg0->val_real() > 0) || + (functype == Item_func::LT_FUNC && arg0->val_real() >=0)) && arg1->type() == Item::FUNC_ITEM && arg1->functype() == Item_func::FT_FUNC) cond_func=(Item_func_match *) arg1; @@ -2443,15 +3033,19 @@ sort_keyuse(KEYUSE *a,KEYUSE *b) static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, - uint tables, COND *cond, table_map normal_tables, - SELECT_LEX *select_lex) + uint tables, COND *cond, COND_EQUAL *cond_equal, + table_map normal_tables, SELECT_LEX *select_lex) { uint and_level,i,found_eq_constant; KEY_FIELD *key_fields, *end, *field; + uint m= 1; + + if (cond_equal && cond_equal->max_members) + m= cond_equal->max_members; if (!(key_fields=(KEY_FIELD*) thd->alloc(sizeof(key_fields[0])* - (thd->lex->current_select->cond_count+1)*2))) + (thd->lex->current_select->cond_count+1)*2*m))) return TRUE; /* purecov: inspected */ and_level= 0; field= end= key_fields; @@ -2471,11 +3065,32 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, } for (i=0 ; i < tables ; i++) { - if (join_tab[i].on_expr) - { - add_key_fields(join_tab,&end,&and_level,join_tab[i].on_expr, + /* + Block the creation of keys for inner tables of outer joins. + Here only the outer joins that can not be converted to + inner joins are left and all nests that can be eliminated + are flattened. 
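
A sketch of how the OPTIMIZE_EQUAL branch above expands a multiple equality into ref-access candidates: with a known constant, every member field yields a field=const candidate; without one, every ordered pair of distinct fields does, so whichever table comes earlier in the join order can drive the lookup. MultipleEquality and add_key_candidates() are toy stand-ins for Item_equal and add_key_field().

#include <cstdio>
#include <string>
#include <vector>

struct MultipleEquality
{
  std::vector<std::string> fields;   /* e.g. t1.a, t2.b, t3.c */
  const char *const_value;           /* e.g. "7", or 0 if none */
};

static void add_key_candidates(const MultipleEquality &eq)
{
  if (eq.const_value)
  {
    /* field_i = const: every member can be looked up by the constant. */
    for (size_t i = 0; i < eq.fields.size(); i++)
      printf("candidate: %s = %s\n", eq.fields[i].c_str(), eq.const_value);
    return;
  }
  /* No constant: every pair field_i = field_j (i != j) is a candidate. */
  for (size_t i = 0; i < eq.fields.size(); i++)
    for (size_t j = 0; j < eq.fields.size(); j++)
      if (i != j)
        printf("candidate: %s = %s\n",
               eq.fields[i].c_str(), eq.fields[j].c_str());
}

int main()
{
  MultipleEquality with_const = { { "t1.a", "t2.b" }, "7" };
  MultipleEquality no_const   = { { "t1.a", "t2.b", "t3.c" }, 0 };
  add_key_candidates(with_const);
  add_key_candidates(no_const);
  return 0;
}
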
+ In the future when we introduce conditional accesses + for inner tables in outer joins these keys will be taken + into account as well. + */ + if (*join_tab[i].on_expr_ref) + { + add_key_fields(join_tab,&end,&and_level,*join_tab[i].on_expr_ref, join_tab[i].table->map); } + else + { + TABLE_LIST *tab= join_tab[i].table->pos_in_table_list; + TABLE_LIST *embedding= tab->embedding; + if (embedding) + { + NESTED_JOIN *nested_join= embedding->nested_join; + if (nested_join->join_list.head() == tab) + add_key_fields(join_tab, &end, &and_level, embedding->on_expr, + nested_join->used_tables); + } + } } /* fill keyuse with found key parts */ for ( ; field != end ; field++) @@ -2538,7 +3153,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, } /* - Update some values in keyuse for faster find_best_combination() loop + Update some values in keyuse for faster choose_plan() loop */ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) @@ -2579,6 +3194,68 @@ static void optimize_keyuse(JOIN *join, DYNAMIC_ARRAY *keyuse_array) } +/* + Discover the indexes that can be used for GROUP BY or DISTINCT queries. + + SYNOPSIS + add_group_and_distinct_keys() + join + join_tab + + DESCRIPTION + If the query has a GROUP BY clause, find all indexes that contain all + GROUP BY fields, and add those indexes to join->const_keys. + If the query has a DISTINCT clause, find all indexes that contain all + SELECT fields, and add those indexes to join->const_keys. + This allows later on such queries to be processed by a + QUICK_GROUP_MIN_MAX_SELECT. + + RETURN + None +*/ + +static void +add_group_and_distinct_keys(JOIN *join, JOIN_TAB *join_tab) +{ + List<Item_field> indexed_fields; + List_iterator<Item_field> indexed_fields_it(indexed_fields); + ORDER *cur_group; + Item_field *cur_item; + key_map possible_keys(0); + + if (join->group_list) + { /* Collect all query fields referenced in the GROUP clause. */ + for (cur_group= join->group_list; cur_group; cur_group= cur_group->next) + (*cur_group->item)->walk(&Item::collect_item_field_processor, + (byte*) &indexed_fields); + } + else if (join->select_distinct) + { /* Collect all query fields referenced in the SELECT clause. */ + List<Item> &select_items= join->fields_list; + List_iterator<Item> select_items_it(select_items); + Item *item; + while ((item= select_items_it++)) + item->walk(&Item::collect_item_field_processor, (byte*) &indexed_fields); + } + else + return; + + if (indexed_fields.elements == 0) + return; + + /* Intersect the keys of all group fields. */ + cur_item= indexed_fields_it++; + possible_keys.merge(cur_item->field->part_of_key); + while ((cur_item= indexed_fields_it++)) + { + possible_keys.intersect(cur_item->field->part_of_key); + } + + if (!possible_keys.is_clear_all()) + join_tab->const_keys.merge(possible_keys); +} + + /***************************************************************************** Go through all combinations of not marked tables and find the one which uses least records @@ -2606,16 +3283,984 @@ set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key) } +/* + Find the best access path for an extension of a partial execution plan and + add this path to the plan. 
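
A sketch of the key-map intersection performed by add_group_and_distinct_keys() above, with std::bitset standing in for key_map and Field::part_of_key: only indexes that cover every GROUP BY (or every SELECT DISTINCT) field survive the intersection and get merged into const_keys.

#include <bitset>
#include <cstdio>
#include <vector>

/* Bit k set in part_of_key[i] means field i is covered by index k. */
typedef std::bitset<64> key_map_t;

static key_map_t intersect_group_keys(const std::vector<key_map_t> &part_of_key)
{
  if (part_of_key.empty())
    return key_map_t();               /* nothing to add */
  key_map_t possible = part_of_key[0];
  for (size_t i = 1; i < part_of_key.size(); i++)
    possible &= part_of_key[i];       /* keep only indexes covering all fields */
  return possible;                    /* merged into const_keys if non-empty */
}

int main()
{
  /* field a is in indexes 0 and 2, field b in indexes 0 and 1 */
  std::vector<key_map_t> fields = { key_map_t(5), key_map_t(3) };
  key_map_t possible = intersect_group_keys(fields);
  printf("indexes usable for GROUP BY: %s\n",
         possible.to_string().substr(60).c_str());
  return 0;
}
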
+ + SYNOPSIS + best_access_path() + join pointer to the structure providing all context info + for the query + s the table to be joined by the function + thd thread for the connection that submitted the query + remaining_tables set of tables not included into the partial plan yet + idx the length of the partial plan + record_count estimate for the number of records returned by the partial + plan + read_time the cost of the partial plan + + DESCRIPTION + The function finds the best access path to table 's' from the passed + partial plan where an access path is the general term for any means to + access the data in 's'. An access path may use either an index or a scan, + whichever is cheaper. The input partial plan is passed via the array + 'join->positions' of length 'idx'. The chosen access method for 's' and its + cost are stored in 'join->positions[idx]'. + + RETURN + None +*/ + static void -find_best_combination(JOIN *join, table_map rest_tables) +best_access_path(JOIN *join, + JOIN_TAB *s, + THD *thd, + table_map remaining_tables, + uint idx, + double record_count, + double read_time) +{ + KEYUSE *best_key= 0; + uint best_max_key_part= 0; + my_bool found_constraint= 0; + double best= DBL_MAX; + double best_time= DBL_MAX; + double records= DBL_MAX; + double tmp; + ha_rows rec; + + DBUG_ENTER("best_access_path"); + + if (s->keyuse) + { /* Use key if possible */ + TABLE *table= s->table; + KEYUSE *keyuse,*start_key=0; + double best_records= DBL_MAX; + uint max_key_part=0; + + /* Test how we can use keys */ + rec= s->records/MATCHING_ROWS_IN_OTHER_TABLE; // Assumed records/key + for (keyuse=s->keyuse ; keyuse->table == table ;) + { + key_part_map found_part= 0; + table_map found_ref= 0; + uint found_ref_or_null= 0; + uint key= keyuse->key; + KEY *keyinfo= table->key_info+key; + bool ft_key= (keyuse->keypart == FT_KEYPART); + + /* Calculate how many key segments of the current key we can use */ + start_key= keyuse; + do + { /* for each keypart */ + uint keypart= keyuse->keypart; + uint found_part_ref_or_null= KEY_OPTIMIZE_REF_OR_NULL; + do + { + if (!(remaining_tables & keyuse->used_tables) && + !(found_ref_or_null & keyuse->optimize)) + { + found_part|= keyuse->keypart_map; + found_ref|= keyuse->used_tables; + if (rec > keyuse->ref_table_rows) + rec= keyuse->ref_table_rows; + found_part_ref_or_null&= keyuse->optimize; + } + keyuse++; + found_ref_or_null|= found_part_ref_or_null; + } while (keyuse->table == table && keyuse->key == key && + keyuse->keypart == keypart); + } while (keyuse->table == table && keyuse->key == key); + + /* + Assume that that each key matches a proportional part of table. + */ + if (!found_part && !ft_key) + continue; // Nothing usable found + + if (rec < MATCHING_ROWS_IN_OTHER_TABLE) + rec= MATCHING_ROWS_IN_OTHER_TABLE; // Fix for small tables + + /* + ft-keys require special treatment + */ + if (ft_key) + { + /* + Really, there should be records=0.0 (yes!) 
+ but 1.0 would be probably safer + */ + tmp= prev_record_reads(join, found_ref); + records= 1.0; + } + else + { + found_constraint= 1; + /* + Check if we found full key + */ + if (found_part == PREV_BITS(uint,keyinfo->key_parts) && + !found_ref_or_null) + { /* use eq key */ + max_key_part= (uint) ~0; + if ((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) + { + tmp = prev_record_reads(join, found_ref); + records=1.0; + } + else + { + if (!found_ref) + { /* We found a const key */ + if (table->quick_keys.is_set(key)) + records= (double) table->quick_rows[key]; + else + { + /* quick_range couldn't use key! */ + records= (double) s->records/rec; + } + } + else + { + if (!(records=keyinfo->rec_per_key[keyinfo->key_parts-1])) + { /* Prefer longer keys */ + records= + ((double) s->records / (double) rec * + (1.0 + + ((double) (table->max_key_length-keyinfo->key_length) / + (double) table->max_key_length))); + if (records < 2.0) + records=2.0; /* Can't be as good as a unique */ + } + } + /* Limit the number of matched rows */ + tmp = records; + set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); + if (table->used_keys.is_set(key)) + { + /* we can use only index tree */ + uint keys_per_block= table->file->block_size/2/ + (keyinfo->key_length+table->file->ref_length)+1; + tmp = record_count*(tmp+keys_per_block-1)/keys_per_block; + } + else + tmp = record_count*min(tmp,s->worst_seeks); + } + } + else + { + /* + Use as much key-parts as possible and a uniq key is better + than a not unique key + Set tmp to (previous record count) * (records / combination) + */ + if ((found_part & 1) && + (!(table->file->index_flags(key, 0, 0) & HA_ONLY_WHOLE_INDEX) || + found_part == PREV_BITS(uint,keyinfo->key_parts))) + { + max_key_part=max_part_bit(found_part); + /* + Check if quick_range could determinate how many rows we + will match + */ + if (table->quick_keys.is_set(key) && + table->quick_key_parts[key] == max_key_part) + tmp= records= (double) table->quick_rows[key]; + else + { + /* Check if we have statistic about the distribution */ + if ((records = keyinfo->rec_per_key[max_key_part-1])) + tmp = records; + else + { + /* + Assume that the first key part matches 1% of the file + and that the whole key matches 10 (duplicates) or 1 + (unique) records. + Assume also that more key matches proportionally more + records + This gives the formula: + records = (x * (b-a) + a*c-b)/(c-1) + + b = records matched by whole key + a = records matched by first key part (1% of all records?) + c = number of key parts in key + x = used key parts (1 <= x <= c) + */ + double rec_per_key; + if (!(rec_per_key=(double) + keyinfo->rec_per_key[keyinfo->key_parts-1])) + rec_per_key=(double) s->records/rec+1; + + if (!s->records) + tmp = 0; + else if (rec_per_key/(double) s->records >= 0.01) + tmp = rec_per_key; + else + { + double a=s->records*0.01; + if (keyinfo->key_parts > 1) + tmp= (max_key_part * (rec_per_key - a) + + a*keyinfo->key_parts - rec_per_key)/ + (keyinfo->key_parts-1); + else + tmp= a; + set_if_bigger(tmp,1.0); + } + records = (ulong) tmp; + } + /* + If quick_select was used on a part of this key, we know + the maximum number of rows that the key can match. 
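+                For illustration: if rec_per_key (or the interpolation
+                above) estimates 120 matching rows, but a range scan over
+                a prefix of this key found only 30 rows, 'records' is
+                capped to 30 by the branch below.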
+ */ + if (table->quick_keys.is_set(key) && + table->quick_key_parts[key] <= max_key_part && + records > (double) table->quick_rows[key]) + tmp= records= (double) table->quick_rows[key]; + else if (found_ref_or_null) + { + /* We need to do two key searches to find key */ + tmp *= 2.0; + records *= 2.0; + } + } + /* Limit the number of matched rows */ + set_if_smaller(tmp, (double) thd->variables.max_seeks_for_key); + if (table->used_keys.is_set(key)) + { + /* we can use only index tree */ + uint keys_per_block= table->file->block_size/2/ + (keyinfo->key_length+table->file->ref_length)+1; + tmp = record_count*(tmp+keys_per_block-1)/keys_per_block; + } + else + tmp = record_count*min(tmp,s->worst_seeks); + } + else + tmp = best_time; // Do nothing + } + } /* not ft_key */ + if (tmp < best_time - records/(double) TIME_FOR_COMPARE) + { + best_time= tmp + records/(double) TIME_FOR_COMPARE; + best= tmp; + best_records= records; + best_key= start_key; + best_max_key_part= max_key_part; + } + } + records= best_records; + } + + /* + Don't test table scan if it can't be better. + Prefer key lookup if we would use the same key for scanning. + + Don't do a table scan on InnoDB tables, if we can read the used + parts of the row from any of the used index. + This is because table scans uses index and we would not win + anything by using a table scan. + */ + if ((records >= s->found_records || best > s->read_time) && + !(s->quick && best_key && s->quick->index == best_key->key && + best_max_key_part >= s->table->quick_key_parts[best_key->key]) && + !((s->table->file->table_flags() & HA_TABLE_SCAN_ON_INDEX) && + ! s->table->used_keys.is_clear_all() && best_key) && + !(s->table->force_index && best_key)) + { // Check full join + ha_rows rnd_records= s->found_records; + /* + If there is a restriction on the table, assume that 25% of the + rows can be skipped on next part. + This is to force tables that this table depends on before this + table + */ + if (found_constraint) + rnd_records-= rnd_records/4; + + /* + Range optimizer never proposes a RANGE if it isn't better + than FULL: so if RANGE is present, it's always preferred to FULL. + Here we estimate its cost. + */ + if (s->quick) + { + /* + For each record we: + - read record range through 'quick' + - skip rows which does not satisfy WHERE constraints + */ + tmp= record_count * + (s->quick->read_time + + (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE); + } + else + { + /* Estimate cost of reading table. */ + tmp= s->table->file->scan_time(); + if (s->table->map & join->outer_join) // Can't use join cache + { + /* + For each record we have to: + - read the whole table record + - skip rows which does not satisfy join condition + */ + tmp= record_count * + (tmp + + (s->records - rnd_records)/(double) TIME_FOR_COMPARE); + } + else + { + /* We read the table as many times as join buffer becomes full. */ + tmp*= (1.0 + floor((double) cache_record_length(join,idx) * + record_count / + (double) thd->variables.join_buff_size)); + /* + We don't make full cartesian product between rows in the scanned + table and existing records because we skip all rows from the + scanned table, which does not satisfy join condition when + we read the table (see flush_cached_records for details). Here we + take into account cost to read and skip these records. + */ + tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE; + } + } + + /* + We estimate the cost of evaluating WHERE clause for found records + as record_count * rnd_records / TIME_FOR_COMPARE. 
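+        For example, with record_count = 10 partial-plan rows,
+        rnd_records = 1000 candidate rows and TIME_FOR_COMPARE assumed
+        to be 5, this term amounts to 10 * 1000 / 5 = 2000 comparisons.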
This cost plus + tmp give us total cost of using TABLE SCAN + */ + if (best == DBL_MAX || + (tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records < + best + record_count/(double) TIME_FOR_COMPARE*records)) + { + /* + If the table has a range (s->quick is set) make_join_select() + will ensure that this will be used + */ + best= tmp; + records= rows2double(rnd_records); + best_key= 0; + } + } + + /* Update the cost information for the current partial plan */ + join->positions[idx].records_read= records; + join->positions[idx].read_time= best; + join->positions[idx].key= best_key; + join->positions[idx].table= s; + + if (!best_key && + idx == join->const_tables && + s->table == join->sort_by_table && + join->unit->select_limit_cnt >= records) + join->sort_by_table= (TABLE*) 1; // Must use temporary table + + DBUG_VOID_RETURN; +} + + +/* + Selects and invokes a search strategy for an optimal query plan. + + SYNOPSIS + choose_plan() + join pointer to the structure providing all context info for + the query + join_tables set of the tables in the query + + DESCRIPTION + The function checks user-configurable parameters that control the search + strategy for an optimal plan, selects the search method and then invokes + it. Each specific optimization procedure stores the final optimal plan in + the array 'join->best_positions', and the cost of the plan in + 'join->best_read'. + + RETURN + None +*/ + +static void +choose_plan(JOIN *join, table_map join_tables) { - DBUG_ENTER("find_best_combination"); - join->best_read=DBL_MAX; - find_best(join,rest_tables, join->const_tables,1.0,0.0); + uint search_depth= join->thd->variables.optimizer_search_depth; + uint prune_level= join->thd->variables.optimizer_prune_level; + bool straight_join= join->select_options & SELECT_STRAIGHT_JOIN; + DBUG_ENTER("choose_plan"); + + /* + if (SELECT_STRAIGHT_JOIN option is set) + reorder tables so dependent tables come after tables they depend + on, otherwise keep tables in the order they were specified in the query + else + Apply heuristic: pre-sort all access plans with respect to the number of + records accessed. + */ + qsort(join->best_ref + join->const_tables, join->tables - join->const_tables, + sizeof(JOIN_TAB*), straight_join?join_tab_cmp_straight:join_tab_cmp); + + if (straight_join) + { + optimize_straight_join(join, join_tables); + } + else + { + if (search_depth == MAX_TABLES+2) + { /* + TODO: 'MAX_TABLES+2' denotes the old implementation of find_best before + the greedy version. Will be removed when greedy_search is approved. + */ + join->best_read= DBL_MAX; + find_best(join, join_tables, join->const_tables, 1.0, 0.0); + } + else + { + if (search_depth == 0) + /* Automatically determine a reasonable value for 'search_depth' */ + search_depth= determine_search_depth(join); + greedy_search(join, join_tables, search_depth, prune_level); + } + } + + /* + Store the cost of this query into a user variable + Don't update last_query_cost for 'show status' command + */ + if (join->thd->lex->orig_sql_command != SQLCOM_SHOW_STATUS) + last_query_cost= join->best_read; + DBUG_VOID_RETURN; } +/* + Compare two JOIN_TAB objects based on the number of accessed records. 
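+  A table that depends on another table always sorts after it; ties in
+  found_records are broken by pointer value so the order stays
+  deterministic. For example, tables with 10, 1000 and 100 estimated
+  rows and no dependencies between them are ordered as 10, 100, 1000.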
+ + SYNOPSIS + join_tab_cmp() + ptr1 pointer to first JOIN_TAB object + ptr2 pointer to second JOIN_TAB object + + RETURN + 1 if first is bigger + -1 if second is bigger + 0 if equal +*/ + +static int +join_tab_cmp(const void* ptr1, const void* ptr2) +{ + JOIN_TAB *jt1= *(JOIN_TAB**) ptr1; + JOIN_TAB *jt2= *(JOIN_TAB**) ptr2; + + if (jt1->dependent & jt2->table->map) + return 1; + if (jt2->dependent & jt1->table->map) + return -1; + if (jt1->found_records > jt2->found_records) + return 1; + if (jt1->found_records < jt2->found_records) + return -1; + return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0); +} + + +/* + Same as join_tab_cmp, but for use with SELECT_STRAIGHT_JOIN. +*/ + +static int +join_tab_cmp_straight(const void* ptr1, const void* ptr2) +{ + JOIN_TAB *jt1= *(JOIN_TAB**) ptr1; + JOIN_TAB *jt2= *(JOIN_TAB**) ptr2; + + if (jt1->dependent & jt2->table->map) + return 1; + if (jt2->dependent & jt1->table->map) + return -1; + return jt1 > jt2 ? 1 : (jt1 < jt2 ? -1 : 0); +} + +/* + Heuristic procedure to automatically guess a reasonable degree of + exhaustiveness for the greedy search procedure. + + SYNOPSIS + determine_search_depth() + join pointer to the structure providing all context info for the query + + DESCRIPTION + The procedure estimates the optimization time and selects a search depth + big enough to result in a near-optimal QEP, that doesn't take too long to + find. If the number of tables in the query exceeds some constant, then + search_depth is set to this constant. + + NOTES + This is an extremely simplistic implementation that serves as a stub for a + more advanced analysis of the join. Ideally the search depth should be + determined by learning from previous query optimizations, because it will + depend on the CPU power (and other factors). + + RETURN + A positive integer that specifies the search depth (and thus the + exhaustiveness) of the depth-first search algorithm used by + 'greedy_search'. +*/ + +static uint +determine_search_depth(JOIN *join) +{ + uint table_count= join->tables - join->const_tables; + uint search_depth; + /* TODO: this value should be determined dynamically, based on statistics: */ + uint max_tables_for_exhaustive_opt= 7; + + if (table_count <= max_tables_for_exhaustive_opt) + search_depth= table_count+1; // use exhaustive for small number of tables + else + /* + TODO: this value could be determined by some mapping of the form: + depth : table_count -> [max_tables_for_exhaustive_opt..MAX_EXHAUSTIVE] + */ + search_depth= max_tables_for_exhaustive_opt; // use greedy search + + return search_depth; +} + + +/* + Select the best ways to access the tables in a query without reordering them. + + SYNOPSIS + optimize_straight_join() + join pointer to the structure providing all context info for + the query + join_tables set of the tables in the query + + DESCRIPTION + Find the best access paths for each query table and compute their costs + according to their order in the array 'join->best_ref' (thus without + reordering the join tables). The function calls sequentially + 'best_access_path' for each table in the query to select the best table + access method. The final optimal plan is stored in the array + 'join->best_positions', and the corresponding cost in 'join->best_read'. + + NOTES + This function can be applied to: + - queries with STRAIGHT_JOIN + - internally to compute the cost of an arbitrary QEP + Thus 'optimize_straight_join' can be used at any stage of the query + optimization process to finalize a QEP as it is. 
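+    Roughly, the cost computed by the loop below is
+      access(t1) + r(t1)*access(t2) + r(t1)*r(t2)*access(t3) + ...
+        + (r(t1)*r(t2)*...*r(tn)) / TIME_FOR_COMPARE
+    where access(ti) stands for the per-row cost of the access method
+    chosen by best_access_path for ti and r(ti) for its records_read
+    estimate; one more pass over the result is added when a temporary
+    table is needed for sorting.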
+ + RETURN + None +*/ + +static void +optimize_straight_join(JOIN *join, table_map join_tables) +{ + JOIN_TAB *s; + uint idx= join->const_tables; + double record_count= 1.0; + double read_time= 0.0; + + for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++) + { + /* Find the best access method from 's' to the current partial plan */ + best_access_path(join, s, join->thd, join_tables, idx, record_count, read_time); + /* compute the cost of the new plan extended with 's' */ + record_count*= join->positions[idx].records_read; + read_time+= join->positions[idx].read_time; + join_tables&= ~(s->table->map); + ++idx; + } + + read_time+= record_count / (double) TIME_FOR_COMPARE; + if (join->sort_by_table && + join->sort_by_table != join->positions[join->const_tables].table->table) + read_time+= record_count; // We have to make a temp table + memcpy((gptr) join->best_positions, (gptr) join->positions, + sizeof(POSITION)*idx); + join->best_read= read_time; +} + + +/* + Find a good, possibly optimal, query execution plan (QEP) by a greedy search. + + SYNOPSIS + join pointer to the structure providing all context info + for the query + remaining_tables set of tables not included into the partial plan yet + search_depth controlls the exhaustiveness of the search + prune_level the pruning heuristics that should be applied during + search + + DESCRIPTION + The search procedure uses a hybrid greedy/exhaustive search with controlled + exhaustiveness. The search is performed in N = card(remaining_tables) + steps. Each step evaluates how promising is each of the unoptimized tables, + selects the most promising table, and extends the current partial QEP with + that table. Currenly the most 'promising' table is the one with least + expensive extension. + There are two extreme cases: + 1. When (card(remaining_tables) < search_depth), the estimate finds the best + complete continuation of the partial QEP. This continuation can be + used directly as a result of the search. + 2. When (search_depth == 1) the 'best_extension_by_limited_search' + consideres the extension of the current QEP with each of the remaining + unoptimized tables. + All other cases are in-between these two extremes. Thus the parameter + 'search_depth' controlls the exhaustiveness of the search. The higher the + value, the longer the optimizaton time and possibly the better the + resulting plan. The lower the value, the fewer alternative plans are + estimated, but the more likely to get a bad QEP. + + All intermediate and final results of the procedure are stored in 'join': + join->positions modified for every partial QEP that is explored + join->best_positions modified for the current best complete QEP + join->best_read modified for the current best complete QEP + join->best_ref might be partially reordered + The final optimal plan is stored in 'join->best_positions', and its + corresponding cost in 'join->best_read'. + + NOTES + The following pseudocode describes the algorithm of 'greedy_search': + + procedure greedy_search + input: remaining_tables + output: pplan; + { + pplan = <>; + do { + (t, a) = best_extension(pplan, remaining_tables); + pplan = concat(pplan, (t, a)); + remaining_tables = remaining_tables - t; + } while (remaining_tables != {}) + return pplan; + } + + where 'best_extension' is a placeholder for a procedure that selects the + most "promising" of all tables in 'remaining_tables'. 
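+    For example, with tables {t1,t2,t3,t4} and search_depth=2 the first
+    iteration evaluates all two-table extensions of the empty plan,
+    keeps the cheapest one, permanently fixes only its first table, and
+    then repeats the procedure for the three tables that remain.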
+ Currently this estimate is performed by calling + 'best_extension_by_limited_search' to evaluate all extensions of the + current QEP of size 'search_depth', thus the complexity of 'greedy_search' + mainly depends on that of 'best_extension_by_limited_search'. + + If 'best_extension()' == 'best_extension_by_limited_search()', then the + worst-case complexity of this algorithm is <= + O(N*N^search_depth/search_depth). When serch_depth >= N, then the + complexity of greedy_search is O(N!). + + In the future, 'greedy_search' might be extended to support other + implementations of 'best_extension', e.g. some simpler quadratic procedure. + + RETURN + None +*/ + +static void +greedy_search(JOIN *join, + table_map remaining_tables, + uint search_depth, + uint prune_level) +{ + double record_count= 1.0; + double read_time= 0.0; + uint idx= join->const_tables; // index into 'join->best_ref' + uint best_idx; + uint rem_size; // cardinality of remaining_tables + POSITION best_pos; + JOIN_TAB *best_table; // the next plan node to be added to the curr QEP + + DBUG_ENTER("greedy_search"); + + /* number of tables that remain to be optimized */ + rem_size= my_count_bits(remaining_tables); + + do { + /* Find the extension of the current QEP with the lowest cost */ + join->best_read= DBL_MAX; + best_extension_by_limited_search(join, remaining_tables, idx, record_count, + read_time, search_depth, prune_level); + + if (rem_size <= search_depth) + { + /* + 'join->best_positions' contains a complete optimal extension of the + current partial QEP. + */ + DBUG_EXECUTE("opt", print_plan(join, read_time, record_count, + join->tables, "optimal");); + DBUG_VOID_RETURN; + } + + /* select the first table in the optimal extension as most promising */ + best_pos= join->best_positions[idx]; + best_table= best_pos.table; + /* + Each subsequent loop of 'best_extension_by_limited_search' uses + 'join->positions' for cost estimates, therefore we have to update its + value. + */ + join->positions[idx]= best_pos; + + /* find the position of 'best_table' in 'join->best_ref' */ + best_idx= idx; + JOIN_TAB *pos= join->best_ref[best_idx]; + while (pos && best_table != pos) + pos= join->best_ref[++best_idx]; + DBUG_ASSERT((pos != NULL)); // should always find 'best_table' + /* move 'best_table' at the first free position in the array of joins */ + swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]); + + /* compute the cost of the new plan extended with 'best_table' */ + record_count*= join->positions[idx].records_read; + read_time+= join->positions[idx].read_time; + + remaining_tables&= ~(best_table->table->map); + --rem_size; + ++idx; + + DBUG_EXECUTE("opt", + print_plan(join, read_time, record_count, idx, "extended");); + } while (TRUE); +} + + +/* + Find a good, possibly optimal, query execution plan (QEP) by a possibly + exhaustive search. + + SYNOPSIS + best_extension_by_limited_search() + join pointer to the structure providing all context info for + the query + remaining_tables set of tables not included into the partial plan yet + idx length of the partial QEP in 'join->positions'; + since a depth-first search is used, also corresponds to + the current depth of the search tree; + also an index in the array 'join->best_ref'; + record_count estimate for the number of records returned by the best + partial plan + read_time the cost of the best partial plan + search_depth maximum depth of the recursion and thus size of the found + optimal plan (0 < search_depth <= join->tables+1). 
+ prune_level pruning heuristics that should be applied during optimization + (values: 0 = EXHAUSTIVE, 1 = PRUNE_BY_TIME_OR_ROWS) + + DESCRIPTION + The procedure searches for the optimal ordering of the query tables in set + 'remaining_tables' of size N, and the corresponding optimal access paths to each + table. The choice of a table order and an access path for each table + constitutes a query execution plan (QEP) that fully specifies how to + execute the query. + + The maximal size of the found plan is controlled by the parameter + 'search_depth'. When search_depth == N, the resulting plan is complete and + can be used directly as a QEP. If search_depth < N, the found plan consists + of only some of the query tables. Such "partial" optimal plans are useful + only as input to query optimization procedures, and cannot be used directly + to execute a query. + + The algorithm begins with an empty partial plan stored in 'join->positions' + and a set of N tables - 'remaining_tables'. Each step of the algorithm + evaluates the cost of the partial plan extended by all access plans for + each of the relations in 'remaining_tables', expands the current partial + plan with the access plan that results in lowest cost of the expanded + partial plan, and removes the corresponding relation from + 'remaining_tables'. The algorithm continues until it either constructs a + complete optimal plan, or constructs an optimal plartial plan with size = + search_depth. + + The final optimal plan is stored in 'join->best_positions'. The + corresponding cost of the optimal plan is in 'join->best_read'. + + NOTES + The procedure uses a recursive depth-first search where the depth of the + recursion (and thus the exhaustiveness of the search) is controlled by the + parameter 'search_depth'. + + The pseudocode below describes the algorithm of + 'best_extension_by_limited_search'. The worst-case complexity of this + algorithm is O(N*N^search_depth/search_depth). When serch_depth >= N, then + the complexity of greedy_search is O(N!). + + procedure best_extension_by_limited_search( + pplan in, // in, partial plan of tables-joined-so-far + pplan_cost, // in, cost of pplan + remaining_tables, // in, set of tables not referenced in pplan + best_plan_so_far, // in/out, best plan found so far + best_plan_so_far_cost,// in/out, cost of best_plan_so_far + search_depth) // in, maximum size of the plans being considered + { + for each table T from remaining_tables + { + // Calculate the cost of using table T as above + cost = complex-series-of-calculations; + + // Add the cost to the cost so far. + pplan_cost+= cost; + + if (pplan_cost >= best_plan_so_far_cost) + // pplan_cost already too great, stop search + continue; + + pplan= expand pplan by best_access_method; + remaining_tables= remaining_tables - table T; + if (remaining_tables is not an empty set + and + search_depth > 1) + { + best_extension_by_limited_search(pplan, pplan_cost, + remaining_tables, + best_plan_so_far, + best_plan_so_far_cost, + search_depth - 1); + } + else + { + best_plan_so_far_cost= pplan_cost; + best_plan_so_far= pplan; + } + } + } + + IMPLEMENTATION + When 'best_extension_by_limited_search' is called for the first time, + 'join->best_read' must be set to the largest possible value (e.g. DBL_MAX). + The actual implementation provides a way to optionally use pruning + heuristic (controlled by the parameter 'prune_level') to reduce the search + space by skipping some partial plans. 
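+    As an illustration only, the self-contained sketch below (toy types,
+    names and cost model, all invented, not the server's own) shows the
+    shape of this bounded depth-first search with the cost-based prune:
+
+      #include <cstdio>
+      #include <vector>
+
+      struct ToyTable { const char *name; double rows; double per_row_cost; };
+
+      static double best_cost= 1e300;             // the toy's "DBL_MAX"
+      static std::vector<int> best_order;
+
+      // Depth-first search over join prefixes; keeps the cheapest plan
+      // seen so far and abandons any prefix that is already costlier.
+      static void search(const std::vector<ToyTable> &tabs,
+                         std::vector<int> &prefix, std::vector<bool> &used,
+                         double rows_so_far, double cost_so_far,
+                         int depth_left)
+      {
+        if (cost_so_far >= best_cost)             // prune by cost
+          return;
+        if ((int) prefix.size() == (int) tabs.size() || depth_left == 0)
+        {
+          best_cost= cost_so_far;                 // cheaper: passed the prune
+          best_order= prefix;
+          return;
+        }
+        for (int i= 0; i < (int) tabs.size(); i++)
+        {
+          if (used[i])
+            continue;
+          used[i]= true;
+          prefix.push_back(i);
+          // cost of accessing tabs[i] once per row produced so far
+          double ext_cost= cost_so_far + rows_so_far * tabs[i].per_row_cost;
+          search(tabs, prefix, used, rows_so_far * tabs[i].rows, ext_cost,
+                 depth_left - 1);
+          prefix.pop_back();
+          used[i]= false;
+        }
+      }
+
+      int main()
+      {
+        std::vector<ToyTable> tabs= { {"t1", 10, 1}, {"t2", 1000, 2},
+                                      {"t3", 50, 1} };
+        std::vector<int> prefix;
+        std::vector<bool> used(tabs.size(), false);
+        // depth_left == number of tables: plain exhaustive search with
+        // pruning; a smaller depth returns the best prefix of that
+        // length, which is what one greedy_search step needs.
+        search(tabs, prefix, used, 1.0, 0.0, (int) tabs.size());
+        for (int i : best_order)
+          printf("%s ", tabs[i].name);
+        printf("cost=%g\n", best_cost);
+        return 0;
+      }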
+ The parameter 'search_depth' provides control over the recursion + depth, and thus the size of the resulting optimal plan. + + RETURN + None +*/ + +static void +best_extension_by_limited_search(JOIN *join, + table_map remaining_tables, + uint idx, + double record_count, + double read_time, + uint search_depth, + uint prune_level) +{ + THD *thd= join->thd; + if (thd->killed) // Abort + return; + + DBUG_ENTER("best_extension_by_limited_search"); + + /* + 'join' is a partial plan with lower cost than the best plan so far, + so continue expanding it further with the tables in 'remaining_tables'. + */ + JOIN_TAB *s; + double best_record_count= DBL_MAX; + double best_read_time= DBL_MAX; + + DBUG_EXECUTE("opt", + print_plan(join, read_time, record_count, idx, "part_plan");); + + for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++) + { + table_map real_table_bit= s->table->map; + if ((remaining_tables & real_table_bit) && !(remaining_tables & s->dependent)) + { + double current_record_count, current_read_time; + + /* Find the best access method from 's' to the current partial plan */ + best_access_path(join, s, thd, remaining_tables, idx, record_count, read_time); + /* Compute the cost of extending the plan with 's' */ + current_record_count= record_count * join->positions[idx].records_read; + current_read_time= read_time + join->positions[idx].read_time; + + /* Expand only partial plans with lower cost than the best QEP so far */ + if ((current_read_time + + current_record_count / (double) TIME_FOR_COMPARE) >= join->best_read) + { + DBUG_EXECUTE("opt", print_plan(join, read_time, record_count, idx, + "prune_by_cost");); + continue; + } + + /* + Prune some less promising partial plans. This heuristic may miss + the optimal QEPs, thus it results in a non-exhaustive search. + */ + if (prune_level == 1) + { + if (best_record_count > current_record_count || + best_read_time > current_read_time || + idx == join->const_tables && // 's' is the first table in the QEP + s->table == join->sort_by_table) + { + if (best_record_count >= current_record_count && + best_read_time >= current_read_time && + /* TODO: What is the reasoning behind this condition? */ + (!(s->key_dependent & remaining_tables) || + join->positions[idx].records_read < 2.0)) + { + best_record_count= current_record_count; + best_read_time= current_read_time; + } + } + else + { + DBUG_EXECUTE("opt", print_plan(join, read_time, record_count, idx, + "pruned_by_heuristic");); + continue; + } + } + + if ( (search_depth > 1) && (remaining_tables & ~real_table_bit) ) + { /* Recursively expand the current partial plan */ + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + best_extension_by_limited_search(join, + remaining_tables & ~real_table_bit, + idx + 1, + current_record_count, + current_read_time, + search_depth - 1, + prune_level); + if (thd->killed) + DBUG_VOID_RETURN; + swap_variables(JOIN_TAB*, join->best_ref[idx], *pos); + } + else + { /* + 'join' is either the best partial QEP with 'search_depth' relations, + or the best complete QEP so far, whichever is smaller. 
+ */ + current_read_time+= current_record_count / (double) TIME_FOR_COMPARE; + if (join->sort_by_table && + join->sort_by_table != join->positions[join->const_tables].table->table) + /* We have to make a temp table */ + current_read_time+= current_record_count; + if ((search_depth == 1) || (current_read_time < join->best_read)) + { + memcpy((gptr) join->best_positions, (gptr) join->positions, + sizeof(POSITION) * (idx + 1)); + join->best_read= current_read_time - 0.001; + } + DBUG_EXECUTE("opt", + print_plan(join, current_read_time, current_record_count, idx, "full_plan");); + } + } + } + DBUG_VOID_RETURN; +} + + +/* + TODO: this function is here only temporarily until 'greedy_search' is + tested and accepted. +*/ static void find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, double read_time) @@ -2638,7 +4283,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, { memcpy((gptr) join->best_positions,(gptr) join->positions, sizeof(POSITION)*idx); - join->best_read=read_time; + join->best_read= read_time - 0.001; } return; } @@ -2815,7 +4460,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, { /* Assume that the first key part matches 1% of the file - and that the hole key matches 10 (duplicates) or 1 + and that the whole key matches 10 (duplicates) or 1 (unique) records. Assume also that more key matches proportionally more records @@ -2935,7 +4580,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count, { /* Estimate cost of reading table. */ tmp= s->table->file->scan_time(); - if (s->on_expr) // Can't use join cache + if (s->table->map & join->outer_join) // Can't use join cache { /* For each record we have to: @@ -3109,11 +4754,12 @@ get_best_combination(JOIN *join) KEYUSE *keyuse; uint table_count; THD *thd=join->thd; + DBUG_ENTER("get_best_combination"); table_count=join->tables; if (!(join->join_tab=join_tab= (JOIN_TAB*) thd->alloc(sizeof(JOIN_TAB)*table_count))) - return TRUE; + DBUG_RETURN(TRUE); join->full_join=0; @@ -3125,8 +4771,9 @@ get_best_combination(JOIN *join) form=join->table[tablenr]=j->table; used_tables|= form->map; form->reginfo.join_tab=j; - if (!j->on_expr) + if (!*j->on_expr_ref) form->reginfo.not_exists_optimize=0; // Only with LEFT JOIN + DBUG_PRINT("info",("type: %d", j->type)); if (j->type == JT_CONST) continue; // Handled in make_join_stat.. @@ -3142,13 +4789,13 @@ get_best_combination(JOIN *join) join->full_join=1; } else if (create_ref_for_key(join, j, keyuse, used_tables)) - return TRUE; // Something went wrong + DBUG_RETURN(TRUE); // Something went wrong } for (i=0 ; i < table_count ; i++) join->map2table[join->join_tab[i].table->tablenr]=join->join_tab+i; update_depend_map(join); - return 0; + DBUG_RETURN(0); } @@ -3161,6 +4808,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, uint keyparts,length,key; TABLE *table; KEY *keyinfo; + DBUG_ENTER("create_ref_for_key"); /* Use best key from find_best */ table=j->table; @@ -3210,7 +4858,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, (keyparts+1)))) || !(j->ref.items= (Item**) thd->alloc(sizeof(Item*)*keyparts))) { - return TRUE; + DBUG_RETURN(TRUE); } j->ref.key_buff2=j->ref.key_buff+ALIGN_SIZE(length); j->ref.key_err=1; @@ -3223,7 +4871,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, { j->ref.items[0]=((Item_func*)(keyuse->val))->key_item(); if (keyuse->used_tables) - return TRUE; // not supported yet. 
SerG + DBUG_RETURN(TRUE); // not supported yet. SerG j->type=JT_FT; } @@ -3247,7 +4895,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, maybe_null ? (char*) key_buff : 0, keyinfo->key_part[i].length, keyuse->val); if (thd->is_fatal_error) - return TRUE; + DBUG_RETURN(TRUE); tmp.copy(); } else @@ -3256,7 +4904,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, &keyinfo->key_part[i], (char*) key_buff,maybe_null); /* - Remeber if we are going to use REF_OR_NULL + Remember if we are going to use REF_OR_NULL But only if field _really_ can be null i.e. we force JT_REF instead of JT_REF_OR_NULL in case if field can't be null */ @@ -3267,7 +4915,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, } /* not ftkey */ *ref_key=0; // end_marker if (j->type == JT_FT) - return 0; + DBUG_RETURN(0); if (j->type == JT_CONST) j->table->const_table= 1; else if (((keyinfo->flags & (HA_NOSAME | HA_NULL_PART_KEY | @@ -3291,7 +4939,7 @@ static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, } else j->type=JT_EQ_REF; - return 0; + DBUG_RETURN(0); } @@ -3354,10 +5002,11 @@ make_simple_join(JOIN *join,TABLE *tmp_table) { TABLE **tableptr; JOIN_TAB *join_tab; + DBUG_ENTER("make_simple_join"); if (!(tableptr=(TABLE**) join->thd->alloc(sizeof(TABLE*))) || !(join_tab=(JOIN_TAB*) join->thd->alloc(sizeof(JOIN_TAB)))) - return TRUE; + DBUG_RETURN(TRUE); join->join_tab=join_tab; join->table=tableptr; tableptr[0]=tmp_table; join->tables=1; @@ -3380,15 +5029,148 @@ make_simple_join(JOIN *join,TABLE *tmp_table) join_tab->type= JT_ALL; /* Map through all records */ join_tab->keys.init(~0); /* test everything in quick */ join_tab->info=0; - join_tab->on_expr=0; + join_tab->on_expr_ref=0; + join_tab->last_inner= 0; + join_tab->first_unmatched= 0; join_tab->ref.key = -1; join_tab->not_used_in_distinct=0; join_tab->read_first_record= join_init_read_record; join_tab->join=join; + join_tab->ref.key_parts= 0; bzero((char*) &join_tab->read_record,sizeof(join_tab->read_record)); tmp_table->status=0; tmp_table->null_row=0; - return FALSE; + DBUG_RETURN(FALSE); +} + + +/* + Build a predicate guarded by match variables for embedding outer joins + + SYNOPSIS + add_found_match_trig_cond() + tab the first inner table for most nested outer join + cond the predicate to be guarded + root_tab the first inner table to stop + + DESCRIPTION + The function recursively adds guards for predicate cond + assending from tab to the first inner table next embedding + nested outer join and so on until it reaches root_tab + (root_tab can be 0). + + RETURN VALUE + pointer to the guarded predicate, if success + 0, otherwise +*/ + +static COND* +add_found_match_trig_cond(JOIN_TAB *tab, COND *cond, JOIN_TAB *root_tab) +{ + COND *tmp; + if (tab == root_tab || !cond) + return cond; + if ((tmp= add_found_match_trig_cond(tab->first_upper, cond, root_tab))) + { + tmp= new Item_func_trig_cond(tmp, &tab->found); + } + if (tmp) + tmp->quick_fix_field(); + return tmp; +} + + +/* + Fill in outer join related info for the execution plan structure + + SYNOPSIS + make_outerjoin_info() + join - reference to the info fully describing the query + + DESCRIPTION + For each outer join operation left after simplification of the + original query the function set up the following pointers in the linear + structure join->join_tab representing the selected execution plan. 
+ The first inner table t0 for the operation is set to refer to the last + inner table tk through the field t0->last_inner. + Any inner table ti for the operation are set to refer to the first + inner table ti->first_inner. + The first inner table t0 for the operation is set to refer to the + first inner table of the embedding outer join operation, if there is any, + through the field t0->first_upper. + The on expression for the outer join operation is attached to the + corresponding first inner table through the field t0->on_expr_ref. + Here ti are structures of the JOIN_TAB type. + + EXAMPLE + For the query: + SELECT * FROM t1 + LEFT JOIN + (t2, t3 LEFT JOIN t4 ON t3.a=t4.a) + ON (t1.a=t2.a AND t1.b=t3.b) + WHERE t1.c > 5, + given the execution plan with the table order t1,t2,t3,t4 + is selected, the following references will be set; + t4->last_inner=[t4], t4->first_inner=[t4], t4->first_upper=[t2] + t2->last_inner=[t4], t2->first_inner=t3->first_inner=[t2], + on expression (t1.a=t2.a AND t1.b=t3.b) will be attached to + *t2->on_expr_ref, while t3.a=t4.a will be attached to *t4->on_expr_ref. + + NOTES + The function assumes that the simplification procedure has been + already applied to the join query (see simplify_joins). + This function can be called only after the execution plan + has been chosen. +*/ + +static void +make_outerjoin_info(JOIN *join) +{ + DBUG_ENTER("make_outerjoin_info"); + for (uint i=join->const_tables ; i < join->tables ; i++) + { + JOIN_TAB *tab=join->join_tab+i; + TABLE *table=tab->table; + TABLE_LIST *tbl= table->pos_in_table_list; + TABLE_LIST *embedding= tbl->embedding; + + if (tbl->outer_join) + { + /* + Table tab is the only one inner table for outer join. + (Like table t4 for the table reference t3 LEFT JOIN t4 ON t3.a=t4.a + is in the query above.) + */ + tab->last_inner= tab->first_inner= tab; + tab->on_expr_ref= &tbl->on_expr; + tab->cond_equal= tbl->cond_equal; + if (embedding) + tab->first_upper= embedding->nested_join->first_nested; + } + for ( ; embedding ; embedding= embedding->embedding) + { + NESTED_JOIN *nested_join= embedding->nested_join; + if (!nested_join->counter) + { + /* + Table tab is the first inner table for nested_join. + Save reference to it in the nested join structure. + */ + nested_join->first_nested= tab; + tab->on_expr_ref= &embedding->on_expr; + tab->cond_equal= tbl->cond_equal; + if (embedding->embedding) + tab->first_upper= embedding->embedding->nested_join->first_nested; + } + if (!tab->first_inner) + tab->first_inner= nested_join->first_nested; + if (++nested_join->counter < nested_join->join_list.elements) + break; + /* Table tab is the last inner table for nested join. 
*/ + nested_join->first_nested->last_inner= tab; + } + } + DBUG_VOID_RETURN; } @@ -3399,20 +5181,45 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) if (select) { table_map used_tables; - if (join->tables > 1) - cond->update_used_tables(); // Tablenr may have changed - if (join->const_tables == join->tables && - join->thd->lex->current_select->master_unit() == - &join->thd->lex->unit) // not upper level SELECT - join->const_table_map|=RAND_TABLE_BIT; - { // Check const tables - COND *const_cond= - make_cond_for_table(cond,join->const_table_map,(table_map) 0); - DBUG_EXECUTE("where",print_where(const_cond,"constants");); - if (const_cond && !const_cond->val_int()) - { - DBUG_PRINT("info",("Found impossible WHERE condition")); - DBUG_RETURN(1); // Impossible const condition + if (cond) /* Because of QUICK_GROUP_MIN_MAX_SELECT */ + { /* there may be a select without a cond. */ + if (join->tables > 1) + cond->update_used_tables(); // Tablenr may have changed + if (join->const_tables == join->tables && + join->thd->lex->current_select->master_unit() == + &join->thd->lex->unit) // not upper level SELECT + join->const_table_map|=RAND_TABLE_BIT; + { // Check const tables + COND *const_cond= + make_cond_for_table(cond,join->const_table_map,(table_map) 0); + DBUG_EXECUTE("where",print_where(const_cond,"constants");); + for (JOIN_TAB *tab= join->join_tab+join->const_tables; + tab < join->join_tab+join->tables ; tab++) + { + if (*tab->on_expr_ref) + { + JOIN_TAB *cond_tab= tab->first_inner; + COND *tmp= make_cond_for_table(*tab->on_expr_ref, + join->const_table_map, + ( table_map) 0); + if (!tmp) + continue; + tmp= new Item_func_trig_cond(tmp, &cond_tab->not_null_compl); + if (!tmp) + DBUG_RETURN(1); + tmp->quick_fix_field(); + cond_tab->select_cond= !cond_tab->select_cond ? tmp : + new Item_cond_and(cond_tab->select_cond,tmp); + if (!cond_tab->select_cond) + DBUG_RETURN(1); + cond_tab->select_cond->quick_fix_field(); + } + } + if (const_cond && !const_cond->val_int()) + { + DBUG_PRINT("info",("Found impossible WHERE condition")); + DBUG_RETURN(1); // Impossible const condition + } } } used_tables=((select->const_tables=join->const_table_map) | @@ -3420,14 +5227,15 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) for (uint i=join->const_tables ; i < join->tables ; i++) { JOIN_TAB *tab=join->join_tab+i; + JOIN_TAB *first_inner_tab= tab->first_inner; table_map current_map= tab->table->map; + bool use_quick_range=0; /* Following force including random expression in last table condition. It solve problem with select like SELECT * FROM t1 WHERE rand() > 0.5 */ if (i == join->tables-1) current_map|= OUTER_REF_TABLE_BIT | RAND_TABLE_BIT; - bool use_quick_range=0; used_tables|=current_map; if (tab->type == JT_REF && tab->quick && @@ -3443,23 +5251,59 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) join->best_positions[i].records_read= rows2double(tab->quick->records); } - COND *tmp=make_cond_for_table(cond,used_tables,current_map); - if (!tmp && tab->quick) + COND *tmp= NULL; + if (cond) + tmp= make_cond_for_table(cond,used_tables,current_map); + if (cond && !tmp && tab->quick) { // Outer join - /* - Hack to handle the case where we only refer to a table - in the ON part of an OUTER JOIN. 
- */ - tmp=new Item_int((longlong) 1,1); // Always true + if (tab->type != JT_ALL) + { + /* + Don't use the quick method + We come here in the case where we have 'key=constant' and + the test is removed by make_cond_for_table() + */ + delete tab->quick; + tab->quick= 0; + } + else + { + /* + Hack to handle the case where we only refer to a table + in the ON part of an OUTER JOIN. In this case we want the code + below to check if we should use 'quick' instead. + */ + DBUG_PRINT("info", ("Item_int")); + tmp= new Item_int((longlong) 1,1); // Always true + DBUG_PRINT("info", ("Item_int 0x%lx", (ulong)tmp)); + } + } - if (tmp) + if (tmp || !cond) { DBUG_EXECUTE("where",print_where(tmp,tab->table->table_name);); SQL_SELECT *sel=tab->select=(SQL_SELECT*) join->thd->memdup((gptr) select, sizeof(SQL_SELECT)); if (!sel) DBUG_RETURN(1); // End of memory - tab->select_cond=sel->cond=tmp; + /* + If tab is an inner table of an outer join operation, + add a match guard to the pushed down predicate. + The guard will turn the predicate on only after + the first match for outer tables is encountered. + */ + if (cond) + {/* + Because of QUICK_GROUP_MIN_MAX_SELECT there may be a select without + a cond, so neutralize the hack above. + */ + if (!(tmp= add_found_match_trig_cond(first_inner_tab, tmp, 0))) + DBUG_RETURN(1); + tab->select_cond=sel->cond=tmp; + } + else + tab->select_cond= sel->cond= NULL; + sel->head=tab->table; if (tab->quick) { @@ -3467,7 +5311,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) with key reading */ if (tab->needed_reg.is_clear_all() && tab->type != JT_EQ_REF && tab->type != JT_FT && (tab->type != JT_REF || - (uint) tab->ref.key == tab->quick->index)) + (uint) tab->ref.key == tab->quick->index)) { sel->quick=tab->quick; // Use value from get_quick_... sel->quick_keys.clear_all(); @@ -3498,7 +5342,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) the index if we are using limit and this is the first table */ - if ((!tab->keys.is_subset(tab->const_keys) && i > 0) || + if (cond && + (!tab->keys.is_subset(tab->const_keys) && i > 0) || (!tab->const_keys.is_clear_all() && i == join->const_tables && join->unit->select_limit_cnt < join->best_positions[i].records_read && @@ -3506,7 +5351,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) { /* Join with outer join condition */ COND *orig_cond=sel->cond; - sel->cond= and_conds(sel->cond, tab->on_expr); + sel->cond= and_conds(sel->cond, *tab->on_expr_ref); /* We can't call sel->cond->fix_fields, @@ -3530,7 +5375,7 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) we have to check isn't it only "impossible ON" instead */ sel->cond=orig_cond; - if (!tab->on_expr || + if (!*tab->on_expr_ref || sel->test_quick_select(join->thd, tab->keys, used_tables & ~ current_map, (join->select_options & @@ -3565,7 +5410,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } if (i != join->const_tables && tab->use_quick != 2) { /* Read with cache */ - if ((tmp=make_cond_for_table(cond, + if (cond && + (tmp=make_cond_for_table(cond, join->const_table_map | current_map, current_map))) @@ -3578,17 +5424,75 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond) } } } + } + + /* + Push down all predicates from on expressions. + Each of these predicated are guarded by a variable + that turns if off just before null complemented row for + outer joins is formed. Thus, the predicates from an + 'on expression' are guaranteed not to be checked for + the null complemented row. 
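+      For example, for t1 LEFT JOIN t2 ON t2.a=1 AND t2.b=t1.b the part
+      t2.a=1 pushed down to t2 is wrapped into an Item_func_trig_cond
+      controlled by the not_null_compl flag of t2's first inner table
+      (here t2 itself), so the check is simply skipped while the
+      NULL-complemented row for t2 is generated and cannot reject it.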
+ */ + JOIN_TAB *last_tab= tab; + while (first_inner_tab && first_inner_tab->last_inner == last_tab) + { + /* + Table tab is the last inner table of an outer join. + An on expression is always attached to it. + */ + COND *on_expr= *first_inner_tab->on_expr_ref; + + table_map used_tables= join->const_table_map | + OUTER_REF_TABLE_BIT | RAND_TABLE_BIT; + for (tab= join->join_tab+join->const_tables; tab <= last_tab ; tab++) + { + current_map= tab->table->map; + used_tables|= current_map; + COND *tmp= make_cond_for_table(on_expr, used_tables, current_map); + if (tmp) + { + JOIN_TAB *cond_tab= tab < first_inner_tab ? first_inner_tab : tab; + /* + First add the guards for match variables of + all embedding outer join operations. + */ + if (!(tmp= add_found_match_trig_cond(cond_tab->first_inner, + tmp, first_inner_tab))) + DBUG_RETURN(1); + /* + Now add the guard turning the predicate off for + the null complemented row. + */ + DBUG_PRINT("info", ("Item_func_trig_cond")); + tmp= new Item_func_trig_cond(tmp, + &first_inner_tab->not_null_compl); + DBUG_PRINT("info", ("Item_func_trig_cond 0x%lx", (ulong) tmp)); + if (tmp) + tmp->quick_fix_field(); + /* Add the predicate to other pushed down predicates */ + DBUG_PRINT("info", ("Item_cond_and")); + cond_tab->select_cond= !cond_tab->select_cond ? tmp : + new Item_cond_and(cond_tab->select_cond,tmp); + DBUG_PRINT("info", ("Item_cond_and 0x%lx", + (ulong)cond_tab->select_cond)); + if (!cond_tab->select_cond) + DBUG_RETURN(1); + cond_tab->select_cond->quick_fix_field(); + } + } + first_inner_tab= first_inner_tab->first_upper; } } } DBUG_RETURN(0); } - static void make_join_readinfo(JOIN *join, uint options) { uint i; + bool statistics= test(!(join->select_options & SELECT_DESCRIBE)); DBUG_ENTER("make_join_readinfo"); @@ -3672,7 +5576,7 @@ make_join_readinfo(JOIN *join, uint options) */ table->status=STATUS_NO_RECORD; if (i != join->const_tables && !(options & SELECT_NO_JOIN_CACHE) && - tab->use_quick != 2 && !tab->on_expr) + tab->use_quick != 2 && !tab->first_inner) { if ((options & SELECT_DESCRIBE) || !join_init_cache(join->thd,join->join_tab+join->const_tables, @@ -3687,7 +5591,8 @@ make_join_readinfo(JOIN *join, uint options) join->thd->server_status|=SERVER_QUERY_NO_GOOD_INDEX_USED; tab->read_first_record= join_init_quick_read_record; if (statistics) - statistic_increment(select_range_check_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_range_check_count, + &LOCK_status); } else { @@ -3697,13 +5602,15 @@ make_join_readinfo(JOIN *join, uint options) if (tab->select && tab->select->quick) { if (statistics) - statistic_increment(select_range_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_range_count, + &LOCK_status); } else { join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) - statistic_increment(select_scan_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_scan_count, + &LOCK_status); } } else @@ -3711,13 +5618,15 @@ make_join_readinfo(JOIN *join, uint options) if (tab->select && tab->select->quick) { if (statistics) - statistic_increment(select_full_range_join_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_full_range_join_count, + &LOCK_status); } else { join->thd->server_status|=SERVER_QUERY_NO_INDEX_USED; if (statistics) - statistic_increment(select_full_join_count, &LOCK_status); + statistic_increment(join->thd->status_var.select_full_join_count, + &LOCK_status); } } if (!table->no_keyread) @@ -3775,7 +5684,8 @@ bool 
error_if_full_join(JOIN *join) { if (tab->type == JT_ALL && (!tab->select || !tab->select->quick)) { - my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,MYF(0)); + my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, + ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0)); return(1); } } @@ -4054,7 +5964,7 @@ remove_const(JOIN *join,ORDER *first_order, COND *cond, bool *simple_order) table_map not_const_tables= ~join->const_table_map; table_map ref; prev_ptr= &first_order; - *simple_order= join->join_tab[join->const_tables].on_expr ? 0 : 1; + *simple_order= *join->join_tab[join->const_tables].on_expr_ref ? 0 : 1; /* NOTE: A variable of not_const_tables ^ first_table; breaks gcc 2.7 */ @@ -4121,12 +6031,13 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables, if (send_row) { - for (TABLE_LIST *table=tables; table ; table=table->next) + for (TABLE_LIST *table= tables; table; table= table->next_leaf) mark_as_null_row(table->table); // All fields are NULL if (having && having->val_int() == 0) send_row=0; } - if (!(result->send_fields(fields,1))) + if (!(result->send_fields(fields, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))) { if (send_row) { @@ -4161,9 +6072,13 @@ static void clear_tables(JOIN *join) class COND_CMP :public ilink { public: - static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } static void operator delete(void *ptr __attribute__((unused)), - size_t size __attribute__((unused))) {} /*lint -e715 */ + size_t size __attribute__((unused))) + { TRASH(ptr, size); } Item *and_level; Item_func *cmp_func; @@ -4177,6 +6092,790 @@ template class List<Item_func_match>; template class List_iterator<Item_func_match>; #endif + +/* + Find the multiple equality predicate containing a field + + SYNOPSIS + find_item_equal() + cond_equal multiple equalities to search in + field field to look for + inherited_fl :out set up to TRUE if multiple equality is found + on upper levels (not on current level of cond_equal) + + DESCRIPTION + The function retrieves the multiple equalities accessed through + the con_equal structure from current level and up looking for + an equality containing field. It stops retrieval as soon as the equality + is found and set up inherited_fl to TRUE if it's found on upper levels. + + RETURN + Item_equal for the found multiple equality predicate if a success; + NULL - otherwise. +*/ + +Item_equal *find_item_equal(COND_EQUAL *cond_equal, Field *field, + bool *inherited_fl) +{ + Item_equal *item= 0; + bool in_upper_level= FALSE; + while (cond_equal) + { + List_iterator_fast<Item_equal> li(cond_equal->current_level); + while ((item= li++)) + { + if (item->contains(field)) + goto finish; + } + in_upper_level= TRUE; + cond_equal= cond_equal->upper_levels; + } + in_upper_level= FALSE; +finish: + *inherited_fl= in_upper_level; + return item; +} + + +/* + Check whether an item is a simple equality predicate and if so + create/find a multiple equality for this predicate + + SYNOPSIS + check_equality() + item item to check + cond_equal multiple equalities that must hold together with the predicate + + DESCRIPTION + This function first checks whether an item is a simple equality i.e. + the one that equates a field with another field or a constant + (item=constant_item or item=field_item). + If this is the case the function looks a for a multiple equality + in the lists referenced directly or indirectly by cond_equal inferring + the given simple equality. 
If it doesn't find any, it builds a multiple + equality that covers the predicate, i.e. the predicate can be inferred + from it. + The built multiple equality could be obtained in such a way: + create a binary multiple equality equivalent to the predicate, then + merge it, if possible, with one of old multiple equalities. + This guarantees that the set of multiple equalities covering equality + predicates will + be minimal. + + EXAMPLE + For the where condition + WHERE a=b AND b=c AND + (b=2 OR f=e) + the check_equality will be called for the following equality + predicates a=b, b=c, b=2 and f=e. + For a=b it will be called with *cond_equal=(0,[]) and will transform + *cond_equal into (0,[Item_equal(a,b)]). + For b=c it will be called with *cond_equal=(0,[Item_equal(a,b)]) + and will transform *cond_equal into CE=(0,[Item_equal(a,b,c)]). + For b=2 it will be called with *cond_equal=(ptr(CE),[]) + and will transform *cond_equal into (ptr(CE,[Item_equal(2,a,b,c)]). + For f=e it will be called with *cond_equal=(ptr(CE), []) + and will transform *cond_equal into (ptr(CE,[Item_equal(f,e)]). + + NOTES + Now only fields that have the same type defintions (verified by + the Field::eq_def method) are placed to the same multiple equalities. + Because of this some equality predicates are not eliminated and + can be used in the constant propagation procedure. + We could weeken the equlity test as soon as at least one of the + equal fields is to be equal to a constant. It would require a + more complicated implementation: we would have to store, in + general case, its own constant for each fields from the multiple + equality. But at the same time it would allow us to get rid + of constant propagation completely: it would be done by the call + to build_equal_items_for_cond. + + IMPLEMENTATION + The implementation does not follow exactly the above rules to + build a new multiple equality for the equality predicate. + If it processes the equality of the form field1=field2, it + looks for multiple equalities me1 containig field1 and me2 containing + field2. If only one of them is found the fuction expands it with + the lacking field. If multiple equalities for both fields are + found they are merged. If both searches fail a new multiple equality + containing just field1 and field2 is added to the existing + multiple equalities. + If the function processes the predicate of the form field1=const, + it looks for a multiple equality containing field1. If found, the + function checks the constant of the multiple equality. If the value + is unknown, it is setup to const. Otherwise the value is compared with + const and the evaluation of the equality predicate is performed. + When expanding/merging equality predicates from the upper levels + the function first copies them for the current level. It looks + acceptable, as this happens rarely. The implementation without + copying would be much more complicated. 
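+    As an illustration only, the tiny self-contained sketch below (with
+    invented names, and covering only the field1=field2 case, not the
+    field=const case) shows the grow/merge logic described above:
+
+      #include <cstdio>
+      #include <string>
+      #include <vector>
+      #include <algorithm>
+
+      typedef std::vector<std::string> EqClass;   // one "multiple equality"
+
+      // Add the simple equality f1=f2 to a set of disjoint equality
+      // classes: extend the class that already contains one of the
+      // fields, merge two classes when both fields are already covered,
+      // or start a new class of just {f1,f2}.
+      static void add_equality(std::vector<EqClass> &classes,
+                               const std::string &f1, const std::string &f2)
+      {
+        EqClass *c1= 0, *c2= 0;
+        for (EqClass &c : classes)
+        {
+          if (std::find(c.begin(), c.end(), f1) != c.end()) c1= &c;
+          if (std::find(c.begin(), c.end(), f2) != c.end()) c2= &c;
+        }
+        if (c1 && c1 == c2)                       // already inferred
+          return;
+        if (c1 && c2)                             // merge the two classes
+        {
+          c1->insert(c1->end(), c2->begin(), c2->end());
+          classes.erase(classes.begin() + (c2 - &classes[0]));
+        }
+        else if (c1) c1->push_back(f2);
+        else if (c2) c2->push_back(f1);
+        else classes.push_back(EqClass{f1, f2});
+      }
+
+      int main()
+      {
+        std::vector<EqClass> classes;
+        add_equality(classes, "t1.a", "t2.b");    // =(t1.a,t2.b)
+        add_equality(classes, "t2.b", "t3.c");    // =(t1.a,t2.b,t3.c)
+        add_equality(classes, "t4.d", "t5.e");    // a separate class
+        for (const EqClass &c : classes)
+        {
+          printf("=(");
+          for (int i= 0; i < (int) c.size(); i++)
+            printf("%s%s", i ? "," : "", c[i].c_str());
+          printf(")\n");
+        }
+        return 0;
+      }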
+ + RETURN + TRUE - if the predicate is a simple equality predicate + FALSE - otherwise +*/ + +static bool check_equality(Item *item, COND_EQUAL *cond_equal) +{ + if (item->type() == Item::FUNC_ITEM && + ((Item_func*) item)->functype() == Item_func::EQ_FUNC) + { + Item *left_item= ((Item_func*) item)->arguments()[0]; + Item *right_item= ((Item_func*) item)->arguments()[1]; + if (left_item->type() == Item::FIELD_ITEM && + right_item->type() == Item::FIELD_ITEM) + { + /* The predicate the form field1=field2 is processed */ + + Field *left_field= ((Item_field*) left_item)->field; + Field *right_field= ((Item_field*) right_item)->field; + + if (!left_field->eq_def(right_field)) + return FALSE; + + if (left_field->eq(right_field)) /* f = f */ + return TRUE; + + /* Search for multiple equalities containing field1 and/or field2 */ + bool left_copyfl, right_copyfl; + Item_equal *left_item_equal= + find_item_equal(cond_equal, left_field, &left_copyfl); + Item_equal *right_item_equal= + find_item_equal(cond_equal, right_field, &right_copyfl); + + if (left_item_equal && left_item_equal == right_item_equal) + { + /* + The equality predicate is inference of one of the existing + multiple equalities, i.e the condition is already covered + by upper level equalities + */ + return TRUE; + } + + /* Copy the found multiple equalities at the current level if needed */ + if (left_copyfl) + { + /* left_item_equal of an upper level contains left_item */ + left_item_equal= new Item_equal(left_item_equal); + cond_equal->current_level.push_back(left_item_equal); + } + if (right_copyfl) + { + /* right_item_equal of an upper level contains right_item */ + right_item_equal= new Item_equal(right_item_equal); + cond_equal->current_level.push_back(right_item_equal); + } + + if (left_item_equal) + { + /* left item was found in the current or one of the upper levels */ + if (! 
right_item_equal) + left_item_equal->add((Item_field *) right_item); + else + { + /* Merge two multiple equalities forming a new one */ + left_item_equal->merge(right_item_equal); + /* Remove the merged multiple equality from the list */ + List_iterator<Item_equal> li(cond_equal->current_level); + while ((li++) != right_item_equal); + li.remove(); + } + } + else + { + /* left item was not found neither the current nor in upper levels */ + if (right_item_equal) + right_item_equal->add((Item_field *) left_item); + else + { + /* None of the fields was found in multiple equalities */ + Item_equal *item= new Item_equal((Item_field *) left_item, + (Item_field *) right_item); + cond_equal->current_level.push_back(item); + } + } + return TRUE; + } + + { + /* The predicate of the form field=const/const=field is processed */ + Item *const_item= 0; + Item_field *field_item= 0; + if (left_item->type() == Item::FIELD_ITEM && + right_item->const_item()) + { + field_item= (Item_field*) left_item; + const_item= right_item; + } + else if (right_item->type() == Item::FIELD_ITEM && + left_item->const_item()) + { + field_item= (Item_field*) right_item; + const_item= left_item; + } + if (const_item && + field_item->result_type() == const_item->result_type()) + { + bool copyfl; + + if (field_item->result_type() == STRING_RESULT && + ((Field_str *) field_item->field)->charset() != + ((Item_cond *) item)->compare_collation()) + return FALSE; + + Item_equal *item_equal = find_item_equal(cond_equal, + field_item->field, ©fl); + if (copyfl) + { + item_equal= new Item_equal(item_equal); + cond_equal->current_level.push_back(item_equal); + } + if (item_equal) + { + /* + The flag cond_false will be set to 1 after this, if item_equal + already contains a constant and its value is not equal to + the value of const_item. + */ + item_equal->add(const_item); + } + else + { + item_equal= new Item_equal(const_item, field_item); + cond_equal->current_level.push_back(item_equal); + } + return TRUE; + } + } + } + return FALSE; +} + +/* + Replace all equality predicates in a condition by multiple equality items + + SYNOPSIS + build_equal_items_for_cond() + cond condition(expression) where to make replacement + inherited path to all inherited multiple equality items + + DESCRIPTION + At each 'and' level the function detects items for equality predicates + and replaced them by a set of multiple equality items of class Item_equal, + taking into account inherited equalities from upper levels. + If an equality predicate is used not in a conjunction it's just + replaced by a multiple equality predicate. + For each 'and' level the function set a pointer to the inherited + multiple equalities in the cond_equal field of the associated + object of the type Item_cond_and. + The function also traverses the cond tree and and for each field reference + sets a pointer to the multiple equality item containing the field, if there + is any. If this multiple equality equates fields to a constant the + function replace the field reference by the constant. + The function also determines the maximum number of members in + equality lists of each Item_cond_and object assigning it to + cond_equal->max_members of this object and updating accordingly + the upper levels COND_EQUAL structures. + + NOTES + Multiple equality predicate =(f1,..fn) is equivalent to the conjuction of + f1=f2, .., fn-1=fn. It substitutes any inference from these + equality predicates that is equivalent to the conjunction. 
+ Thus, =(a1,a2,a3) can substitute for ((a1=a3) AND (a2=a3) AND (a2=a1)) as + it is equivalent to ((a1=a2) AND (a2=a3)). + The function always makes a substitution of all equality predicates occured + in a conjuction for a minimal set of multiple equality predicates. + This set can be considered as a canonical representation of the + sub-conjunction of the equality predicates. + E.g. (t1.a=t2.b AND t2.b>5 AND t1.a=t3.c) is replaced by + (=(t1.a,t2.b,t3.c) AND t2.b>5), not by + (=(t1.a,t2.b) AND =(t1.a,t3.c) AND t2.b>5); + while (t1.a=t2.b AND t2.b>5 AND t3.c=t4.d) is replaced by + (=(t1.a,t2.b) AND =(t3.c=t4.d) AND t2.b>5), + but if additionally =(t4.d,t2.b) is inherited, it + will be replaced by (=(t1.a,t2.b,t3.c,t4.d) AND t2.b>5) + + IMPLEMENTATION + The function performs the substitution in a recursive descent by + the condtion tree, passing to the next AND level a chain of multiple + equality predicates which have been built at the upper levels. + The Item_equal items built at the level are attached to other + non-equality conjucts as a sublist. The pointer to the inherited + multiple equalities is saved in the and condition object (Item_cond_and). + This chain allows us for any field reference occurence easyly to find a + multiple equality that must be held for this occurence. + For each AND level we do the following: + - scan it for all equality predicate (=) items + - join them into disjoint Item_equal() groups + - process the included OR conditions recursively to do the same for + lower AND levels. + We need to do things in this order as lower AND levels need to know about + all possible Item_equal objects in upper levels. + + RETURN + pointer to the transformed condition +*/ + +static COND *build_equal_items_for_cond(COND *cond, + COND_EQUAL *inherited) +{ + Item_equal *item_equal; + uint members; + COND_EQUAL cond_equal; + cond_equal.upper_levels= inherited; + + if (cond->type() == Item::COND_ITEM) + { + bool and_level= ((Item_cond*) cond)->functype() == + Item_func::COND_AND_FUNC; + List<Item> *args= ((Item_cond*) cond)->argument_list(); + + List_iterator<Item> li(*args); + Item *item; + + if (and_level) + { + /* + Retrieve all conjucts of this level detecting the equality + that are subject to substitution by multiple equality items and + removing each such predicate from the conjunction after having + found/created a multiple equality whose inference the predicate is. + */ + while ((item= li++)) + { + if (check_equality(item, &cond_equal)) + li.remove(); + } + + List_iterator_fast<Item_equal> it(cond_equal.current_level); + while ((item_equal= it++)) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + members= item_equal->members(); + if (cond_equal.max_members < members) + cond_equal.max_members= members; + } + members= cond_equal.max_members; + if (inherited && inherited->max_members < members) + { + do + { + inherited->max_members= members; + inherited= inherited->upper_levels; + } + while (inherited); + } + + ((Item_cond_and*)cond)->cond_equal= cond_equal; + inherited= &(((Item_cond_and*)cond)->cond_equal); + } + /* + Make replacement of equality predicates for lower levels + of the condition expression. 
+ */ + li.rewind(); + while((item= li++)) + { + Item *new_item; + if ((new_item = build_equal_items_for_cond(item, inherited))!= item) + { + /* This replacement happens only for standalone equalities */ + li.replace(new_item); + } + } + if (and_level) + args->concat((List<Item> *)&cond_equal.current_level); + } + else if (cond->type() == Item::FUNC_ITEM) + { + /* + If an equality predicate forms the whole and level, + we call it standalone equality and it's processed here. + E.g. in the following where condition + WHERE a=5 AND (b=5 or a=c) + (b=5) and (a=c) are standalone equalities. + In general we can't leave alone standalone eqalities: + for WHERE a=b AND c=d AND (b=c OR d=5) + b=c is replaced by =(a,b,c,d). + */ + if (check_equality(cond, &cond_equal) && + (item_equal= cond_equal.current_level.pop())) + { + item_equal->fix_length_and_dec(); + item_equal->update_used_tables(); + return item_equal; + } + /* + For each field reference in cond, not from equalitym predicates, + set a pointer to the multiple equality if belongs to (if there is any) + */ + cond= cond->transform(&Item::equal_fields_propagator, + (byte *) inherited); + cond->update_used_tables(); + } + return cond; +} + + +/* + Build multiple equalities for a condition and all on expressions that + inherit these multiple equalities + + SYNOPSIS + build_equal_items() + thd Thread handler + cond condition to build the multiple equalities for + inherited path to all inherited multiple equality items + join_list list of join tables to which the condition refers to + cond_equal_ref :out pointer to the structure to place built equalities in + + DESCRIPTION + The function first applies the build_equal_items_for_cond function + to build all multiple equalities for condition cond utilizing equalities + referred through the parameter inherited. The extended set of + equalities is returned in the structure referred by the cond_equal_ref + parameter. After this the function calls itself recursively for + all on expressions whose direct references can be found in join_list + and who inherit directly the multiple equalities just having built. + + NOTES + The on expression used in an outer join operation inherits all equalities + from the on expression of the embedding join, if there is any, or + otherwise - from the where condition. + This fact is not obvious, but presumably can be proved. + Consider the following query: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t2.a=t4.a + WHERE t1.a=t2.a; + If the on expression in the query inherits =(t1.a,t2.a), then we + can build the multiple equality =(t1.a,t2.a,t3.a,t4.a) that infers + the equality t3.a=t4.a. Although the on expression + t1.a=t3.a AND t2.a=t4.a AND t3.a=t4.a is not equivalent to the one + in the query the latter can be replaced by the former: the new query + will return the same result set as the original one. 
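// Editor's note: a toy model, not MySQL code, of the recursive descent that
// build_equal_items_for_cond performs, shown because the two-pass order
// matters: equalities of an AND level are collected first, and only then are
// the remaining conjuncts (and the OR branches inside them) visited, so that
// lower AND levels can see every upper-level multiple equality. All types and
// names below are hypothetical stand-ins.
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Cond
{
  enum Type { AND_COND, OR_COND, EQUALITY, OTHER } type;
  std::vector<std::unique_ptr<Cond> > args;  // children of AND_COND / OR_COND
  std::string lhs, rhs;                      // field names of an EQUALITY
};

struct EqualLevel                            // stands in for COND_EQUAL
{
  const EqualLevel *upper;                   // inherited multiple equalities
  std::vector<std::pair<std::string, std::string> > equalities;
};

static void build_equal_items_sketch(Cond *cond, const EqualLevel *inherited)
{
  if (cond->type == Cond::AND_COND)
  {
    EqualLevel level= { inherited, {} };
    /* Pass 1: pull the equality conjuncts of this AND level out. */
    for (auto &arg : cond->args)
      if (arg->type == Cond::EQUALITY)
        level.equalities.push_back(std::make_pair(arg->lhs, arg->rhs));
    /* Pass 2: recurse into the other conjuncts with this level inherited. */
    for (auto &arg : cond->args)
      if (arg->type != Cond::EQUALITY)
        build_equal_items_sketch(arg.get(), &level);
  }
  else if (cond->type == Cond::OR_COND)
  {
    /* Each OR branch starts its own AND level chain; pass 'inherited' on. */
    for (auto &arg : cond->args)
      build_equal_items_sketch(arg.get(), inherited);
  }
  /* EQUALITY and OTHER leaves carry nothing further in this toy model. */
}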
+ + Interesting that multiple equality =(t1.a,t2.a,t3.a,t4.a) allows us + to use t1.a=t3.a AND t3.a=t4.a under the on condition: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a + WHERE t1.a=t2.a + This query equivalent to: + SELECT * FROM (t1 LEFT JOIN (t3,t4) ON t1.a=t3.a AND t3.a=t4.a),t2 + WHERE t1.a=t2.a + Similarly the original query can be rewritten to the query: + SELECT * FROM (t1,t2) LEFT JOIN (t3,t4) ON t2.a=t4.a AND t3.a=t4.a + WHERE t1.a=t2.a + that is equivalent to: + SELECT * FROM (t2 LEFT JOIN (t3,t4)ON t2.a=t4.a AND t3.a=t4.a), t1 + WHERE t1.a=t2.a + Thus, applying equalities from the where condition we basically + can get more freedom in performing join operations. + Althogh we don't use this property now, it probably makes sense to use + it in the future. + + RETURN + pointer to the transformed condition containing multiple equalities +*/ + +static COND *build_equal_items(THD *thd, COND *cond, + COND_EQUAL *inherited, + List<TABLE_LIST> *join_list, + COND_EQUAL **cond_equal_ref) +{ + COND_EQUAL *cond_equal= 0; + + if (cond) + { + cond= build_equal_items_for_cond(cond, inherited); + cond->update_used_tables(); + if (cond->type() == Item::COND_ITEM && + ((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + cond_equal= &((Item_cond_and*) cond)->cond_equal; + else if (cond->type() == Item::FUNC_ITEM && + ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) + { + cond_equal= new COND_EQUAL; + cond_equal->current_level.push_back((Item_equal *) cond); + } + } + if (cond_equal) + { + cond_equal->upper_levels= inherited; + inherited= cond_equal; + } + *cond_equal_ref= cond_equal; + + if (join_list) + { + TABLE_LIST *table; + List_iterator<TABLE_LIST> li(*join_list); + + while ((table= li++)) + { + if (table->on_expr) + { + Item *expr; + List<TABLE_LIST> *join_list= table->nested_join ? + &table->nested_join->join_list : NULL; + expr= build_equal_items(thd, table->on_expr, inherited, join_list, + &table->cond_equal); + if (expr != table->on_expr) + thd->change_item_tree(&table->on_expr, expr); + } + } + } + + return cond; +} + + +/* + Compare field items by table order in the execution plan + + SYNOPSIS + compare_fields_by_table_order() + field1 first field item to compare + field2 second field item to compare + table_join_idx index to tables determining table order + + DESCRIPTION + field1 considered as better than field2 if the table containing + field1 is accessed earlier than the table containing field2. + The function finds out what of two fields is better according + this criteria. + + RETURN + 1, if field1 is better than field2 + -1, if field2 is better than field1 + 0, otherwise +*/ + +static int compare_fields_by_table_order(Item_field *field1, + Item_field *field2, + void *table_join_idx) +{ + int cmp= 0; + bool outer_ref= 0; + if (field2->used_tables() & OUTER_REF_TABLE_BIT) + { + outer_ref= 1; + cmp= -1; + } + if (field2->used_tables() & OUTER_REF_TABLE_BIT) + { + outer_ref= 1; + cmp++; + } + if (outer_ref) + return cmp; + JOIN_TAB **idx= (JOIN_TAB **) table_join_idx; + cmp= idx[field2->field->table->tablenr]-idx[field1->field->table->tablenr]; + return cmp < 0 ? -1 : (cmp ? 
1 : 0); +} + + +/* + Generate minimal set of simple equalities equivalent to a multiple equality + + SYNOPSIS + eliminate_item_equal() + cond condition to add the generated equality to + upper_levels structure to access multiple equality of upper levels + item_equal multiple equality to generate simple equality from + + DESCRIPTION + The function retrieves the fields of the multiple equality item + item_equal and for each field f: + - if item_equal contains const it generates the equality f=const_item; + - otherwise, if f is not the first field, generates the equality + f=item_equal->get_first(). + All generated equality are added to the cond conjunction. + + NOTES + Before generating an equality function checks that it has not + been generated for multiple equalies of the upper levels. + E.g. for the following where condition + WHERE a=5 AND ((a=b AND b=c) OR c>4) + the upper level AND condition will contain =(5,a), + while the lower level AND condition will contain =(5,a,b,c). + When splitting =(5,a,b,c) into a separate equality predicates + we should omit 5=a, as we have it already in the upper level. + The following where condition gives us a more complicated case: + WHERE t1.a=t2.b AND t3.c=t4.d AND (t2.b=t3.c OR t4.e>5 ...) AND ... + Given the tables are accessed in the order t1->t2->t3->t4 for + the selected query execution plan the lower level multiple + equality =(t1.a,t2.b,t3.c,t4.d) formally should be converted to + t1.a=t2.b AND t1.a=t3.c AND t1.a=t4.d. But t1.a=t2.a will be + generated for the upper level. Also t3.c=t4.d will be generated there. + So only t1.a=t3.c should be left in the lower level. + If cond is equal to 0, then not more then one equality is generated + and a pointer to it is returned as the result of the function. + + RETURN + The condition with generated simple equalities or + a pointer to the simple generated equality, if success. + 0, otherwise. 
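// Editor's note: a hedged sketch, with hypothetical names and simplified
// types, of the splitting rule eliminate_item_equal applies: every member of
// a multiple equality is equated to one representative (the constant if there
// is one, otherwise the first field in join order), and equalities already
// implied by an upper-level multiple equality are skipped. The upper-level
// check is simplified here to a set of fields already equated above.
#include <cstddef>
#include <set>
#include <string>
#include <vector>

struct MultEq
{
  std::vector<std::string> fields;   // members, already sorted by join order
  const char *constant;              // e.g. "5", or 0 when there is none
};

static std::vector<std::string>
split_into_simple_equalities(const MultEq &me,
                             const std::set<std::string> &covered_above)
{
  std::vector<std::string> out;
  std::string head= me.constant ? std::string(me.constant) : me.fields.front();
  std::size_t first= me.constant ? 0 : 1;       // never emit head=head
  for (std::size_t i= first; i < me.fields.size(); i++)
  {
    if (covered_above.count(me.fields[i]))      // already generated above
      continue;
    out.push_back(me.fields[i] + "=" + head);
  }
  return out;
}
// E.g. for =(5,a,b,c) with "a=5" already produced at an upper level, the
// sketch yields only "b=5" and "c=5", matching the minimality goal above.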
+*/ + +static Item *eliminate_item_equal(COND *cond, COND_EQUAL *upper_levels, + Item_equal *item_equal) +{ + List<Item> eq_list; + Item_func_eq *eq_item= 0; + if (((Item *) item_equal)->const_item() && !item_equal->val_int()) + return new Item_int((longlong) 0,1); + Item *item_const= item_equal->get_const(); + Item_equal_iterator it(*item_equal); + Item *head; + if (item_const) + head= item_const; + else + { + head= item_equal->get_first(); + it++; + } + Item_field *item_field; + while ((item_field= it++)) + { + Item_equal *upper= item_field->find_item_equal(upper_levels); + Item_field *item= item_field; + if (upper) + { + if (item_const && upper->get_const()) + item= 0; + else + { + Item_equal_iterator li(*item_equal); + while ((item= li++) != item_field) + { + if (item->find_item_equal(upper_levels) == upper) + break; + } + } + } + if (item == item_field) + { + if (eq_item) + eq_list.push_back(eq_item); + eq_item= new Item_func_eq(item_field, head); + if (!eq_item) + return 0; + eq_item->set_cmp_func(); + eq_item->quick_fix_field(); + } + } + + if (!cond && !eq_list.head()) + { + if (!eq_item) + return new Item_int((longlong) 1,1); + return eq_item; + } + + if (eq_item) + eq_list.push_back(eq_item); + if (!cond) + cond= new Item_cond_and(eq_list); + else + ((Item_cond *) cond)->add_at_head(&eq_list); + + cond->quick_fix_field(); + cond->update_used_tables(); + + return cond; +} + + +/* + Substitute every field reference in a condition by the best equal field + and eliminate all multiplle equality predicates + + SYNOPSIS + substitute_for_best_equal_field() + cond condition to process + cond_equal multiple equalities to take into consideration + table_join_idx index to tables determining field preference + + DESCRIPTION + The function retrieves the cond condition and for each encountered + multiple equality predicate it sorts the field references in it + according to the order of tables specified by the table_join_idx + parameter. Then it eliminates the multiple equality predicate it + replacing it by the conjunction of simple equality predicates + equating every field from the multiple equality to the first + field in it, or to the constant, if there is any. + After this the function retrieves all other conjuncted + predicates substitute every field reference by the field reference + to the first equal field or equal constant if there are any. + + NOTES + At the first glance full sort of fields in multiple equality + seems to be an overkill. Yet it's not the case due to possible + new fields in multiple equality item of lower levels. We want + the order in them to comply with the order of upper levels. 
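// Editor's note: a small illustration, with hypothetical stand-in types, of
// the ordering substitute_for_best_equal_field relies on: members of a
// multiple equality are sorted so that a field whose table comes earlier in
// the chosen execution plan sorts first, and every other member is then
// equated to that first field (or to the constant, if any).
#include <algorithm>
#include <string>
#include <vector>

struct FieldRef
{
  std::string name;
  int table_join_idx;    // position of the field's table in the join order
};

static void sort_by_table_order(std::vector<FieldRef> &members)
{
  std::stable_sort(members.begin(), members.end(),
                   [](const FieldRef &a, const FieldRef &b)
                   { return a.table_join_idx < b.table_join_idx; });
}
// After sorting, members.front() is the "best equal field": equating the
// later tables' fields to it is what enables ref access on earlier tables.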
+ + RETURN + The transformed condition +*/ + +static COND* substitute_for_best_equal_field(COND *cond, + COND_EQUAL *cond_equal, + void *table_join_idx) +{ + Item_equal *item_equal; + + if (cond->type() == Item::COND_ITEM) + { + List<Item> *cond_list= ((Item_cond*) cond)->argument_list(); + + bool and_level= ((Item_cond*) cond)->functype() == + Item_func::COND_AND_FUNC; + if (and_level) + { + cond_equal= &((Item_cond_and *) cond)->cond_equal; + cond_list->disjoin((List<Item> *) &cond_equal->current_level); + + List_iterator_fast<Item_equal> it(cond_equal->current_level); + while ((item_equal= it++)) + { + item_equal->sort(&compare_fields_by_table_order, table_join_idx); + } + } + + List_iterator<Item> li(*cond_list); + Item *item; + while ((item= li++)) + { + Item *new_item =substitute_for_best_equal_field(item, cond_equal, + table_join_idx); + if (new_item != item) + li.replace(new_item); + } + + if (and_level) + { + List_iterator_fast<Item_equal> it(cond_equal->current_level); + while ((item_equal= it++)) + { + eliminate_item_equal(cond, cond_equal->upper_levels, item_equal); + } + } + } + else if (cond->type() == Item::FUNC_ITEM && + ((Item_cond*) cond)->functype() == Item_func::MULT_EQUAL_FUNC) + { + item_equal= (Item_equal *) cond; + item_equal->sort(&compare_fields_by_table_order, table_join_idx); + if (cond_equal && cond_equal->current_level.head() == item_equal) + cond_equal= 0; + return eliminate_item_equal(0, cond_equal, item_equal); + } + else + cond->walk(&Item::replace_equal_field_processor, 0); + return cond; +} + + /* change field = field to field = const for each found field = const in the and_level @@ -4349,28 +7048,284 @@ propagate_cond_constants(THD *thd, I_List<COND_CMP> *save_list, } +/* + Simplify joins replacing outer joins by inner joins whenever it's possible + + SYNOPSIS + simplify_joins() + join reference to the query info + join_list list representation of the join to be converted + conds conditions to add on expressions for converted joins + top true <=> conds is the where condition + + DESCRIPTION + The function, during a retrieval of join_list, eliminates those + outer joins that can be converted into inner join, possibly nested. + It also moves the on expressions for the converted outer joins + and from inner joins to conds. + The function also calculates some attributes for nested joins: + - used_tables + - not_null_tables + - dep_tables. + - on_expr_dep_tables + The first two attributes are used to test whether an outer join can + be substituted for an inner join. The third attribute represents the + relation 'to be dependent on' for tables. If table t2 is dependent + on table t1, then in any evaluated execution plan table access to + table t2 must precede access to table t2. This relation is used also + to check whether the query contains invalid cross-references. + The forth attribute is an auxiliary one and is used to calculate + dep_tables. + As the attribute dep_tables qualifies possibles orders of tables in the + execution plan, the dependencies required by the straight join + modifiers are reflected in this attribute as well. + The function also removes all braces that can be removed from the join + expression without changing its meaning. + + NOTES + An outer join can be replaced by an inner join if the where condition + or the on expression for an embedding nested join contains a conjunctive + predicate rejecting null values for some attribute of the inner tables. + + E.g. 
in the query: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a WHERE t2.b < 5 + the predicate t2.b < 5 rejects nulls. + The query is converted first to: + SELECT * FROM t1 INNER JOIN t2 ON t2.a=t1.a WHERE t2.b < 5 + then to the equivalent form: + SELECT * FROM t1, t2 ON t2.a=t1.a WHERE t2.b < 5 AND t2.a=t1.a. + + Similarly the following query: + SELECT * from t1 LEFT JOIN (t2, t3) ON t2.a=t1.a t3.b=t1.b + WHERE t2.c < 5 + is converted to: + SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a t3.b=t1.b + + One conversion might trigger another: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a + LEFT JOIN t3 ON t3.b=t2.b + WHERE t3 IS NOT NULL => + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t1.a, t3 + WHERE t3 IS NOT NULL AND t3.b=t2.b => + SELECT * FROM t1, t2, t3 + WHERE t3 IS NOT NULL AND t3.b=t2.b AND t2.a=t1.a + + The function removes all unnecessary braces from the expression + produced by the conversions. + E.g. SELECT * FROM t1, (t2, t3) WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b + finally is converted to: + SELECT * FROM t1, t2, t3 WHERE t2.c < 5 AND t2.a=t1.a AND t3.b=t1.b + + It also will remove braces from the following queries: + SELECT * from (t1 LEFT JOIN t2 ON t2.a=t1.a) LEFT JOIN t3 ON t3.b=t2.b + SELECT * from (t1, (t2,t3)) WHERE t1.a=t2.a AND t2.b=t3.b. + + The benefit of this simplification procedure is that it might return + a query for which the optimizer can evaluate execution plan with more + join orders. With a left join operation the optimizer does not + consider any plan where one of the inner tables is before some of outer + tables. + + IMPLEMENTATION. + The function is implemented by a recursive procedure. On the recursive + ascent all attributes are calculated, all outer joins that can be + converted are replaced and then all unnecessary braces are removed. + As join list contains join tables in the reverse order sequential + elimination of outer joins does not requite extra recursive calls. + + EXAMPLES + Here is an example of a join query with invalid cross references: + SELECT * FROM t1 LEFT JOIN t2 ON t2.a=t3.a LEFT JOIN ON t3.b=t1.b + + RETURN VALUE + The new condition, if success + 0, otherwise +*/ + +static COND * +simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top) +{ + TABLE_LIST *table; + NESTED_JOIN *nested_join; + TABLE_LIST *prev_table= 0; + List_iterator<TABLE_LIST> li(*join_list); + DBUG_ENTER("simplify_joins"); + + /* + Try to simplify join operations from join_list. + The most outer join operation is checked for conversion first. + */ + while ((table= li++)) + { + table_map used_tables; + table_map not_null_tables= (table_map) 0; + + if ((nested_join= table->nested_join)) + { + /* + If the element of join_list is a nested join apply + the procedure to its nested join list first. + */ + if (table->on_expr) + { + Item *expr; + /* + If an on expression E is attached to the table, + check all null rejected predicates in this expression. + If such a predicate over an attribute belonging to + an inner table of an embedded outer join is found, + the outer join is converted to an inner join and + the corresponding on expression is added to E. 
+ */ + expr= simplify_joins(join, &nested_join->join_list, + table->on_expr, FALSE); + table->on_expr= expr; + } + nested_join->used_tables= (table_map) 0; + nested_join->not_null_tables=(table_map) 0; + conds= simplify_joins(join, &nested_join->join_list, conds, top); + used_tables= nested_join->used_tables; + not_null_tables= nested_join->not_null_tables; + } + else + { + used_tables= table->table->map; + if (conds) + not_null_tables= conds->not_null_tables(); + } + + if (table->embedding) + { + table->embedding->nested_join->used_tables|= used_tables; + table->embedding->nested_join->not_null_tables|= not_null_tables; + } + + if (!table->outer_join || (used_tables & not_null_tables)) + { + /* + For some of the inner tables there are conjunctive predicates + that reject nulls => the outer join can be replaced by an inner join. + */ + table->outer_join= 0; + if (table->on_expr) + { + /* Add on expression to the where condition. */ + if (conds) + { + conds= and_conds(conds, table->on_expr); + conds->fix_fields(join->thd, 0, &conds); + } + else + conds= table->on_expr; + table->on_expr= 0; + } + } + + if (!top) + continue; + + /* + Only inner tables of non-convertible outer joins + remain with on_expr. + */ + if (table->on_expr) + { + table->dep_tables|= table->on_expr->used_tables(); + if (table->embedding) + { + table->dep_tables&= ~table->embedding->nested_join->used_tables; + /* + Embedding table depends on tables used + in embedded on expressions. + */ + table->embedding->on_expr_dep_tables|= table->on_expr->used_tables(); + } + else + table->dep_tables&= ~table->table->map; + } + + if (prev_table) + { + /* The order of tables is reverse: prev_table follows table */ + if (prev_table->straight) + prev_table->dep_tables|= used_tables; + if (prev_table->on_expr) + { + prev_table->dep_tables|= table->on_expr_dep_tables; + table_map prev_used_tables= prev_table->nested_join ? + prev_table->nested_join->used_tables : + prev_table->table->map; + /* + If on expression contains only references to inner tables + we still make the inner tables dependent on the outer tables. + It would be enough to set dependency only on one outer table + for them. Yet this is really a rare case. + */ + if (!(prev_table->on_expr->used_tables() & ~prev_used_tables)) + prev_table->dep_tables|= used_tables; + } + } + prev_table= table; + } + + /* Flatten nested joins that can be flattened. */ + li.rewind(); + while((table= li++)) + { + nested_join= table->nested_join; + if (nested_join && !table->on_expr) + { + TABLE_LIST *tbl; + List_iterator<TABLE_LIST> it(nested_join->join_list); + while ((tbl= it++)) + { + tbl->embedding= table->embedding; + tbl->join_list= table->join_list; + } + li.replace(nested_join->join_list); + } + } + DBUG_RETURN(conds); +} + + static COND * -optimize_cond(THD *thd, COND *conds, Item::cond_result *cond_value) +optimize_cond(JOIN *join, COND *conds, List<TABLE_LIST> *join_list, + Item::cond_result *cond_value) { - SELECT_LEX *select= thd->lex->current_select; + THD *thd= join->thd; + SELECT_LEX *select= thd->lex->current_select; DBUG_ENTER("optimize_cond"); - if (conds) + + if (!conds) + { + *cond_value= Item::COND_TRUE; + select->prep_where= 0; + } + else { + /* + Build all multiple equality predicates and eliminate equality + predicates that can be inferred from these multiple equalities. + For each reference of a field included into a multiple equality + that occurs in a function set a pointer to the multiple equality + predicate. 
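// Editor's note: a toy illustration, not the server's code, of the conversion
// test simplify_joins (above) applies: an outer join can be replaced by an
// inner join when the used_tables of its inner side intersect the set of
// tables for which the condition rejects NULL-complemented rows. table_map
// mirrors the server's one-bit-per-table convention; the names are made up.
typedef unsigned long long table_map;

struct OuterJoinSketch
{
  table_map inner_tables;    // used_tables of the join's inner side
  bool outer_join;           // true while the join is still an outer join
};

/* not_null_tables: tables whose null-complemented rows the condition rejects */
static void maybe_convert_to_inner(OuterJoinSketch &join,
                                   table_map not_null_tables)
{
  if (join.inner_tables & not_null_tables)
    join.outer_join= false;  // a null-rejecting predicate exists => inner join
}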
Substitute a constant instead of this field if the + multiple equality contains a constant. + */ DBUG_EXECUTE("where", print_where(conds, "original");); + conds= build_equal_items(join->thd, conds, NULL, join_list, + &join->cond_equal); + DBUG_EXECUTE("where",print_where(conds,"after equal_items");); + /* change field = field to field = const for each found field = const */ propagate_cond_constants(thd, (I_List<COND_CMP> *) 0, conds, conds); /* Remove all instances of item == item Remove all and-levels where CONST item != CONST item */ - DBUG_EXECUTE("where", print_where(conds, "after const change");); - conds= remove_eq_conds(thd, conds, cond_value); - DBUG_EXECUTE("info", print_where(conds, "after remove");); - } - else - { - *cond_value= Item::COND_TRUE; - select->prep_where= 0; + DBUG_EXECUTE("where",print_where(conds,"after const change");); + conds= remove_eq_conds(thd, conds, cond_value) ; + DBUG_EXECUTE("info",print_where(conds,"after remove");); } DBUG_RETURN(conds); } @@ -4495,6 +7450,11 @@ remove_eq_conds(THD *thd, COND *cond, Item::cond_result *cond_value) } } } + if (cond->const_item()) + { + *cond_value= eval_const_cond(cond) ? Item::COND_TRUE : Item::COND_FALSE; + return (COND*) 0; + } } else if (cond->const_item()) { @@ -4574,7 +7534,6 @@ const_expression_in_where(COND *cond, Item *comp_item, Item **const_item) return 0; } - /**************************************************************************** Create internal temporary table ****************************************************************************/ @@ -4610,8 +7569,9 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field, { Field *new_field; - if (convert_blob_length && org_field->flags & BLOB_FLAG) - new_field= new Field_varstring(convert_blob_length, org_field->maybe_null(), + if (convert_blob_length && (org_field->flags & BLOB_FLAG)) + new_field= new Field_varstring(convert_blob_length, + org_field->maybe_null(), org_field->field_name, table, org_field->charset()); else @@ -4624,7 +7584,8 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field, new_field->field_name= item->name; if (org_field->maybe_null()) new_field->flags&= ~NOT_NULL_FLAG; // Because of outer join - if (org_field->type() == FIELD_TYPE_VAR_STRING) + if (org_field->type() == MYSQL_TYPE_VAR_STRING || + org_field->type() == MYSQL_TYPE_VARCHAR) table->db_create_options|= HA_OPTION_PACK_RECORD; } return new_field; @@ -4654,7 +7615,8 @@ static Field* create_tmp_field_from_field(THD *thd, Field* org_field, 0 on error new_created field */ -static Field* create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, + +static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, Item ***copy_func, bool modify_item, uint convert_blob_length) { @@ -4672,19 +7634,12 @@ static Field* create_tmp_field_from_item(THD *thd, Item *item, TABLE *table, item->name, table, item->unsigned_flag); break; case STRING_RESULT: - if (item->max_length > 255) - { - if (convert_blob_length) - new_field= new Field_varstring(convert_blob_length, maybe_null, - item->name, table, - item->collation.collation); - else - new_field= new Field_blob(item->max_length, maybe_null, item->name, - table, item->collation.collation); - } + if (item->max_length > 255 && convert_blob_length) + new_field= new Field_varstring(convert_blob_length, maybe_null, + item->name, table, + item->collation.collation); else - new_field= new Field_string(item->max_length, maybe_null, item->name, - table, item->collation.collation); + new_field= 
item->make_string_field(table); break; case ROW_RESULT: default: @@ -4765,18 +7720,11 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, return new Field_longlong(item_sum->max_length,maybe_null, item->name,table,item->unsigned_flag); case STRING_RESULT: - if (item_sum->max_length > 255) - { - if (convert_blob_length) - return new Field_varstring(convert_blob_length, maybe_null, - item->name, table, - item->collation.collation); - else - return new Field_blob(item_sum->max_length, maybe_null, item->name, - table, item->collation.collation); - } - return new Field_string(item_sum->max_length,maybe_null, - item->name,table,item->collation.collation); + if (item_sum->max_length > 255 && convert_blob_length) + return new Field_varstring(convert_blob_length, maybe_null, + item->name, table, + item->collation.collation); + return item_sum->make_string_field(table); case ROW_RESULT: default: // This case should never be choosen @@ -4831,6 +7779,11 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, for send_fields */ +#define STRING_TOTAL_LENGTH_TO_PACK_ROWS 128 +#define AVG_STRING_LENGTH_TO_PACK_ROWS 64 +#define RATIO_TO_PACK_ROWS 2 +#define MIN_STRING_LENGTH_TO_PACK_ROWS 10 + TABLE * create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, ORDER *group, bool distinct, bool save_sum_fields, @@ -4838,12 +7791,15 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, char *table_alias) { TABLE *table; - uint i,field_count,reclength,null_count,null_pack_length, - hidden_null_count, hidden_null_pack_length, hidden_field_count, - blob_count,group_null_items; - bool using_unique_constraint=0; + uint i,field_count,null_count,null_pack_length; + uint hidden_null_count, hidden_null_pack_length, hidden_field_count; + uint blob_count,group_null_items, string_count; + uint temp_pool_slot=MY_BIT_NONE; + ulong reclength, string_total_length; + bool using_unique_constraint= 0; + bool use_packed_rows= 0; bool not_all_columns= !(select_options & TMP_TABLE_ALL_COLUMNS); - char *tmpname,path[FN_REFLEN]; + char *tmpname,path[FN_REFLEN], filename[FN_REFLEN]; byte *pos,*group_buff; uchar *null_flags; Field **reg_field, **from_field, **blob_field; @@ -4852,27 +7808,27 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, KEY_PART_INFO *key_part_info; Item **copy_func; MI_COLUMNDEF *recinfo; - uint temp_pool_slot=MY_BIT_NONE; - + uint total_uneven_bit_length= 0; DBUG_ENTER("create_tmp_table"); DBUG_PRINT("enter",("distinct: %d save_sum_fields: %d rows_limit: %lu group: %d", (int) distinct, (int) save_sum_fields, (ulong) rows_limit,test(group))); - statistic_increment(created_tmp_tables, &LOCK_status); + statistic_increment(thd->status_var.created_tmp_tables, &LOCK_status); if (use_temp_pool) temp_pool_slot = bitmap_set_next(&temp_pool); if (temp_pool_slot != MY_BIT_NONE) // we got a slot - sprintf(path, "%s%s_%lx_%i", mysql_tmpdir, tmp_file_prefix, - current_pid, temp_pool_slot); + sprintf(filename, "%s_%lx_%i", tmp_file_prefix, + current_pid, temp_pool_slot); else // if we run out of slots or we are not using tempool - sprintf(path,"%s%s%lx_%lx_%x",mysql_tmpdir,tmp_file_prefix,current_pid, + sprintf(filename,"%s%lx_%lx_%x",tmp_file_prefix,current_pid, thd->thread_id, thd->tmp_table++); if (lower_case_table_names) my_casedn_str(files_charset_info, path); + sprintf(path, "%s%s", mysql_tmpdir, filename); if (group) { @@ -4881,7 +7837,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, else for 
(ORDER *tmp=group ; tmp ; tmp=tmp->next) { (*tmp->item)->marker=4; // Store null in key - if ((*tmp->item)->max_length >= MAX_CHAR_WIDTH) + if ((*tmp->item)->max_length >= CONVERT_IF_BIGGER_TO_BLOB) using_unique_constraint=1; } if (param->group_length >= MAX_BLOB_WIDTH) @@ -4937,6 +7893,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, table->temp_pool_slot = temp_pool_slot; table->copy_blobs= 1; table->in_use= thd; + table->table_charset= param->table_charset; table->keys_for_keyread.init(); table->keys_in_use.init(); table->read_only_keys.init(); @@ -4946,7 +7903,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, /* Calculate which type of fields we will store in the temporary table */ - reclength=blob_count=null_count=hidden_null_count=group_null_items=0; + reclength= string_total_length= 0; + blob_count= string_count= null_count= hidden_null_count= group_null_items= 0; param->using_indirect_summary_function=0; List_iterator_fast<Item> li(fields); @@ -4993,6 +7951,12 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, *blob_field++= new_field; blob_count++; } + if (new_field->real_type() == MYSQL_TYPE_STRING || + new_field->real_type() == MYSQL_TYPE_VARCHAR) + { + string_count++; + string_total_length+= new_field->pack_length(); + } thd->change_item_tree(argp, new Item_field(new_field)); if (!(new_field->flags & NOT_NULL_FLAG)) { @@ -5034,6 +7998,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, reclength+=new_field->pack_length(); if (!(new_field->flags & NOT_NULL_FLAG)) null_count++; + if (new_field->type() == FIELD_TYPE_BIT) + total_uneven_bit_length+= new_field->field_length & 7; if (new_field->flags & BLOB_FLAG) { *blob_field++= new_field; @@ -5082,10 +8048,17 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, null_count++; } hidden_null_pack_length=(hidden_null_count+7)/8; - null_pack_length=hidden_null_count+(null_count+7)/8; + null_pack_length= hidden_null_count + + (null_count + total_uneven_bit_length + 7) / 8; reclength+=null_pack_length; if (!reclength) reclength=1; // Dummy select + /* Use packed rows if there is blobs or a lot of space to gain */ + if (blob_count || + string_total_length >= STRING_TOTAL_LENGTH_TO_PACK_ROWS && + (reclength / string_total_length <= RATIO_TO_PACK_ROWS || + string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS)) + use_packed_rows= 1; table->fields=field_count; table->reclength=reclength; @@ -5160,10 +8133,9 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, recinfo->length=length; if (field->flags & BLOB_FLAG) recinfo->type= (int) FIELD_BLOB; - else if (!field->zero_pack() && - (field->type() == FIELD_TYPE_STRING || - field->type() == FIELD_TYPE_VAR_STRING) && - length >= 10 && blob_count) + else if (use_packed_rows && + field->real_type() == MYSQL_TYPE_STRING && + length >= MIN_STRING_LENGTH_TO_PACK_ROWS) recinfo->type=FIELD_SKIP_ENDSPACE; else recinfo->type=FIELD_NORMAL; @@ -5202,6 +8174,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, keyinfo->key_length=0; keyinfo->rec_per_key=0; keyinfo->algorithm= HA_KEY_ALG_UNDEF; + keyinfo->name= (char*) "group_key"; for (; group ; group=group->next,key_part_info++) { Field *field=(*group->item)->get_tmp_table_field(); @@ -5209,35 +8182,40 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, key_part_info->null_bit=0; key_part_info->field= field; key_part_info->offset= field->offset(); - key_part_info->length= 
(uint16) field->pack_length(); + key_part_info->length= (uint16) field->key_length(); key_part_info->type= (uint8) field->key_type(); key_part_info->key_type = ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT || - (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ? + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 || + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ? 0 : FIELDFLAG_BINARY; if (!using_unique_constraint) { group->buff=(char*) group_buff; - if (!(group->field=field->new_field(thd->mem_root,table))) + if (!(group->field= field->new_key_field(thd->mem_root,table, + (char*) group_buff + + test(maybe_null), + field->null_ptr, + field->null_bit))) goto err; /* purecov: inspected */ if (maybe_null) { /* - To be able to group on NULL, we reserve place in group_buff - for the NULL flag just before the column. + To be able to group on NULL, we reserved place in group_buff + for the NULL flag just before the column. (see above). The field data is after this flag. - The NULL flag is updated by 'end_update()' and 'end_write()' + The NULL flag is updated in 'end_update()' and 'end_write()' */ keyinfo->flags|= HA_NULL_ARE_EQUAL; // def. that NULL == NULL key_part_info->null_bit=field->null_bit; key_part_info->null_offset= (uint) (field->null_ptr - (uchar*) table->record[0]); - group->field->move_field((char*) ++group->buff); - group_buff++; + group->buff++; // Pointer to field data + group_buff++; // Skipp null flag } - else - group->field->move_field((char*) group_buff); - group_buff+= key_part_info->length; + /* In GROUP BY 'a' and 'a ' are equal for VARCHAR fields */ + key_part_info->key_part_flag|= HA_END_SPACE_ARE_EQUAL; + group_buff+= group->field->pack_length(); } keyinfo->key_length+= key_part_info->length; } @@ -5272,7 +8250,7 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, keyinfo->key_part=key_part_info; keyinfo->flags=HA_NOSAME | HA_NULL_ARE_EQUAL; keyinfo->key_length=(uint16) reclength; - keyinfo->name=(char*) "tmp"; + keyinfo->name= (char*) "distinct_key"; keyinfo->algorithm= HA_KEY_ALG_UNDEF; if (null_pack_length) { @@ -5301,7 +8279,8 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields, key_part_info->type= (uint8) (*reg_field)->key_type(); key_part_info->key_type = ((ha_base_keytype) key_part_info->type == HA_KEYTYPE_TEXT || - (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT) ? + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT1 || + (ha_base_keytype) key_part_info->type == HA_KEYTYPE_VARTEXT2) ? 0 : FIELDFLAG_BINARY; } } @@ -5351,8 +8330,8 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, MI_KEYDEF keydef; MI_UNIQUEDEF uniquedef; KEY *keyinfo=param->keyinfo; - DBUG_ENTER("create_myisam_tmp_table"); + if (table->keys) { // Get keys for ni_create bool using_unique_constraint=0; @@ -5400,21 +8379,18 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, { seg->type= ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? - HA_KEYTYPE_VARBINARY : HA_KEYTYPE_VARTEXT); - seg->bit_start=seg->length - table->blob_ptr_size; + HA_KEYTYPE_VARBINARY2 : HA_KEYTYPE_VARTEXT2); + seg->bit_start= field->pack_length() - table->blob_ptr_size; seg->flag= HA_BLOB_PART; seg->length=0; // Whole blob in unique constraint } else { - seg->type= - ((keyinfo->key_part[i].key_type & FIELDFLAG_BINARY) ? 
- HA_KEYTYPE_BINARY : HA_KEYTYPE_TEXT); - if (!(field->flags & ZEROFILL_FLAG) && - (field->type() == FIELD_TYPE_STRING || - field->type() == FIELD_TYPE_VAR_STRING) && + seg->type= keyinfo->key_part[i].type; + /* Tell handler if it can do suffic space compression */ + if (field->real_type() == MYSQL_TYPE_STRING && keyinfo->key_part[i].length > 4) - seg->flag|=HA_SPACE_PACK; + seg->flag|= HA_SPACE_PACK; } if (!(field->flags & NOT_NULL_FLAG)) { @@ -5423,7 +8399,7 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, /* We are using a GROUP BY on something that contains NULL In this case we have to tell MyISAM that two NULL should - on INSERT be compared as equal + on INSERT be regarded at the same value */ if (!using_unique_constraint) keydef.flag|= HA_NULL_ARE_EQUAL; @@ -5448,7 +8424,8 @@ static bool create_myisam_tmp_table(TABLE *table,TMP_TABLE_PARAM *param, table->db_stat=0; goto err; } - statistic_increment(created_tmp_disk_tables, &LOCK_status); + statistic_increment(table->in_use->status_var.created_tmp_disk_tables, + &LOCK_status); table->db_record_offset=1; DBUG_RETURN(0); err: @@ -5600,7 +8577,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) { int error= 0; JOIN_TAB *join_tab; - int (*end_select)(JOIN *, struct st_join_table *,bool); + Next_select_func end_select; DBUG_ENTER("do_select"); join->procedure=procedure; @@ -5608,7 +8585,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) Tell the client how many fields there are in a row */ if (!table) - join->result->send_fields(*fields,1); + join->result->send_fields(*fields, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF); else { VOID(table->file->extra(HA_EXTRA_WRITE_CACHE)); @@ -5648,11 +8626,18 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) } else { - if (join->sort_and_group || (join->procedure && - join->procedure->flags & PROC_GROUP)) - end_select=end_send_group; + /* Test if data is accessed via QUICK_GROUP_MIN_MAX_SELECT. 
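// Editor's note: this refers back to the create_tmp_table hunk above. Its
// packed-row decision is restated here as a standalone helper with explicit
// parentheses; the constants mirror the macros added by the patch, while the
// function name and signature are hypothetical.
static const unsigned long STRING_TOTAL_LENGTH_TO_PACK_ROWS= 128;
static const unsigned long AVG_STRING_LENGTH_TO_PACK_ROWS=   64;
static const unsigned long RATIO_TO_PACK_ROWS=               2;

static bool should_pack_rows(unsigned blob_count, unsigned string_count,
                             unsigned long string_total_length,
                             unsigned long reclength)
{
  if (blob_count)
    return true;                           // blob fields always force packing
  if (!string_count ||
      string_total_length < STRING_TOTAL_LENGTH_TO_PACK_ROWS)
    return false;
  /* Pack when strings dominate the record or are long on average. */
  return (reclength / string_total_length <= RATIO_TO_PACK_ROWS ||
          string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS);
}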
*/ + bool is_using_quick_group_min_max_select= + (join->join_tab->select && join->join_tab->select->quick && + (join->join_tab->select->quick->get_type() == + QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)); + + if ((join->sort_and_group || + (join->procedure && join->procedure->flags & PROC_GROUP)) && + !is_using_quick_group_min_max_select) + end_select= end_send_group; else - end_select=end_send; + end_select= end_send; } join->join_tab[join->tables-1].next_select=end_select; @@ -5661,7 +8646,7 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) if (join->tables == join->const_tables) { /* - HAVING will be chcked after processing aggregate functions, + HAVING will be checked after processing aggregate functions, But WHERE should checkd here (we alredy have read tables) */ if (!join->conds || join->conds->val_int()) @@ -5698,21 +8683,19 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure) } if (table) { - int tmp; + int tmp, new_errno= 0; if ((tmp=table->file->extra(HA_EXTRA_NO_CACHE))) { DBUG_PRINT("error",("extra(HA_EXTRA_NO_CACHE) failed")); - my_errno= tmp; - error= -1; + new_errno= tmp; } if ((tmp=table->file->ha_index_or_rnd_end())) { DBUG_PRINT("error",("ha_index_or_rnd_end() failed")); - my_errno= tmp; - error= -1; + new_errno= tmp; } - if (error == -1) - table->file->print_error(my_errno,MYF(0)); + if (new_errno) + table->file->print_error(new_errno,MYF(0)); } #ifndef DBUG_OFF if (error) @@ -5737,7 +8720,7 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) } if (join->thd->killed) // If aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); return -2; /* purecov: inspected */ } if (join_tab->use_quick != 2 || test_if_quick_select(join_tab) <= 0) @@ -5751,20 +8734,148 @@ sub_select_cache(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) return sub_select(join,join_tab,end_of_records); /* Use ordinary select */ } +/* + Retrieve records ends with a given beginning from the result of a join + + SYNPOSIS + sub_select() + join pointer to the structure providing all context info for the query + join_tab the first next table of the execution plan to be retrieved + end_records true when we need to perform final steps of retrival + + DESCRIPTION + For a given partial join record consisting of records from the tables + preceding the table join_tab in the execution plan, the function + retrieves all matching full records from the result set and + send them to the result set stream. + + NOTES + The function effectively implements the final (n-k) nested loops + of nested loops join algorithm, where k is the ordinal number of + the join_tab table and n is the total number of tables in the join query. + It performs nested loops joins with all conjunctive predicates from + the where condition pushed as low to the tables as possible. + E.g. for the query + SELECT * FROM t1,t2,t3 + WHERE t1.a=t2.a AND t2.b=t3.b AND t1.a BETWEEN 5 AND 9 + the predicate (t1.a BETWEEN 5 AND 9) will be pushed to table t1, + given the selected plan prescribes to nest retrievals of the + joined tables in the following order: t1,t2,t3. + A pushed down predicate are attached to the table which it pushed to, + at the field select_cond. + When executing a nested loop of level k the function runs through + the rows of 'join_tab' and for each row checks the pushed condition + attached to the table. + If it is false the function moves to the next row of the + table. 
If the condition is true the function recursively executes (n-k-1) + remaining embedded nested loops. + The situation becomes more complicated if outer joins are involved in + the execution plan. In this case the pushed down predicates can be + checked only at certain conditions. + Suppose for the query + SELECT * FROM t1 LEFT JOIN (t2,t3) ON t3.a=t1.a + WHERE t1>2 AND (t2.b>5 OR t2.b IS NULL) + the optimizer has chosen a plan with the table order t1,t2,t3. + The predicate P1=t1>2 will be pushed down to the table t1, while the + predicate P2=(t2.b>5 OR t2.b IS NULL) will be attached to the table + t2. But the second predicate can not be unconditionally tested right + after a row from t2 has been read. This can be done only after the + first row with t3.a=t1.a has been encountered. + Thus, the second predicate P2 is supplied with a guarded value that are + stored in the field 'found' of the first inner table for the outer join + (table t2). When the first row with t3.a=t1.a for the current row + of table t1 appears, the value becomes true. For now on the predicate + is evaluated immediately after the row of table t2 has been read. + When the first row with t3.a=t1.a has been encountered all + conditions attached to the inner tables t2,t3 must be evaluated. + Only when all of them are true the row is sent to the output stream. + If not, the function returns to the lowest nest level that has a false + attached condition. + The predicates from on expressions are also pushed down. If in the + the above example the on expression were (t3.a=t1.a AND t2.a=t1.a), + then t1.a=t2.a would be pushed down to table t2, and without any + guard. + If after the run through all rows of table t2, the first inner table + for the outer join operation, it turns out that no matches are + found for the current row of t1, then current row from table t1 + is complemented by nulls for t2 and t3. Then the pushed down predicates + are checked for the composed row almost in the same way as it had + been done for the first row with a match. The only difference is + the predicates from on expressions are not checked. + + IMPLEMENTATION + The function forms output rows for a current partial join of k + tables tables recursively. + For each partial join record ending with a certain row from + join_tab it calls sub_select that builds all possible matching + tails from the result set. + To be able check predicates conditionally items of the class + Item_func_trig_cond are employed. + An object of this class is constructed from an item of class COND + and a pointer to a guarding boolean variable. + When the value of the guard variable is true the value of the object + is the same as the value of the predicate, otherwise it's just returns + true. + To carry out a return to a nested loop level of join table t the pointer + to t is remembered in the field 'return_tab' of the join structure. + Consider the following query: + SELECT * FROM t1, + LEFT JOIN + (t2, t3 LEFT JOIN (t4,t5) ON t5.a=t3.a) + ON t4.a=t2.a + WHERE (t2.b=5 OR t2.b IS NULL) AND (t4.b=2 OR t4.b IS NULL) + Suppose the chosen execution plan dictates the order t1,t2,t3,t4,t5 + and suppose for a given joined rows from tables t1,t2,t3 there are + no rows in the result set yet. + When first row from t5 that satisfies the on condition + t5.a=t3.a is found, the pushed down predicate t4.b=2 OR t4.b IS NULL + becomes 'activated', as well the predicate t4.a=t2.a. But + the predicate (t2.b=5 OR t2.b IS NULL) can not be checked until + t4.a=t2.a becomes true. 
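// Editor's note: a deliberately simplified sketch of the nested-loop scheme
// sub_select implements, with the condition pushed down to each table checked
// as soon as that table's row has been read. The outer-join machinery
// described above (the found / not_null_compl guards, first_unmatched and
// return_tab) is left out, and every name below is a hypothetical stand-in.
#include <cstddef>
#include <functional>
#include <vector>

struct TabSketch
{
  std::vector<int> rows;                                     // toy table data
  std::function<bool(const std::vector<int>&)> select_cond;  // pushed-down cond
};

/* Recursively extend the partial join with rows of tabs[k], tabs[k+1], ... */
static void sub_select_sketch(const std::vector<TabSketch> &tabs,
                              std::size_t k,
                              std::vector<int> &partial,
                              void (*emit)(const std::vector<int>&))
{
  if (k == tabs.size())                 // analogue of end_of_records
  {
    emit(partial);                      // a full match: send it to the result
    return;
  }
  const TabSketch &tab= tabs[k];
  for (int row : tab.rows)
  {
    partial.push_back(row);
    /* Check the pushed-down condition before descending to the next table. */
    if (!tab.select_cond || tab.select_cond(partial))
      sub_select_sketch(tabs, k + 1, partial, emit);
    partial.pop_back();
  }
}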
+ In order not to re-evaluate the predicates that were already evaluated + as attached pushed down predicates, a pointer to the the first + most inner unmatched table is maintained in join_tab->first_unmatched. + Thus, when the first row from t5 with t5.a=t3.a is found + this pointer for t5 is changed from t4 to t2. + + STRUCTURE NOTES + join_tab->first_unmatched points always backwards to the first inner + table of the embedding nested join, if any. + + RETURN + 0, if success + # of the error, otherwise +*/ static int sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) { - join_tab->table->null_row=0; if (end_of_records) return (*join_tab->next_select)(join,join_tab+1,end_of_records); - /* Cache variables for faster loop */ int error; - bool found=0; - COND *on_expr=join_tab->on_expr, *select_cond=join_tab->select_cond; + JOIN_TAB *first_unmatched; + JOIN_TAB *tab; + bool found= 0; + /* Cache variables for faster loop */ + COND *select_cond= join_tab->select_cond; + JOIN_TAB *first_inner_tab= join_tab->first_inner; + my_bool *report_error= &(join->thd->net.report_error); + join->return_tab= join_tab; + + if (join_tab->last_inner) + { + /* join_tab is the first inner table for an outer join operation. */ + + /* Set initial state of guard variables for this table.*/ + join_tab->found=0; + join_tab->not_null_compl= 1; + + /* Set first_unmatched for the last inner table of this group */ + join_tab->last_inner->first_unmatched= join_tab; + } if (!(error=(*join_tab->read_first_record)(join_tab))) { @@ -5778,20 +8889,81 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) { if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); return -2; /* purecov: inspected */ } - join->examined_rows++; - join->thd->row_count++; - if (!on_expr || on_expr->val_int()) + DBUG_PRINT("info", ("select cond 0x%lx", (ulong)select_cond)); + if (!select_cond || select_cond->val_int()) { - found=1; - if (not_exists_optimize) - break; // Searching after not null columns - if (!select_cond || select_cond->val_int()) - { - if ((error=(*join_tab->next_select)(join,join_tab+1,0)) < 0) + /* + There is no select condition or the attached pushed down + condition is true => a match is found. + */ + bool found= 1; + while (join_tab->first_unmatched && found) + { + /* + The while condition is always false if join_tab is not + the last inner join table of an outer join operation. + */ + first_unmatched= join_tab->first_unmatched; + /* + Mark that a match for current outer table is found. + This activates push down conditional predicates attached + to the all inner tables of the outer join. + */ + first_unmatched->found= 1; + for (tab= first_unmatched; tab <= join_tab; tab++) + { + /* Check all predicates that has just been activated. */ + /* + Actually all predicates non-guarded by first_unmatched->found + will be re-evaluated again. It could be fixed, but, probably, + it's not worth doing now. + */ + if (tab->select_cond && !tab->select_cond->val_int()) + { + /* The condition attached to table tab is false */ + if (tab == join_tab) + found= 0; + else + { + /* + Set a return point if rejected predicate is attached + not to the last table of the current nest level. + */ + join->return_tab= tab; + return 0; + } + } + } + /* + Check whether join_tab is not the last inner table + for another embedding outer join. 
+ */ + if ((first_unmatched= first_unmatched->first_upper) && + first_unmatched->last_inner != join_tab) + first_unmatched= 0; + join_tab->first_unmatched= first_unmatched; + } + + /* + It was not just a return to lower loop level when one + of the newly activated predicates is evaluated as false + (See above join->return_tab= tab). + */ + join->examined_rows++; + join->thd->row_count++; + + if (found) + { + if (not_exists_optimize) + break; + /* A match from join_tab is found for the current partial join. */ + if ((error=(*join_tab->next_select)(join, join_tab+1, 0)) < 0) return error; + if (join->return_tab < join_tab) + return 0; /* Test if this was a SELECT DISTINCT query on a table that was not in the field list; In this case we can abort if @@ -5801,30 +8973,77 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) return 0; } else - { - /* - This row failed selection, release lock on it. - XXX: There is no table handler in MySQL which makes use of this - call. It's kept from Gemini times. A lot of new code was added - recently (i. e. subselects) without having it in mind. - */ - info->file->unlock_row(); - } + info->file->unlock_row(); + } + else + { + /* + The condition pushed down to the table join_tab rejects all rows + with the beginning coinciding with the current partial join. + */ + join->examined_rows++; + join->thd->row_count++; } + } while (!(error=info->read_record(info)) && !(*report_error)); } if (error > 0 || (*report_error)) // Fatal error return -1; - if (!found && on_expr) - { // OUTER JOIN - restore_record(join_tab->table,default_values); // Make empty record - mark_as_null_row(join_tab->table); // For group by without error - if (!select_cond || select_cond->val_int()) - { - if ((error=(*join_tab->next_select)(join,join_tab+1,0)) < 0) - return error; /* purecov: inspected */ + if (join_tab->last_inner && !join_tab->found) + { + /* + The table join_tab is the first inner table of a outer join operation + and no matches has been found for the current outer row. + */ + JOIN_TAB *last_inner_tab= join_tab->last_inner; + for ( ; join_tab <= last_inner_tab ; join_tab++) + { + /* Change the the values of guard predicate variables. */ + join_tab->found= 1; + join_tab->not_null_compl= 0; + /* The outer row is complemented by nulls for each inner tables */ + restore_record(join_tab->table,default_values); // Make empty record + mark_as_null_row(join_tab->table); // For group by without error + select_cond= join_tab->select_cond; + /* Check all attached conditions for inner table rows. */ + if (select_cond && !select_cond->val_int()) + return 0; + } + join_tab--; + /* + The row complemented by nulls might be the first row + of embedding outer joins. + If so, perform the same actions as in the code + for the first regular outer join row above. + */ + for ( ; ; ) + { + first_unmatched= join_tab->first_unmatched; + if ((first_unmatched= first_unmatched->first_upper) && + first_unmatched->last_inner != join_tab) + first_unmatched= 0; + join_tab->first_unmatched= first_unmatched; + if (!first_unmatched) + break; + first_unmatched->found= 1; + for (JOIN_TAB *tab= first_unmatched; tab <= join_tab; tab++) + { + if (tab->select_cond && !tab->select_cond->val_int()) + { + join->return_tab= tab; + return 0; + } + } } + /* + The row complemented by nulls satisfies all conditions + attached to inner tables. + Send the row complemented by nulls to be joined with the + remaining tables. 
+ */ + if ((error=(*join_tab->next_select)(join, join_tab+1 ,0)) < 0) + return error; } return 0; } @@ -5866,7 +9085,7 @@ flush_cached_records(JOIN *join,JOIN_TAB *join_tab,bool skip_last) { if (join->thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); return -2; // Aborted by user /* purecov: inspected */ } SQL_SELECT *select=join_tab->select; @@ -5982,9 +9201,9 @@ join_read_const_table(JOIN_TAB *tab, POSITION *pos) table->file->extra(HA_EXTRA_NO_KEYREAD); } } - if (tab->on_expr && !table->null_row) + if (*tab->on_expr_ref && !table->null_row) { - if ((table->null_row= test(tab->on_expr->val_int() == 0))) + if ((table->null_row= test((*tab->on_expr_ref)->val_int() == 0))) mark_as_null_row(table); } if (!table->null_row) @@ -6018,6 +9237,19 @@ join_read_system(JOIN_TAB *tab) } +/* + Read a table when there is at most one matching row + + SYNOPSIS + join_read_const() + tab Table to read + + RETURN + 0 Row was found + -1 Row was not found + 1 Got an error (other than row not found) during read +*/ + static int join_read_const(JOIN_TAB *tab) { @@ -6025,6 +9257,7 @@ join_read_const(JOIN_TAB *tab) TABLE *table= tab->table; if (table->status & STATUS_GARBAGE) // If first read { + table->status= 0; if (cp_buffer_from_ref(&tab->ref)) error=HA_ERR_KEY_NOT_FOUND; else @@ -6035,6 +9268,7 @@ join_read_const(JOIN_TAB *tab) } if (error) { + table->status= STATUS_NOT_FOUND; mark_as_null_row(tab->table); empty_record(table); if (error != HA_ERR_KEY_NOT_FOUND) @@ -6198,8 +9432,8 @@ test_if_quick_select(JOIN_TAB *tab) static int join_init_read_record(JOIN_TAB *tab) { - if (tab->select && tab->select->quick) - tab->select->quick->reset(); + if (tab->select && tab->select->quick && tab->select->quick->reset()) + return 1; init_read_record(&tab->read_record, tab->join->thd, tab->table, tab->select,1,1); return (*tab->read_record.read_record)(&tab->read_record); @@ -6407,6 +9641,14 @@ end_send(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), } DBUG_RETURN(-3); // Abort nicely } + else if (join->send_records >= join->fetch_limit) + { + /* + There is a server side cursor and all rows for + this fetch request are sent. + */ + DBUG_RETURN(-4); + } } else { @@ -6481,6 +9723,14 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), join->do_send_rows=0; join->unit->select_limit_cnt = HA_POS_ERROR; } + else if (join->send_records >= join->fetch_limit) + { + /* + There is a server side cursor and all rows + for this fetch request are sent. + */ + DBUG_RETURN(-4); + } } } else @@ -6519,7 +9769,7 @@ end_write(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); DBUG_RETURN(-2); /* purecov: inspected */ } if (!end_of_records) @@ -6587,7 +9837,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); DBUG_RETURN(-2); /* purecov: inspected */ } @@ -6598,9 +9848,6 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { Item *item= *group->item; item->save_org_in_field(group->field); -#ifdef EMBEDDED_LIBRARY - join->thd->net.last_errno= 0; -#endif /* Store in the used key if the field was 0 */ if (item->maybe_null) group->buff[-1]=item->null_value ? 
1 : 0; @@ -6620,13 +9867,19 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); } - /* The null bits are already set */ + /* + Copy null bits from group key to table + We can't copy all data as the key may have different format + as the row data (for example as with VARCHAR keys) + */ KEY_PART_INFO *key_part; for (group=table->group,key_part=table->key_info[0].key_part; group ; group=group->next,key_part++) - memcpy(table->record[0]+key_part->offset, group->buff, key_part->length); - + { + if (key_part->null_bit) + memcpy(table->record[0]+key_part->offset, group->buff, 1); + } init_tmptable_sum_functions(join->sum_funcs); copy_funcs(join->tmp_table_param.items_to_copy); if ((error=table->file->write_row(table->record[0]))) @@ -6642,6 +9895,7 @@ end_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); } + /* Like end_update, but this is done with unique constraints instead of keys */ static int @@ -6656,7 +9910,7 @@ end_unique_update(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(0); if (join->thd->killed) // Aborted by user { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); DBUG_RETURN(-2); /* purecov: inspected */ } @@ -6703,7 +9957,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (join->thd->killed) { // Aborted by user - my_error(ER_SERVER_SHUTDOWN,MYF(0)); /* purecov: inspected */ + join->thd->send_kill_message(); DBUG_RETURN(-2); /* purecov: inspected */ } if (!join->first_record || end_of_records || @@ -6789,11 +10043,11 @@ static bool test_if_ref(Item_field *left_item,Item *right_item) /* We can remove binary fields and numerical fields except float, as float comparison isn't 100 % secure - We have to keep binary strings to be able to check for end spaces + We have to keep normal strings to be able to check for end spaces */ if (field->binary() && - field->real_type() != FIELD_TYPE_STRING && - field->real_type() != FIELD_TYPE_VAR_STRING && + field->real_type() != MYSQL_TYPE_STRING && + field->real_type() != MYSQL_TYPE_VARCHAR && (field->type() != FIELD_TYPE_FLOAT || field->decimals() == 0)) { return !store_val_in_field(field,right_item); @@ -6983,7 +10237,7 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx, } -static uint find_shortest_key(TABLE *table, const key_map *usable_keys) +uint find_shortest_key(TABLE *table, const key_map *usable_keys) { uint min_length= (uint) ~0; uint best= MAX_KEY; @@ -7120,6 +10374,17 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, } else if (select && select->quick) // Range found by opt_range { + int quick_type= select->quick->get_type(); + /* + assume results are not ordered when index merge is used + TODO: sergeyp: Results of all index merge selects actually are ordered + by clustered PK values. + */ + + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) + DBUG_RETURN(0); ref_key= select->quick->index; ref_key_parts= select->quick->used_key_parts; } @@ -7153,7 +10418,11 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, } else { - select->quick->file->ha_index_end(); + select->quick->head->file->ha_index_end(); + /* + We have verified above that select->quick is not + index_merge quick select. 
+ */ select->quick->index= new_ref_key; select->quick->init(); } @@ -7175,8 +10444,15 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit, */ if (!select->quick->reverse_sorted()) { - // ORDER BY range_key DESC - QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC(select->quick, + int quick_type= select->quick->get_type(); + if (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + DBUG_RETURN(0); // Use filesort + + /* ORDER BY range_key DESC */ + QUICK_SELECT_DESC *tmp=new QUICK_SELECT_DESC((QUICK_RANGE_SELECT*)(select->quick), used_key_parts); if (!tmp || tmp->error) { @@ -7324,8 +10600,11 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, { select->quick=tab->quick; tab->quick=0; - /* We can only use 'Only index' if quick key is same as ref_key */ - if (table->key_read && (uint) tab->ref.key != select->quick->index) + /* + We can only use 'Only index' if quick key is same as ref_key + and in index_merge 'Only index' cannot be used + */ + if (table->key_read && ((uint) tab->ref.key != select->quick->index)) { table->key_read=0; table->file->extra(HA_EXTRA_NO_KEYREAD); @@ -7356,6 +10635,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order, tab->select= 0; } tab->select_cond=0; + tab->last_inner= 0; + tab->first_unmatched= 0; tab->type=JT_ALL; // Read with normal read_record tab->read_first_record= join_init_read_record; tab->join->examined_rows+=examined_rows; @@ -7513,7 +10794,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error=0; goto err; } @@ -7534,7 +10815,7 @@ static int remove_dup_with_compare(THD *thd, TABLE *table, Field **first_field, } if (copy_blobs(first_field)) { - my_error(ER_OUTOFMEMORY,MYF(0)); + my_message(ER_OUTOFMEMORY, ER(ER_OUTOFMEMORY), MYF(0)); error=0; goto err; } @@ -7625,7 +10906,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error=0; goto err; } @@ -7743,6 +11024,7 @@ join_init_cache(THD *thd,JOIN_TAB *tables,uint table_count) } if (!(cache->field=(CACHE_FIELD*) sql_alloc(sizeof(CACHE_FIELD)*(cache->fields+table_count*2)+(blobs+1)* + sizeof(CACHE_FIELD*)))) { my_free((gptr) cache->buff,MYF(0)); /* purecov: inspected */ @@ -7984,71 +11266,133 @@ cp_buffer_from_ref(TABLE_REF *ref) *****************************************************************************/ /* - Find order/group item in requested columns and change the item to point at - it. If item doesn't exists, add it first in the field list - Return 0 if ok. + Resolve an ORDER BY or GROUP BY column reference. + + SYNOPSIS + find_order_in_list() + thd [in] Pointer to current thread structure + ref_pointer_array [in/out] All select, group and order by fields + tables [in] List of tables to search in (usually FROM clause) + order [in] Column reference to be resolved + fields [in] List of fields to search in (usually SELECT list) + all_fields [in/out] All select, group and order by fields + is_group_field [in] True if order is a GROUP field, false if + ORDER by field + + DESCRIPTION + Given a column reference (represented by 'order') from a GROUP BY or ORDER + BY clause, find the actual column it represents. 
If the column being + resolved is from the GROUP BY clause, the procedure searches the SELECT + list 'fields' and the columns in the FROM list 'tables'. If 'order' is from + the ORDER BY clause, only the SELECT list is being searched. + + If 'order' is resolved to an Item, then order->item is set to the found + Item. If there is no item for the found column (that is, it was resolved + into a table field), order->item is 'fixed' and is added to all_fields and + ref_pointer_array. + + RETURN + 0 if OK + 1 if error occurred */ static int -find_order_in_list(THD *thd, Item **ref_pointer_array, - TABLE_LIST *tables,ORDER *order, List<Item> &fields, - List<Item> &all_fields) +find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, + ORDER *order, List<Item> &fields, List<Item> &all_fields, + bool is_group_field) { - Item *it= *order->item; - if (it->type() == Item::INT_ITEM) + Item *order_item= *order->item; /* The item from the GROUP/ORDER caluse. */ + Item::Type order_item_type; + Item **select_item; /* The corresponding item from the SELECT clause. */ + Field *from_field; /* The corresponding field from the FROM clause. */ + + if (order_item->type() == Item::INT_ITEM) { /* Order by position */ - uint count= (uint) it->val_int(); + uint count= (uint) order_item->val_int(); if (!count || count > fields.elements) { - my_printf_error(ER_BAD_FIELD_ERROR,ER(ER_BAD_FIELD_ERROR), - MYF(0), it->full_name(), thd->where); + my_error(ER_BAD_FIELD_ERROR, MYF(0), + order_item->full_name(), thd->where); return 1; } - order->item= ref_pointer_array + count-1; + order->item= ref_pointer_array + count - 1; order->in_field_list= 1; + order->counter= count; + order->counter_used= 1; return 0; } + /* Lookup the current GROUP/ORDER field in the SELECT clause. */ uint counter; bool unaliased; - Item **item= find_item_in_list(it, fields, &counter, + select_item= find_item_in_list(order_item, fields, &counter, REPORT_EXCEPT_NOT_FOUND, &unaliased); - if (!item) - return 1; + if (!select_item) + return 1; /* Some error occured. */ - if (item != (Item **)not_found_item) + + /* Check whether the resolved field is not ambiguos. */ + if (select_item != not_found_item) { /* If we have found field not by its alias in select list but by its original field name, we should additionaly check if we have conflict for this name (in case if we would perform lookup in all tables). */ - if (unaliased && !it->fixed && it->fix_fields(thd, tables, order->item)) + if (unaliased && !order_item->fixed && order_item->fix_fields(thd, tables, order->item)) return 1; - order->item= ref_pointer_array + counter; - order->in_field_list=1; - return 0; + /* Lookup the current GROUP field in the FROM clause. */ + order_item_type= order_item->type(); + if (is_group_field && + order_item_type == Item::FIELD_ITEM || order_item_type == Item::REF_ITEM) + { + Item **view_ref= NULL; + from_field= find_field_in_tables(thd, (Item_ident*) order_item, tables, + view_ref, IGNORE_ERRORS, TRUE); + if(!from_field) + from_field= (Field*) not_found_field; + } + else + from_field= (Field*) not_found_field; + + if (from_field == not_found_field || + from_field && from_field != view_ref_found && + (*select_item)->type() == Item::FIELD_ITEM && + ((Item_field*) (*select_item))->field->eq(from_field)) + /* + If there is no such field in the FROM clause, or it is the same field as + the one found in the SELECT clause, then use the Item created for the + SELECT field. 
As a result if there was a derived field that 'shadowed' + a table field with the same name, the table field will be chosen over + the derived field. + */ + { + order->item= ref_pointer_array + counter; + order->in_field_list=1; + return 0; + } } order->in_field_list=0; /* - We check it->fixed because Item_func_group_concat can put + We check order_item->fixed because Item_func_group_concat can put arguments for which fix_fields already was called. 'it' reassigned in if condition because fix_field can change it. */ - if (!it->fixed && - (it->fix_fields(thd, tables, order->item) || - (it= *order->item)->check_cols(1) || + if (!order_item->fixed && + (order_item->fix_fields(thd, tables, order->item) || + (order_item= *order->item)->check_cols(1) || thd->is_fatal_error)) return 1; // Wrong field uint el= all_fields.elements; - all_fields.push_front(it); // Add new field to field list - ref_pointer_array[el]= it; + all_fields.push_front(order_item); // Add new field to field list + ref_pointer_array[el]= order_item; order->item= ref_pointer_array + el; return 0; } + /* Change order to point at item in select list. If item isn't a number and doesn't exits in the select list, add it the the field list. @@ -8061,7 +11405,7 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, for (; order; order=order->next) { if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, - all_fields)) + all_fields, FALSE)) return 1; } return 0; @@ -8075,7 +11419,7 @@ int setup_order(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, setup_group() thd Thread handler ref_pointer_array We store references to all fields that was not in - 'fields' here. + 'fields' here. fields All fields in the select part. Any item in 'order' that is part of these list is replaced by a pointer to this fields. 
@@ -8113,13 +11457,12 @@ setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, for (; order; order=order->next) { if (find_order_in_list(thd, ref_pointer_array, tables, order, fields, - all_fields)) + all_fields, TRUE)) return 1; (*order->item)->marker=1; /* Mark found */ if ((*order->item)->with_sum_func) { - my_printf_error(ER_WRONG_GROUP_FIELD, ER(ER_WRONG_GROUP_FIELD),MYF(0), - (*order->item)->full_name()); + my_error(ER_WRONG_GROUP_FIELD, MYF(0), (*order->item)->full_name()); return 1; } } @@ -8134,9 +11477,7 @@ setup_group(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables, if (item->type() != Item::SUM_FUNC_ITEM && !item->marker && !item->const_item()) { - my_printf_error(ER_WRONG_FIELD_WITH_GROUP, - ER(ER_WRONG_FIELD_WITH_GROUP), - MYF(0),item->full_name()); + my_error(ER_WRONG_FIELD_WITH_GROUP, MYF(0), item->full_name()); return 1; } } @@ -8324,7 +11665,7 @@ get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables) if (!map || (map & (RAND_TABLE_BIT | OUTER_REF_TABLE_BIT))) DBUG_RETURN(0); - for (; !(map & tables->table->map) ; tables=tables->next) ; + for (; !(map & tables->table->map); tables= tables->next_leaf); if (map != tables->table->map) DBUG_RETURN(0); // More than one table DBUG_PRINT("exit",("sort by table: %d",tables->table->tablenr)); @@ -8348,15 +11689,30 @@ calc_group_buffer(JOIN *join,ORDER *group) { if (field->type() == FIELD_TYPE_BLOB) key_length+=MAX_BLOB_WIDTH; // Can't be used as a key + else if (field->type() == MYSQL_TYPE_VARCHAR) + key_length+= field->field_length + HA_KEY_BLOB_LENGTH; else - key_length+=field->pack_length(); + key_length+= field->pack_length(); } else if ((*group->item)->result_type() == REAL_RESULT) key_length+=sizeof(double); else if ((*group->item)->result_type() == INT_RESULT) key_length+=sizeof(longlong); + else if ((*group->item)->result_type() == STRING_RESULT) + { + /* + Group strings are taken as varstrings and require an length field. + A field is not yet created by create_tmp_field() + and the sizes should match up. + */ + key_length+= (*group->item)->max_length + HA_KEY_BLOB_LENGTH; + } else - key_length+=(*group->item)->max_length; + { + /* This case should never be choosen */ + DBUG_ASSERT(0); + current_thd->fatal_error(); + } parts++; if ((*group->item)->maybe_null) null_parts++; @@ -8628,9 +11984,7 @@ bool JOIN::alloc_func_list() field_list All items send_fields Items in select list before_group_by Set to 1 if this is called before GROUP BY handling - - NOTES - Calls ::setup() for all item_sum objects in field_list + recompute Set to TRUE if sum_funcs must be recomputed RETURN 0 ok @@ -8638,23 +11992,21 @@ bool JOIN::alloc_func_list() */ bool JOIN::make_sum_func_list(List<Item> &field_list, List<Item> &send_fields, - bool before_group_by) + bool before_group_by, bool recompute) { List_iterator_fast<Item> it(field_list); Item_sum **func; Item *item; DBUG_ENTER("make_sum_func_list"); + if (*sum_funcs && !recompute) + DBUG_RETURN(FALSE); /* We have already initialized sum_funcs. 
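    A minimal sketch of the collect-versus-setup split, using assumed stand-in
    types rather than Item_sum: the array of aggregate pointers is built once
    and reused unless a recompute is requested, while per-execution
    initialisation (for example COUNT(DISTINCT) creating its temporary table)
    is handled by a separate pass, see setup_sum_funcs() further down.

        #include <vector>

        struct Aggregate {
          bool initialized = false;
          bool setup() { initialized = true; return false; }  // false == success
        };

        struct Query {
          std::vector<Aggregate*> sum_funcs;                   // collected once

          void collect(std::vector<Aggregate> &items, bool recompute) {
            if (!sum_funcs.empty() && !recompute)
              return;                                          // already initialized
            sum_funcs.clear();
            for (Aggregate &a : items)
              sum_funcs.push_back(&a);
          }

          bool setup_all() {                                   // per-execution pass
            for (Aggregate *f : sum_funcs)
              if (f->setup())
                return true;                                    // true == error
            return false;
          }
        };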
*/ + func= sum_funcs; while ((item=it++)) { if (item->type() == Item::SUM_FUNC_ITEM && !item->const_item()) - { *func++= (Item_sum*) item; - /* let COUNT(DISTINCT) create the temporary table */ - if (((Item_sum*) item)->setup(thd)) - DBUG_RETURN(TRUE); - } } if (before_group_by && rollup.state == ROLLUP::STATE_INITED) { @@ -8697,6 +12049,8 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, { List_iterator_fast<Item> it(all_fields); Item *item_field,*item; + DBUG_ENTER("change_to_use_tmp_fields"); + res_selected_fields.empty(); res_all_fields.empty(); @@ -8720,8 +12074,8 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, else item_field= (Item*) new Item_field(field); if (!item_field) - return TRUE; // Fatal error - item_field->name= item->name; /*lint -e613 */ + DBUG_RETURN(TRUE); // Fatal error + item_field->name= item->name; #ifndef DBUG_OFF if (_db_on_ && !item_field->name) { @@ -8745,7 +12099,7 @@ change_to_use_tmp_fields(THD *thd, Item **ref_pointer_array, for (i= 0; i < border; i++) itr++; itr.sublist(res_selected_fields, elements); - return FALSE; + DBUG_RETURN(FALSE); } @@ -8799,6 +12153,31 @@ change_refs_to_tmp_fields(THD *thd, Item **ref_pointer_array, Code for calculating functions ******************************************************************************/ + +/* + Call ::setup for all sum functions + + SYNOPSIS + setup_sum_funcs() + thd thread handler + func_ptr sum function list + + RETURN + FALSE ok + TRUE error +*/ + +static bool setup_sum_funcs(THD *thd, Item_sum **func_ptr) +{ + Item_sum *func; + DBUG_ENTER("setup_sum_funcs"); + while ((func= *(func_ptr++))) + if (func->setup(thd)) + DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); +} + + static void init_tmptable_sum_functions(Item_sum **func_ptr) { @@ -9177,6 +12556,7 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, select_result *result=join->result; Item *item_null= new Item_null(); CHARSET_INFO *cs= system_charset_info; + int quick_type; DBUG_ENTER("select_describe"); DBUG_PRINT("info", ("Select 0x%lx, type %s, message %s", (ulong)join->select_lex, join->select_lex->type, @@ -9268,14 +12648,20 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, { JOIN_TAB *tab=join->join_tab+i; TABLE *table=tab->table; - char buff[512],*buff_ptr=buff; - char buff1[512], buff2[512]; + char buff[512]; + char buff1[512], buff2[512], buff3[512]; + char keylen_str_buf[64]; + String extra(buff, sizeof(buff),cs); char table_name_buffer[NAME_LEN]; String tmp1(buff1,sizeof(buff1),cs); String tmp2(buff2,sizeof(buff2),cs); + String tmp3(buff3,sizeof(buff3),cs); + extra.length(0); tmp1.length(0); tmp2.length(0); + tmp3.length(0); + quick_type= -1; item_list.empty(); /* id */ item_list.push_back(new Item_uint((uint32) @@ -9285,7 +12671,15 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, strlen(join->select_lex->type), cs)); if (tab->type == JT_ALL && tab->select && tab->select->quick) - tab->type= JT_RANGE; + { + quick_type= tab->select->quick->get_type(); + if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) || + (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) || + (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION)) + tab->type = JT_INDEX_MERGE; + else + tab->type = JT_RANGE; + } /* table */ if (table->derived_select_number) { @@ -9303,10 +12697,10 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(new Item_string(join_type_str[tab->type], strlen(join_type_str[tab->type]), 
cs)); - uint j; - /* possible_keys */ + /* Build "possible_keys" value and add it to item_list */ if (!tab->keys.is_clear_all()) { + uint j; for (j=0 ; j < table->keys ; j++) { if (tab->keys.is_set(j)) @@ -9323,14 +12717,19 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(new Item_string(tmp1.ptr(),tmp1.length(),cs)); else item_list.push_back(item_null); - /* key key_len ref */ + + /* Build "key", "key_len", and "ref" values and add them to item_list */ if (tab->ref.key_parts) { KEY *key_info=table->key_info+ tab->ref.key; + register uint length; item_list.push_back(new Item_string(key_info->name, strlen(key_info->name), system_charset_info)); - item_list.push_back(new Item_int((int32) tab->ref.key_length)); + length= longlong2str(tab->ref.key_length, keylen_str_buf, 10) - + keylen_str_buf; + item_list.push_back(new Item_string(keylen_str_buf, length, + system_charset_info)); for (store_key **ref=tab->ref.key_copy ; *ref ; ref++) { if (tmp2.length()) @@ -9343,18 +12742,21 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, else if (tab->type == JT_NEXT) { KEY *key_info=table->key_info+ tab->index; + register uint length; item_list.push_back(new Item_string(key_info->name, strlen(key_info->name),cs)); - item_list.push_back(new Item_int((int32) key_info->key_length)); + length= longlong2str(key_info->key_length, keylen_str_buf, 10) - + keylen_str_buf; + item_list.push_back(new Item_string(keylen_str_buf, + length, + system_charset_info)); item_list.push_back(item_null); } else if (tab->select && tab->select->quick) { - KEY *key_info=table->key_info+ tab->select->quick->index; - item_list.push_back(new Item_string(key_info->name, - strlen(key_info->name),cs)); - item_list.push_back(new Item_int((int32) tab->select->quick-> - max_used_key_length)); + tab->select->quick->add_keys_and_lengths(&tmp2, &tmp3); + item_list.push_back(new Item_string(tmp2.ptr(),tmp2.length(),cs)); + item_list.push_back(new Item_string(tmp3.ptr(),tmp3.length(),cs)); item_list.push_back(item_null); } else @@ -9363,52 +12765,73 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, item_list.push_back(item_null); item_list.push_back(item_null); } - /* rows */ + /* Add "rows" field to item_list. */ item_list.push_back(new Item_int((longlong) (ulonglong) join->best_positions[i]. records_read, 21)); - /* extra */ + /* Build "Extra" field and add it to item_list. 
*/ my_bool key_read=table->key_read; if ((tab->type == JT_NEXT || tab->type == JT_CONST) && table->used_keys.is_set(tab->index)) key_read=1; - + if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT && + !((QUICK_ROR_INTERSECT_SELECT*)tab->select->quick)->need_to_fetch_row) + key_read=1; + if (tab->info) item_list.push_back(new Item_string(tab->info,strlen(tab->info),cs)); else { + if (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION || + quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT || + quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) + { + extra.append("; Using "); + tab->select->quick->add_info_string(&extra); + } if (tab->select) { if (tab->use_quick == 2) { char buf[MAX_KEY/8+1]; - sprintf(buff_ptr,"; Range checked for each record (index map: 0x%s)", - tab->keys.print(buf)); - buff_ptr=strend(buff_ptr); + extra.append("; Range checked for each record (index map: 0x"); + extra.append(tab->keys.print(buf)); + extra.append(')'); } - else - buff_ptr=strmov(buff_ptr,"; Using where"); + else if (tab->select->cond) + extra.append("; Using where"); } if (key_read) - buff_ptr= strmov(buff_ptr,"; Using index"); + { + if (quick_type == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) + extra.append("; Using index for group-by"); + else + extra.append("; Using index"); + } if (table->reginfo.not_exists_optimize) - buff_ptr= strmov(buff_ptr,"; Not exists"); + extra.append("; Not exists"); if (need_tmp_table) { need_tmp_table=0; - buff_ptr= strmov(buff_ptr,"; Using temporary"); + extra.append("; Using temporary"); } if (need_order) { need_order=0; - buff_ptr= strmov(buff_ptr,"; Using filesort"); + extra.append("; Using filesort"); } if (distinct & test_all_bits(used_tables,thd->used_tables)) - buff_ptr= strmov(buff_ptr,"; Distinct"); - if (buff_ptr == buff) - buff_ptr+= 2; // Skip inital "; " - item_list.push_back(new Item_string(buff+2,(uint) (buff_ptr - buff)-2, - cs)); + extra.append("; Distinct"); + + /* Skip initial "; "*/ + const char *str= extra.ptr(); + uint32 len= extra.length(); + if (len) + { + str += 2; + len -= 2; + } + item_list.push_back(new Item_string(str, len, cs)); } // For next iteration used_tables|=table->map; @@ -9427,10 +12850,10 @@ static void select_describe(JOIN *join, bool need_tmp_table, bool need_order, } -int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) +bool mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) { DBUG_ENTER("mysql_explain_union"); - int res= 0; + bool res= 0; SELECT_LEX *first= unit->first_select(); for (SELECT_LEX *sl= first; @@ -9467,6 +12890,7 @@ int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) else { thd->lex->current_select= first; + unit->set_limit(unit->global_parameters, first); res= mysql_select(thd, &first->ref_pointer_array, (TABLE_LIST*) first->table_list.first, first->with_wild, first->item_list, @@ -9480,9 +12904,111 @@ int mysql_explain_union(THD *thd, SELECT_LEX_UNIT *unit, select_result *result) first->options | thd->options | SELECT_DESCRIBE, result, unit, first); } - if (res > 0 || thd->net.report_error) - res= -1; // mysql_explain_select do not report error - DBUG_RETURN(res); + DBUG_RETURN(res || thd->net.report_error); +} + + +/* + Print joins from the FROM clause + + SYNOPSIS + print_join() + thd thread handler + str string where table should be printed + tables list of tables in join +*/ + +static void print_join(THD *thd, String *str, List<TABLE_LIST> *tables) +{ + /* List is reversed => we should reverse it before using */ + 
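  /*
    The loop below copies the tables->elements pointers into a temporary
    array back to front, so table[0] ends up being the leftmost table of
    the original FROM clause; the second loop then prints each remaining
    table preceded by its join operator and ON expression, if any.
  */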
List_iterator_fast<TABLE_LIST> ti(*tables); + TABLE_LIST **table= (TABLE_LIST **)thd->alloc(sizeof(TABLE_LIST*) * + tables->elements); + if (table == 0) + return; // out of memory + + for (TABLE_LIST **t= table + (tables->elements - 1); t >= table; t--) + *t= ti++; + + DBUG_ASSERT(tables->elements >= 1); + (*table)->print(thd, str); + + TABLE_LIST **end= table + tables->elements; + for(TABLE_LIST **tbl= table + 1; tbl < end; tbl++) + { + TABLE_LIST *curr= *tbl; + if (curr->outer_join) + str->append(" left join ", 11); // MySQL converg right to left joins + else if (curr->straight) + str->append(" straight_join ", 15); + else + str->append(" join ", 6); + curr->print(thd, str); + if (curr->on_expr) + { + str->append(" on(", 4); + curr->on_expr->print(str); + str->append(')'); + } + } +} + + +/* + Print table as it should be in join list + + SYNOPSIS + st_table_list::print(); + str string where table should bbe printed +*/ + +void st_table_list::print(THD *thd, String *str) +{ + if (nested_join) + { + str->append('('); + print_join(thd, str, &nested_join->join_list); + str->append(')'); + } + else + { + const char *cmp_name; // Name to compare with alias + if (view_name.str) + { + append_identifier(thd, str, view_db.str, view_db.length); + str->append('.'); + append_identifier(thd, str, view_name.str, view_name.length); + cmp_name= view_name.str; + } + else if (derived) + { + str->append('('); + derived->print(str); + str->append(')'); + cmp_name= ""; // Force printing of alias + } + else + { + append_identifier(thd, str, db, db_length); + str->append('.'); + if (schema_table) + { + append_identifier(thd, str, schema_table_name, + strlen(schema_table_name)); + cmp_name= schema_table_name; + } + else + { + append_identifier(thd, str, real_name, real_name_length); + cmp_name= real_name; + } + } + if (my_strcasecmp(table_alias_charset, cmp_name, alias)) + { + str->append(' '); + append_identifier(thd, str, alias, strlen(alias)); + } + } } @@ -9492,8 +13018,8 @@ void st_select_lex::print(THD *thd, String *str) thd= current_thd; str->append("select ", 7); - - //options + + /* First add options */ if (options & SELECT_STRAIGHT_JOIN) str->append("straight_join ", 14); if ((thd->lex->lock_option == TL_READ_HIGH_PRIORITY) && @@ -9534,60 +13060,8 @@ void st_select_lex::print(THD *thd, String *str) if (table_list.elements) { str->append(" from ", 6); - Item *next_on= 0; - for (TABLE_LIST *table= (TABLE_LIST *) table_list.first; - table; - table= table->next) - { - if (table->derived) - { - str->append('('); - table->derived->print(str); - str->append(") "); - str->append(table->alias); - } - else - { - str->append(table->db); - str->append('.'); - str->append(table->real_name); - if (my_strcasecmp(table_alias_charset, table->real_name, table->alias)) - { - str->append(' '); - str->append(table->alias); - } - } - - if (table->on_expr && ((table->outer_join & JOIN_TYPE_LEFT) || - !(table->outer_join & JOIN_TYPE_RIGHT))) - next_on= table->on_expr; - - if (next_on) - { - str->append(" on(", 4); - next_on->print(str); - str->append(')'); - next_on= 0; - } - - TABLE_LIST *next_table; - if ((next_table= table->next)) - { - if (table->outer_join & JOIN_TYPE_RIGHT) - { - str->append(" right join ", 12); - if (!(table->outer_join & JOIN_TYPE_LEFT) && - table->on_expr) - next_on= table->on_expr; - } - else if (next_table->straight) - str->append(" straight_join ", 15); - else if (next_table->outer_join & JOIN_TYPE_LEFT) - str->append(" left join ", 11); - else - str->append(" join ", 6); - } - } + /* go through 
join tree */ + print_join(thd, str, &top_join_list); } // Where @@ -9650,17 +13124,17 @@ void st_select_lex::print(THD *thd, String *str) res new select_result object RETURN - 0 - OK - -1 - error + FALSE - OK + TRUE - error */ -int JOIN::change_result(select_result *res) +bool JOIN::change_result(select_result *res) { DBUG_ENTER("JOIN::change_result"); result= res; if (!procedure && result->prepare(fields_list, select_lex->master_unit())) { - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } diff --git a/sql/sql_select.h b/sql/sql_select.h index bbd169d1850..0f26207b391 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -75,23 +75,33 @@ typedef struct st_join_cache { /* ** The structs which holds the join connections and join states */ - enum join_type { JT_UNKNOWN,JT_SYSTEM,JT_CONST,JT_EQ_REF,JT_REF,JT_MAYBE_REF, JT_ALL, JT_RANGE, JT_NEXT, JT_FT, JT_REF_OR_NULL, - JT_UNIQUE_SUBQUERY, JT_INDEX_SUBQUERY}; + JT_UNIQUE_SUBQUERY, JT_INDEX_SUBQUERY, JT_INDEX_MERGE}; class JOIN; +typedef int (*Next_select_func)(JOIN *,struct st_join_table *,bool); +typedef int (*Read_record_func)(struct st_join_table *tab); + + typedef struct st_join_table { TABLE *table; KEYUSE *keyuse; /* pointer to first used key */ SQL_SELECT *select; COND *select_cond; - QUICK_SELECT *quick; - Item *on_expr; + QUICK_SELECT_I *quick; + Item **on_expr_ref; /* pointer to the associated on expression */ + COND_EQUAL *cond_equal; /* multiple equalities for the on expression */ + st_join_table *first_inner; /* first inner table for including outerjoin */ + bool found; /* true after all matches or null complement */ + bool not_null_compl;/* true before null complement is added */ + st_join_table *last_inner; /* last table table for embedding outer join */ + st_join_table *first_upper; /* first inner table for embedding outer join */ + st_join_table *first_unmatched; /* used for optimization purposes only */ const char *info; - int (*read_first_record)(struct st_join_table *tab); - int (*next_select)(JOIN *,struct st_join_table *,bool); + Read_record_func read_first_record; + Next_select_func next_select; READ_RECORD read_record; double worst_seeks; key_map const_keys; /* Keys with constant part */ @@ -116,6 +126,7 @@ typedef struct st_join_table { typedef struct st_position /* Used in find_best */ { double records_read; + double read_time; JOIN_TAB *table; KEYUSE *key; } POSITION; @@ -133,8 +144,9 @@ typedef struct st_rollup class JOIN :public Sql_alloc { public: - JOIN_TAB *join_tab,**best_ref,**map2table; - JOIN_TAB *join_tab_save; //saved join_tab for subquery reexecution + JOIN_TAB *join_tab,**best_ref; + JOIN_TAB **map2table; // mapping between table indexes and JOIN_TABs + JOIN_TAB *join_tab_save; // saved join_tab for subquery reexecution TABLE **table,**all_tables,*sort_by_table; uint tables,const_tables; uint send_group_parts; @@ -142,6 +154,16 @@ class JOIN :public Sql_alloc bool do_send_rows; table_map const_table_map,found_const_table_map,outer_join; ha_rows send_records,found_records,examined_rows,row_limit, select_limit; + /* + Used to fetch no more than given amount of rows per one + fetch operation of server side cursor. + The value is checked in end_send and end_send_group in fashion, similar + to offset_limit_cnt: + - fetch_limit= HA_POS_ERROR if there is no cursor. 
+ - when we open a cursor, we set fetch_limit to 0, + - on each fetch iteration we add num_rows to fetch to fetch_limit + */ + ha_rows fetch_limit; POSITION positions[MAX_TABLES+1],best_positions[MAX_TABLES+1]; double best_read; List<Item> *fields; @@ -199,8 +221,11 @@ class JOIN :public Sql_alloc ORDER *order, *group_list, *proc_param; //hold parameters of mysql_select COND *conds; // ---"--- Item *conds_history; // store WHERE for explain - TABLE_LIST *tables_list; //hold 'tables' parameter of mysql_selec + TABLE_LIST *tables_list; //hold 'tables' parameter of mysql_select + List<TABLE_LIST> *join_list; // list of joined tables in reverse order + COND_EQUAL *cond_equal; SQL_SELECT *select; //created in optimisation phase + JOIN_TAB *return_tab; //used only for outer joins Item **ref_pointer_array; //used pointer reference for this select // Copy of above to be used with different lists Item **items0, **items1, **items2, **items3, **current_ref_pointer_array; @@ -224,11 +249,13 @@ class JOIN :public Sql_alloc table= 0; tables= 0; const_tables= 0; + join_list= 0; sort_and_group= 0; first_record= 0; do_send_rows= 1; send_records= 0; found_records= 0; + fetch_limit= HA_POS_ERROR; examined_rows= 0; exec_tmp_table1= 0; exec_tmp_table2= 0; @@ -250,15 +277,16 @@ class JOIN :public Sql_alloc hidden_group_fields= 0; /*safety*/ buffer_result= test(select_options & OPTION_BUFFER_RESULT) && !test(select_options & OPTION_FOUND_ROWS); - all_fields= fields_arg; - fields_list= fields_arg; error= 0; select= 0; + return_tab= 0; ref_pointer_array= items0= items1= items2= items3= 0; ref_pointer_array_size= 0; zero_result_cause= 0; optimized= 0; + cond_equal= 0; + all_fields= fields_arg; fields_list= fields_arg; bzero((char*) &keyuse,sizeof(keyuse)); tmp_table_param.copy_field=0; @@ -277,7 +305,7 @@ class JOIN :public Sql_alloc void restore_tmp(); bool alloc_func_list(); bool make_sum_func_list(List<Item> &all_fields, List<Item> &send_fields, - bool before_group_by); + bool before_group_by, bool recompute= FALSE); inline void set_items_ref_array(Item **ptr) { @@ -304,7 +332,45 @@ class JOIN :public Sql_alloc return (do_send_rows && tmp_table_param.sum_func_count != 0 && !group_list); } - int change_result(select_result *result); + bool change_result(select_result *result); +}; + + +/* + Server-side cursor (now stands only for basic read-only cursor) + See class implementation in sql_select.cc +*/ + +class Cursor: public Sql_alloc, public Item_arena +{ + JOIN *join; + SELECT_LEX_UNIT *unit; + + TABLE *open_tables; + MYSQL_LOCK *lock; + TABLE *derived_tables; + /* List of items created during execution */ + ulong query_id; +public: + select_send result; + + /* Temporary implementation as now we replace THD state by value */ + /* Save THD state into cursor */ + void init_from_thd(THD *thd); + /* Restore THD from cursor to continue cursor execution */ + void init_thd(THD *thd); + /* bzero cursor state in THD */ + void reset_thd(THD *thd); + + int open(JOIN *join); + int fetch(ulong num_rows); + void reset() { join= 0; } + bool is_open() const { return join != 0; } + void close(); + + void set_unit(SELECT_LEX_UNIT *unit_arg) { unit= unit_arg; } + Cursor() :join(0), unit(0) {} + ~Cursor(); }; @@ -332,10 +398,15 @@ void copy_fields(TMP_TABLE_PARAM *param); void copy_funcs(Item **func_ptr); bool create_myisam_from_heap(THD *thd, TABLE *table, TMP_TABLE_PARAM *param, int error, bool ignore_last_dupp_error); +uint find_shortest_key(TABLE *table, const key_map *usable_keys); /* functions from opt_sum.cc */ +bool 
simple_pred(Item_func *func_item, Item **args, bool *inv_order); int opt_sum_query(TABLE_LIST *tables, List<Item> &all_fields,COND *conds); +/* from sql_delete.cc, used by opt_range.cc */ +extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); + /* class to copying an field/item to a key struct */ class store_key :public Sql_alloc @@ -349,15 +420,15 @@ class store_key :public Sql_alloc :null_ptr(null),err(0) { if (field_arg->type() == FIELD_TYPE_BLOB) - to_field=new Field_varstring(ptr, length, (uchar*) null, 1, + { + /* Key segments are always packed with a 2 byte length prefix */ + to_field=new Field_varstring(ptr, length, 2, (uchar*) null, 1, Field::NONE, field_arg->field_name, field_arg->table, field_arg->charset()); - else - { - to_field=field_arg->new_field(thd->mem_root,field_arg->table); - if (to_field) - to_field->move_field(ptr, (uchar*) null, 1); } + else + to_field=field_arg->new_key_field(thd->mem_root, field_arg->table, + ptr, (uchar*) null, 1); } virtual ~store_key() {} /* Not actually needed */ virtual bool copy()=0; @@ -403,7 +474,7 @@ public: {} bool copy() { - return item->save_in_field(to_field, 1) || err != 0; + return item->save_in_field_no_warnings(to_field, 1) || err != 0; } const char *name() const { return "func"; } }; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index ba13dd1ff04..6ffad1e2bff 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -20,6 +20,7 @@ #include "mysql_priv.h" #include "sql_select.h" // For select_describe #include "repl_failsafe.h" +#include "sp_head.h" #include <my_dir.h> #ifdef HAVE_BERKELEY_DB @@ -37,141 +38,16 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), #endif static int -store_create_info(THD *thd, TABLE *table, String *packet); - - -/* - Report list of databases - A database is a directory in the mysql_data_home directory -*/ - -int -mysqld_show_dbs(THD *thd,const char *wild) -{ - Item_string *field=new Item_string("",0,thd->charset()); - List<Item> field_list; - char *end; - List<char> files; - char *file_name; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_dbs"); - - field->name=(char*) thd->alloc(20+ (wild ? 
(uint) strlen(wild)+4: 0)); - field->max_length=NAME_LEN; - end=strmov(field->name,"Database"); - if (wild && wild[0]) - strxmov(end," (",wild,")",NullS); - field_list.push_back(field); - - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - if (mysql_find_files(thd,&files,NullS,mysql_data_home,wild,1)) - DBUG_RETURN(1); - List_iterator_fast<char> it(files); - - while ((file_name=it++)) - { -#ifndef NO_EMBEDDED_ACCESS_CHECKS - if (thd->master_access & (DB_ACLS | SHOW_DB_ACL) || - acl_get(thd->host, thd->ip, thd->priv_user, file_name,0) || - (grant_option && !check_grant_db(thd, file_name))) -#endif - { - protocol->prepare_for_resend(); - protocol->store(file_name, system_charset_info); - if (protocol->write()) - DBUG_RETURN(-1); - } - } - send_eof(thd); - DBUG_RETURN(0); -} - - -/*************************************************************************** - List all open tables in a database -***************************************************************************/ - -int mysqld_show_open_tables(THD *thd,const char *wild) -{ - List<Item> field_list; - OPEN_TABLE_LIST *open_list; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_open_tables"); - - field_list.push_back(new Item_empty_string("Database",NAME_LEN)); - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - field_list.push_back(new Item_return_int("In_use", 1, MYSQL_TYPE_TINY)); - field_list.push_back(new Item_return_int("Name_locked", 4, MYSQL_TYPE_TINY)); - - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - - if (!(open_list=list_open_tables(thd,wild)) && thd->is_fatal_error) - DBUG_RETURN(-1); - - for (; open_list ; open_list=open_list->next) - { - protocol->prepare_for_resend(); - protocol->store(open_list->db, system_charset_info); - protocol->store(open_list->table, system_charset_info); - protocol->store_tiny((longlong) open_list->in_use); - protocol->store_tiny((longlong) open_list->locked); - if (protocol->write()) - { - DBUG_RETURN(-1); - } - } - send_eof(thd); - DBUG_RETURN(0); -} +store_create_info(THD *thd, TABLE_LIST *table_list, String *packet); +static int +view_store_create_info(THD *thd, TABLE_LIST *table, String *packet); /*************************************************************************** -** List all tables in a database (fast version) -** A table is a .frm file in the current databasedir -***************************************************************************/ - -int mysqld_show_tables(THD *thd,const char *db,const char *wild) -{ - Item_string *field=new Item_string("",0,thd->charset()); - List<Item> field_list; - char path[FN_LEN],*end; - List<char> files; - char *file_name; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_tables"); - - field->name=(char*) thd->alloc(20+(uint) strlen(db)+ - (wild ? 
(uint) strlen(wild)+4:0)); - end=strxmov(field->name,"Tables_in_",db,NullS); - if (wild && wild[0]) - strxmov(end," (",wild,")",NullS); - field->max_length=NAME_LEN; - (void) sprintf(path,"%s/%s",mysql_data_home,db); - (void) unpack_dirname(path,path); - field_list.push_back(field); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - if (mysql_find_files(thd,&files,db,path,wild,0)) - DBUG_RETURN(-1); - List_iterator_fast<char> it(files); - while ((file_name=it++)) - { - protocol->prepare_for_resend(); - protocol->store(file_name, system_charset_info); - if (protocol->write()) - DBUG_RETURN(-1); - } - send_eof(thd); - DBUG_RETURN(0); -} - -/*************************************************************************** ** List all table types supported ***************************************************************************/ -int mysqld_show_storage_engines(THD *thd) +bool mysqld_show_storage_engines(THD *thd) { List<Item> field_list; Protocol *protocol= thd->protocol; @@ -181,8 +57,9 @@ int mysqld_show_storage_engines(THD *thd) field_list.push_back(new Item_empty_string("Support",10)); field_list.push_back(new Item_empty_string("Comment",80)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); const char *default_type_name= ha_get_storage_engine((enum db_type)thd->variables.table_type); @@ -200,10 +77,10 @@ int mysqld_show_storage_engines(THD *thd) protocol->store(option_name, system_charset_info); protocol->store(types->comment, system_charset_info); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -220,12 +97,16 @@ struct show_privileges_st { static struct show_privileges_st sys_privileges[]= { {"Alter", "Tables", "To alter the table"}, - {"Create temporary tables","Databases","To use CREATE TEMPORARY TABLE"}, + {"Alter routine", "Functions,Procedures", "To alter or drop stored functions/procedures"}, {"Create", "Databases,Tables,Indexes", "To create new databases and tables"}, + {"Create routine","Functions,Procedures","To use CREATE FUNCTION/PROCEDURE"}, + {"Create temporary tables","Databases","To use CREATE TEMPORARY TABLE"}, + {"Create view", "Tables", "To create new views"}, {"Delete", "Tables", "To delete existing rows"}, - {"Drop", "Databases,Tables", "To drop databases and tables"}, + {"Drop", "Databases,Tables", "To drop databases, tables, and views"}, + {"Execute", "Functions,Procedures", "To execute stored routines"}, {"File", "File access on server", "To read and write files on the server"}, - {"Grant option", "Databases,Tables", "To give to other users those privileges you possess"}, + {"Grant option", "Databases,Tables,Functions,Procedures", "To give to other users those privileges you possess"}, {"Index", "Tables", "To create or drop indexes"}, {"Insert", "Tables", "To insert data into tables"}, {"Lock tables","Databases","To use LOCK TABLES (together with SELECT privilege)"}, @@ -236,14 +117,15 @@ static struct show_privileges_st sys_privileges[]= {"Replication slave","Server Admin","To read binary log events from the master"}, {"Select", "Tables", "To retrieve rows from table"}, {"Show databases","Server Admin","To see all databases with SHOW DATABASES"}, - {"Shutdown","Server Admin", "To shutdown the server"}, + {"Show view","Tables","To see views with SHOW CREATE VIEW"}, + {"Shutdown","Server Admin", "To shut down the server"}, {"Super","Server Admin","To use 
KILL thread, SET GLOBAL, CHANGE MASTER, etc."}, {"Update", "Tables", "To update existing rows"}, {"Usage","Server Admin","No privileges - allow connect only"}, {NullS, NullS, NullS} }; -int mysqld_show_privileges(THD *thd) +bool mysqld_show_privileges(THD *thd) { List<Item> field_list; Protocol *protocol= thd->protocol; @@ -253,8 +135,9 @@ int mysqld_show_privileges(THD *thd) field_list.push_back(new Item_empty_string("Context",15)); field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); show_privileges_st *privilege= sys_privileges; for (privilege= sys_privileges; privilege->privilege ; privilege++) @@ -264,10 +147,10 @@ int mysqld_show_privileges(THD *thd) protocol->store(privilege->context, system_charset_info); protocol->store(privilege->comment, system_charset_info); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -307,7 +190,7 @@ static struct show_column_type_st sys_column_types[]= "A very small integer"}, }; -int mysqld_show_column_types(THD *thd) +bool mysqld_show_column_types(THD *thd) { List<Item> field_list; Protocol *protocol= thd->protocol; @@ -328,8 +211,9 @@ int mysqld_show_column_types(THD *thd) field_list.push_back(new Item_empty_string("Default",NAME_LEN)); field_list.push_back(new Item_empty_string("Comment",NAME_LEN)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); /* TODO: Change the loop to not use 'i' */ for (uint i=0; i < sizeof(sys_column_types)/sizeof(sys_column_types[0]); i++) @@ -350,10 +234,10 @@ int mysqld_show_column_types(THD *thd) protocol->store(sys_column_types[i].default_value, system_charset_info); protocol->store(sys_column_types[i].comment, system_charset_info); if (protocol->write()) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -448,400 +332,83 @@ mysql_find_files(THD *thd,List<char> *files, const char *db,const char *path, } -/*************************************************************************** - Extended version of mysqld_show_tables -***************************************************************************/ - -int mysqld_extend_show_tables(THD *thd,const char *db,const char *wild) -{ - Item *item; - List<char> files; - List<Item> field_list; - char path[FN_LEN]; - char *file_name; - TABLE *table; - Protocol *protocol= thd->protocol; - TIME time; - DBUG_ENTER("mysqld_extend_show_tables"); - - (void) sprintf(path,"%s/%s",mysql_data_home,db); - (void) unpack_dirname(path,path); - field_list.push_back(item=new Item_empty_string("Name",NAME_LEN)); - field_list.push_back(item=new Item_empty_string("Engine",10)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Version", (longlong) 0, 21)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Row_format",10)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Rows",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Avg_row_length",(int32) 0,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Data_length",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Max_data_length",(longlong) 1,21)); - item->maybe_null=1; - 
field_list.push_back(item=new Item_int("Index_length",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Data_free",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Auto_increment",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_datetime("Create_time")); - item->maybe_null=1; - field_list.push_back(item=new Item_datetime("Update_time")); - item->maybe_null=1; - field_list.push_back(item=new Item_datetime("Check_time")); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Collation",32)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Create_options",255)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Comment",80)); - item->maybe_null=1; - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - - if (mysql_find_files(thd,&files,db,path,wild,0)) - DBUG_RETURN(-1); - List_iterator_fast<char> it(files); - while ((file_name=it++)) - { - TABLE_LIST table_list; - bzero((char*) &table_list,sizeof(table_list)); - protocol->prepare_for_resend(); - protocol->store(file_name, system_charset_info); - table_list.db=(char*) db; - table_list.real_name= table_list.alias= file_name; - if (lower_case_table_names) - my_casedn_str(files_charset_info, file_name); - if (!(table = open_ltable(thd, &table_list, TL_READ))) - { - for (uint i=2 ; i < field_list.elements ; i++) - protocol->store_null(); - // Send error to Comment field - protocol->store(thd->net.last_error, system_charset_info); - thd->clear_error(); - } - else - { - const char *str; - handler *file=table->file; - file->info(HA_STATUS_VARIABLE | HA_STATUS_TIME | HA_STATUS_NO_LOCK); - protocol->store(file->table_type(), system_charset_info); - protocol->store((ulonglong) table->frm_version); - str= ((table->db_options_in_use & HA_OPTION_COMPRESS_RECORD) ? - "Compressed" : - (table->db_options_in_use & HA_OPTION_PACK_RECORD) ? - "Dynamic" : "Fixed"); - protocol->store(str, system_charset_info); - protocol->store((ulonglong) file->records); - protocol->store((ulonglong) file->mean_rec_length); - protocol->store((ulonglong) file->data_file_length); - if (file->max_data_file_length) - protocol->store((ulonglong) file->max_data_file_length); - else - protocol->store_null(); - protocol->store((ulonglong) file->index_file_length); - protocol->store((ulonglong) file->delete_length); - if (table->found_next_number_field) - { - table->next_number_field=table->found_next_number_field; - table->next_number_field->reset(); - file->update_auto_increment(); - protocol->store(table->next_number_field->val_int()); - table->next_number_field=0; - } - else - protocol->store_null(); - if (!file->create_time) - protocol->store_null(); - else - { - thd->variables.time_zone->gmt_sec_to_TIME(&time, file->create_time); - protocol->store(&time); - } - if (!file->update_time) - protocol->store_null(); - else - { - thd->variables.time_zone->gmt_sec_to_TIME(&time, file->update_time); - protocol->store(&time); - } - if (!file->check_time) - protocol->store_null(); - else - { - thd->variables.time_zone->gmt_sec_to_TIME(&time, file->check_time); - protocol->store(&time); - } - str= (table->table_charset ? 
table->table_charset->name : "default"); - protocol->store(str, system_charset_info); - if (file->table_flags() & HA_HAS_CHECKSUM) - protocol->store((ulonglong)file->checksum()); - else - protocol->store_null(); // Checksum - { - char option_buff[350],*ptr; - ptr=option_buff; - if (table->min_rows) - { - ptr=strmov(ptr," min_rows="); - ptr=longlong10_to_str(table->min_rows,ptr,10); - } - if (table->max_rows) - { - ptr=strmov(ptr," max_rows="); - ptr=longlong10_to_str(table->max_rows,ptr,10); - } - if (table->avg_row_length) - { - ptr=strmov(ptr," avg_row_length="); - ptr=longlong10_to_str(table->avg_row_length,ptr,10); - } - if (table->db_create_options & HA_OPTION_PACK_KEYS) - ptr=strmov(ptr," pack_keys=1"); - if (table->db_create_options & HA_OPTION_NO_PACK_KEYS) - ptr=strmov(ptr," pack_keys=0"); - if (table->db_create_options & HA_OPTION_CHECKSUM) - ptr=strmov(ptr," checksum=1"); - if (table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) - ptr=strmov(ptr," delay_key_write=1"); - if (table->row_type != ROW_TYPE_DEFAULT) - ptr=strxmov(ptr, " row_format=", ha_row_type[(uint) table->row_type], - NullS); - if (file->raid_type) - { - char buff[100]; - sprintf(buff," raid_type=%s raid_chunks=%d raid_chunksize=%ld", - my_raid_type(file->raid_type), file->raid_chunks, file->raid_chunksize/RAID_BLOCK_SIZE); - ptr=strmov(ptr,buff); - } - protocol->store(option_buff+1, - (ptr == option_buff ? 0 : (uint) (ptr-option_buff)-1) - , system_charset_info); - } - { - char *comment=table->file->update_table_comment(table->comment); - protocol->store(comment, system_charset_info); - if (comment != table->comment) - my_free(comment,MYF(0)); - } - close_thread_tables(thd,0); - } - if (protocol->write()) - DBUG_RETURN(-1); - } - send_eof(thd); - DBUG_RETURN(0); -} - - -/*************************************************************************** -** List all columns in a table_list->real_name -***************************************************************************/ - -int -mysqld_show_fields(THD *thd, TABLE_LIST *table_list,const char *wild, - bool verbose) -{ - TABLE *table; - handler *file; - char tmp[MAX_FIELD_WIDTH]; - char tmp1[MAX_FIELD_WIDTH]; - Item *item; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_fields"); - DBUG_PRINT("enter",("db: %s table: %s",table_list->db, - table_list->real_name)); - - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) - { - send_error(thd); - DBUG_RETURN(1); - } - file=table->file; - file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); -#ifndef NO_EMBEDDED_ACCESS_CHECKS - (void) get_table_grant(thd, table_list); -#endif - List<Item> field_list; - field_list.push_back(new Item_empty_string("Field",NAME_LEN)); - field_list.push_back(new Item_empty_string("Type",40)); - if (verbose) - field_list.push_back(new Item_empty_string("Collation",40)); - field_list.push_back(new Item_empty_string("Null",1)); - field_list.push_back(new Item_empty_string("Key",3)); - field_list.push_back(item=new Item_empty_string("Default",NAME_LEN)); - item->maybe_null=1; - field_list.push_back(new Item_empty_string("Extra",20)); - if (verbose) - { - field_list.push_back(new Item_empty_string("Privileges",80)); - field_list.push_back(new Item_empty_string("Comment",255)); - } - // Send first number of fields and records - if (protocol->send_records_num(&field_list, (ulonglong)file->records) || - protocol->send_fields(&field_list,0)) - DBUG_RETURN(1); - restore_record(table,default_values); // Get empty record - - Field **ptr,*field; - for (ptr=table->field; (field= *ptr) ; 
ptr++) - { - if (!wild || !wild[0] || - !wild_case_compare(system_charset_info, field->field_name,wild)) - { - { - byte *pos; - uint flags=field->flags; - String type(tmp,sizeof(tmp), system_charset_info); -#ifndef NO_EMBEDDED_ACCESS_CHECKS - uint col_access; -#endif - protocol->prepare_for_resend(); - protocol->store(field->field_name, system_charset_info); - field->sql_type(type); - protocol->store(type.ptr(), type.length(), system_charset_info); - if (verbose) - protocol->store(field->has_charset() ? field->charset()->name : "NULL", - system_charset_info); - /* - Even if TIMESTAMP field can't contain NULL as its value it - will accept NULL if you will try to insert such value and will - convert NULL value to current TIMESTAMP. So YES here means - that NULL is allowed for assignment (but may be won't be - returned). - */ - pos=(byte*) ((flags & NOT_NULL_FLAG) && - field->type() != FIELD_TYPE_TIMESTAMP ? - "" : "YES"); - protocol->store((const char*) pos, system_charset_info); - pos=(byte*) ((field->flags & PRI_KEY_FLAG) ? "PRI" : - (field->flags & UNIQUE_KEY_FLAG) ? "UNI" : - (field->flags & MULTIPLE_KEY_FLAG) ? "MUL":""); - protocol->store((char*) pos, system_charset_info); - - if (table->timestamp_field == field && - field->unireg_check != Field::TIMESTAMP_UN_FIELD) - { - /* - We have NOW() as default value but we use CURRENT_TIMESTAMP form - because it is more SQL standard comatible - */ - protocol->store("CURRENT_TIMESTAMP", system_charset_info); - } - else if (field->unireg_check != Field::NEXT_NUMBER && - !field->is_null()) - { // Not null by default - /* - Note: we have to convert the default value into - system_charset_info before sending. - This is necessary for "SET NAMES binary": - If the client character set is binary, we want to - send metadata in UTF8 rather than in the column's - character set. - This conversion also makes "SHOW COLUMNS" and - "SHOW CREATE TABLE" output consistent. Without - this conversion the default values were displayed - differently. - */ - String def(tmp1,sizeof(tmp1), system_charset_info); - type.set(tmp, sizeof(tmp), field->charset()); - field->val_str(&type); - uint dummy_errors; - def.copy(type.ptr(), type.length(), type.charset(), - system_charset_info, &dummy_errors); - protocol->store(def.ptr(), def.length(), def.charset()); - } - else if (field->unireg_check == Field::NEXT_NUMBER || - field->maybe_null()) - protocol->store_null(); // Null as default - else - protocol->store("",0, system_charset_info); // empty string - - char *end=tmp; - if (field->unireg_check == Field::NEXT_NUMBER) - end=strmov(tmp,"auto_increment"); - protocol->store(tmp,(uint) (end-tmp), system_charset_info); - - if (verbose) - { - /* Add grant options & comments */ - end=tmp; -#ifndef NO_EMBEDDED_ACCESS_CHECKS - col_access= get_column_grant(thd,table_list,field) & COL_ACLS; - for (uint bitnr=0; col_access ; col_access>>=1,bitnr++) - { - if (col_access & 1) - { - *end++=','; - end=strmov(end,grant_types.type_names[bitnr]); - } - } -#else - end=strmov(end,""); -#endif - protocol->store(tmp+1,end == tmp ? 
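The removed SHOW COLUMNS code maps the index-membership flags to the Key column with PRI taking precedence over UNI and MUL, and reports Null as YES for TIMESTAMP even when the column is declared NOT NULL, because an assigned NULL is converted to the current timestamp rather than rejected. A condensed sketch with illustrative flag values (the server's own *_FLAG constants differ):

#include <cstdint>

/* Illustrative bit values, not the server's. */
enum : uint32_t
{
  NOT_NULL_FLAG_     = 1,
  PRI_KEY_FLAG_      = 2,
  UNIQUE_KEY_FLAG_   = 4,
  MULTIPLE_KEY_FLAG_ = 8
};

/* PRI wins over UNI, UNI over MUL, exactly as in the ternary chain above. */
static const char *key_label(uint32_t flags)
{
  return (flags & PRI_KEY_FLAG_)      ? "PRI" :
         (flags & UNIQUE_KEY_FLAG_)   ? "UNI" :
         (flags & MULTIPLE_KEY_FLAG_) ? "MUL" : "";
}

static const char *null_label(uint32_t flags, bool is_timestamp)
{
  return ((flags & NOT_NULL_FLAG_) && !is_timestamp) ? "" : "YES";
}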
0 : (uint) (end-tmp-1), - system_charset_info); - protocol->store(field->comment.str, field->comment.length, - system_charset_info); - } - if (protocol->write()) - DBUG_RETURN(1); - } - } - } - send_eof(thd); - DBUG_RETURN(0); -} - - -int +bool mysqld_show_create(THD *thd, TABLE_LIST *table_list) { TABLE *table; Protocol *protocol= thd->protocol; char buff[2048]; String buffer(buff, sizeof(buff), system_charset_info); + int res; DBUG_ENTER("mysqld_show_create"); DBUG_PRINT("enter",("db: %s table: %s",table_list->db, table_list->real_name)); - /* Only one table for now */ - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) + /* Only one table for now, but VIEW can involve several tables */ + if (open_and_lock_tables(thd, table_list)) { - send_error(thd); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); + } + /* TODO: add environment variables show when it become possible */ + if (thd->lex->only_view && !table_list->view) + { + my_error(ER_WRONG_OBJECT, MYF(0), + table_list->db, table_list->real_name, "VIEW"); + DBUG_RETURN(TRUE); } - if (store_create_info(thd, table, &buffer)) - DBUG_RETURN(-1); + table= table_list->table; + + if ((table_list->view ? + view_store_create_info(thd, table_list, &buffer) : + store_create_info(thd, table_list, &buffer))) + DBUG_RETURN(TRUE); List<Item> field_list; - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - // 1024 is for not to confuse old clients - field_list.push_back(new Item_empty_string("Create Table", - max(buffer.length(),1024))); + if (table_list->view) + { + field_list.push_back(new Item_empty_string("View",NAME_LEN)); + field_list.push_back(new Item_empty_string("Create View", + max(buffer.length(),1024))); + } + else + { + field_list.push_back(new Item_empty_string("Table",NAME_LEN)); + // 1024 is for not to confuse old clients + field_list.push_back(new Item_empty_string("Create Table", + max(buffer.length(),1024))); + } - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); protocol->prepare_for_resend(); - protocol->store(table->table_name, system_charset_info); buffer.length(0); - if (store_create_info(thd, table, &buffer)) - DBUG_RETURN(-1); + if (table_list->view) + { + protocol->store(table_list->view_name.str, system_charset_info); + if (view_store_create_info(thd, table_list, &buffer)) + DBUG_RETURN(TRUE); + } + else + { + if (table_list->schema_table) + protocol->store(table_list->schema_table_name, system_charset_info); + else + protocol->store(table->table_name, system_charset_info); + if (store_create_info(thd, table_list, &buffer)) + DBUG_RETURN(TRUE); + } protocol->store(buffer.ptr(), buffer.length(), buffer.charset()); + if (protocol->write()) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } -int mysqld_show_create_db(THD *thd, char *dbname, - HA_CREATE_INFO *create_info) +bool mysqld_show_create_db(THD *thd, char *dbname, + HA_CREATE_INFO *create_info) { int length; char path[FN_REFLEN]; @@ -858,8 +425,8 @@ int mysqld_show_create_db(THD *thd, char *dbname, if (check_db_name(dbname)) { - net_printf(thd,ER_WRONG_DB_NAME, dbname); - DBUG_RETURN(1); + my_error(ER_WRONG_DB_NAME, MYF(0), dbname); + DBUG_RETURN(TRUE); } #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -870,11 +437,11 @@ int mysqld_show_create_db(THD *thd, char *dbname, thd->master_access); if (!(db_access & DB_ACLS) && (!grant_option || check_grant_db(thd,dbname))) { - 
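The rewritten mysqld_show_create() returns bool (TRUE on failure, after the error has already been raised with my_error()) and dispatches between the table and view code paths, switching the column headers accordingly. A reduced sketch of that dispatch with stand-in types:

#include <string>

struct ShowTarget
{
  bool is_view;
  std::string name;
  std::string create_text;   // what store_create_info()/view_store_create_info() would build
};

/* FALSE means success, TRUE means the error was already reported. */
static bool show_create_sketch(const ShowTarget &t, bool only_view,
                               std::string *header, std::string *body)
{
  if (only_view && !t.is_view)
    return true;                               // ER_WRONG_OBJECT path
  *header= t.is_view ? "Create View" : "Create Table";
  *body= t.create_text;
  return false;
}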
net_printf(thd,ER_DBACCESS_DENIED_ERROR, - thd->priv_user, thd->host_or_ip, dbname); + my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), + thd->priv_user, thd->host_or_ip, dbname); mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), thd->priv_user, thd->host_or_ip, dbname); - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } #endif @@ -888,8 +455,8 @@ int mysqld_show_create_db(THD *thd, char *dbname, } if (access(path,F_OK)) { - net_printf(thd,ER_BAD_DB_ERROR,dbname); - DBUG_RETURN(1); + my_error(ER_BAD_DB_ERROR, MYF(0), dbname); + DBUG_RETURN(TRUE); } if (found_libchar) path[length-1]= FN_LIBCHAR; @@ -900,8 +467,9 @@ int mysqld_show_create_db(THD *thd, char *dbname, field_list.push_back(new Item_empty_string("Database",NAME_LEN)); field_list.push_back(new Item_empty_string("Create Database",1024)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); protocol->prepare_for_resend(); protocol->store(dbname, strlen(dbname), system_charset_info); @@ -926,12 +494,12 @@ int mysqld_show_create_db(THD *thd, char *dbname, protocol->store(buffer.ptr(), buffer.length(), buffer.charset()); if (protocol->write()) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } -int +bool mysqld_show_logs(THD *thd) { List<Item> field_list; @@ -942,112 +510,17 @@ mysqld_show_logs(THD *thd) field_list.push_back(new Item_empty_string("Type",10)); field_list.push_back(new Item_empty_string("Status",10)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); #ifdef HAVE_BERKELEY_DB if ((have_berkeley_db == SHOW_OPTION_YES) && berkeley_show_logs(protocol)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); #endif send_eof(thd); - DBUG_RETURN(0); -} - - -int -mysqld_show_keys(THD *thd, TABLE_LIST *table_list) -{ - TABLE *table; - Protocol *protocol= thd->protocol; - DBUG_ENTER("mysqld_show_keys"); - DBUG_PRINT("enter",("db: %s table: %s",table_list->db, - table_list->real_name)); - - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) - { - send_error(thd); - DBUG_RETURN(1); - } - - List<Item> field_list; - Item *item; - field_list.push_back(new Item_empty_string("Table",NAME_LEN)); - field_list.push_back(new Item_return_int("Non_unique",1, MYSQL_TYPE_TINY)); - field_list.push_back(new Item_empty_string("Key_name",NAME_LEN)); - field_list.push_back(new Item_return_int("Seq_in_index",2, MYSQL_TYPE_TINY)); - field_list.push_back(new Item_empty_string("Column_name",NAME_LEN)); - field_list.push_back(item=new Item_empty_string("Collation",1)); - item->maybe_null=1; - field_list.push_back(item=new Item_int("Cardinality",0,21)); - item->maybe_null=1; - field_list.push_back(item=new Item_return_int("Sub_part",3, - MYSQL_TYPE_TINY)); - item->maybe_null=1; - field_list.push_back(item=new Item_empty_string("Packed",10)); - item->maybe_null=1; - field_list.push_back(new Item_empty_string("Null",3)); - field_list.push_back(new Item_empty_string("Index_type",16)); - field_list.push_back(new Item_empty_string("Comment",255)); - item->maybe_null=1; - - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); - - KEY *key_info=table->key_info; - table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK | HA_STATUS_TIME); - for (uint i=0 ; i < table->keys ; i++,key_info++) - { - KEY_PART_INFO *key_part= key_info->key_part; - const char *str; - for (uint j=0 ; j < 
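Throughout these hunks Protocol::send_fields() now takes named flags instead of the old magic second argument (1 or 2); SHOW statements that produce their own result set pass SEND_NUM_ROWS | SEND_EOF. A tiny illustration of the flag style; the numeric values and the comments on each flag are placeholders, not the server's definitions:

enum ProtocolSendFlags : unsigned
{
  SEND_NUM_ROWS_ = 1,
  SEND_DEFAULTS_ = 2,
  SEND_EOF_      = 4
};

static bool send_fields_stub(unsigned flags)
{
  if (flags & SEND_NUM_ROWS_) { /* emit the leading count packet */ }
  if (flags & SEND_DEFAULTS_) { /* include column default values */ }
  if (flags & SEND_EOF_)      { /* finish the metadata with an EOF packet */ }
  return false;               // FALSE == success, matching the new convention
}

/* Typical calls mirroring the hunks above:
   send_fields_stub(SEND_NUM_ROWS_ | SEND_EOF_);   // SHOW ...
   send_fields_stub(SEND_DEFAULTS_ | SEND_EOF_);   // mysqld_list_fields()
*/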
key_info->key_parts ; j++,key_part++) - { - protocol->prepare_for_resend(); - protocol->store(table->table_name, system_charset_info); - protocol->store_tiny((longlong) ((key_info->flags & HA_NOSAME) ? 0 :1)); - protocol->store(key_info->name, system_charset_info); - protocol->store_tiny((longlong) (j+1)); - str=(key_part->field ? key_part->field->field_name : - "?unknown field?"); - protocol->store(str, system_charset_info); - if (table->file->index_flags(i, j, 0) & HA_READ_ORDER) - protocol->store(((key_part->key_part_flag & HA_REVERSE_SORT) ? - "D" : "A"), 1, system_charset_info); - else - protocol->store_null(); /* purecov: inspected */ - KEY *key=table->key_info+i; - if (key->rec_per_key[j]) - { - ha_rows records=(table->file->records / key->rec_per_key[j]); - protocol->store((ulonglong) records); - } - else - protocol->store_null(); - - /* Check if we have a key part that only uses part of the field */ - if (!(key_info->flags & HA_FULLTEXT) && (!key_part->field || - key_part->length != table->field[key_part->fieldnr-1]->key_length())) - protocol->store_tiny((longlong) key_part->length); - else - protocol->store_null(); - protocol->store_null(); // No pack_information yet - - /* Null flag */ - uint flags= key_part->field ? key_part->field->flags : 0; - char *pos=(char*) ((flags & NOT_NULL_FLAG) ? "" : "YES"); - protocol->store((const char*) pos, system_charset_info); - protocol->store(table->file->index_type(i), system_charset_info); - /* Comment */ - if (!table->keys_in_use.is_set(i)) - protocol->store("disabled",8, system_charset_info); - else - protocol->store("", 0, system_charset_info); - if (protocol->write()) - DBUG_RETURN(1); /* purecov: inspected */ - } - } - send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -1060,14 +533,15 @@ void mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) { TABLE *table; + int res; DBUG_ENTER("mysqld_list_fields"); DBUG_PRINT("enter",("table: %s",table_list->real_name)); - if (!(table = open_ltable(thd, table_list, TL_UNLOCK))) - { - send_error(thd); + table_list->lock_type= TL_UNLOCK; + if (open_and_lock_tables(thd, table_list)) DBUG_VOID_RETURN; - } + table= table_list->table; + List<Item> field_list; Field **ptr,*field; @@ -1078,7 +552,8 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) field_list.push_back(new Item_field(field)); } restore_record(table,default_values); // Get empty record - if (thd->protocol->send_fields(&field_list,2)) + if (thd->protocol->send_fields(&field_list, Protocol::SEND_DEFAULTS | + Protocol::SEND_EOF)) DBUG_VOID_RETURN; thd->protocol->flush(); DBUG_VOID_RETURN; @@ -1086,15 +561,15 @@ mysqld_list_fields(THD *thd, TABLE_LIST *table_list, const char *wild) int -mysqld_dump_create_info(THD *thd, TABLE *table, int fd) +mysqld_dump_create_info(THD *thd, TABLE_LIST *table_list, int fd) { Protocol *protocol= thd->protocol; String *packet= protocol->storage_packet(); DBUG_ENTER("mysqld_dump_create_info"); - DBUG_PRINT("enter",("table: %s",table->real_name)); + DBUG_PRINT("enter",("table: %s",table_list->table->real_name)); protocol->prepare_for_resend(); - if (store_create_info(thd, table, packet)) + if (store_create_info(thd, table_list, packet)) DBUG_RETURN(-1); if (fd < 0) @@ -1114,7 +589,7 @@ mysqld_dump_create_info(THD *thd, TABLE *table, int fd) /* Go through all character combinations and ensure that sql_lex.cc can - parse it as an identifer. + parse it as an identifier. 
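The removed SHOW KEYS loop derives Cardinality as the table's row count divided by rec_per_key for the prefix of key parts, and reports Sub_part only when the indexed length is shorter than the column's full key length. Both calculations extracted into plain helpers:

#include <cstdint>

/* 0 stands for "unknown" here; the real code sends store_null() when
   rec_per_key is 0. */
static uint64_t estimated_cardinality(uint64_t table_rows, uint64_t rec_per_key)
{
  return rec_per_key ? table_rows / rec_per_key : 0;
}

/* Sub_part is only reported for prefix indexes. */
static bool is_prefix_part(uint32_t key_part_length, uint32_t field_key_length)
{
  return key_part_length != field_key_length;
}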
SYNOPSIS require_quotes() @@ -1237,7 +712,7 @@ static void append_directory(THD *thd, String *packet, const char *dir_type, #define LIST_PROCESS_HOST_LEN 64 static int -store_create_info(THD *thd, TABLE *table, String *packet) +store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) { List<Item> field_list; char tmp[MAX_FIELD_WIDTH], *for_str, buff[128], *end, *alias; @@ -1245,6 +720,7 @@ store_create_info(THD *thd, TABLE *table, String *packet) Field **ptr,*field; uint primary_key; KEY *key_info; + TABLE *table= table_list->table; handler *file= table->file; HA_CREATE_INFO create_info; my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | @@ -1266,8 +742,11 @@ store_create_info(THD *thd, TABLE *table, String *packet) packet->append("CREATE TEMPORARY TABLE ", 23); else packet->append("CREATE TABLE ", 13); - alias= (lower_case_table_names == 2 ? table->table_name : - table->real_name); + if (table_list->schema_table) + alias= table_list->schema_table_name; + else + alias= (lower_case_table_names == 2 ? table->table_name : + table->real_name); append_identifier(thd, packet, alias, strlen(alias)); packet->append(" (\n", 3); @@ -1329,6 +808,7 @@ store_create_info(THD *thd, TABLE *table, String *packet) field->unireg_check != Field::TIMESTAMP_UN_FIELD; has_default= (field->type() != FIELD_TYPE_BLOB && + !(field->flags & NO_DEFAULT_VALUE_FLAG) && field->unireg_check != Field::NEXT_NUMBER && !((foreign_db_mode || limited_mysql_mode) && has_now_default)); @@ -1530,6 +1010,51 @@ store_create_info(THD *thd, TABLE *table, String *packet) } +static int +view_store_create_info(THD *thd, TABLE_LIST *table, String *buff) +{ + my_bool foreign_db_mode= (thd->variables.sql_mode & (MODE_POSTGRESQL | + MODE_ORACLE | + MODE_MSSQL | + MODE_DB2 | + MODE_MAXDB | + MODE_ANSI)) != 0; + buff->append("CREATE ", 7); + if (!foreign_db_mode) + { + buff->append("ALGORITHM=", 10); + switch((int8)table->algorithm) + { + case VIEW_ALGORITHM_UNDEFINED: + buff->append("UNDEFINED ", 10); + break; + case VIEW_ALGORITHM_TMPTABLE: + buff->append("TEMPTABLE ", 10); + break; + case VIEW_ALGORITHM_MERGE: + buff->append("MERGE ", 6); + break; + default: + DBUG_ASSERT(0); // never should happen + } + } + buff->append("VIEW ", 5); + append_identifier(thd, buff, table->view_db.str, table->view_db.length); + buff->append('.'); + append_identifier(thd, buff, table->view_name.str, table->view_name.length); + buff->append(" AS ", 4); + buff->append(table->query.str, table->query.length); + if (table->with_check != VIEW_CHECK_NONE) + { + if (table->with_check == VIEW_CHECK_LOCAL) + buff->append(" WITH LOCAL CHECK OPTION", 24); + else + buff->append(" WITH CASCADED CHECK OPTION", 27); + } + return 0; +} + + /**************************************************************************** Return info about all processes returns for each thread: thread id, user, host, db, command, info @@ -1537,9 +1062,13 @@ store_create_info(THD *thd, TABLE *table, String *packet) class thread_info :public ilink { public: - static void *operator new(size_t size) {return (void*) sql_alloc((uint) size); } + static void *operator new(size_t size) + { + return (void*) sql_alloc((uint) size); + } static void operator delete(void *ptr __attribute__((unused)), - size_t size __attribute__((unused))) {} /*lint -e715 */ + size_t size __attribute__((unused))) + { TRASH(ptr, size); } ulong thread_id; time_t start_time; @@ -1573,7 +1102,8 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) field->maybe_null=1; 
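view_store_create_info() above rebuilds the CREATE VIEW text from the stored definition: the ALGORITHM clause is skipped in foreign SQL modes and the WITH ... CHECK OPTION suffix depends on the saved check mode. A self-contained approximation; identifier quoting is simplified to plain backticks instead of append_identifier():

#include <string>

enum ViewAlgorithm { ALG_UNDEFINED, ALG_TMPTABLE, ALG_MERGE };
enum ViewCheck     { CHECK_NONE, CHECK_LOCAL, CHECK_CASCADED };

static std::string build_create_view(const std::string &db, const std::string &name,
                                     const std::string &query,
                                     ViewAlgorithm alg, ViewCheck check,
                                     bool foreign_db_mode)
{
  std::string s= "CREATE ";
  if (!foreign_db_mode)                        // ALGORITHM= is MySQL-specific syntax
  {
    static const char *alg_names[]= { "UNDEFINED", "TEMPTABLE", "MERGE" };
    s+= "ALGORITHM=";
    s+= alg_names[alg];
    s+= ' ';
  }
  s+= "VIEW `" + db + "`.`" + name + "` AS " + query;
  if (check == CHECK_LOCAL)
    s+= " WITH LOCAL CHECK OPTION";
  else if (check == CHECK_CASCADED)
    s+= " WITH CASCADED CHECK OPTION";
  return s;
}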
field_list.push_back(field=new Item_empty_string("Info",max_query_length)); field->maybe_null=1; - if (protocol->send_fields(&field_list,1)) + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) DBUG_VOID_RETURN; VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list @@ -1606,7 +1136,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) thd_info->command=(int) tmp->command; if ((mysys_var= tmp->mysys_var)) pthread_mutex_lock(&mysys_var->mutex); - thd_info->proc_info= (char*) (tmp->killed ? "Killed" : 0); + thd_info->proc_info= (char*) (tmp->killed == THD::KILL_CONNECTION? "Killed" : 0); #ifndef EMBEDDED_LIBRARY thd_info->state_info= (char*) (tmp->locked ? "Locked" : tmp->net.reading_or_writing ? @@ -1680,413 +1210,2393 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) Status functions *****************************************************************************/ -static bool write_collation(Protocol *protocol, CHARSET_INFO *cs) + +static bool show_status_array(THD *thd, const char *wild, + show_var_st *variables, + enum enum_var_type value_type, + struct system_status_var *status_var, + const char *prefix, TABLE *table) { - protocol->prepare_for_resend(); - protocol->store(cs->name, system_charset_info); - protocol->store(cs->csname, system_charset_info); - protocol->store_short((longlong) cs->number); - protocol->store((cs->state & MY_CS_PRIMARY) ? "Yes" : "",system_charset_info); - protocol->store((cs->state & MY_CS_COMPILED)? "Yes" : "",system_charset_info); - protocol->store_short((longlong) cs->strxfrm_multiply); - return protocol->write(); + char buff[1024], *prefix_end; + /* the variable name should not be longer then 80 characters */ + char name_buffer[80]; + int len; + LEX_STRING null_lex_str; + DBUG_ENTER("show_status_array"); + + null_lex_str.str= 0; // For sys_var->value_ptr() + null_lex_str.length= 0; + + prefix_end=strnmov(name_buffer, prefix, sizeof(name_buffer)-1); + len=name_buffer + sizeof(name_buffer) - prefix_end; + + for (; variables->name; variables++) + { + strnmov(prefix_end, variables->name, len); + name_buffer[sizeof(name_buffer)-1]=0; /* Safety */ + SHOW_TYPE show_type=variables->type; + if (show_type == SHOW_VARS) + { + show_status_array(thd, wild, (show_var_st *) variables->value, + value_type, status_var, variables->name, table); + } + else + { + if (!(wild && wild[0] && wild_case_compare(system_charset_info, + name_buffer, wild))) + { + char *value=variables->value; + const char *pos, *end; + long nr; + if (show_type == SHOW_SYS) + { + show_type= ((sys_var*) value)->type(); + value= (char*) ((sys_var*) value)->value_ptr(thd, value_type, + &null_lex_str); + } + + pos= end= buff; + switch (show_type) { + case SHOW_LONG_STATUS: + case SHOW_LONG_CONST_STATUS: + value= ((char *) status_var + (ulong) value); + /* fall through */ + case SHOW_LONG: + case SHOW_LONG_CONST: + end= int10_to_str(*(long*) value, buff, 10); + break; + case SHOW_LONGLONG: + end= longlong10_to_str(*(longlong*) value, buff, 10); + break; + case SHOW_HA_ROWS: + end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10); + break; + case SHOW_BOOL: + end= strmov(buff, *(bool*) value ? "ON" : "OFF"); + break; + case SHOW_MY_BOOL: + end= strmov(buff, *(my_bool*) value ? 
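show_status_array() walks a null-terminated array of show_var_st entries, building each full name into a fixed buffer behind the prefix and recursing when an entry is itself a nested SHOW_VARS array; only names that pass the wildcard filter become rows. A simplified standalone walk (the wildcard filter and the per-type value formatting switch are omitted):

#include <iostream>
#include <string>

struct show_var_sketch
{
  const char *name;       // NULL name terminates the array
  const void *value;      // nested array when 'nested' is set, plain value otherwise
  bool nested;            // plays the role of SHOW_VARS
};

static void walk_status(const show_var_sketch *vars, const std::string &prefix)
{
  for (; vars->name; vars++)
  {
    if (vars->nested)
      walk_status(static_cast<const show_var_sketch *>(vars->value), vars->name);
    else
      std::cout << prefix + vars->name << '\n';   // the real code also formats the value
  }
}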
"ON" : "OFF"); + break; + case SHOW_INT_CONST: + case SHOW_INT: + end= int10_to_str((long) *(uint32*) value, buff, 10); + break; + case SHOW_HAVE: + { + SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value; + pos= show_comp_option_name[(int) tmp]; + end= strend(pos); + break; + } + case SHOW_CHAR: + { + if (!(pos= value)) + pos= ""; + end= strend(pos); + break; + } + case SHOW_STARTTIME: + nr= (long) (thd->query_start() - start_time); + end= int10_to_str(nr, buff, 10); + break; + case SHOW_QUESTION: + end= int10_to_str((long) thd->query_id, buff, 10); + break; +#ifdef HAVE_REPLICATION + case SHOW_RPL_STATUS: + end= strmov(buff, rpl_status_type[(int)rpl_status]); + break; + case SHOW_SLAVE_RUNNING: + { + pthread_mutex_lock(&LOCK_active_mi); + end= strmov(buff, (active_mi->slave_running && + active_mi->rli.slave_running) ? "ON" : "OFF"); + pthread_mutex_unlock(&LOCK_active_mi); + break; + } +#endif /* HAVE_REPLICATION */ + case SHOW_OPENTABLES: + end= int10_to_str((long) cached_tables(), buff, 10); + break; + case SHOW_CHAR_PTR: + { + if (!(pos= *(char**) value)) + pos= ""; + end= strend(pos); + break; + } + case SHOW_DOUBLE: + { + end= buff + sprintf(buff, "%f", *(double*) value); + break; + } +#ifdef HAVE_OPENSSL + /* First group - functions relying on CTX */ + case SHOW_SSL_CTX_SESS_ACCEPT: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_accept(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_ACCEPT_GOOD: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_accept_good(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_CONNECT_GOOD: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_connect_good(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_CONNECT_RENEGOTIATE: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd-> ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_CB_HITS: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_cb_hits(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_HITS: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_hits(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_CACHE_FULL: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_cache_full(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_MISSES: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_misses(ssl_acceptor_fd-> + ssl_context)), + buff, 10); + break; + case SHOW_SSL_CTX_SESS_TIMEOUTS: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_SESS_NUMBER: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_SESS_CONNECT: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_SESS_GET_CACHE_SIZE: + end= int10_to_str((long) (!ssl_acceptor_fd ? 
0 : + SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_GET_VERIFY_MODE: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_GET_VERIFY_DEPTH: + end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : + SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context)), + buff,10); + break; + case SHOW_SSL_CTX_GET_SESSION_CACHE_MODE: + if (!ssl_acceptor_fd) + { + pos= "NONE"; + end= pos+4; + break; + } + switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context)) + { + case SSL_SESS_CACHE_OFF: + pos= "OFF"; + break; + case SSL_SESS_CACHE_CLIENT: + pos= "CLIENT"; + break; + case SSL_SESS_CACHE_SERVER: + pos= "SERVER"; + break; + case SSL_SESS_CACHE_BOTH: + pos= "BOTH"; + break; + case SSL_SESS_CACHE_NO_AUTO_CLEAR: + pos= "NO_AUTO_CLEAR"; + break; + case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP: + pos= "NO_INTERNAL_LOOKUP"; + break; + default: + pos= "Unknown"; + break; + } + end= strend(pos); + break; + /* First group - functions relying on SSL */ + case SHOW_SSL_GET_VERSION: + pos= (thd->net.vio->ssl_arg ? + SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); + end= strend(pos); + break; + case SHOW_SSL_SESSION_REUSED: + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_session_reused((SSL*) thd->net.vio-> + ssl_arg) : + 0), + buff, 10); + break; + case SHOW_SSL_GET_DEFAULT_TIMEOUT: + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_default_timeout((SSL*) thd->net.vio-> + ssl_arg) : + 0), + buff, 10); + break; + case SHOW_SSL_GET_VERIFY_MODE: + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_verify_mode((SSL*) thd->net.vio-> + ssl_arg): + 0), + buff, 10); + break; + case SHOW_SSL_GET_VERIFY_DEPTH: + end= int10_to_str((long) (thd->net.vio->ssl_arg ? + SSL_get_verify_depth((SSL*) thd->net.vio-> + ssl_arg): + 0), + buff, 10); + break; + case SHOW_SSL_GET_CIPHER: + pos= (thd->net.vio->ssl_arg ? 
+ SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : "" ); + end= strend(pos); + break; + case SHOW_SSL_GET_CIPHER_LIST: + if (thd->net.vio->ssl_arg) + { + char *to= buff; + for (int i=0 ; i++ ;) + { + const char *p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i); + if (p == NULL) + break; + to= strmov(to, p); + *to++= ':'; + } + if (to != buff) + to--; // Remove last ':' + end= to; + } + break; + +#endif /* HAVE_OPENSSL */ + case SHOW_KEY_CACHE_LONG: + case SHOW_KEY_CACHE_CONST_LONG: + value= (value-(char*) &dflt_key_cache_var)+ (char*) sql_key_cache; + end= int10_to_str(*(long*) value, buff, 10); + break; + case SHOW_UNDEF: // Show never happen + case SHOW_SYS: + break; // Return empty string + default: + break; + } + restore_record(table, default_values); + table->field[0]->store(name_buffer, strlen(name_buffer), + system_charset_info); + table->field[1]->store(pos, (uint32) (end - pos), system_charset_info); + table->file->write_row(table->record[0]); + } + } + } + + DBUG_RETURN(FALSE); } -int mysqld_show_collations(THD *thd, const char *wild) + +bool mysqld_show(THD *thd, const char *wild, show_var_st *variables, + enum enum_var_type value_type, + pthread_mutex_t *mutex, + struct system_status_var *status_var, TABLE *table) { - char buff[8192]; - String packet2(buff,sizeof(buff),thd->charset()); - List<Item> field_list; - CHARSET_INFO **cs; - Protocol *protocol= thd->protocol; + DBUG_ENTER("mysqld_show"); + ha_update_statistics(); /* Export engines statistics */ + pthread_mutex_lock(mutex); + if (show_status_array(thd, wild, variables, value_type, status_var, "", table)) + goto err; + pthread_mutex_unlock(mutex); + DBUG_RETURN(FALSE); + + err: + pthread_mutex_unlock(mutex); + DBUG_RETURN(TRUE); +} - DBUG_ENTER("mysqld_show_charsets"); - field_list.push_back(new Item_empty_string("Collation",30)); - field_list.push_back(new Item_empty_string("Charset",30)); - field_list.push_back(new Item_return_int("Id",11, FIELD_TYPE_SHORT)); - field_list.push_back(new Item_empty_string("Default",30)); - field_list.push_back(new Item_empty_string("Compiled",30)); - field_list.push_back(new Item_return_int("Sortlen",3, FIELD_TYPE_SHORT)); +/* collect status for all running threads */ - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(1); +void calc_sum_of_all_status(STATUS_VAR *to) +{ + DBUG_ENTER("calc_sum_of_all_status"); + + /* Ensure that thread id not killed during loop */ + VOID(pthread_mutex_lock(&LOCK_thread_count)); // For unlink from list + + I_List_iterator<THD> it(threads); + THD *tmp; + + /* Get global values as base */ + *to= global_status_var; + + /* Add to this status from existing threads */ + while ((tmp= it++)) + add_to_status(to, &tmp->status_var); + + VOID(pthread_mutex_unlock(&LOCK_thread_count)); + DBUG_VOID_RETURN; +} + + +LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str, + const char* str, uint length, + bool allocate_lex_string) +{ + MEM_ROOT *mem= thd->mem_root; + if (allocate_lex_string) + lex_str= (LEX_STRING *)thd->alloc(sizeof(LEX_STRING)); + lex_str->str= strmake_root(mem, str, length); + lex_str->length= length; + return lex_str; +} + + +/* INFORMATION_SCHEMA name */ +LEX_STRING information_schema_name= {(char*)"information_schema", 18}; +extern ST_SCHEMA_TABLE schema_tables[]; + +typedef struct st_index_field_values +{ + const char *db_value, *table_value; +} INDEX_FIELD_VALUES; + + +void get_index_field_values(LEX *lex, INDEX_FIELD_VALUES *index_field_values) +{ + const char *wild= lex->wild ? 
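The SHOW_SSL_GET_CIPHER_LIST case concatenates successive SSL_get_cipher_list() results with ':' separators and trims the trailing one. A generic version of that join over any indexed getter that returns a null pointer at the end:

#include <functional>
#include <string>

static std::string join_indexed(const std::function<const char *(int)> &get)
{
  std::string out;
  for (int i= 0; ; i++)
  {
    const char *p= get(i);              // SSL_get_cipher_list(ssl, i) in the real code
    if (p == nullptr)
      break;
    if (!out.empty())
      out+= ':';                        // the original appends first, then trims the last ':'
    out+= p;
  }
  return out;
}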
lex->wild->ptr() : NullS; + switch (lex->orig_sql_command) { + case SQLCOM_SHOW_DATABASES: + index_field_values->db_value= wild; + break; + case SQLCOM_SHOW_TABLES: + case SQLCOM_SHOW_TABLE_STATUS: + index_field_values->db_value= lex->current_select->db; + index_field_values->table_value= wild; + break; + default: + index_field_values->db_value= NullS; + index_field_values->table_value= NullS; + break; + } +} + + +int make_table_list(THD *thd, SELECT_LEX *sel, + char *db, char *table) +{ + Table_ident *table_ident; + LEX_STRING ident_db, ident_table; + ident_db.str= db; + ident_db.length= strlen(db); + ident_table.str= table; + ident_table.length= strlen(table); + table_ident= new Table_ident(thd, ident_db, ident_table, 1); + sel->init_query(); + if(!sel->add_table_to_list(thd, table_ident, 0, 0, TL_READ, + (List<String> *) 0, (List<String> *) 0)) + return 1; + return 0; +} + + +bool uses_only_table_name_fields(Item *item, TABLE_LIST *table) +{ + if (item->type() == Item::FUNC_ITEM) + { + Item_func *item_func= (Item_func*)item; + Item **child; + Item **item_end= (item_func->arguments()) + item_func->argument_count(); + for (child= item_func->arguments(); child != item_end; child++) + { + if (!uses_only_table_name_fields(*child, table)) + return 0; + } + } + else if (item->type() == Item::FIELD_ITEM) + { + Item_field *item_field= (Item_field*)item; + CHARSET_INFO *cs= system_charset_info; + ST_SCHEMA_TABLE *schema_table= table->schema_table; + ST_FIELD_INFO *field_info= schema_table->fields_info; + const char *field_name1= field_info[schema_table->idx_field1].field_name; + const char *field_name2= field_info[schema_table->idx_field2].field_name; + if (table->table != item_field->field->table || + (cs->coll->strnncollsp(cs, (uchar *) field_name1, strlen(field_name1), + (uchar *) item_field->field_name, + strlen(item_field->field_name), 0) && + cs->coll->strnncollsp(cs, (uchar *) field_name2, strlen(field_name2), + (uchar *) item_field->field_name, + strlen(item_field->field_name), 0))) + return 0; + } + return 1; +} + + +static COND * make_cond_for_info_schema(COND *cond, TABLE_LIST *table) +{ + if (!cond) + return (COND*) 0; + if (cond->type() == Item::COND_ITEM) + { + if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC) + { + /* Create new top level AND item */ + Item_cond_and *new_cond=new Item_cond_and; + if (!new_cond) + return (COND*) 0; + List_iterator<Item> li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item=li++)) + { + Item *fix= make_cond_for_info_schema(item, table); + if (fix) + new_cond->argument_list()->push_back(fix); + } + switch (new_cond->argument_list()->elements) { + case 0: + return (COND*) 0; + case 1: + return new_cond->argument_list()->head(); + default: + new_cond->quick_fix_field(); + return new_cond; + } + } + else + { // Or list + Item_cond_or *new_cond=new Item_cond_or; + if (!new_cond) + return (COND*) 0; + List_iterator<Item> li(*((Item_cond*) cond)->argument_list()); + Item *item; + while ((item=li++)) + { + Item *fix=make_cond_for_info_schema(item, table); + if (!fix) + return (COND*) 0; + new_cond->argument_list()->push_back(fix); + } + new_cond->quick_fix_field(); + new_cond->top_level_item(); + return new_cond; + } + } + + if (!uses_only_table_name_fields(cond, table)) + return (COND*) 0; + return cond; +} + + +enum enum_schema_tables get_schema_table_idx(ST_SCHEMA_TABLE *schema_table) +{ + return (enum enum_schema_tables) (schema_table - &schema_tables[0]); +} + + +/* + Add 'information_schema' name to db_names list + + 
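make_cond_for_info_schema() keeps only the parts of the WHERE clause that reference the schema table's two lookup columns, so the surviving condition can be evaluated before any base table is opened: an AND keeps whichever children survive, while an OR survives only if every child does. A toy expression tree (not the server's Item classes) showing the same pruning rule; the column names stand in for the fields selected by idx_field1/idx_field2:

#include <memory>
#include <string>
#include <vector>

struct Expr
{
  enum Kind { LEAF, AND, OR } kind;
  std::vector<std::string> columns;               // columns referenced by a LEAF
  std::vector<std::shared_ptr<Expr>> children;    // children of AND/OR
};

static bool leaf_is_pushable(const Expr &e)
{
  for (const std::string &c : e.columns)
    if (c != "TABLE_SCHEMA" && c != "TABLE_NAME") // placeholder column names
      return false;
  return true;
}

static std::shared_ptr<Expr> prune_for_schema(const std::shared_ptr<Expr> &e)
{
  if (!e)
    return nullptr;
  if (e->kind == Expr::LEAF)
    return leaf_is_pushable(*e) ? e : nullptr;

  auto out= std::make_shared<Expr>(*e);
  out->children.clear();
  for (const auto &child : e->children)
  {
    auto fixed= prune_for_schema(child);
    if (fixed)
      out->children.push_back(fixed);
    else if (e->kind == Expr::OR)       // one unusable OR branch kills the whole OR
      return nullptr;
  }
  if (out->children.empty())
    return nullptr;
  if (out->children.size() == 1)
    return out->children.front();
  return out;
}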
SYNOPSIS + schema_db_add() + thd thread handler + files list of db names + wild wild string + with_i_schema returns 1 if we added 'IS' name to list + otherwise returns 0 + + RETURN + 1 error + 0 success +*/ + +int schema_db_add(THD *thd, List<char> *files, + const char *wild, bool *with_i_schema) +{ + *with_i_schema= 0; + if (!wild || !wild_compare(information_schema_name.str, wild, 0)) + { + *with_i_schema= 1; + if (files->push_back(thd->strdup(information_schema_name.str))) + return 1; + } + return 0; +} + + +int schema_tables_add(THD *thd, List<char> *files, const char *wild) +{ + ST_SCHEMA_TABLE *tmp_schema_table= schema_tables; + for ( ; tmp_schema_table->table_name; tmp_schema_table++) + { + if (tmp_schema_table->hidden) + continue; + if (wild) + { + if (lower_case_table_names) + { + if (wild_case_compare(files_charset_info, + tmp_schema_table->table_name, + wild)) + continue; + } + else if (wild_compare(tmp_schema_table->table_name, wild, 0)) + continue; + } + if (files->push_back(thd->strdup(tmp_schema_table->table_name))) + return 1; + } + return 0; +} + + +int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + LEX *lex= thd->lex; + TABLE *table= tables->table; + SELECT_LEX *select_lex= &lex->select_lex; + SELECT_LEX *lsel= tables->schema_select_lex; + ST_SCHEMA_TABLE *schema_table= tables->schema_table; + DBUG_ENTER("fill_schema_tables"); + + if (lsel) + { + TABLE *old_open_tables= thd->open_tables; + TABLE_LIST *show_table_list= (TABLE_LIST*) lsel->table_list.first; + lex->all_selects_list= lsel; + bool res= open_and_lock_tables(thd, show_table_list); + if (schema_table->process_table(thd, show_table_list, + table, res, show_table_list->db, + show_table_list->real_name)) + { + DBUG_RETURN(1); + } + close_thread_tables(thd, 0, 0, old_open_tables); + show_table_list->table= 0; + lex->all_selects_list= select_lex; + DBUG_RETURN(0); + } + + SELECT_LEX sel; + INDEX_FIELD_VALUES idx_field_vals; + char path[FN_REFLEN], *end= 0, *base_name, *file_name; + uint len= 0; + bool with_i_schema; + List<char> bases; + lex->all_selects_list= &sel; + enum enum_schema_tables schema_table_idx= get_schema_table_idx(schema_table); + thr_lock_type lock_type= TL_UNLOCK; + if (schema_table_idx == SCH_TABLES) + lock_type= TL_READ; + get_index_field_values(lex, &idx_field_vals); + + /* information schema name always is first in list */ + if (schema_db_add(thd, &bases, idx_field_vals.db_value, &with_i_schema)) + return 1; + + if (mysql_find_files(thd, &bases, NullS, mysql_data_home, + idx_field_vals.db_value, 1)) + return 1; + List_iterator_fast<char> it(bases); + COND *partial_cond= make_cond_for_info_schema(cond, tables); + while ((base_name=it++) || + /* + generate error for non existing database. 
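schema_db_add() puts 'information_schema' at the head of the database list whenever it matches the pattern, and the directory scan result is appended after it. A standalone sketch of that ordering; sql_like() is a deliberately minimal stand-in for wild_compare():

#include <string>
#include <vector>

/* Minimal '%'/'_' pattern matcher, for illustration only. */
static bool sql_like(const char *s, const char *w)
{
  if (!*w) return !*s;
  if (*w == '%') return sql_like(s, w + 1) || (*s && sql_like(s + 1, w));
  if (!*s) return false;
  return (*w == '_' || *w == *s) && sql_like(s + 1, w + 1);
}

static std::vector<std::string> list_databases(const std::vector<std::string> &disk_dbs,
                                               const std::string &wild)
{
  std::vector<std::string> out;
  if (wild.empty() || sql_like("information_schema", wild.c_str()))
    out.push_back("information_schema");          // always first when it matches
  for (const std::string &db : disk_dbs)
    if (wild.empty() || sql_like(db.c_str(), wild.c_str()))
      out.push_back(db);
  return out;
}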
+ (to save old behaviour for SHOW TABLES FROM db) + */ + ((lex->orig_sql_command == SQLCOM_SHOW_TABLES || + lex->orig_sql_command == SQLCOM_SHOW_TABLE_STATUS) && + (base_name= select_lex->db) && !bases.elements)) + { +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (with_i_schema || // don't check the rights if information schema db + !check_access(thd,SELECT_ACL, base_name, &thd->col_access,0,1) || + thd->master_access & (DB_ACLS | SHOW_DB_ACL) || + acl_get(thd->host, thd->ip, thd->priv_user, base_name,0) || + (grant_option && !check_grant_db(thd, base_name))) +#endif + { + List<char> files; + if (with_i_schema) // information schema table names + { + if (schema_tables_add(thd, &files, idx_field_vals.table_value)) + DBUG_RETURN(1); + } + else + { + strxmov(path, mysql_data_home, "/", base_name, NullS); + end= path + (len= unpack_dirname(path,path)); + len= FN_LEN - len; + if (mysql_find_files(thd, &files, base_name, + path, idx_field_vals.table_value, 0)) + DBUG_RETURN(1); + } + + List_iterator_fast<char> it(files); + while ((file_name=it++)) + { + restore_record(table, default_values); + table->field[schema_table->idx_field1]-> + store(base_name, strlen(base_name), system_charset_info); + table->field[schema_table->idx_field2]-> + store(file_name, strlen(file_name),system_charset_info); + if (!partial_cond || partial_cond->val_int()) + { + if (schema_table_idx == SCH_TABLE_NAMES) + { + if (lex->verbose || lex->orig_sql_command == SQLCOM_END) + { + if (with_i_schema) + { + table->field[3]->store("TEMPORARY", 9, system_charset_info); + } + else + { + my_snprintf(end, len, "/%s%s", file_name, reg_ext); + switch (mysql_frm_type(path)) + { + case FRMTYPE_ERROR: + table->field[3]->store("ERROR", 5, system_charset_info); + break; + case FRMTYPE_TABLE: + table->field[3]->store("BASE TABLE", 10, system_charset_info); + break; + case FRMTYPE_VIEW: + table->field[3]->store("VIEW", 4, system_charset_info); + break; + default: + DBUG_ASSERT(0); + } + } + } + table->file->write_row(table->record[0]); + } + else + { + int res; + TABLE *old_open_tables= thd->open_tables; + if (make_table_list(thd, &sel, base_name, file_name)) + DBUG_RETURN(1); + TABLE_LIST *show_table_list= (TABLE_LIST*) sel.table_list.first; + show_table_list->lock_type= lock_type; + res= open_and_lock_tables(thd, show_table_list); + if (schema_table->process_table(thd, show_table_list, table, + res, base_name, file_name)) + { + DBUG_RETURN(1); + } + close_thread_tables(thd, 0, 0, old_open_tables); + } + } + } + /* + If we have information schema its always the first table and only + the first table. Reset for other tables. 
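For the TABLE_NAMES case above the TABLE_TYPE column is decided from the .frm file alone, without opening the table, and INFORMATION_SCHEMA's own tables are reported as TEMPORARY. The mapping in isolation; the enum is a stand-in for the FRMTYPE_* values:

enum FrmTypeSketch { FRM_ERROR_, FRM_TABLE_, FRM_VIEW_ };

static const char *table_type_name(FrmTypeSketch t, bool in_information_schema)
{
  if (in_information_schema)          // I_S tables are listed as TEMPORARY above
    return "TEMPORARY";
  switch (t)
  {
  case FRM_TABLE_: return "BASE TABLE";
  case FRM_VIEW_:  return "VIEW";
  default:         return "ERROR";    // unreadable or unrecognized .frm
  }
}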
+ */ + with_i_schema= 0; + } + } + lex->all_selects_list= select_lex; + DBUG_RETURN(0); +} + + +void store_schema_shemata(TABLE *table, const char *db_name, + const char* cs_name) +{ + restore_record(table, default_values); + table->field[1]->store(db_name, strlen(db_name), system_charset_info); + table->field[2]->store(cs_name, strlen(cs_name), system_charset_info); + table->file->write_row(table->record[0]); +} + + +int fill_schema_shemata(THD *thd, TABLE_LIST *tables, COND *cond) +{ + char path[FN_REFLEN],*end; + bool found_libchar; + INDEX_FIELD_VALUES idx_field_vals; + List<char> files; + char *file_name; + uint length; + bool with_i_schema; + HA_CREATE_INFO create; + TABLE *table= tables->table; + + get_index_field_values(thd->lex, &idx_field_vals); + /* information schema name always is first in list */ + if (schema_db_add(thd, &files, idx_field_vals.db_value, &with_i_schema)) + return 1; + if (mysql_find_files(thd, &files, NullS, mysql_data_home, + idx_field_vals.db_value, 1)) + return 1; + List_iterator_fast<char> it(files); + while ((file_name=it++)) + { + if (with_i_schema) // information schema name is always first in list + { + store_schema_shemata(table, file_name, system_charset_info->csname); + with_i_schema= 0; + continue; + } +#ifndef NO_EMBEDDED_ACCESS_CHECKS + if (thd->master_access & (DB_ACLS | SHOW_DB_ACL) || + acl_get(thd->host, thd->ip, thd->priv_user, file_name,0) || + (grant_option && !check_grant_db(thd, file_name))) +#endif + { + strxmov(path, mysql_data_home, "/", file_name, NullS); + length=unpack_dirname(path,path); // Convert if not unix + found_libchar= 0; + if (length && path[length-1] == FN_LIBCHAR) + { + found_libchar= 1; + path[length-1]=0; // remove ending '\' + } + + if (found_libchar) + path[length-1]= FN_LIBCHAR; + strmov(path+length, MY_DB_OPT_FILE); + load_db_opt(thd, path, &create); + store_schema_shemata(table, file_name, + create.default_table_charset->csname); + } + } + return 0; +} + + +static int get_schema_tables_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + const char *tmp_buff; + TIME time; + CHARSET_INFO *cs= system_charset_info; + + DBUG_ENTER("get_schema_tables_record"); + restore_record(table, default_values); + table->field[1]->store(base_name, strlen(base_name), cs); + table->field[2]->store(file_name, strlen(file_name), cs); + if (res) + { + /* + there was errors during opening tables + */ + const char *error= thd->net.last_error; + table->field[20]->store(error, strlen(error), cs); + thd->clear_error(); + } + else if (tables->view) + { + table->field[3]->store("VIEW", 4, cs); + table->field[20]->store("view", 4, cs); + } + else + { + TABLE *show_table= tables->table; + handler *file= show_table->file; + file->info(HA_STATUS_VARIABLE | HA_STATUS_TIME | HA_STATUS_NO_LOCK); + if (show_table->tmp_table == TMP_TABLE) + table->field[3]->store("TEMPORARY", 9, cs); + else + table->field[3]->store("BASE TABLE", 10, cs); + + for (int i= 4; i < 20; i++) + { + if (i == 7 || (i > 12 && i < 17) || i == 18) + continue; + table->field[i]->set_notnull(); + } + tmp_buff= file->table_type(); + table->field[4]->store(tmp_buff, strlen(tmp_buff), cs); + table->field[5]->store((longlong) show_table->frm_version); + tmp_buff= ((show_table->db_options_in_use & + HA_OPTION_COMPRESS_RECORD) ? "Compressed" : + (show_table->db_options_in_use & HA_OPTION_PACK_RECORD) ? 
+ "Dynamic" : "Fixed"); + table->field[6]->store(tmp_buff, strlen(tmp_buff), cs); + if (!tables->schema_table) + { + table->field[7]->store((longlong) file->records); + table->field[7]->set_notnull(); + } + table->field[8]->store((longlong) file->mean_rec_length); + table->field[9]->store((longlong) file->data_file_length); + if (file->max_data_file_length) + { + table->field[10]->store((longlong) file->max_data_file_length); + } + table->field[11]->store((longlong) file->index_file_length); + table->field[12]->store((longlong) file->delete_length); + if (show_table->found_next_number_field) + { + show_table->next_number_field=show_table->found_next_number_field; + show_table->next_number_field->reset(); + file->update_auto_increment(); + table->field[13]->store((longlong) show_table-> + next_number_field->val_int()); + table->field[13]->set_notnull(); + show_table->next_number_field=0; + } + if (file->create_time) + { + thd->variables.time_zone->gmt_sec_to_TIME(&time, + file->create_time); + table->field[14]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + table->field[14]->set_notnull(); + } + if (file->update_time) + { + thd->variables.time_zone->gmt_sec_to_TIME(&time, + file->update_time); + table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + table->field[15]->set_notnull(); + } + if (file->check_time) + { + thd->variables.time_zone->gmt_sec_to_TIME(&time, file->check_time); + table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + table->field[16]->set_notnull(); + } + tmp_buff= (show_table->table_charset ? show_table-> + table_charset->name : "default"); + table->field[17]->store(tmp_buff, strlen(tmp_buff), cs); + if (file->table_flags() & (ulong) HA_HAS_CHECKSUM) + { + table->field[18]->store((longlong) file->checksum()); + table->field[18]->set_notnull(); + } + + char option_buff[350],*ptr; + ptr=option_buff; + if (show_table->min_rows) + { + ptr=strmov(ptr," min_rows="); + ptr=longlong10_to_str(show_table->min_rows,ptr,10); + } + if (show_table->max_rows) + { + ptr=strmov(ptr," max_rows="); + ptr=longlong10_to_str(show_table->max_rows,ptr,10); + } + if (show_table->avg_row_length) + { + ptr=strmov(ptr," avg_row_length="); + ptr=longlong10_to_str(show_table->avg_row_length,ptr,10); + } + if (show_table->db_create_options & HA_OPTION_PACK_KEYS) + ptr=strmov(ptr," pack_keys=1"); + if (show_table->db_create_options & HA_OPTION_NO_PACK_KEYS) + ptr=strmov(ptr," pack_keys=0"); + if (show_table->db_create_options & HA_OPTION_CHECKSUM) + ptr=strmov(ptr," checksum=1"); + if (show_table->db_create_options & HA_OPTION_DELAY_KEY_WRITE) + ptr=strmov(ptr," delay_key_write=1"); + if (show_table->row_type != ROW_TYPE_DEFAULT) + ptr=strxmov(ptr, " row_format=", + ha_row_type[(uint) show_table->row_type], + NullS); + if (file->raid_type) + { + char buff[100]; + my_snprintf(buff,sizeof(buff), + " raid_type=%s raid_chunks=%d raid_chunksize=%ld", + my_raid_type(file->raid_type), file->raid_chunks, + file->raid_chunksize/RAID_BLOCK_SIZE); + ptr=strmov(ptr,buff); + } + table->field[19]->store(option_buff+1, + (ptr == option_buff ? 
0 : + (uint) (ptr-option_buff)-1), cs); + { + char *comment= show_table->file-> + update_table_comment(show_table->comment); + if (comment) + { + table->field[20]->store(comment, strlen(comment), cs); + if (comment != show_table->comment) + my_free(comment,MYF(0)); + } + } + } + table->file->write_row(table->record[0]); + DBUG_RETURN(0); +} + + +static int get_schema_column_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + TIME time; + LEX *lex= thd->lex; + const char *wild= lex->wild ? lex->wild->ptr() : NullS; + CHARSET_INFO *cs= system_charset_info; + DBUG_ENTER("get_schema_column_record"); + if (res) + { + if (lex->orig_sql_command != SQLCOM_SHOW_FIELDS) + { + /* + I.e. we are in SELECT FROM INFORMATION_SCHEMA.COLUMS + rather than in SHOW COLUMNS + */ + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + res= 0; + } + DBUG_RETURN(res); + } + TABLE *show_table= tables->table; + handler *file= show_table->file; + file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); + restore_record(show_table, default_values); + Field **ptr,*field; + int count= 0; + for (ptr=show_table->field; (field= *ptr) ; ptr++) + { + if (!wild || !wild[0] || + !wild_case_compare(system_charset_info, field->field_name,wild)) + { + uint tmp_length; + const char *tmp_buff; + byte *pos; + uint flags=field->flags; + char tmp[MAX_FIELD_WIDTH]; + char tmp1[MAX_FIELD_WIDTH]; + String type(tmp,sizeof(tmp), system_charset_info); + char tmp_buffer[128]; + count++; + restore_record(table, default_values); + table->field[1]->store(base_name, strlen(base_name), cs); + table->field[2]->store(file_name, strlen(file_name), cs); + table->field[3]->store(field->field_name, strlen(field->field_name), + cs); + table->field[4]->store((longlong) count); + field->sql_type(type); + table->field[14]->store(type.ptr(), type.length(), cs); + tmp_buff= strchr(type.ptr(), '('); + table->field[7]->store(type.ptr(), + (tmp_buff ? tmp_buff - type.ptr() : + type.length()), cs); + if (show_table->timestamp_field == field && + field->unireg_check != Field::TIMESTAMP_UN_FIELD) + { + table->field[5]->store("CURRENT_TIMESTAMP", 17, cs); + table->field[5]->set_notnull(); + } + else if (field->unireg_check != Field::NEXT_NUMBER && + !field->is_null() && + !(field->flags & NO_DEFAULT_VALUE_FLAG)) + { + String def(tmp1,sizeof(tmp1), cs); + type.set(tmp, sizeof(tmp), field->charset()); + field->val_str(&type); + uint dummy_errors; + def.copy(type.ptr(), type.length(), type.charset(), cs, &dummy_errors); + table->field[5]->store(def.ptr(), def.length(), def.charset()); + table->field[5]->set_notnull(); + } + else if (field->unireg_check == Field::NEXT_NUMBER || + field->maybe_null()) + table->field[5]->set_null(); // Null as default + else + { + table->field[5]->store("",0, cs); + table->field[5]->set_notnull(); + } + pos=(byte*) ((flags & NOT_NULL_FLAG) && + field->type() != FIELD_TYPE_TIMESTAMP ? + "NO" : "YES"); + table->field[6]->store((const char*) pos, + strlen((const char*) pos), cs); + if (field->has_charset()) + table->field[8]->store((longlong) field->field_length/ + field->charset()->mbmaxlen); + else + table->field[8]->store((longlong) field->field_length); + table->field[9]->store((longlong) field->field_length); + + { + uint dec =field->decimals(); + switch (field->type()) { + case FIELD_TYPE_DECIMAL: + { + uint int_part=field->field_length - (dec ? 
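get_schema_column_record() demotes an open failure to a warning when the row is produced for a SELECT on INFORMATION_SCHEMA (orig_sql_command != SQLCOM_SHOW_FIELDS), so the scan over the remaining tables continues; the plain SHOW statement still fails. The decision in isolation, with stand-in types:

enum RowOrigin { FROM_SHOW_COMMAND, FROM_INFORMATION_SCHEMA_SELECT };

/* Returns TRUE when the failure should abort the statement. */
static bool handle_open_failure(RowOrigin origin, bool open_failed)
{
  if (!open_failed)
    return false;
  if (origin == FROM_INFORMATION_SCHEMA_SELECT)
  {
    /* push_warning(...) and thd->clear_error() in the real code;
       the row is skipped and the scan continues */
    return false;
  }
  return true;                        // plain SHOW still reports the error
}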
dec + 1 : 0); + table->field[10]->store((longlong) (int_part + dec - 1)); + table->field[10]->set_notnull(); + table->field[11]->store((longlong) field->decimals()); + table->field[11]->set_notnull(); + } + break; + case FIELD_TYPE_TINY: + case FIELD_TYPE_SHORT: + case FIELD_TYPE_LONG: + case FIELD_TYPE_LONGLONG: + case FIELD_TYPE_INT24: + case FIELD_TYPE_FLOAT: + case FIELD_TYPE_DOUBLE: + { + table->field[10]->store((longlong) field->field_length); + table->field[10]->set_notnull(); + if (dec != NOT_FIXED_DEC) + { + table->field[11]->store((longlong) dec); + table->field[11]->set_notnull(); + } + } + break; + default: + break; + } + } + if (field->has_charset()) + { + pos=(byte*) field->charset()->csname; + table->field[12]->store((const char*) pos, + strlen((const char*) pos), cs); + table->field[12]->set_notnull(); + pos=(byte*) field->charset()->name; + table->field[13]->store((const char*) pos, + strlen((const char*) pos), cs); + table->field[13]->set_notnull(); + } + pos=(byte*) ((field->flags & PRI_KEY_FLAG) ? "PRI" : + (field->flags & UNIQUE_KEY_FLAG) ? "UNI" : + (field->flags & MULTIPLE_KEY_FLAG) ? "MUL":""); + table->field[15]->store((const char*) pos, + strlen((const char*) pos), cs); + char *end=tmp; + if (field->unireg_check == Field::NEXT_NUMBER) + end=strmov(tmp,"auto_increment"); + table->field[16]->store(tmp, (uint) (end-tmp), cs); + + end=tmp; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + uint col_access; + check_access(thd,SELECT_ACL | EXTRA_ACL, base_name, + &tables->grant.privilege, 0, 0); + col_access= get_column_grant(thd, &tables->grant, tables->db, + tables->real_name, + field->field_name) & COL_ACLS; + for (uint bitnr=0; col_access ; col_access>>=1,bitnr++) + { + if (col_access & 1) + { + *end++=','; + end=strmov(end,grant_types.type_names[bitnr]); + } + } +#else + end=strmov(end,""); +#endif + table->field[17]->store(tmp+1,end == tmp ? 0 : (uint) (end-tmp-1), cs); + table->field[18]->store(field->comment.str, field->comment.length, cs); + table->file->write_row(table->record[0]); + } + } + DBUG_RETURN(0); +} + + + +int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond) +{ + CHARSET_INFO **cs; + const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS; + TABLE *table= tables->table; + CHARSET_INFO *scs= system_charset_info; + for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) + { + CHARSET_INFO *tmp_cs= cs[0]; + if (tmp_cs && (tmp_cs->state & MY_CS_PRIMARY) && + (tmp_cs->state & MY_CS_AVAILABLE) && + !(wild && wild[0] && + wild_case_compare(scs, tmp_cs->csname,wild))) + { + restore_record(table, default_values); + table->field[0]->store(tmp_cs->csname, strlen(tmp_cs->csname), scs); + table->field[1]->store(tmp_cs->name, strlen(tmp_cs->name), scs); + table->field[2]->store(tmp_cs->comment ? tmp_cs->comment : "", + strlen(tmp_cs->comment ? tmp_cs->comment : ""), + scs); + table->field[3]->store((longlong) tmp_cs->mbmaxlen); + table->file->write_row(table->record[0]); + } + } + return 0; +} + + +int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond) +{ + CHARSET_INFO **cs; + const char *wild= thd->lex->wild ? 
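The column record computes CHARACTER_MAXIMUM_LENGTH by dividing the byte length by the charset's mbmaxlen, and for the old DECIMAL type derives the precision from the display length minus the position taken by the decimal point. Both calculations as plain helpers, mirroring the arithmetic above rather than stating a general definition:

#include <cstdint>

static uint64_t character_maximum_length(uint64_t field_byte_length,
                                         bool has_charset, unsigned mbmaxlen)
{
  return has_charset ? field_byte_length / mbmaxlen : field_byte_length;
}

/* field_length is the display width; one position is consumed by the decimal
   point when there are decimals, and one more is subtracted exactly as in the
   record function above. */
static unsigned old_decimal_precision(unsigned field_length, unsigned decimals)
{
  unsigned int_part= field_length - (decimals ? decimals + 1 : 0);
  return int_part + decimals - 1;
}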
thd->lex->wild->ptr() : NullS; + TABLE *table= tables->table; + CHARSET_INFO *scs= system_charset_info; for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) { CHARSET_INFO **cl; - if (!cs[0] || !(cs[0]->state & MY_CS_AVAILABLE) || - !(cs[0]->state & MY_CS_PRIMARY)) + CHARSET_INFO *tmp_cs= cs[0]; + if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) || + !(tmp_cs->state & MY_CS_PRIMARY)) continue; for ( cl= all_charsets; cl < all_charsets+255 ;cl ++) { - if (!cl[0] || !(cl[0]->state & MY_CS_AVAILABLE) || - !my_charset_same(cs[0],cl[0])) + CHARSET_INFO *tmp_cl= cl[0]; + if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) || + !my_charset_same(tmp_cs, tmp_cl)) continue; if (!(wild && wild[0] && - wild_case_compare(system_charset_info,cl[0]->name,wild))) + wild_case_compare(scs, tmp_cl->name,wild))) { - if (write_collation(protocol, cl[0])) - goto err; + const char *tmp_buff; + restore_record(table, default_values); + table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs); + table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs); + table->field[2]->store((longlong) tmp_cl->number); + tmp_buff= (tmp_cl->state & MY_CS_PRIMARY) ? "Yes" : ""; + table->field[3]->store(tmp_buff, strlen(tmp_buff), scs); + tmp_buff= (tmp_cl->state & MY_CS_COMPILED)? "Yes" : ""; + table->field[4]->store(tmp_buff, strlen(tmp_buff), scs); + table->field[5]->store((longlong) tmp_cl->strxfrm_multiply); + table->file->write_row(table->record[0]); } } } - send_eof(thd); - DBUG_RETURN(0); -err: - DBUG_RETURN(1); + return 0; } -static bool write_charset(Protocol *protocol, CHARSET_INFO *cs) -{ - protocol->prepare_for_resend(); - protocol->store(cs->csname, system_charset_info); - protocol->store(cs->comment ? cs->comment : "", system_charset_info); - protocol->store(cs->name, system_charset_info); - protocol->store_short((longlong) cs->mbmaxlen); - return protocol->write(); -} -int mysqld_show_charsets(THD *thd, const char *wild) +int fill_schema_coll_charset_app(THD *thd, TABLE_LIST *tables, COND *cond) { - char buff[8192]; - String packet2(buff,sizeof(buff),thd->charset()); - List<Item> field_list; CHARSET_INFO **cs; - Protocol *protocol= thd->protocol; + const char *wild= NullS; + TABLE *table= tables->table; + CHARSET_INFO *scs= system_charset_info; + for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) + { + CHARSET_INFO **cl; + CHARSET_INFO *tmp_cs= cs[0]; + if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) || + !(tmp_cs->state & MY_CS_PRIMARY)) + continue; + for ( cl= all_charsets; cl < all_charsets+255 ;cl ++) + { + CHARSET_INFO *tmp_cl= cl[0]; + if (!tmp_cl || !(tmp_cl->state & MY_CS_AVAILABLE) || + !my_charset_same(tmp_cs,tmp_cl)) + continue; + restore_record(table, default_values); + table->field[0]->store(tmp_cl->name, strlen(tmp_cl->name), scs); + table->field[1]->store(tmp_cl->csname , strlen(tmp_cl->csname), scs); + table->file->write_row(table->record[0]); + } + } + return 0; +} - DBUG_ENTER("mysqld_show_charsets"); - field_list.push_back(new Item_empty_string("Charset",30)); - field_list.push_back(new Item_empty_string("Description",60)); - field_list.push_back(new Item_empty_string("Default collation",60)); - field_list.push_back(new Item_return_int("Maxlen",3, FIELD_TYPE_SHORT)); +void store_schema_proc(THD *thd, TABLE *table, + TABLE *proc_table, + const char *wild) +{ + String tmp_string; + TIME time; + LEX *lex= thd->lex; + CHARSET_INFO *cs= system_charset_info; + restore_record(table, default_values); + if (lex->orig_sql_command == SQLCOM_SHOW_STATUS_PROC && + 
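fill_schema_coll_charset_app() enumerates every (collation, character set) pair by walking the collation array once per primary character set and keeping the entries that my_charset_same() groups together. A container-based sketch of that pairing:

#include <string>
#include <utility>
#include <vector>

struct CollationSketch { std::string name, csname; bool primary, available; };

static std::vector<std::pair<std::string, std::string>>
collation_charset_applicability(const std::vector<CollationSketch> &all)
{
  std::vector<std::pair<std::string, std::string>> rows;
  for (const CollationSketch &cs : all)
  {
    if (!cs.available || !cs.primary)              // outer loop: one pass per character set
      continue;
    for (const CollationSketch &cl : all)
      if (cl.available && cl.csname == cs.csname)  // my_charset_same() in the real code
        rows.emplace_back(cl.name, cl.csname);     // (COLLATION_NAME, CHARACTER_SET_NAME)
  }
  return rows;
}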
proc_table->field[2]->val_int() == TYPE_ENUM_PROCEDURE || + lex->orig_sql_command == SQLCOM_SHOW_STATUS_FUNC && + proc_table->field[2]->val_int() == TYPE_ENUM_FUNCTION || + lex->orig_sql_command == SQLCOM_END) + { + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[1], &tmp_string); + if (!wild || !wild[0] || !wild_compare(tmp_string.ptr(), wild, 0)) + { + table->field[3]->store(tmp_string.ptr(), tmp_string.length(), cs); + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[3], &tmp_string); + table->field[0]->store(tmp_string.ptr(), tmp_string.length(), cs); + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[0], &tmp_string); + table->field[2]->store(tmp_string.ptr(), tmp_string.length(), cs); + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[2], &tmp_string); + table->field[4]->store(tmp_string.ptr(), tmp_string.length(), cs); + if (proc_table->field[2]->val_int() == TYPE_ENUM_FUNCTION) + { + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[9], &tmp_string); + table->field[5]->store(tmp_string.ptr(), tmp_string.length(), cs); + table->field[5]->set_notnull(); + } + table->field[6]->store("SQL", 3, cs); + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[10], &tmp_string); + table->field[7]->store(tmp_string.ptr(), tmp_string.length(), cs); + table->field[10]->store("SQL", 3, cs); + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[6], &tmp_string); + table->field[11]->store(tmp_string.ptr(), tmp_string.length(), cs); + if (proc_table->field[5]->val_int() == SP_CONTAINS_SQL) + { + table->field[12]->store("CONTAINS SQL", 12 , cs); + } + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[7], &tmp_string); + table->field[14]->store(tmp_string.ptr(), tmp_string.length(), cs); + bzero((char *)&time, sizeof(time)); + ((Field_timestamp *) proc_table->field[12])->get_time(&time); + table->field[15]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + bzero((char *)&time, sizeof(time)); + ((Field_timestamp *) proc_table->field[13])->get_time(&time); + table->field[16]->store_time(&time, MYSQL_TIMESTAMP_DATETIME); + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[14], &tmp_string); + table->field[17]->store(tmp_string.ptr(), tmp_string.length(), cs); + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[15], &tmp_string); + table->field[18]->store(tmp_string.ptr(), tmp_string.length(), cs); + tmp_string.length(0); + get_field(thd->mem_root, proc_table->field[11], &tmp_string); + table->field[19]->store(tmp_string.ptr(), tmp_string.length(), cs); + table->file->write_row(table->record[0]); + } + } +} + - if (protocol->send_fields(&field_list, 1)) +int fill_schema_proc(THD *thd, TABLE_LIST *tables, COND *cond) +{ + TABLE *proc_table; + TABLE_LIST proc_tables; + const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS; + int res= 0; + TABLE *table= tables->table, *old_open_tables= thd->open_tables; + DBUG_ENTER("fill_schema_proc"); + + bzero((char*) &proc_tables,sizeof(proc_tables)); + proc_tables.db= (char*) "mysql"; + proc_tables.real_name= proc_tables.alias= (char*) "proc"; + proc_tables.lock_type= TL_READ; + if (!(proc_table= open_ltable(thd, &proc_tables, TL_READ))) + { DBUG_RETURN(1); + } + proc_table->file->ha_index_init(0); + if ((res= proc_table->file->index_first(proc_table->record[0]))) + { + res= (res == HA_ERR_END_OF_FILE) ? 
0 : 1; + goto err; + } + store_schema_proc(thd, table, proc_table, wild); + while (!proc_table->file->index_next(proc_table->record[0])) + store_schema_proc(thd, table, proc_table, wild); - for ( cs= all_charsets ; cs < all_charsets+255 ; cs++ ) +err: + proc_table->file->ha_index_end(); + close_thread_tables(thd, 0, 0, old_open_tables); + DBUG_RETURN(res); +} + + +static int get_schema_stat_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + CHARSET_INFO *cs= system_charset_info; + DBUG_ENTER("get_schema_stat_record"); + if (res) { - if (cs[0] && (cs[0]->state & MY_CS_PRIMARY) && - (cs[0]->state & MY_CS_AVAILABLE) && - !(wild && wild[0] && - wild_case_compare(system_charset_info,cs[0]->csname,wild))) + if (thd->lex->orig_sql_command != SQLCOM_SHOW_KEYS) { - if (write_charset(protocol, cs[0])) - goto err; + /* + I.e. we are in SELECT FROM INFORMATION_SCHEMA.STATISTICS + rather than in SHOW KEYS + */ + if (!tables->view) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + res= 0; + } + DBUG_RETURN(res); + } + else if (!tables->view) + { + TABLE *show_table= tables->table; + KEY *key_info=show_table->key_info; + show_table->file->info(HA_STATUS_VARIABLE | + HA_STATUS_NO_LOCK | + HA_STATUS_TIME); + for (uint i=0 ; i < show_table->keys ; i++,key_info++) + { + KEY_PART_INFO *key_part= key_info->key_part; + const char *str; + for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) + { + restore_record(table, default_values); + table->field[1]->store(base_name, strlen(base_name), cs); + table->field[2]->store(file_name, strlen(file_name), cs); + table->field[3]->store((longlong) ((key_info->flags & + HA_NOSAME) ? 0 :1)); + table->field[4]->store(base_name, strlen(base_name), cs); + table->field[5]->store(key_info->name, strlen(key_info->name), cs); + table->field[6]->store((longlong) (j+1)); + str=(key_part->field ? key_part->field->field_name : + "?unknown field?"); + table->field[7]->store(str, strlen(str), cs); + if (show_table->file->index_flags(i, j, 0) & HA_READ_ORDER) + { + table->field[8]->store(((key_part->key_part_flag & + HA_REVERSE_SORT) ? + "D" : "A"), 1, cs); + table->field[8]->set_notnull(); + } + KEY *key=show_table->key_info+i; + if (key->rec_per_key[j]) + { + ha_rows records=(show_table->file->records / + key->rec_per_key[j]); + table->field[9]->store((longlong) records); + table->field[9]->set_notnull(); + } + if (!(key_info->flags & HA_FULLTEXT) && + (!key_part->field || + key_part->length != + show_table->field[key_part->fieldnr-1]->key_length())) + { + table->field[10]->store((longlong) key_part->length); + table->field[10]->set_notnull(); + } + uint flags= key_part->field ? key_part->field->flags : 0; + const char *pos=(char*) ((flags & NOT_NULL_FLAG) ? 
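+                                 /* NULLABLE column: empty for NOT NULL key parts */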
"" : "YES"); + table->field[12]->store(pos, strlen(pos), cs); + pos= show_table->file->index_type(i); + table->field[13]->store(pos, strlen(pos), cs); + if (!show_table->keys_in_use.is_set(i)) + table->field[14]->store("disabled", 8, cs); + else + table->field[14]->store("", 0, cs); + table->field[14]->set_notnull(); + table->file->write_row(table->record[0]); + } + } + } + DBUG_RETURN(res); +} + + +static int get_schema_views_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + CHARSET_INFO *cs= system_charset_info; + DBUG_ENTER("get_schema_views_record"); + if (!res) + { + if (tables->view) + { + restore_record(table, default_values); + table->field[1]->store(tables->view_db.str, tables->view_db.length, cs); + table->field[2]->store(tables->view_name.str,tables->view_name.length,cs); + table->field[3]->store(tables->query.str, tables->query.length, cs); + + if (tables->with_check != VIEW_CHECK_NONE) + { + if (tables->with_check == VIEW_CHECK_LOCAL) + table->field[4]->store("LOCAL", 5, cs); + else + table->field[4]->store("CASCADED", 8, cs); + } + else + table->field[4]->store("NONE", 4, cs); + + if (tables->updatable_view) + table->field[5]->store("YES", 3, cs); + else + table->field[5]->store("NO", 2, cs); + table->file->write_row(table->record[0]); } } - send_eof(thd); + else + { + if (tables->view) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + } DBUG_RETURN(0); -err: - DBUG_RETURN(1); } - -int mysqld_show(THD *thd, const char *wild, show_var_st *variables, - enum enum_var_type value_type, - pthread_mutex_t *mutex) +void store_constraints(TABLE *table, const char*db, const char *tname, + const char *key_name, uint key_len, + const char *con_type, uint con_len) { - char buff[1024]; - List<Item> field_list; - Protocol *protocol= thd->protocol; - LEX_STRING null_lex_str; - DBUG_ENTER("mysqld_show"); + CHARSET_INFO *cs= system_charset_info; + restore_record(table, default_values); + table->field[1]->store(db, strlen(db), cs); + table->field[2]->store(key_name, key_len, cs); + table->field[3]->store(db, strlen(db), cs); + table->field[4]->store(tname, strlen(tname), cs); + table->field[5]->store(con_type, con_len, cs); + table->file->write_row(table->record[0]); +} - field_list.push_back(new Item_empty_string("Variable_name",30)); - field_list.push_back(new Item_empty_string("Value",256)); - if (protocol->send_fields(&field_list,1)) - DBUG_RETURN(1); /* purecov: inspected */ - null_lex_str.str= 0; // For sys_var->value_ptr() - null_lex_str.length= 0; - pthread_mutex_lock(mutex); - for (; variables->name; variables++) +static int get_schema_constraints_record(THD *thd, struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + DBUG_ENTER("get_schema_constraints_record"); + if (res) { - if (!(wild && wild[0] && wild_case_compare(system_charset_info, - variables->name,wild))) + if (!tables->view) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + DBUG_RETURN(0); + } + else if (!tables->view) + { + List<FOREIGN_KEY_INFO> f_key_list; + TABLE *show_table= tables->table; + KEY *key_info=show_table->key_info; + uint primary_key= show_table->primary_key; + show_table->file->info(HA_STATUS_VARIABLE | + HA_STATUS_NO_LOCK | + HA_STATUS_TIME); + for (uint i=0 ; i < show_table->keys ; i++, key_info++) + { + if (i != primary_key && 
!(key_info->flags & HA_NOSAME)) + continue; + + if (i == primary_key && !strcmp(key_info->name, primary_key_name)) + store_constraints(table, base_name, file_name, key_info->name, + strlen(key_info->name), "PRIMARY KEY", 11); + else if (key_info->flags & HA_NOSAME) + store_constraints(table, base_name, file_name, key_info->name, + strlen(key_info->name), "UNIQUE", 6); + } + + show_table->file->get_foreign_key_list(thd, &f_key_list); + FOREIGN_KEY_INFO *f_key_info; + List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list); + while ((f_key_info=it++)) { - protocol->prepare_for_resend(); - protocol->store(variables->name, system_charset_info); - SHOW_TYPE show_type=variables->type; - char *value=variables->value; - const char *pos, *end; - long nr; - - if (show_type == SHOW_SYS) + store_constraints(table, base_name, file_name, f_key_info->forein_id->str, + strlen(f_key_info->forein_id->str), "FOREIGN KEY", 11); + } + } + DBUG_RETURN(res); +} + + +void store_key_column_usage(TABLE *table, const char*db, const char *tname, + const char *key_name, uint key_len, + const char *con_type, uint con_len, longlong idx) +{ + CHARSET_INFO *cs= system_charset_info; + table->field[1]->store(db, strlen(db), cs); + table->field[2]->store(key_name, key_len, cs); + table->field[4]->store(db, strlen(db), cs); + table->field[5]->store(tname, strlen(tname), cs); + table->field[6]->store(con_type, con_len, cs); + table->field[7]->store((longlong) idx); +} + + +static int get_schema_key_column_usage_record(THD *thd, + struct st_table_list *tables, + TABLE *table, bool res, + const char *base_name, + const char *file_name) +{ + DBUG_ENTER("get_schema_key_column_usage_record"); + CHARSET_INFO *cs= system_charset_info; + if (res) + { + if (!tables->view) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + thd->net.last_errno, thd->net.last_error); + thd->clear_error(); + DBUG_RETURN(0); + } + else if (!tables->view) + { + List<FOREIGN_KEY_INFO> f_key_list; + TABLE *show_table= tables->table; + KEY *key_info=show_table->key_info; + uint primary_key= show_table->primary_key; + show_table->file->info(HA_STATUS_VARIABLE | + HA_STATUS_NO_LOCK | + HA_STATUS_TIME); + for (uint i=0 ; i < show_table->keys ; i++, key_info++) + { + if (i != primary_key && !(key_info->flags & HA_NOSAME)) + continue; + uint f_idx= 0; + KEY_PART_INFO *key_part= key_info->key_part; + for (uint j=0 ; j < key_info->key_parts ; j++,key_part++) { - show_type= ((sys_var*) value)->type(); - value= (char*) ((sys_var*) value)->value_ptr(thd, value_type, - &null_lex_str); + uint f_idx= 0; + if (key_part->field) + { + f_idx++; + restore_record(table, default_values); + store_key_column_usage(table, base_name, file_name, + key_info->name, + strlen(key_info->name), + key_part->field->field_name, + strlen(key_part->field->field_name), + (longlong) f_idx); + table->file->write_row(table->record[0]); + } } + } - pos= end= buff; - switch (show_type) { - case SHOW_LONG: - case SHOW_LONG_CONST: - end= int10_to_str(*(long*) value, buff, 10); - break; - case SHOW_LONGLONG: - end= longlong10_to_str(*(longlong*) value, buff, 10); - break; - case SHOW_HA_ROWS: - end= longlong10_to_str((longlong) *(ha_rows*) value, buff, 10); - break; - case SHOW_BOOL: - end= strmov(buff, *(bool*) value ? "ON" : "OFF"); - break; - case SHOW_MY_BOOL: - end= strmov(buff, *(my_bool*) value ? 
"ON" : "OFF"); - break; - case SHOW_INT_CONST: - case SHOW_INT: - end= int10_to_str((long) *(uint32*) value, buff, 10); - break; - case SHOW_HAVE: + show_table->file->get_foreign_key_list(thd, &f_key_list); + FOREIGN_KEY_INFO *f_key_info; + List_iterator_fast<FOREIGN_KEY_INFO> it(f_key_list); + while ((f_key_info= it++)) + { + LEX_STRING *f_info, *r_info; + List_iterator_fast<LEX_STRING> it(f_key_info->foreign_fields), + it1(f_key_info->referenced_fields); + uint f_idx= 0; + while ((f_info= it++)) { - SHOW_COMP_OPTION tmp= *(SHOW_COMP_OPTION*) value; - pos= show_comp_option_name[(int) tmp]; - end= strend(pos); - break; + r_info= it1++; + f_idx++; + restore_record(table, default_values); + store_key_column_usage(table, base_name, file_name, + f_key_info->forein_id->str, + f_key_info->forein_id->length, + f_info->str, f_info->length, + (longlong) f_idx); + table->field[8]->store((longlong) f_idx); + table->field[8]->set_notnull(); + table->file->write_row(table->record[0]); } - case SHOW_CHAR: + } + } + DBUG_RETURN(res); +} + + +int fill_open_tables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + DBUG_ENTER("fill_open_tables"); + const char *wild= thd->lex->wild ? thd->lex->wild->ptr() : NullS; + TABLE *table= tables->table; + CHARSET_INFO *cs= system_charset_info; + OPEN_TABLE_LIST *open_list; + if (!(open_list=list_open_tables(thd,wild)) && thd->is_fatal_error) + DBUG_RETURN(1); + + for (; open_list ; open_list=open_list->next) + { + restore_record(table, default_values); + table->field[0]->store(open_list->db, strlen(open_list->db), cs); + table->field[1]->store(open_list->table, strlen(open_list->table), cs); + table->field[2]->store((longlong) open_list->in_use); + table->field[3]->store((longlong) open_list->locked); + table->file->write_row(table->record[0]); + } + DBUG_RETURN(0); +} + + +int fill_variables(THD *thd, TABLE_LIST *tables, COND *cond) +{ + DBUG_ENTER("fill_variables"); + LEX *lex= thd->lex; + const char *wild= lex->wild ? lex->wild->ptr() : NullS; + int res= mysqld_show(thd, wild, init_vars, lex->option_type, + &LOCK_global_system_variables, 0, tables->table); + DBUG_RETURN(res); +} + + +int fill_status(THD *thd, TABLE_LIST *tables, COND *cond) +{ + DBUG_ENTER("fill_status"); + LEX *lex= thd->lex; + const char *wild= lex->wild ? lex->wild->ptr() : NullS; + int res= 0; + STATUS_VAR tmp; + + if (lex->option_type == OPT_GLOBAL) + { + pthread_mutex_lock(&LOCK_status); + calc_sum_of_all_status(&tmp); + } + res= mysqld_show(thd, wild, status_vars, OPT_GLOBAL, &LOCK_status, + (lex->option_type == OPT_GLOBAL ? 
+ &tmp: &thd->status_var), tables->table); + if (lex->option_type == OPT_GLOBAL) + pthread_mutex_unlock(&LOCK_status); + DBUG_RETURN(res); +} + + +/* + Find schema_tables elment by name + + SYNOPSIS + find_schema_table() + thd thread handler + table_name table name + + RETURN + 0 table not found + # pointer to 'shema_tables' element +*/ + +ST_SCHEMA_TABLE *find_schema_table(THD *thd, const char* table_name) +{ + ST_SCHEMA_TABLE *schema_table= schema_tables; + for ( ; schema_table->table_name; schema_table++) + { + if (!my_strcasecmp(system_charset_info, + schema_table->table_name, + table_name)) + return schema_table; + } + return 0; +} + + +ST_SCHEMA_TABLE *get_schema_table(enum enum_schema_tables schema_table_idx) +{ + return &schema_tables[schema_table_idx]; +} + + +/* + Create information_schema table using schema_table data + + SYNOPSIS + create_schema_table() + thd thread handler + schema_table pointer to 'shema_tables' element + + RETURN + # Pointer to created table + 0 Can't create table +*/ + +TABLE *create_schema_table(THD *thd, TABLE_LIST *table_list) +{ + int field_count= 0; + Item *item; + TABLE *table; + List<Item> field_list; + ST_SCHEMA_TABLE *schema_table= table_list->schema_table; + ST_FIELD_INFO *fields_info= schema_table->fields_info; + CHARSET_INFO *cs= system_charset_info; + DBUG_ENTER("create_schema_table"); + + for ( ; fields_info->field_name; fields_info++) + { + switch (fields_info->field_type) { + case MYSQL_TYPE_LONG: + if (!(item= new Item_int(fields_info->field_name, + fields_info->value, + fields_info->field_length))) { - if (!(pos= value)) - pos= ""; - end= strend(pos); - break; - } - case SHOW_STARTTIME: - nr= (long) (thd->query_start() - start_time); - end= int10_to_str(nr, buff, 10); - break; - case SHOW_QUESTION: - end= int10_to_str((long) thd->query_id, buff, 10); - break; -#ifdef HAVE_REPLICATION - case SHOW_RPL_STATUS: - end= strmov(buff, rpl_status_type[(int)rpl_status]); - break; - case SHOW_SLAVE_RUNNING: + DBUG_RETURN(0); + } + break; + case MYSQL_TYPE_TIMESTAMP: + if (!(item=new Item_datetime(fields_info->field_name))) { - pthread_mutex_lock(&LOCK_active_mi); - end= strmov(buff, (active_mi->slave_running && - active_mi->rli.slave_running) ? "ON" : "OFF"); - pthread_mutex_unlock(&LOCK_active_mi); - break; + DBUG_RETURN(0); } -#endif /* HAVE_REPLICATION */ - case SHOW_OPENTABLES: - end= int10_to_str((long) cached_tables(), buff, 10); - break; - case SHOW_CHAR_PTR: + break; + default: + /* this should be changed when Item_empty_string is fixed(in 4.1) */ + if (!(item= new Item_empty_string("", 0, cs))) { - if (!(pos= *(char**) value)) - pos= ""; - end= strend(pos); - break; + DBUG_RETURN(0); } -#ifdef HAVE_OPENSSL - /* First group - functions relying on CTX */ - case SHOW_SSL_CTX_SESS_ACCEPT: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_accept(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_ACCEPT_GOOD: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_accept_good(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_CONNECT_GOOD: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_connect_good(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_ACCEPT_RENEGOTIATE: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_accept_renegotiate(ssl_acceptor_fd->ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_CONNECT_RENEGOTIATE: - end= int10_to_str((long) (!ssl_acceptor_fd ? 
0 : - SSL_CTX_sess_connect_renegotiate(ssl_acceptor_fd-> ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_CB_HITS: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_cb_hits(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_HITS: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_hits(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_CACHE_FULL: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_cache_full(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_MISSES: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_misses(ssl_acceptor_fd-> - ssl_context)), - buff, 10); - break; - case SHOW_SSL_CTX_SESS_TIMEOUTS: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_timeouts(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_SESS_NUMBER: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_number(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_SESS_CONNECT: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_connect(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_SESS_GET_CACHE_SIZE: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_sess_get_cache_size(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_GET_VERIFY_MODE: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_get_verify_mode(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_GET_VERIFY_DEPTH: - end= int10_to_str((long) (!ssl_acceptor_fd ? 0 : - SSL_CTX_get_verify_depth(ssl_acceptor_fd->ssl_context)), - buff,10); - break; - case SHOW_SSL_CTX_GET_SESSION_CACHE_MODE: - if (!ssl_acceptor_fd) - { - pos= "NONE"; - end= pos+4; - break; - } - switch (SSL_CTX_get_session_cache_mode(ssl_acceptor_fd->ssl_context)) - { - case SSL_SESS_CACHE_OFF: - pos= "OFF"; - break; - case SSL_SESS_CACHE_CLIENT: - pos= "CLIENT"; - break; - case SSL_SESS_CACHE_SERVER: - pos= "SERVER"; - break; - case SSL_SESS_CACHE_BOTH: - pos= "BOTH"; - break; - case SSL_SESS_CACHE_NO_AUTO_CLEAR: - pos= "NO_AUTO_CLEAR"; - break; - case SSL_SESS_CACHE_NO_INTERNAL_LOOKUP: - pos= "NO_INTERNAL_LOOKUP"; - break; - default: - pos= "Unknown"; - break; - } - end= strend(pos); - break; - /* First group - functions relying on SSL */ - case SHOW_SSL_GET_VERSION: - pos= (thd->net.vio->ssl_arg ? - SSL_get_version((SSL*) thd->net.vio->ssl_arg) : ""); - end= strend(pos); - break; - case SHOW_SSL_SESSION_REUSED: - end= int10_to_str((long) (thd->net.vio->ssl_arg ? - SSL_session_reused((SSL*) thd->net.vio-> - ssl_arg) : - 0), - buff, 10); - break; - case SHOW_SSL_GET_DEFAULT_TIMEOUT: - end= int10_to_str((long) (thd->net.vio->ssl_arg ? - SSL_get_default_timeout((SSL*) thd->net.vio-> - ssl_arg) : - 0), - buff, 10); - break; - case SHOW_SSL_GET_VERIFY_MODE: - end= int10_to_str((long) (thd->net.vio->ssl_arg ? - SSL_get_verify_mode((SSL*) thd->net.vio-> - ssl_arg): - 0), - buff, 10); - break; - case SHOW_SSL_GET_VERIFY_DEPTH: - end= int10_to_str((long) (thd->net.vio->ssl_arg ? - SSL_get_verify_depth((SSL*) thd->net.vio-> - ssl_arg): - 0), - buff, 10); - break; - case SHOW_SSL_GET_CIPHER: - pos= (thd->net.vio->ssl_arg ? 
- SSL_get_cipher((SSL*) thd->net.vio->ssl_arg) : "" ); - end= strend(pos); - break; - case SHOW_SSL_GET_CIPHER_LIST: - if (thd->net.vio->ssl_arg) - { - char *to= buff; - for (int i=0 ; i++ ;) - { - const char *p= SSL_get_cipher_list((SSL*) thd->net.vio->ssl_arg,i); - if (p == NULL) - break; - to= strmov(to, p); - *to++= ':'; - } - if (to != buff) - to--; // Remove last ':' - end= to; - } - break; + item->max_length= fields_info->field_length * cs->mbmaxlen; + item->set_name(fields_info->field_name, + strlen(fields_info->field_name), cs); + break; + } + field_list.push_back(item); + item->maybe_null= fields_info->maybe_null; + field_count++; + } + TMP_TABLE_PARAM *tmp_table_param = + (TMP_TABLE_PARAM*) (thd->calloc(sizeof(TMP_TABLE_PARAM))); + tmp_table_param->init(); + tmp_table_param->table_charset= cs; + tmp_table_param->field_count= field_count; + SELECT_LEX *select_lex= thd->lex->current_select; + if (!(table= create_tmp_table(thd, tmp_table_param, + field_list, (ORDER*) 0, 0, 0, + (select_lex->options | thd->options | + TMP_TABLE_ALL_COLUMNS), + HA_POS_ERROR, table_list->alias))) + DBUG_RETURN(0); + DBUG_RETURN(table); +} -#endif /* HAVE_OPENSSL */ - case SHOW_KEY_CACHE_LONG: - case SHOW_KEY_CACHE_CONST_LONG: - value= (value-(char*) &dflt_key_cache_var)+ (char*) sql_key_cache; - end= int10_to_str(*(long*) value, buff, 10); - break; - case SHOW_UNDEF: // Show never happen - case SHOW_SYS: - break; // Return empty string - default: - break; + +/* + For old SHOW compatibility. It is used when + old SHOW doesn't have generated column names + Make list of fields for SHOW + + SYNOPSIS + make_old_format() + thd thread handler + schema_table pointer to 'schema_tables' element + + RETURN + -1 errror + 0 success +*/ + +int make_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + ST_FIELD_INFO *field_info= schema_table->fields_info; + for ( ; field_info->field_name; field_info++) + { + if (field_info->old_name) + { + Item_field *field= new Item_field(NullS, NullS, field_info->field_name); + if (field) + { + field->set_name(field_info->old_name, + strlen(field_info->old_name), + system_charset_info); + if (add_item_to_list(thd, field)) + return 1; } - if (protocol->store(pos, (uint32) (end - pos), system_charset_info) || - protocol->write()) - goto err; /* purecov: inspected */ } } - pthread_mutex_unlock(mutex); - send_eof(thd); + return 0; +} + + +int make_schemata_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + char tmp[128]; + LEX *lex= thd->lex; + SELECT_LEX *sel= lex->current_select; + + if (!sel->item_list.elements) + { + ST_FIELD_INFO *field_info= &schema_table->fields_info[1]; + String buffer(tmp,sizeof(tmp), system_charset_info); + Item_field *field= new Item_field(NullS, NullS, field_info->field_name); + if (!field || add_item_to_list(thd, field)) + return 1; + buffer.length(0); + buffer.append(field_info->old_name); + if (lex->wild && lex->wild->ptr()) + { + buffer.append(" ("); + buffer.append(lex->wild->ptr()); + buffer.append(")"); + } + field->set_name(buffer.ptr(), buffer.length(), system_charset_info); + } + return 0; +} + + +int make_table_names_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + char tmp[128]; + String buffer(tmp,sizeof(tmp), thd->charset()); + LEX *lex= thd->lex; + + ST_FIELD_INFO *field_info= &schema_table->fields_info[2]; + buffer.length(0); + buffer.append(field_info->old_name); + buffer.append(lex->select_lex.db); + if (lex->wild && lex->wild->ptr()) + { + buffer.append(" ("); + buffer.append(lex->wild->ptr()); + buffer.append(")"); + } 
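+  /*
+    "buffer" now holds the legacy heading, e.g. "Tables_in_test (t%)" for
+    SHOW TABLES FROM test LIKE 't%'; fields_info[2].old_name is "Tables_in_"
+    (see table_names_fields_info).
+  */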
+ Item_field *field= new Item_field(NullS, NullS, field_info->field_name); + if (add_item_to_list(thd, field)) + return 1; + field->set_name(buffer.ptr(), buffer.length(), system_charset_info); + if (thd->lex->verbose) + { + field->set_name(buffer.ptr(), buffer.length(), system_charset_info); + field_info= &schema_table->fields_info[3]; + field= new Item_field(NullS, NullS, field_info->field_name); + if (add_item_to_list(thd, field)) + return 1; + field->set_name(field_info->old_name, strlen(field_info->old_name), + system_charset_info); + } + return 0; +} + + +int make_columns_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + int fields_arr[]= {3, 14, 13, 6, 15, 5, 16, 17, 18, -1}; + int *field_num= fields_arr; + ST_FIELD_INFO *field_info; + for (; *field_num >= 0; field_num++) + { + field_info= &schema_table->fields_info[*field_num]; + if (!thd->lex->verbose && (*field_num == 13 || + *field_num == 17 || + *field_num == 18)) + continue; + Item_field *field= new Item_field(NullS, NullS, field_info->field_name); + if (field) + { + field->set_name(field_info->old_name, + strlen(field_info->old_name), + system_charset_info); + if (add_item_to_list(thd, field)) + return 1; + } + } + return 0; +} + + +int make_character_sets_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + int fields_arr[]= {0, 2, 1, 3, -1}; + int *field_num= fields_arr; + ST_FIELD_INFO *field_info; + for (; *field_num >= 0; field_num++) + { + field_info= &schema_table->fields_info[*field_num]; + Item_field *field= new Item_field(NullS, NullS, field_info->field_name); + if (field) + { + field->set_name(field_info->old_name, + strlen(field_info->old_name), + system_charset_info); + if (add_item_to_list(thd, field)) + return 1; + } + } + return 0; +} + + +int make_proc_old_format(THD *thd, ST_SCHEMA_TABLE *schema_table) +{ + int fields_arr[]= {2, 3, 4, 19, 16, 15, 14, 18, -1}; + int *field_num= fields_arr; + ST_FIELD_INFO *field_info; + for (; *field_num >= 0; field_num++) + { + field_info= &schema_table->fields_info[*field_num]; + Item_field *field= new Item_field(NullS, NullS, field_info->field_name); + if (field) + { + field->set_name(field_info->old_name, + strlen(field_info->old_name), + system_charset_info); + if (add_item_to_list(thd, field)) + return 1; + } + } + return 0; +} + + +/* + Create information_schema table + + SYNOPSIS + mysql_schema_table() + thd thread handler + lex pointer to LEX + table_list pointer to table_list + + RETURN + 0 success + 1 error +*/ + +int mysql_schema_table(THD *thd, LEX *lex, TABLE_LIST *table_list) +{ + TABLE *table; + DBUG_ENTER("mysql_schema_table"); + if (!(table= table_list->schema_table->create_table(thd, table_list))) + { + DBUG_RETURN(1); + } + table->tmp_table= TMP_TABLE; + table->grant.privilege= SELECT_ACL; + table->alias_name_used= my_strcasecmp(table_alias_charset, + table_list->real_name, + table_list->alias); + table_list->schema_table_name= table_list->real_name; + table_list->real_name= table->real_name; + table_list->table= table; + table->next= thd->derived_tables; + thd->derived_tables= table; + table_list->select_lex->options |= OPTION_SCHEMA_TABLE; + lex->safe_to_cache_query= 0; DBUG_RETURN(0); +} - err: - pthread_mutex_unlock(mutex); - DBUG_RETURN(1); + +/* + Generate select from information_schema table + + SYNOPSIS + make_schema_select() + thd thread handler + sel pointer to SELECT_LEX + schema_table_idx index of 'schema_tables' element + + RETURN + 0 success + 1 error +*/ + +int make_schema_select(THD *thd, SELECT_LEX *sel, + enum 
enum_schema_tables schema_table_idx) +{ + ST_SCHEMA_TABLE *schema_table= get_schema_table(schema_table_idx); + LEX_STRING db, table; + DBUG_ENTER("mysql_schema_select"); + /* + We have to make non const db_name & table_name + because of lower_case_table_names + */ + make_lex_string(thd, &db, information_schema_name.str, + information_schema_name.length, 0); + make_lex_string(thd, &table, schema_table->table_name, + strlen(schema_table->table_name), 0); + if (!sel->item_list.elements && /* Handle old syntax */ + schema_table->old_format(thd, schema_table) || + !sel->add_table_to_list(thd, new Table_ident(thd, db, table, 0), + 0, 0, TL_READ, (List<String> *) 0, + (List<String> *) 0)) + { + DBUG_RETURN(1); + } + DBUG_RETURN(0); } + +/* + Fill temporary schema tables before SELECT + + SYNOPSIS + get_schema_tables_result() + join join which use schema tables + + RETURN + FALSE success + TRUE error +*/ + +bool get_schema_tables_result(JOIN *join) +{ + DBUG_ENTER("get_schema_tables_result"); + JOIN_TAB *tmp_join_tab= join->join_tab+join->tables; + THD *thd= join->thd; + LEX *lex= thd->lex; + bool result= 0; + for (JOIN_TAB *tab= join->join_tab; tab < tmp_join_tab; tab++) + { + if (!tab->table || !tab->table->pos_in_table_list) + break; + + TABLE_LIST *table_list= tab->table->pos_in_table_list; + if (table_list->schema_table && thd->fill_derived_tables()) + { + TABLE_LIST *save_next_global= table_list->next_global; + TABLE_LIST **query_tables_last= lex->query_tables_last; + TABLE *old_derived_tables= thd->derived_tables; + MYSQL_LOCK *sql_lock= thd->lock; + lex->sql_command= SQLCOM_SHOW_FIELDS; + + if (&lex->unit != lex->current_select->master_unit()) // is subselect + { + table_list->table->file->extra(HA_EXTRA_RESET_STATE); + table_list->table->file->delete_all_rows(); + free_io_cache(table_list->table); + filesort_free_buffers(table_list->table); + } + else + table_list->table->file->records= 0; + + thd->derived_tables= 0; + thd->lock=0; + if (table_list->schema_table->fill_table(thd, table_list, + tab->select_cond)) + result= 1; + thd->lock= sql_lock; + lex->sql_command= SQLCOM_SELECT; + thd->derived_tables= old_derived_tables; + table_list->next_global= save_next_global; + lex->query_tables_last= query_tables_last; + } + } + DBUG_RETURN(result); +} + + +ST_FIELD_INFO schema_fields_info[]= +{ + {"CATALOG_NAME", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"SCHEMA_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Database"}, + {"DEFAULT_CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, + {"SQL_PATH", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO tables_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, + {"TABLE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ENGINE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Engine"}, + {"VERSION", 21 , MYSQL_TYPE_LONG, 0, 1, "Version"}, + {"ROW_FORMAT", 10, MYSQL_TYPE_STRING, 0, 1, "Row_format"}, + {"TABLE_ROWS", 21 , MYSQL_TYPE_LONG, 0, 1, "Rows"}, + {"AVG_ROW_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Avg_row_length"}, + {"DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Data_length"}, + {"MAX_DATA_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Max_data_length"}, + {"INDEX_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 1, "Index_length"}, + {"DATA_FREE", 21 , MYSQL_TYPE_LONG, 0, 1, "Data_free"}, + {"AUTO_INCREMENT", 21 , MYSQL_TYPE_LONG, 0, 1, "Auto_increment"}, + {"CREATE_TIME", 0, 
MYSQL_TYPE_TIMESTAMP, 0, 1, "Create_time"}, + {"UPDATE_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Update_time"}, + {"CHECK_TIME", 0, MYSQL_TYPE_TIMESTAMP, 0, 1, "Check_time"}, + {"TABLE_COLLATION", 64, MYSQL_TYPE_STRING, 0, 1, "Collation"}, + {"CHECKSUM", 21 , MYSQL_TYPE_LONG, 0, 1, "Checksum"}, + {"CREATE_OPTIONS", 255, MYSQL_TYPE_STRING, 0, 1, "Create_options"}, + {"TABLE_COMMENT", 80, MYSQL_TYPE_STRING, 0, 0, "Comment"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO columns_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Field"}, + {"ORDINAL_POSITION", 21 , MYSQL_TYPE_LONG, 0, 0, 0}, + {"COLUMN_DEFAULT", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, "Default"}, + {"IS_NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"}, + {"DATA_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CHARACTER_MAXIMUM_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 0, 0}, + {"CHARACTER_OCTET_LENGTH", 21 , MYSQL_TYPE_LONG, 0, 0, 0}, + {"NUMERIC_PRECISION", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, + {"NUMERIC_SCALE", 21 , MYSQL_TYPE_LONG, 0, 1, 0}, + {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 1, 0}, + {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 1, "Collation"}, + {"COLUMN_TYPE", 65535, MYSQL_TYPE_STRING, 0, 0, "Type"}, + {"COLUMN_KEY", 3, MYSQL_TYPE_STRING, 0, 0, "Key"}, + {"EXTRA", 20, MYSQL_TYPE_STRING, 0, 0, "Extra"}, + {"PRIVILEGES", 80, MYSQL_TYPE_STRING, 0, 0, "Privileges"}, + {"COLUMN_COMMENT", 255, MYSQL_TYPE_STRING, 0, 0, "Comment"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO charsets_fields_info[]= +{ + {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Charset"}, + {"DEFAULT_COLLATE_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Default collation"}, + {"DESCRIPTION", 60, MYSQL_TYPE_STRING, 0, 0, "Description"}, + {"MAXLEN", 3 ,MYSQL_TYPE_LONG, 0, 0, "Maxlen"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO collation_fields_info[]= +{ + {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Collation"}, + {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, "Charset"}, + {"ID", 11, MYSQL_TYPE_LONG, 0, 0, "Id"}, + {"IS_DEFAULT", 3, MYSQL_TYPE_STRING, 0, 0, "Default"}, + {"IS_COMPILED", 3, MYSQL_TYPE_STRING, 0, 0, "Compiled"}, + {"SORTLEN", 3 ,MYSQL_TYPE_LONG, 0, 0, "Sortlen"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO coll_charset_app_fields_info[]= +{ + {"COLLATION_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CHARACTER_SET_NAME", 64, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO proc_fields_info[]= +{ + {"SPECIFIC_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ROUTINE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ROUTINE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Db"}, + {"ROUTINE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Name"}, + {"ROUTINE_TYPE", 9, MYSQL_TYPE_STRING, 0, 0, "Type"}, + {"DTD_IDENTIFIER", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"ROUTINE_BODY", 8, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ROUTINE_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, + {"EXTERNAL_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"EXTERNAL_LANGUAGE", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"PARAMETER_STYLE", 8, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_DETERMINISTIC", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {"SQL_DATA_ACCESS", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"SQL_PATH", NAME_LEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"SECURITY_TYPE", 7, MYSQL_TYPE_STRING, 0, 0, 
"Security_type"}, + {"CREATED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, "Created"}, + {"LAST_ALTERED", 0, MYSQL_TYPE_TIMESTAMP, 0, 0, "Modified"}, + {"SQL_MODE", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ROUTINE_COMMENT", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Comment"}, + {"DEFINER", 77, MYSQL_TYPE_STRING, 0, 0, "Definer"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO stat_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, + {"NON_UNIQUE", 1, MYSQL_TYPE_LONG, 0, 0, "Non_unique"}, + {"INDEX_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"INDEX_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Key_name"}, + {"SEQ_IN_INDEX", 2, MYSQL_TYPE_LONG, 0, 0, "Seq_in_index"}, + {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Column_name"}, + {"COLLATION", 1, MYSQL_TYPE_STRING, 0, 1, "Collation"}, + {"CARDINALITY", 21, MYSQL_TYPE_LONG, 0, 1, "Cardinality"}, + {"SUB_PART", 3, MYSQL_TYPE_LONG, 0, 1, "Sub_part"}, + {"PACKED", 10, MYSQL_TYPE_STRING, 0, 1, "Packed"}, + {"NULLABLE", 3, MYSQL_TYPE_STRING, 0, 0, "Null"}, + {"INDEX_TYPE", 16, MYSQL_TYPE_STRING, 0, 0, "Index_type"}, + {"COMMENT", 16, MYSQL_TYPE_STRING, 0, 1, "Comment"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO view_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"VIEW_DEFINITION", 65535, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CHECK_OPTION", 8, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_UPDATABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO user_privileges_fields_info[]= +{ + {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO schema_privileges_fields_info[]= +{ + {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO table_privileges_fields_info[]= +{ + {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO column_privileges_fields_info[]= +{ + {"GRANTEE", 81, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"PRIVILEGE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"IS_GRANTABLE", 3, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO table_constraints_fields_info[]= +{ + {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_NAME", NAME_LEN, 
MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO key_column_usage_fields_info[]= +{ + {"CONSTRAINT_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"CONSTRAINT_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"CONSTRAINT_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"COLUMN_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"ORDINAL_POSITION", 10 ,MYSQL_TYPE_LONG, 0, 0, 0}, + {"POSITION_IN_UNIQUE_CONSTRAINT", 10 ,MYSQL_TYPE_LONG, 0, 1, 0}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO table_names_fields_info[]= +{ + {"TABLE_CATALOG", FN_REFLEN, MYSQL_TYPE_STRING, 0, 1, 0}, + {"TABLE_SCHEMA",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, 0}, + {"TABLE_NAME", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Tables_in_"}, + {"TABLE_TYPE", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table_type"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO open_tables_fields_info[]= +{ + {"Database", NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Database"}, + {"Table",NAME_LEN, MYSQL_TYPE_STRING, 0, 0, "Table"}, + {"In_use", 1, MYSQL_TYPE_LONG, 0, 0, "In_use"}, + {"Name_locked", 4, MYSQL_TYPE_LONG, 0, 0, "Name_locked"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +ST_FIELD_INFO variables_fields_info[]= +{ + {"Variable_name", 80, MYSQL_TYPE_STRING, 0, 0, "Variable_name"}, + {"Value", 255, MYSQL_TYPE_STRING, 0, 0, "Value"}, + {0, 0, MYSQL_TYPE_STRING, 0, 0, 0} +}; + + +/* + Description of ST_FIELD_INFO in table.h +*/ + +ST_SCHEMA_TABLE schema_tables[]= +{ + {"SCHEMATA", schema_fields_info, create_schema_table, + fill_schema_shemata, make_schemata_old_format, 0, 1, -1, 0}, + {"TABLES", tables_fields_info, create_schema_table, + get_all_tables, make_old_format, get_schema_tables_record, 1, 2, 0}, + {"COLUMNS", columns_fields_info, create_schema_table, + get_all_tables, make_columns_old_format, get_schema_column_record, 1, 2, 0}, + {"CHARACTER_SETS", charsets_fields_info, create_schema_table, + fill_schema_charsets, make_character_sets_old_format, 0, -1, -1, 0}, + {"COLLATIONS", collation_fields_info, create_schema_table, + fill_schema_collation, make_old_format, 0, -1, -1, 0}, + {"COLLATION_CHARACTER_SET_APPLICABILITY", coll_charset_app_fields_info, + create_schema_table, fill_schema_coll_charset_app, 0, 0, -1, -1, 0}, + {"ROUTINES", proc_fields_info, create_schema_table, + fill_schema_proc, make_proc_old_format, 0, -1, -1, 0}, + {"STATISTICS", stat_fields_info, create_schema_table, + get_all_tables, make_old_format, get_schema_stat_record, 1, 2, 0}, + {"VIEWS", view_fields_info, create_schema_table, + get_all_tables, 0, get_schema_views_record, 1, 2, 0}, + {"USER_PRIVILEGES", user_privileges_fields_info, create_schema_table, + fill_schema_user_privileges, 0, 0, -1, -1, 0}, + {"SCHEMA_PRIVILEGES", schema_privileges_fields_info, create_schema_table, + fill_schema_schema_privileges, 0, 0, -1, -1, 0}, + {"TABLE_PRIVILEGES", table_privileges_fields_info, create_schema_table, + fill_schema_table_privileges, 0, 0, -1, -1, 0}, + {"COLUMN_PRIVILEGES", column_privileges_fields_info, create_schema_table, + fill_schema_column_privileges, 0, 0, -1, -1, 0}, + {"TABLE_CONSTRAINTS", table_constraints_fields_info, create_schema_table, + get_all_tables, 
0, get_schema_constraints_record, 3, 4, 0}, + {"KEY_COLUMN_USAGE", key_column_usage_fields_info, create_schema_table, + get_all_tables, 0, get_schema_key_column_usage_record, 4, 5, 0}, + {"TABLE_NAMES", table_names_fields_info, create_schema_table, + get_all_tables, make_table_names_old_format, 0, 1, 2, 1}, + {"OPEN_TABLES", open_tables_fields_info, create_schema_table, + fill_open_tables, make_old_format, 0, -1, -1, 1}, + {"STATUS", variables_fields_info, create_schema_table, fill_status, + make_old_format, 0, -1, -1, 1}, + {"VARIABLES", variables_fields_info, create_schema_table, fill_variables, + make_old_format, 0, -1, -1, 1}, + {0, 0, 0, 0, 0, 0, 0, 0, 0} +}; + + #ifdef __GNUC__ template class List_iterator_fast<char>; template class List<char>; diff --git a/sql/sql_sort.h b/sql/sql_sort.h index 9f95ffa4884..1831c4a2f3d 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -78,3 +78,4 @@ int merge_buffers(SORTPARAM *param,IO_CACHE *from_file, IO_CACHE *to_file, uchar *sort_buffer, BUFFPEK *lastbuff,BUFFPEK *Fb, BUFFPEK *Tb,int flag); +void reuse_freed_buff(QUEUE *queue, BUFFPEK *reuse, uint key_length); diff --git a/sql/sql_string.cc b/sql/sql_string.cc index c1701e7e9bf..226a80201a1 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -684,6 +684,19 @@ void String::qs_append(double *d) qs_append(ld); } +void String::qs_append(int i) +{ + char *buff = Ptr + str_length; + sprintf(buff,"%d", i); + str_length += strlen(buff); +} + +void String::qs_append(uint i) +{ + char *buff = Ptr + str_length; + sprintf(buff,"%u", i); + str_length += strlen(buff); +} /* Compare strings according to collation, without end space. @@ -707,8 +720,8 @@ void String::qs_append(double *d) int sortcmp(const String *s,const String *t, CHARSET_INFO *cs) { return cs->coll->strnncollsp(cs, - (unsigned char *) s->ptr(),s->length(), - (unsigned char *) t->ptr(),t->length()); + (unsigned char *) s->ptr(),s->length(), + (unsigned char *) t->ptr(),t->length(), 0); } diff --git a/sql/sql_string.h b/sql/sql_string.h index a8fb9574c0b..cb9db52c830 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -72,9 +72,9 @@ public: static void *operator new(size_t size, MEM_ROOT *mem_root) { return (void*) alloc_root(mem_root, (uint) size); } static void operator delete(void *ptr_arg,size_t size) - {} + { TRASH(ptr_arg, size); } static void operator delete(void *ptr_arg,size_t size, MEM_ROOT *mem_root) - {} + { TRASH(ptr_arg, size); } ~String() { free(); } inline void set_charset(CHARSET_INFO *charset) { str_charset= charset; } @@ -141,6 +141,34 @@ public: bool set(longlong num, CHARSET_INFO *cs); bool set(ulonglong num, CHARSET_INFO *cs); bool set(double num,uint decimals, CHARSET_INFO *cs); + + /* + PMG 2004.11.12 + This is a method that works the same as perl's "chop". It simply + drops the last character of a string. This is useful in the case + of the federated storage handler where I'm building a unknown + number, list of values and fields to be used in a sql insert + statement to be run on the remote server, and have a comma after each. + When the list is complete, I "chop" off the trailing comma + + ex. 
+ String stringobj; + stringobj.append("VALUES ('foo', 'fi', 'fo',"); + stringobj.chop(); + stringobj.append(")"); + + In this case, the value of string was: + + VALUES ('foo', 'fi', 'fo', + VALUES ('foo', 'fi', 'fo' + VALUES ('foo', 'fi', 'fo') + + */ + inline void chop() + { + Ptr[str_length--]= '\0'; + } + inline void free() { if (alloced) @@ -284,6 +312,8 @@ public: Ptr[str_length]= c; str_length++; } + void qs_append(int i); + void qs_append(uint i); /* Inline (general) functions used by the protocol functions */ diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 8ee55150a18..c3aac59b98b 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -58,15 +58,15 @@ static int copy_data_between_tables(TABLE *from,TABLE *to, Wait if global_read_lock (FLUSH TABLES WITH READ LOCK) is set. RETURN - 0 ok. In this case ok packet is sent to user - -1 Error (Error message given but not sent to user) + FALSE OK. In this case ok packet is sent to user + TRUE Error */ -int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, - my_bool drop_temporary) +bool mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, + my_bool drop_temporary) { - int error= 0; + bool error= FALSE; DBUG_ENTER("mysql_rm_table"); /* mark for close and remove all cached entries */ @@ -79,9 +79,8 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, { if (thd->global_read_lock) { - my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE,MYF(0), - tables->real_name); - error= 1; + my_error(ER_TABLE_NOT_LOCKED_FOR_WRITE, MYF(0), tables->real_name); + error= TRUE; goto err; } while (global_read_lock && ! thd->killed) @@ -90,7 +89,7 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, } } - error=mysql_rm_table_part2(thd,tables, if_exists, drop_temporary, 0); + error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 0, 0); err: pthread_mutex_unlock(&LOCK_open); @@ -101,9 +100,9 @@ int mysql_rm_table(THD *thd,TABLE_LIST *tables, my_bool if_exists, pthread_mutex_unlock(&thd->mysys_var->mutex); if (error) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); send_ok(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } @@ -135,8 +134,8 @@ int mysql_rm_table_part2_with_lock(THD *thd, thd->mysys_var->current_cond= &COND_refresh; VOID(pthread_mutex_lock(&LOCK_open)); - error=mysql_rm_table_part2(thd,tables, if_exists, drop_temporary, - dont_log_query); + error= mysql_rm_table_part2(thd, tables, if_exists, drop_temporary, 1, + dont_log_query); pthread_mutex_unlock(&LOCK_open); @@ -158,6 +157,7 @@ int mysql_rm_table_part2_with_lock(THD *thd, if_exists If set, don't give an error if table doesn't exists. 
In this case we give an warning of level 'NOTE' drop_temporary Only drop temporary tables + drop_view Allow to delete VIEW .frm dont_log_query Don't log the query TODO: @@ -177,7 +177,8 @@ int mysql_rm_table_part2_with_lock(THD *thd, */ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, - bool drop_temporary, bool dont_log_query) + bool drop_temporary, bool drop_view, + bool dont_log_query) { TABLE_LIST *table; char path[FN_REFLEN], *alias; @@ -189,7 +190,7 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, if (lock_table_names(thd, tables)) DBUG_RETURN(1); - for (table=tables ; table ; table=table->next) + for (table= tables; table; table= table->next_local) { char *db=table->db; mysql_ha_flush(thd, table, MYSQL_HA_CLOSE_FINAL); @@ -217,8 +218,9 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, strxmov(path, mysql_data_home, "/", db, "/", alias, reg_ext, NullS); (void) unpack_filename(path,path); } - if (drop_temporary || - (access(path,F_OK) && ha_create_table_from_engine(thd,db,alias,TRUE))) + if (drop_temporary || + (access(path,F_OK) && ha_create_table_from_engine(thd,db,alias,TRUE)) || + (!drop_view && mysql_frm_type(path) != FRMTYPE_TABLE)) { if (if_exists) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, @@ -260,27 +262,23 @@ int mysql_rm_table_part2(THD *thd, TABLE_LIST *tables, bool if_exists, if (wrong_tables.length()) { if (!foreign_key_error) - my_error(ER_BAD_TABLE_ERROR,MYF(0), wrong_tables.c_ptr()); + my_error(ER_BAD_TABLE_ERROR, MYF(0), wrong_tables.c_ptr()); else - my_error(ER_ROW_IS_REFERENCED, MYF(0)); + my_message(ER_ROW_IS_REFERENCED, ER(ER_ROW_IS_REFERENCED), MYF(0)); error= 1; } if (some_tables_deleted || tmp_table_deleted || !error) { query_cache_invalidate3(thd, tables, 0); - if (!dont_log_query) + if (!dont_log_query && mysql_bin_log.is_open()) { - mysql_update_log.write(thd, thd->query,thd->query_length); - if (mysql_bin_log.is_open()) - { - if (!error) - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - tmp_table_deleted && !some_tables_deleted, - FALSE); - mysql_bin_log.write(&qinfo); - } + if (!error) + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, + tmp_table_deleted && !some_tables_deleted, + FALSE); + mysql_bin_log.write(&qinfo); } } @@ -436,6 +434,9 @@ void calculate_interval_lengths(CHARSET_INFO *cs, TYPELIB *interval, DESCRIPTION Prepares the table and key structures for table creation. + NOTES + sets create_info->varchar if the table has a varchar or blob. 
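+    (For example, CREATE TABLE t1 (a VARCHAR(10)) sets the flag through the
+    MYSQL_TYPE_VARCHAR case below, while a table with only CHAR/INT columns
+    leaves it at 0.)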
+ RETURN VALUES 0 ok -1 error @@ -450,20 +451,25 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, const char *key_name; create_field *sql_field,*dup_field; uint field,null_fields,blob_columns; + uint max_key_length= file->max_key_length(); ulong pos; KEY *key_info; KEY_PART_INFO *key_part_info; int timestamps= 0, timestamps_with_niladic= 0; int field_no,dup_no; int select_field_pos,auto_increment=0; + List_iterator<create_field> it(fields),it2(fields); + uint total_uneven_bit_length= 0; DBUG_ENTER("mysql_prepare_table"); - List_iterator<create_field> it(fields),it2(fields); select_field_pos=fields.elements - select_field_count; null_fields=blob_columns=0; + create_info->varchar= 0; for (field_no=0; (sql_field=it++) ; field_no++) { + CHARSET_INFO *save_cs; + if (!sql_field->charset) sql_field->charset= create_info->default_table_charset; /* @@ -475,13 +481,13 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (create_info->table_charset && sql_field->charset != &my_charset_bin) sql_field->charset= create_info->table_charset; - CHARSET_INFO *savecs= sql_field->charset; + save_cs= sql_field->charset; if ((sql_field->flags & BINCMP_FLAG) && !(sql_field->charset= get_charset_by_csname(sql_field->charset->csname, MY_CS_BINSORT,MYF(0)))) { char tmp[64]; - strmake(strmake(tmp, savecs->csname, sizeof(tmp)-4), "_bin", 4); + strmake(strmake(tmp, save_cs->csname, sizeof(tmp)-4), "_bin", 4); my_error(ER_UNKNOWN_COLLATION, MYF(0), tmp); DBUG_RETURN(-1); } @@ -538,6 +544,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (sql_field->sql_type == FIELD_TYPE_SET) { + uint32 field_length; if (sql_field->def) { char *not_used; @@ -553,11 +560,12 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, DBUG_RETURN(-1); } } - calculate_interval_lengths(cs, interval, &dummy, &sql_field->length); - sql_field->length+= (interval->count - 1); + calculate_interval_lengths(cs, interval, &dummy, &field_length); + sql_field->length= field_length + (interval->count - 1); } else /* FIELD_TYPE_ENUM */ { + uint32 field_length; if (sql_field->def) { String str, *def= sql_field->def->val_str(&str); @@ -568,23 +576,51 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, DBUG_RETURN(-1); } } - calculate_interval_lengths(cs, interval, &sql_field->length, &dummy); + calculate_interval_lengths(cs, interval, &field_length, &dummy); + sql_field->length= field_length; } set_if_smaller(sql_field->length, MAX_FIELD_WIDTH-1); } sql_field->create_length_to_internal_length(); - - /* Don't pack keys in old tables if the user has requested this */ - if ((sql_field->flags & BLOB_FLAG) || - sql_field->sql_type == FIELD_TYPE_VAR_STRING && - create_info->row_type != ROW_TYPE_FIXED) + if (sql_field->length > MAX_FIELD_VARCHARLENGTH && + !(sql_field->flags & BLOB_FLAG)) { - db_options|=HA_OPTION_PACK_RECORD; + /* Convert long VARCHAR columns to TEXT or BLOB */ + char warn_buff[MYSQL_ERRMSG_SIZE]; + + if (sql_field->def) + { + my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), sql_field->field_name, + MAX_FIELD_VARCHARLENGTH / sql_field->charset->mbmaxlen); + DBUG_RETURN(-1); + } + sql_field->sql_type= FIELD_TYPE_BLOB; + sql_field->flags|= BLOB_FLAG; + sprintf(warn_buff, ER(ER_AUTO_CONVERT), sql_field->field_name, + "VARCHAR", + (sql_field->charset == &my_charset_bin) ? 
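+              /* binary charset converts to BLOB, character charsets to TEXT */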
"BLOB" : "TEXT"); + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_AUTO_CONVERT, + warn_buff); + } + + if ((sql_field->flags & BLOB_FLAG) && sql_field->length) + { + if (sql_field->sql_type == FIELD_TYPE_BLOB) + { + /* The user has given a length to the blob column */ + sql_field->sql_type= get_blob_type_from_length(sql_field->length); + sql_field->pack_length= calc_pack_length(sql_field->sql_type, 0); + } + sql_field->length= 0; // Probably from an item } + if (!(sql_field->flags & NOT_NULL_FLAG)) null_fields++; + if (sql_field->sql_type == FIELD_TYPE_BIT) + total_uneven_bit_length+= sql_field->length & 7; + if (check_column_name(sql_field->field_name)) { my_error(ER_WRONG_COLUMN_NAME, MYF(0), sql_field->field_name); @@ -604,7 +640,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, */ if (field_no < select_field_pos || dup_no >= select_field_pos) { - my_error(ER_DUP_FIELDNAME,MYF(0),sql_field->field_name); + my_error(ER_DUP_FIELDNAME, MYF(0), sql_field->field_name); DBUG_RETURN(-1); } else @@ -616,6 +652,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, create_info->default_table_charset); sql_field->length= dup_field->length; sql_field->pack_length= dup_field->pack_length; + sql_field->key_length= dup_field->key_length; sql_field->create_length_to_internal_length(); sql_field->decimals= dup_field->decimals; sql_field->flags= dup_field->flags; @@ -626,12 +663,17 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } } } + /* Don't pack rows in old tables if the user has requested this */ + if ((sql_field->flags & BLOB_FLAG) || + sql_field->sql_type == MYSQL_TYPE_VARCHAR && + create_info->row_type != ROW_TYPE_FIXED) + db_options|= HA_OPTION_PACK_RECORD; it2.rewind(); } /* If fixed row records, we need one bit to check for deleted rows */ if (!(db_options & HA_OPTION_PACK_RECORD)) null_fields++; - pos=(null_fields+7)/8; + pos= (null_fields + total_uneven_bit_length + 7) / 8; it.rewind(); while ((sql_field=it++)) @@ -651,13 +693,13 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, sql_field->length=8; // Unireg field length sql_field->unireg_check=Field::BLOB_FIELD; blob_columns++; + create_info->varchar= 1; break; case FIELD_TYPE_GEOMETRY: #ifdef HAVE_SPATIAL if (!(file->table_flags() & HA_CAN_GEOMETRY)) { - my_printf_error(ER_CHECK_NOT_IMPLEMENTED, ER(ER_CHECK_NOT_IMPLEMENTED), - MYF(0), "GEOMETRY"); + my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "GEOMETRY"); DBUG_RETURN(-1); } sql_field->pack_flag=FIELDFLAG_GEOM | @@ -668,17 +710,37 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, sql_field->length=8; // Unireg field length sql_field->unireg_check=Field::BLOB_FIELD; blob_columns++; + create_info->varchar= 1; break; #else - my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED), MYF(0), - sym_group_geom.name, sym_group_geom.needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); DBUG_RETURN(-1); #endif /*HAVE_SPATIAL*/ - case FIELD_TYPE_VAR_STRING: - case FIELD_TYPE_STRING: + case MYSQL_TYPE_VARCHAR: +#ifndef QQ_ALL_HANDLERS_SUPPORT_VARCHAR + if (file->table_flags() & HA_NO_VARCHAR) + { + /* convert VARCHAR to CHAR because handler is not yet up to date */ + sql_field->sql_type= MYSQL_TYPE_VAR_STRING; + sql_field->pack_length= calc_pack_length(sql_field->sql_type, + (uint) sql_field->length); + if ((sql_field->length / sql_field->charset->mbmaxlen) > + MAX_FIELD_CHARLENGTH) + { + my_printf_error(ER_TOO_BIG_FIELDLENGTH, ER(ER_TOO_BIG_FIELDLENGTH), + 
MYF(0), sql_field->field_name, MAX_FIELD_CHARLENGTH); + DBUG_RETURN(-1); + } + } + else +#endif + create_info->varchar= 1; + /* fall through */ + case MYSQL_TYPE_STRING: sql_field->pack_flag=0; if (sql_field->charset->state & MY_CS_BINSORT) - sql_field->pack_flag|=FIELDFLAG_BINARY; + sql_field->pack_flag|= FIELDFLAG_BINARY; break; case FIELD_TYPE_ENUM: sql_field->pack_flag=pack_length_to_packflag(sql_field->pack_length) | @@ -707,6 +769,14 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, case FIELD_TYPE_NULL: sql_field->pack_flag=f_settype((uint) sql_field->sql_type); break; + case FIELD_TYPE_BIT: + if (!(file->table_flags() & HA_CAN_BIT_FIELD)) + { + my_error(ER_CHECK_NOT_IMPLEMENTED, MYF(0), "BIT FIELD"); + DBUG_RETURN(-1); + } + sql_field->pack_flag= FIELDFLAG_NUMBER; + break; case FIELD_TYPE_TIMESTAMP: /* We should replace old TIMESTAMP fields with their newer analogs */ if (sql_field->unireg_check == Field::TIMESTAMP_OLD_FIELD) @@ -735,7 +805,9 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, break; } if (!(sql_field->flags & NOT_NULL_FLAG)) - sql_field->pack_flag|=FIELDFLAG_MAYBE_NULL; + sql_field->pack_flag|= FIELDFLAG_MAYBE_NULL; + if (sql_field->flags & NO_DEFAULT_VALUE_FLAG) + sql_field->pack_flag|= FIELDFLAG_NO_DEFAULT; sql_field->offset= pos; if (MTYP_TYPENR(sql_field->unireg_check) == Field::NEXT_NUMBER) auto_increment++; @@ -743,24 +815,27 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } if (timestamps_with_niladic > 1) { - my_error(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS,MYF(0)); + my_message(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS, + ER(ER_TOO_MUCH_AUTO_TIMESTAMP_COLS), MYF(0)); DBUG_RETURN(-1); } if (auto_increment > 1) { - my_error(ER_WRONG_AUTO_KEY,MYF(0)); + my_message(ER_WRONG_AUTO_KEY, ER(ER_WRONG_AUTO_KEY), MYF(0)); DBUG_RETURN(-1); } if (auto_increment && (file->table_flags() & HA_NO_AUTO_INCREMENT)) { - my_error(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT,MYF(0)); + my_message(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT, + ER(ER_TABLE_CANT_HANDLE_AUTO_INCREMENT), MYF(0)); DBUG_RETURN(-1); } if (blob_columns && (file->table_flags() & HA_NO_BLOBS)) { - my_error(ER_TABLE_CANT_HANDLE_BLOB,MYF(0)); + my_message(ER_TABLE_CANT_HANDLE_BLOB, ER(ER_TABLE_CANT_HANDLE_BLOB), + MYF(0)); DBUG_RETURN(-1); } @@ -786,9 +861,9 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (fk_key->ref_columns.elements && fk_key->ref_columns.elements != fk_key->columns.elements) { - my_error(ER_WRONG_FK_DEF, MYF(0), fk_key->name ? fk_key->name : - "foreign key without name", - ER(ER_KEY_REF_DO_NOT_MATCH_TABLE_REF)); + my_error(ER_WRONG_FK_DEF, MYF(0), + (fk_key->name ? 
fk_key->name : "foreign key without name"), + ER(ER_KEY_REF_DO_NOT_MATCH_TABLE_REF)); DBUG_RETURN(-1); } continue; @@ -887,8 +962,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, key_info->flags= HA_SPATIAL; break; #else - my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED),MYF(0), - sym_group_geom.name, sym_group_geom.needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); DBUG_RETURN(-1); #endif case Key::FOREIGN_KEY: @@ -910,7 +985,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { if (!(file->table_flags() & HA_CAN_FULLTEXT)) { - my_error(ER_TABLE_CANT_HANDLE_FT, MYF(0)); + my_message(ER_TABLE_CANT_HANDLE_FT, ER(ER_TABLE_CANT_HANDLE_FT), + MYF(0)); DBUG_RETURN(-1); } } @@ -927,8 +1003,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { if (key_info->key_parts != 1) { - my_printf_error(ER_WRONG_ARGUMENTS, - ER(ER_WRONG_ARGUMENTS),MYF(0),"SPATIAL INDEX"); + my_error(ER_WRONG_ARGUMENTS, MYF(0), "SPATIAL INDEX"); DBUG_RETURN(-1); } } @@ -937,17 +1012,15 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, #ifdef HAVE_RTREE_KEYS if ((key_info->key_parts & 1) == 1) { - my_printf_error(ER_WRONG_ARGUMENTS, - ER(ER_WRONG_ARGUMENTS),MYF(0),"RTREE INDEX"); + my_error(ER_WRONG_ARGUMENTS, MYF(0), "RTREE INDEX"); DBUG_RETURN(-1); } /* TODO: To be deleted */ - my_printf_error(ER_NOT_SUPPORTED_YET, ER(ER_NOT_SUPPORTED_YET), - MYF(0), "RTREE INDEX"); + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "RTREE INDEX"); DBUG_RETURN(-1); #else - my_printf_error(ER_FEATURE_DISABLED,ER(ER_FEATURE_DISABLED),MYF(0), - sym_group_rtree.name, sym_group_rtree.needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_rtree.name, sym_group_rtree.needed_define); DBUG_RETURN(-1); #endif } @@ -956,6 +1029,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, CHARSET_INFO *ft_key_charset=0; // for FULLTEXT for (uint column_nr=0 ; (column=cols++) ; column_nr++) { + uint length; key_part_spec *dup_column; it.rewind(); @@ -967,9 +1041,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, field++; if (!sql_field) { - my_printf_error(ER_KEY_COLUMN_DOES_NOT_EXITS, - ER(ER_KEY_COLUMN_DOES_NOT_EXITS),MYF(0), - column->field_name); + my_error(ER_KEY_COLUMN_DOES_NOT_EXITS, MYF(0), column->field_name); DBUG_RETURN(-1); } while ((dup_column= cols2++) != column) @@ -986,15 +1058,14 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, cols2.rewind(); if (key->type == Key::FULLTEXT) { - if ((sql_field->sql_type != FIELD_TYPE_STRING && - sql_field->sql_type != FIELD_TYPE_VAR_STRING && + if ((sql_field->sql_type != MYSQL_TYPE_STRING && + sql_field->sql_type != MYSQL_TYPE_VARCHAR && !f_is_blob(sql_field->pack_flag)) || sql_field->charset == &my_charset_bin || sql_field->charset->mbminlen > 1 || // ucs2 doesn't work yet (ft_key_charset && sql_field->charset != ft_key_charset)) { - my_printf_error(ER_BAD_FT_COLUMN,ER(ER_BAD_FT_COLUMN),MYF(0), - column->field_name); + my_error(ER_BAD_FT_COLUMN, MYF(0), column->field_name); DBUG_RETURN(-1); } ft_key_charset=sql_field->charset; @@ -1015,28 +1086,25 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { if (!(file->table_flags() & HA_CAN_INDEX_BLOBS)) { - my_printf_error(ER_BLOB_USED_AS_KEY,ER(ER_BLOB_USED_AS_KEY),MYF(0), - column->field_name); + my_error(ER_BLOB_USED_AS_KEY, MYF(0), column->field_name); DBUG_RETURN(-1); } if (!column->length) { - my_printf_error(ER_BLOB_KEY_WITHOUT_LENGTH, - 
ER(ER_BLOB_KEY_WITHOUT_LENGTH),MYF(0), - column->field_name); + my_error(ER_BLOB_KEY_WITHOUT_LENGTH, MYF(0), column->field_name); DBUG_RETURN(-1); } } #ifdef HAVE_SPATIAL - if (key->type == Key::SPATIAL) + if (key->type == Key::SPATIAL) { - if (!column->length ) + if (!column->length) { /* - BAR: 4 is: (Xmin,Xmax,Ymin,Ymax), this is for 2D case - Lately we'll extend this code to support more dimensions + 4 is: (Xmin,Xmax,Ymin,Ymax), this is for 2D case + Lately we'll extend this code to support more dimensions */ - column->length=4*sizeof(double); + column->length= 4*sizeof(double); } } #endif @@ -1052,13 +1120,13 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, key_info->flags|= HA_NULL_PART_KEY; if (!(file->table_flags() & HA_NULL_IN_KEY)) { - my_printf_error(ER_NULL_COLUMN_IN_INDEX,ER(ER_NULL_COLUMN_IN_INDEX), - MYF(0),column->field_name); + my_error(ER_NULL_COLUMN_IN_INDEX, MYF(0), column->field_name); DBUG_RETURN(-1); } if (key->type == Key::SPATIAL) { - my_error(ER_SPATIAL_CANT_HAVE_NULL, MYF(0)); + my_message(ER_SPATIAL_CANT_HAVE_NULL, + ER(ER_SPATIAL_CANT_HAVE_NULL), MYF(0)); DBUG_RETURN(-1); } } @@ -1072,7 +1140,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, key_part_info->fieldnr= field; key_part_info->offset= (uint16) sql_field->offset; key_part_info->key_type=sql_field->pack_flag; - uint length=sql_field->pack_length; + length= sql_field->key_length; + if (column->length) { if (f_is_blob(sql_field->pack_flag)) @@ -1104,7 +1173,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, (key_info->flags & HA_NOSAME))) && column->length != length))) { - my_error(ER_WRONG_SUB_KEY,MYF(0)); + my_message(ER_WRONG_SUB_KEY, ER(ER_WRONG_SUB_KEY), MYF(0)); DBUG_RETURN(-1); } else if (!(file->table_flags() & HA_NO_PREFIX_CHAR_KEYS)) @@ -1112,8 +1181,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, } else if (length == 0) { - my_printf_error(ER_WRONG_KEY_COLUMN, ER(ER_WRONG_KEY_COLUMN), MYF(0), - column->field_name); + my_error(ER_WRONG_KEY_COLUMN, MYF(0), column->field_name); DBUG_RETURN(-1); } if (length > file->max_key_part_length()) @@ -1138,12 +1206,13 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, /* Use packed keys for long strings on the first column */ if (!(db_options & HA_OPTION_NO_PACK_KEYS) && (length >= KEY_DEFAULT_PACK_LENGTH && - (sql_field->sql_type == FIELD_TYPE_STRING || - sql_field->sql_type == FIELD_TYPE_VAR_STRING || + (sql_field->sql_type == MYSQL_TYPE_STRING || + sql_field->sql_type == MYSQL_TYPE_VARCHAR || sql_field->pack_flag & FIELDFLAG_BLOB))) { - if (column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB)) - key_info->flags|= HA_BINARY_PACK_KEY; + if (column_nr == 0 && (sql_field->pack_flag & FIELDFLAG_BLOB) || + sql_field->sql_type == MYSQL_TYPE_VARCHAR) + key_info->flags|= HA_BINARY_PACK_KEY | HA_VAR_LENGTH_KEY; else key_info->flags|= HA_PACK_KEY; } @@ -1157,7 +1226,8 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, { if (primary_key) { - my_error(ER_MULTIPLE_PRI_KEY,MYF(0)); + my_message(ER_MULTIPLE_PRI_KEY, ER(ER_MULTIPLE_PRI_KEY), + MYF(0)); DBUG_RETURN(-1); } key_name=primary_key_name; @@ -1168,7 +1238,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, key_info_buffer,key_info); if (check_if_keyname_exists(key_name,key_info_buffer,key_info)) { - my_error(ER_DUP_KEYNAME,MYF(0),key_name); + my_error(ER_DUP_KEYNAME, MYF(0), key_name); DBUG_RETURN(-1); } key_info->name=(char*) key_name; @@ -1182,7 +1252,6 @@ int mysql_prepare_table(THD *thd, 
HA_CREATE_INFO *create_info, if (!(key_info->flags & HA_NULL_PART_KEY)) unique_key=1; key_info->key_length=(uint16) key_length; - uint max_key_length= file->max_key_length(); if (key_length > max_key_length && key->type != Key::FULLTEXT) { my_error(ER_TOO_LONG_KEY,MYF(0),max_key_length); @@ -1193,12 +1262,12 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, if (!unique_key && !primary_key && (file->table_flags() & HA_REQUIRE_PRIMARY_KEY)) { - my_error(ER_REQUIRES_PRIMARY_KEY,MYF(0)); + my_message(ER_REQUIRES_PRIMARY_KEY, ER(ER_REQUIRES_PRIMARY_KEY), MYF(0)); DBUG_RETURN(-1); } if (auto_increment > 0) { - my_error(ER_WRONG_AUTO_KEY,MYF(0)); + my_message(ER_WRONG_AUTO_KEY, ER(ER_WRONG_AUTO_KEY), MYF(0)); DBUG_RETURN(-1); } /* Sort keys in optimized order */ @@ -1224,7 +1293,7 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, (From ALTER TABLE) DESCRIPTION - If one creates a temporary table, this is automaticly opened + If one creates a temporary table, this is automatically opened no_log is needed for the case of CREATE ... SELECT, as the logging will be done later in sql_insert.cc @@ -1232,30 +1301,31 @@ int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, and must be zero for standard create of table. RETURN VALUES - 0 ok - -1 error + FALSE OK + TRUE error */ -int mysql_create_table(THD *thd,const char *db, const char *table_name, - HA_CREATE_INFO *create_info, - List<create_field> &fields, - List<Key> &keys,bool tmp_table, - uint select_field_count) +bool mysql_create_table(THD *thd,const char *db, const char *table_name, + HA_CREATE_INFO *create_info, + List<create_field> &fields, + List<Key> &keys,bool tmp_table, + uint select_field_count) { char path[FN_REFLEN]; const char *alias; - int error= -1; uint db_options, key_count; KEY *key_info_buffer; handler *file; + bool error= TRUE; enum db_type new_db_type; DBUG_ENTER("mysql_create_table"); /* Check for duplicate fields and check type of table to create */ if (!fields.elements) { - my_error(ER_TABLE_MUST_HAVE_COLUMNS,MYF(0)); - DBUG_RETURN(-1); + my_message(ER_TABLE_MUST_HAVE_COLUMNS, ER(ER_TABLE_MUST_HAVE_COLUMNS), + MYF(0)); + DBUG_RETURN(TRUE); } if ((new_db_type= ha_checktype(create_info->db_type)) != create_info->db_type) @@ -1284,8 +1354,8 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) && (file->table_flags() & HA_NO_TEMP_TABLES)) { - my_error(ER_ILLEGAL_HA,MYF(0),table_name); - DBUG_RETURN(-1); + my_error(ER_ILLEGAL_HA, MYF(0), table_name); + DBUG_RETURN(TRUE); } #endif @@ -1310,7 +1380,7 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, keys, tmp_table, db_options, file, key_info_buffer, &key_count, select_field_count)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); /* Check if table exists */ if (create_info->options & HA_LEX_CREATE_TMP_TABLE) @@ -1333,10 +1403,10 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS) { create_info->table_existed= 1; // Mark that table existed - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } if (wait_if_global_read_lock(thd, 0, 1)) DBUG_RETURN(error); @@ -1348,10 +1418,10 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, if (create_info->options & HA_LEX_CREATE_IF_NOT_EXISTS) { create_info->table_existed= 1; // Mark that table existed - error= 0; + error= FALSE; } else - 
my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name); goto end; } } @@ -1377,10 +1447,10 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, if (create_if_not_exists) { create_info->table_existed= 1; // Mark that table existed - error= 0; + error= FALSE; } else - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),table_name); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name); goto end; } } @@ -1408,21 +1478,17 @@ int mysql_create_table(THD *thd,const char *db, const char *table_name, } thd->tmp_table_used= 1; } - if (!tmp_table) + if (!tmp_table && mysql_bin_log.is_open()) { - // Must be written before unlock - mysql_update_log.write(thd,thd->query, thd->query_length); - if (mysql_bin_log.is_open()) - { - thd->clear_error(); - Query_log_event qinfo(thd, thd->query, thd->query_length, - test(create_info->options & - HA_LEX_CREATE_TMP_TABLE), - FALSE); - mysql_bin_log.write(&qinfo); - } + thd->clear_error(); + Query_log_event qinfo(thd, thd->query, thd->query_length, + test(create_info->options & + HA_LEX_CREATE_TMP_TABLE), + FALSE); + mysql_bin_log.write(&qinfo); } - error=0; + error= FALSE; + end: VOID(pthread_mutex_unlock(&LOCK_open)); start_waiting_global_read_lock(thd); @@ -1474,7 +1540,7 @@ make_unique_key_name(const char *field_name,KEY *start,KEY *end) ****************************************************************************/ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, - const char *db, const char *name, + TABLE_LIST *create_table, List<create_field> *extra_fields, List<Key> *keys, List<Item> *items, @@ -1504,7 +1570,7 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, field=item->tmp_table_field(&tmp_table); else field=create_tmp_field(thd, &tmp_table, item, item->type(), - (Item ***) 0, &tmp_field, 0, 0, 0); + (Item ***) 0, &tmp_field,0,0,0); if (!field || !(cr_field=new create_field(field,(item->type() == Item::FIELD_ITEM ? ((Item_field *)item)->field : @@ -1512,34 +1578,42 @@ TABLE *create_table_from_items(THD *thd, HA_CREATE_INFO *create_info, DBUG_RETURN(0); extra_fields->push_back(cr_field); } - /* create and lock table */ - /* QQ: create and open should be done atomic ! */ /* + create and lock table + We don't log the statement, it will be logged later. + If this is a HEAP table, the automatic DELETE FROM which is written to the binlog when a HEAP table is opened for the first time since startup, must not be written: 1) it would be wrong (imagine we're in CREATE SELECT: we don't want to delete from it) 2) it would be written before the CREATE TABLE, which is a wrong order. So we keep binary logging disabled when we open_table(). + TODO: create and open should be done atomic ! 
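The binlog suppression described above is implemented by the tmp_disable_binlog()/reenable_binlog() pair used just below. The macros themselves are not shown in this diff; conceptually they behave like the following sketch (OPTION_BIN_LOG and thd->options are real server symbols, but the exact macro bodies here are an assumption):

    /* Sketch only -- not the actual sql_class.h definitions. */
    #define tmp_disable_binlog(A)                                      \
      { ulonglong tmp_disable_binlog__save_options= (A)->options;      \
        (A)->options&= ~OPTION_BIN_LOG
    #define reenable_binlog(A)                                         \
        (A)->options= tmp_disable_binlog__save_options; }

Everything executed between the two calls (here the table creation and the first open_table()) is therefore kept out of the binary log.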
*/ - tmp_disable_binlog(thd); - if (!mysql_create_table(thd,db,name,create_info,*extra_fields, - *keys,0,select_field_count)) { - if (!(table=open_table(thd,db,name,name,(bool*) 0))) - quick_rm_table(create_info->db_type,db,table_case_name(create_info,name)); + tmp_disable_binlog(thd); + if (!mysql_create_table(thd, create_table->db, create_table->real_name, + create_info, *extra_fields, *keys, 0, + select_field_count)) + { + if (!(table= open_table(thd, create_table, thd->mem_root, (bool*) 0))) + quick_rm_table(create_info->db_type, create_table->db, + table_case_name(create_info, create_table->real_name)); + } + reenable_binlog(thd); + if (!table) // open failed + DBUG_RETURN(0); } - reenable_binlog(thd); - if (!table) - DBUG_RETURN(0); + table->reginfo.lock_type=TL_WRITE; - if (!((*lock)=mysql_lock_tables(thd,&table,1))) + if (!((*lock)= mysql_lock_tables(thd, &table,1))) { VOID(pthread_mutex_lock(&LOCK_open)); hash_delete(&open_cache,(byte*) table); VOID(pthread_mutex_unlock(&LOCK_open)); - quick_rm_table(create_info->db_type,db,table_case_name(create_info, name)); + quick_rm_table(create_info->db_type, create_table->db, + table_case_name(create_info, create_table->real_name)); DBUG_RETURN(0); } table->file->extra(HA_EXTRA_WRITE_CACHE); @@ -1560,11 +1634,12 @@ mysql_rename_table(enum db_type base, { char from[FN_REFLEN], to[FN_REFLEN]; char tmp_from[NAME_LEN+1], tmp_to[NAME_LEN+1]; - handler *file=get_new_handler((TABLE*) 0, base); + handler *file=(base == DB_TYPE_UNKNOWN ? 0 : get_new_handler((TABLE*) 0, base)); int error=0; DBUG_ENTER("mysql_rename_table"); - if (lower_case_table_names == 2 && !(file->table_flags() & HA_FILE_BASED)) + if (lower_case_table_names == 2 && file && + !(file->table_flags() & HA_FILE_BASED)) { /* Table handler expects to get all file names as lower case */ strmov(tmp_from, old_name); @@ -1582,13 +1657,15 @@ mysql_rename_table(enum db_type base, fn_format(from,from,"","",4); fn_format(to,to, "","",4); - if (!(error=file->rename_table((const char*) from,(const char *) to))) + if (!file || + !(error=file->rename_table((const char*) from,(const char *) to))) { if (rename_file_ext(from,to,reg_ext)) { error=my_errno; /* Restore old file name */ - file->rename_table((const char*) to,(const char *) from); + if (file) + file->rename_table((const char*) to,(const char *) from); } } delete file; @@ -1655,7 +1732,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table, Win32 clients must also have a WRITE LOCK on the table ! 
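For reference, the calling pattern for close_cached_table() after this change (it now returns void) is the one used later in this patch in sql_trigger.cc; in outline only, not as new server code:

    /* caller already holds a write lock on the table */
    VOID(pthread_mutex_lock(&LOCK_open));
    /* ... change the table's on-disk definition ... */
    close_cached_table(thd, table);       /* invalidate all cached instances */
    VOID(pthread_mutex_unlock(&LOCK_open));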
*/ -static bool close_cached_table(THD *thd, TABLE *table) +void close_cached_table(THD *thd, TABLE *table) { DBUG_ENTER("close_cached_table"); @@ -1671,7 +1748,7 @@ static bool close_cached_table(THD *thd, TABLE *table) /* When lock on LOCK_open is freed other threads can continue */ pthread_cond_broadcast(&COND_refresh); - DBUG_RETURN(0); + DBUG_VOID_RETURN; } static int send_check_errmsg(THD *thd, TABLE_LIST* table, @@ -1684,7 +1761,7 @@ static int send_check_errmsg(THD *thd, TABLE_LIST* table, protocol->store((char*) operator_name, system_charset_info); protocol->store("error", 5, system_charset_info); protocol->store(errmsg, system_charset_info); - thd->net.last_error[0]=0; + thd->clear_error(); if (protocol->write()) return -1; return 1; @@ -1768,7 +1845,7 @@ static int prepare_for_repair(THD* thd, TABLE_LIST *table_list, char name[FN_REFLEN]; strxmov(name, mysql_data_home, "/", table_list->db, "/", table_list->real_name, NullS); - if (openfrm(name, "", 0, 0, 0, &tmp_table)) + if (openfrm(thd, name, "", 0, 0, 0, &tmp_table)) DBUG_RETURN(0); // Can't open frm file table= &tmp_table; } @@ -1861,25 +1938,27 @@ end: /* RETURN VALUES - 0 Message sent to net (admin operation went ok) - -1 Message should be sent by caller - (admin operation or network communication failed) + FALSE Message sent to net (admin operation went ok) + TRUE Message should be sent by caller + (admin operation or network communication failed) */ -static int mysql_admin_table(THD* thd, TABLE_LIST* tables, - HA_CHECK_OPT* check_opt, - const char *operator_name, - thr_lock_type lock_type, - bool open_for_modify, - uint extra_open_options, - int (*prepare_func)(THD *, TABLE_LIST *, - HA_CHECK_OPT *), - int (handler::*operator_func) - (THD *, HA_CHECK_OPT *)) +static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, + HA_CHECK_OPT* check_opt, + const char *operator_name, + thr_lock_type lock_type, + bool open_for_modify, + uint extra_open_options, + int (*prepare_func)(THD *, TABLE_LIST *, + HA_CHECK_OPT *), + int (handler::*operator_func)(THD *, + HA_CHECK_OPT *), + int (view_operator_func)(THD *, TABLE_LIST*)) { - TABLE_LIST *table; + TABLE_LIST *table, *next_global_table; List<Item> field_list; Item *item; Protocol *protocol= thd->protocol; + int result_code; DBUG_ENTER("mysql_admin_table"); field_list.push_back(item = new Item_empty_string("Table", NAME_LEN*2)); @@ -1890,11 +1969,12 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, item->maybe_null = 1; field_list.push_back(item = new Item_empty_string("Msg_text", 255)); item->maybe_null = 1; - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); mysql_ha_flush(thd, tables, MYSQL_HA_CLOSE_FINAL); - for (table = tables; table; table = table->next) + for (table= tables; table; table= table->next_local) { char table_name[NAME_LEN*2+2]; char* db = (table->db) ? table->db : thd->db; @@ -1902,10 +1982,18 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, strxmov(table_name,db ? 
db : "",".",table->real_name,NullS); thd->open_options|= extra_open_options; - table->table = open_ltable(thd, table, lock_type); -#ifdef EMBEDDED_LIBRARY - thd->net.last_errno= 0; // these errors shouldn't get client -#endif + table->lock_type= lock_type; + /* open only one table from local list of command */ + next_global_table= table->next_global; + table->next_global= 0; + open_and_lock_tables(thd, table); + table->next_global= next_global_table; + /* if view are unsupported */ + if (table->view && !view_operator_func) + { + result_code= HA_ADMIN_NOT_IMPLEMENTED; + goto send_result; + } thd->open_options&= ~extra_open_options; if (prepare_func) @@ -1921,8 +2009,17 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, } } + /* + CHECK TABLE command is only command where VIEW allowed here and this + command use only temporary teble method for VIEWs resolving => there + can't be VIEW tree substitition of join view => if opening table + succeed then table->table will have real TABLE pointer as value (in + case of join view substitution table->table can be 0, but here it is + impossible) + */ if (!table->table) { + char buf[ERRMSGSIZE+ERRMSGSIZE+2]; const char *err_msg; protocol->prepare_for_resend(); protocol->store(table_name, system_charset_info); @@ -1930,12 +2027,26 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, protocol->store("error",5, system_charset_info); if (!(err_msg=thd->net.last_error)) err_msg=ER(ER_CHECK_NO_SUCH_TABLE); + /* if it was a view will check md5 sum */ + if (table->view && + view_checksum(thd, table) == HA_ADMIN_WRONG_CHECKSUM) + { + strxmov(buf, err_msg, "; ", ER(ER_VIEW_CHECKSUM), NullS); + err_msg= (const char *)buf; + } protocol->store(err_msg, system_charset_info); - thd->net.last_error[0]=0; + thd->clear_error(); if (protocol->write()) goto err; continue; } + + if (table->view) + { + result_code= (*view_operator_func)(thd, table); + goto send_result; + } + table->table->pos_in_table_list= table; if ((table->table->db_stat & HA_READ_ONLY) && open_for_modify) { @@ -1974,10 +2085,11 @@ static int mysql_admin_table(THD* thd, TABLE_LIST* tables, open_for_modify=0; } - int result_code = (table->table->file->*operator_func)(thd, check_opt); -#ifdef EMBEDDED_LIBRARY - thd->net.last_errno= 0; // these errors shouldn't get client -#endif + result_code = (table->table->file->*operator_func)(thd, check_opt); + +send_result: + + thd->clear_error(); // these errors shouldn't get client protocol->prepare_for_resend(); protocol->store(table_name, system_charset_info); protocol->store(operator_name, system_charset_info); @@ -2036,8 +2148,9 @@ send_result_message: reopen the table and do ha_innobase::analyze() on it. */ close_thread_tables(thd); - TABLE_LIST *save_next= table->next; - table->next= 0; + TABLE_LIST *save_next_local= table->next_local, + *save_next_global= table->next_global; + table->next_local= table->next_global= 0; result_code= mysql_recreate_table(thd, table, 0); close_thread_tables(thd); if (!result_code) // recreation went ok @@ -2047,9 +2160,17 @@ send_result_message: result_code= 0; // analyze went ok } result_code= result_code ? 
HA_ADMIN_FAILED : HA_ADMIN_OK; - table->next= save_next; + table->next_local= save_next_local; + table->next_global= save_next_global; goto send_result_message; } + case HA_ADMIN_WRONG_CHECKSUM: + { + protocol->store("note", 4, system_charset_info); + protocol->store(ER(ER_VIEW_CHECKSUM), strlen(ER(ER_VIEW_CHECKSUM)), + system_charset_info); + break; + } default: // Probably HA_ADMIN_INTERNAL_ERROR protocol->store("error", 5, system_charset_info); @@ -2076,50 +2197,50 @@ send_result_message: } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: close_thread_tables(thd); // Shouldn't be needed if (table) table->table=0; - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } -int mysql_backup_table(THD* thd, TABLE_LIST* table_list) +bool mysql_backup_table(THD* thd, TABLE_LIST* table_list) { DBUG_ENTER("mysql_backup_table"); DBUG_RETURN(mysql_admin_table(thd, table_list, 0, "backup", TL_READ, 0, 0, 0, - &handler::backup)); + &handler::backup, 0)); } -int mysql_restore_table(THD* thd, TABLE_LIST* table_list) +bool mysql_restore_table(THD* thd, TABLE_LIST* table_list) { DBUG_ENTER("mysql_restore_table"); DBUG_RETURN(mysql_admin_table(thd, table_list, 0, "restore", TL_WRITE, 1, 0, &prepare_for_restore, - &handler::restore)); + &handler::restore, 0)); } -int mysql_repair_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) +bool mysql_repair_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) { DBUG_ENTER("mysql_repair_table"); DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, "repair", TL_WRITE, 1, HA_OPEN_FOR_REPAIR, &prepare_for_repair, - &handler::repair)); + &handler::repair, 0)); } -int mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) +bool mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) { DBUG_ENTER("mysql_optimize_table"); DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, "optimize", TL_WRITE, 1,0,0, - &handler::optimize)); + &handler::optimize, 0)); } @@ -2132,11 +2253,11 @@ int mysql_optimize_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) tables Table list (one table only) RETURN VALUES - 0 ok - -1 error + FALSE ok + TRUE error */ -int mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables, +bool mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables, LEX_STRING *key_cache_name) { HA_CHECK_OPT check_opt; @@ -2149,13 +2270,13 @@ int mysql_assign_to_keycache(THD* thd, TABLE_LIST* tables, { pthread_mutex_unlock(&LOCK_global_system_variables); my_error(ER_UNKNOWN_KEY_CACHE, MYF(0), key_cache_name->str); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } pthread_mutex_unlock(&LOCK_global_system_variables); check_opt.key_cache= key_cache; DBUG_RETURN(mysql_admin_table(thd, tables, &check_opt, "assign_to_keycache", TL_READ_NO_INSERT, 0, - 0, 0, &handler::assign_to_keycache)); + 0, 0, &handler::assign_to_keycache, 0)); } @@ -2207,16 +2328,16 @@ int reassign_keycache_tables(THD *thd, KEY_CACHE *src_cache, tables Table list (one table only) RETURN VALUES - 0 ok - -1 error + FALSE ok + TRUE error */ -int mysql_preload_keys(THD* thd, TABLE_LIST* tables) +bool mysql_preload_keys(THD* thd, TABLE_LIST* tables) { DBUG_ENTER("mysql_preload_keys"); DBUG_RETURN(mysql_admin_table(thd, tables, 0, "preload_keys", TL_READ, 0, 0, 0, - &handler::preload_keys)); + &handler::preload_keys, 0)); } @@ -2231,13 +2352,13 @@ int mysql_preload_keys(THD* thd, TABLE_LIST* tables) table_ident Src table_ident RETURN VALUES - 0 ok - -1 error + FALSE OK + TRUE error */ -int mysql_create_like_table(THD* thd, TABLE_LIST* table, - HA_CREATE_INFO 
*create_info, - Table_ident *table_ident) +bool mysql_create_like_table(THD* thd, TABLE_LIST* table, + HA_CREATE_INFO *create_info, + Table_ident *table_ident) { TABLE **tmp_table; char src_path[FN_REFLEN], dst_path[FN_REFLEN]; @@ -2245,7 +2366,8 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, char *table_name= table->real_name; char *src_db= thd->db; char *src_table= table_ident->table.str; - int err, res= -1; + int err; + bool res= TRUE; TABLE_LIST src_tables_list; DBUG_ENTER("mysql_create_like_table"); @@ -2258,12 +2380,12 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, table_ident->db.str && check_db_name((src_db= table_ident->db.str))) { my_error(ER_WRONG_TABLE_NAME, MYF(0), src_table); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } + bzero((gptr)&src_tables_list, sizeof(src_tables_list)); src_tables_list.db= table_ident->db.str ? table_ident->db.str : thd->db; src_tables_list.real_name= table_ident->table.str; - src_tables_list.next= 0; if (lock_and_wait_for_table_name(thd, &src_tables_list)) goto err; @@ -2337,7 +2459,6 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, } // Must be written before unlock - mysql_update_log.write(thd,thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -2347,7 +2468,7 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, FALSE); mysql_bin_log.write(&qinfo); } - res= 0; + res= FALSE; goto err; table_exists: @@ -2356,9 +2477,9 @@ table_exists: char warn_buff[MYSQL_ERRMSG_SIZE]; my_snprintf(warn_buff, sizeof(warn_buff), ER(ER_TABLE_EXISTS_ERROR), table_name); - push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, ER_TABLE_EXISTS_ERROR,warn_buff); - res= 0; + res= FALSE; } else my_error(ER_TABLE_EXISTS_ERROR, MYF(0), table_name); @@ -2371,7 +2492,7 @@ err: } -int mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) +bool mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) { #ifdef OS2 thr_lock_type lock_type = TL_WRITE; @@ -2382,11 +2503,11 @@ int mysql_analyze_table(THD* thd, TABLE_LIST* tables, HA_CHECK_OPT* check_opt) DBUG_ENTER("mysql_analyze_table"); DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, "analyze", lock_type, 1,0,0, - &handler::analyze)); + &handler::analyze, 0)); } -int mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt) +bool mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt) { #ifdef OS2 thr_lock_type lock_type = TL_WRITE; @@ -2398,7 +2519,7 @@ int mysql_check_table(THD* thd, TABLE_LIST* tables,HA_CHECK_OPT* check_opt) DBUG_RETURN(mysql_admin_table(thd, tables, check_opt, "check", lock_type, 0, HA_OPEN_FOR_REPAIR, 0, - &handler::check)); + &handler::check, &view_checksum)); } @@ -2452,7 +2573,6 @@ mysql_discard_or_import_tablespace(THD *thd, error=1; if (error) goto err; - mysql_update_log.write(thd, thd->query,thd->query_length); if (mysql_bin_log.is_open()) { Query_log_event qinfo(thd, thd->query, thd->query_length, 0, FALSE); @@ -2682,15 +2802,15 @@ int mysql_drop_indexes(THD *thd, TABLE_LIST *table_list, Alter table */ -int mysql_alter_table(THD *thd,char *new_db, char *new_name, - HA_CREATE_INFO *create_info, - TABLE_LIST *table_list, - List<create_field> &fields, List<Key> &keys, - uint order_num, ORDER *order, - enum enum_duplicates handle_duplicates, bool ignore, - ALTER_INFO *alter_info, bool do_send_ok) +bool mysql_alter_table(THD *thd,char *new_db, char *new_name, + HA_CREATE_INFO *create_info, + TABLE_LIST *table_list, + 
List<create_field> &fields, List<Key> &keys, + uint order_num, ORDER *order, + enum enum_duplicates handle_duplicates, bool ignore, + ALTER_INFO *alter_info, bool do_send_ok) { - TABLE *table,*new_table; + TABLE *table,*new_table=0; int error; char tmp_name[80],old_name[32],new_name_buff[FN_REFLEN]; char new_alias_buff[FN_REFLEN], *table_name, *db, *new_alias, *alias; @@ -2699,6 +2819,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, ulonglong next_insert_id; uint db_create_options, used_fields; enum db_type old_db_type,new_db_type; + bool need_copy_table; DBUG_ENTER("mysql_alter_table"); thd->proc_info="init"; @@ -2716,7 +2837,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, DBUG_RETURN(mysql_discard_or_import_tablespace(thd,table_list, alter_info->tablespace_op)); if (!(table=open_ltable(thd,table_list,TL_WRITE_ALLOW_READ))) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); /* Check that we are not trying to rename to an existing table */ if (new_name) @@ -2747,8 +2868,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { if (find_temporary_table(thd,new_db,new_name_buff)) { - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_name_buff); - DBUG_RETURN(-1); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name_buff); + DBUG_RETURN(TRUE); } } else @@ -2759,8 +2880,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, F_OK)) { /* Table will be closed in do_command() */ - my_error(ER_TABLE_EXISTS_ERROR,MYF(0), new_alias); - DBUG_RETURN(-1); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_alias); + DBUG_RETURN(TRUE); } } } @@ -2788,7 +2909,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, create_info->row_type=table->row_type; thd->proc_info="setup"; - if (alter_info->is_simple && !table->tmp_table) + if (!(alter_info->flags & ~(ALTER_RENAME | ALTER_KEYS_ONOFF)) && + !table->tmp_table) // no need to touch frm { error=0; if (new_name != table_name || new_db != db) @@ -2799,7 +2921,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, error=0; if (!access(new_name_buff,F_OK)) { - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_name); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name); error= -1; } else @@ -2816,23 +2938,24 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { switch (alter_info->keys_onoff) { case LEAVE_AS_IS: - break; + break; case ENABLE: - VOID(pthread_mutex_lock(&LOCK_open)); - wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); - VOID(pthread_mutex_unlock(&LOCK_open)); - error= table->file->enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); - /* COND_refresh will be signaled in close_thread_tables() */ - break; + VOID(pthread_mutex_lock(&LOCK_open)); + wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); + VOID(pthread_mutex_unlock(&LOCK_open)); + error= table->file->enable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + /* COND_refresh will be signaled in close_thread_tables() */ + break; case DISABLE: - VOID(pthread_mutex_lock(&LOCK_open)); - wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); - VOID(pthread_mutex_unlock(&LOCK_open)); - error=table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); - /* COND_refresh will be signaled in close_thread_tables() */ - break; + VOID(pthread_mutex_lock(&LOCK_open)); + wait_while_table_is_used(thd, table, HA_EXTRA_FORCE_REOPEN); + VOID(pthread_mutex_unlock(&LOCK_open)); + error=table->file->disable_indexes(HA_KEY_SWITCH_NONUNIQ_SAVE); + /* COND_refresh will be signaled in close_thread_tables() */ + break; } } + if (error == HA_ERR_WRONG_COMMAND) { 
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, @@ -2842,7 +2965,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } if (!error) { - mysql_update_log.write(thd, thd->query, thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -2864,7 +2986,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, /* Full alter table */ - /* let new create options override the old ones */ + /* Let new create options override the old ones */ if (!(used_fields & HA_CREATE_USED_MIN_ROWS)) create_info->min_rows=table->min_rows; if (!(used_fields & HA_CREATE_USED_MAX_ROWS)) @@ -2943,8 +3065,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { if (def->sql_type == FIELD_TYPE_BLOB) { - my_error(ER_BLOB_CANT_HAVE_DEFAULT,MYF(0),def->change); - DBUG_RETURN(-1); + my_error(ER_BLOB_CANT_HAVE_DEFAULT, MYF(0), def->change); + DBUG_RETURN(TRUE); } def->def=alter->def; // Use new default alter_it.remove(); @@ -2957,8 +3079,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, { if (def->change && ! def->field) { - my_error(ER_BAD_FIELD_ERROR,MYF(0),def->change,table_name); - DBUG_RETURN(-1); + my_error(ER_BAD_FIELD_ERROR, MYF(0), def->change, table_name); + DBUG_RETURN(TRUE); } if (!def->after) create_list.push_back(def); @@ -2975,22 +3097,23 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } if (!find) { - my_error(ER_BAD_FIELD_ERROR,MYF(0),def->after,table_name); - DBUG_RETURN(-1); + my_error(ER_BAD_FIELD_ERROR, MYF(0), def->after, table_name); + DBUG_RETURN(TRUE); } find_it.after(def); // Put element after this } } if (alter_info->alter_list.elements) { - my_error(ER_BAD_FIELD_ERROR,MYF(0),alter_info->alter_list.head()->name, - table_name); - DBUG_RETURN(-1); + my_error(ER_BAD_FIELD_ERROR, MYF(0), + alter_info->alter_list.head()->name, table_name); + DBUG_RETURN(TRUE); } if (!create_list.elements) { - my_error(ER_CANT_REMOVE_ALL_FIELDS,MYF(0)); - DBUG_RETURN(-1); + my_message(ER_CANT_REMOVE_ALL_FIELDS, ER(ER_CANT_REMOVE_ALL_FIELDS), + MYF(0)); + DBUG_RETURN(TRUE); } /* @@ -3079,21 +3202,21 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, !my_strcasecmp(system_charset_info,key->name,primary_key_name)) { my_error(ER_WRONG_NAME_FOR_INDEX, MYF(0), key->name); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } } if (alter_info->drop_list.elements) { - my_error(ER_CANT_DROP_FIELD_OR_KEY,MYF(0), - alter_info->drop_list.head()->name); + my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), + alter_info->drop_list.head()->name); goto err; } if (alter_info->alter_list.elements) { - my_error(ER_CANT_DROP_FIELD_OR_KEY,MYF(0), - alter_info->alter_list.head()->name); + my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), + alter_info->alter_list.head()->name); goto err; } @@ -3125,6 +3248,16 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, create_info->options|=HA_LEX_CREATE_TMP_TABLE; /* + better have a negative test here, instead of positive, like + alter_info->flags & ALTER_ADD_COLUMN|ALTER_ADD_INDEX|... 
+ so that ALTER TABLE won't break when somebody will add new flag + */ + need_copy_table=(alter_info->flags & ~(ALTER_CHANGE_COLUMN_DEFAULT|ALTER_OPTIONS) || + create_info->used_fields & ~(HA_CREATE_USED_COMMENT|HA_CREATE_USED_PASSWORD) || + table->tmp_table); + create_info->frm_only= !need_copy_table; + + /* Handling of symlinked tables: If no rename: Create new data file and index file on the same disk as the @@ -3170,8 +3303,9 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, } else create_info->data_file_name=create_info->index_file_name=0; + + /* We don't log the statement, it will be logged later. */ { - /* We don't log the statement, it will be logged later. */ tmp_disable_binlog(thd); error= mysql_create_table(thd, new_db, tmp_name, create_info,create_list,key_list,1,0); @@ -3179,35 +3313,45 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (error) DBUG_RETURN(error); } - if (table->tmp_table) - new_table=open_table(thd,new_db,tmp_name,tmp_name,0); - else - { - char path[FN_REFLEN]; - my_snprintf(path, sizeof(path), "%s/%s/%s", mysql_data_home, - new_db, tmp_name); - fn_format(path,path,"","",4); - new_table=open_temporary_table(thd, path, new_db, tmp_name,0); - } - if (!new_table) + if (need_copy_table) { - VOID(quick_rm_table(new_db_type,new_db,tmp_name)); - goto err; + if (table->tmp_table) + { + TABLE_LIST tbl; + bzero((void*) &tbl, sizeof(tbl)); + tbl.db= new_db; + tbl.real_name= tbl.alias= tmp_name; + new_table= open_table(thd, &tbl, thd->mem_root, 0); + } + else + { + char path[FN_REFLEN]; + my_snprintf(path, sizeof(path), "%s/%s/%s", mysql_data_home, + new_db, tmp_name); + fn_format(path,path,"","",4); + new_table=open_temporary_table(thd, path, new_db, tmp_name,0); + } + if (!new_table) + { + VOID(quick_rm_table(new_db_type,new_db,tmp_name)); + goto err; + } } - /* We don't want update TIMESTAMP fields during ALTER TABLE. */ - new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - new_table->next_number_field=new_table->found_next_number_field; thd->count_cuted_fields= CHECK_FIELD_WARN; // calc cuted fields thd->cuted_fields=0L; thd->proc_info="copy to tmp table"; - next_insert_id=thd->next_insert_id; // Remember for loggin + next_insert_id=thd->next_insert_id; // Remember for logging copied=deleted=0; - if (!new_table->is_view) + if (new_table && !new_table->is_view) + { + new_table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; + new_table->next_number_field=new_table->found_next_number_field; error=copy_data_between_tables(table,new_table,create_list, handle_duplicates, ignore, order_num, order, &copied, &deleted); + } thd->last_insert_id=next_insert_id; // Needed for correct log thd->count_cuted_fields= CHECK_FIELD_IGNORE; @@ -3237,7 +3381,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, my_free((gptr) new_table,MYF(0)); goto err; } - mysql_update_log.write(thd, thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -3247,8 +3390,11 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, goto end_temporary; } - intern_close_table(new_table); /* close temporary table */ - my_free((gptr) new_table,MYF(0)); + if (new_table) + { + intern_close_table(new_table); /* close temporary table */ + my_free((gptr) new_table,MYF(0)); + } VOID(pthread_mutex_lock(&LOCK_open)); if (error) { @@ -3260,7 +3406,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, /* Data is copied. 
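Referring back to the need_copy_table test computed earlier in this hunk: the decision it encodes can be read as the following hypothetical helper (the flag constants are the ones used in the hunk; the helper itself is illustrative, not server code):

    static bool alter_needs_table_copy(uint alter_flags, uint used_fields,
                                       bool is_tmp_table)
    {
      /* anything beyond changing column defaults or table options,
         or any ALTER on a temporary table, forces the copy path */
      return (alter_flags & ~(ALTER_CHANGE_COLUMN_DEFAULT | ALTER_OPTIONS)) ||
             (used_fields & ~(HA_CREATE_USED_COMMENT |
                              HA_CREATE_USED_PASSWORD)) ||
             is_tmp_table;
    }

So, roughly, ALTER TABLE t1 COMMENT='x' (only ALTER_OPTIONS / HA_CREATE_USED_COMMENT set) skips the copy and only rewrites the .frm (create_info->frm_only), while ALTER TABLE t1 ADD COLUMN survives the negative mask and takes the full copy-to-temporary-table path.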
Now we rename the old table to a temp name, rename the new one to the old name, remove all entries from the old table - from the cash, free all locks, close the old table and remove it. + from the cache, free all locks, close the old table and remove it. */ thd->proc_info="rename result table"; @@ -3273,7 +3419,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, if (!access(new_name_buff,F_OK)) { error=1; - my_error(ER_TABLE_EXISTS_ERROR,MYF(0),new_name_buff); + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), new_name_buff); VOID(quick_rm_table(new_db_type,new_db,tmp_name)); VOID(pthread_mutex_unlock(&LOCK_open)); goto err; @@ -3289,12 +3435,7 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, close the original table at before doing the rename */ table_name=thd->strdup(table_name); // must be saved - if (close_cached_table(thd, table)) - { // Aborted - VOID(quick_rm_table(new_db_type,new_db,tmp_name)); - VOID(pthread_mutex_unlock(&LOCK_open)); - goto err; - } + close_cached_table(thd, table); table=0; // Marker that table is closed } #if (!defined( __WIN__) && !defined( __EMX__) && !defined( OS2)) @@ -3304,6 +3445,8 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, error=0; + if (!need_copy_table) + new_db_type=old_db_type=DB_TYPE_UNKNOWN; // this type cannot happen in regular ALTER if (mysql_rename_table(old_db_type,db,table_name,db,old_name)) { error=1; @@ -3372,7 +3515,6 @@ int mysql_alter_table(THD *thd,char *new_db, char *new_name, goto err; } thd->proc_info="end"; - mysql_update_log.write(thd, thd->query,thd->query_length); if (mysql_bin_log.is_open()) { thd->clear_error(); @@ -3415,10 +3557,10 @@ end_temporary: if (do_send_ok) send_ok(thd,copied+deleted,0L,tmp_name); thd->some_tables_deleted=0; - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } @@ -3528,7 +3670,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, { if (thd->killed) { - my_error(ER_SERVER_SHUTDOWN,MYF(0)); + thd->send_kill_message(); error= 1; break; } @@ -3554,6 +3696,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, to->file->print_error(error,MYF(0)); break; } + to->file->restore_auto_increment(); delete_count++; } else @@ -3563,7 +3706,7 @@ copy_data_between_tables(TABLE *from,TABLE *to, free_io_cache(from); delete [] copy; // This is never 0 - if (to->file->end_bulk_insert() && !error) + if (to->file->end_bulk_insert() && error <= 0) { to->file->print_error(my_errno,MYF(0)); error=1; @@ -3604,8 +3747,8 @@ copy_data_between_tables(TABLE *from,TABLE *to, RETURN Like mysql_alter_table(). 
*/ -int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, - bool do_send_ok) +bool mysql_recreate_table(THD *thd, TABLE_LIST *table_list, + bool do_send_ok) { DBUG_ENTER("mysql_recreate_table"); LEX *lex= thd->lex; @@ -3614,11 +3757,12 @@ int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, lex->key_list.empty(); lex->col_list.empty(); lex->alter_info.reset(); - lex->alter_info.is_simple= 0; // Force full recreate bzero((char*) &create_info,sizeof(create_info)); create_info.db_type=DB_TYPE_DEFAULT; create_info.row_type=ROW_TYPE_DEFAULT; create_info.default_table_charset=default_charset_info; + /* Force alter table to recreate table */ + lex->alter_info.flags= ALTER_CHANGE_COLUMN; DBUG_RETURN(mysql_alter_table(thd, NullS, NullS, &create_info, table_list, lex->create_list, lex->key_list, 0, (ORDER *) 0, @@ -3626,7 +3770,7 @@ int mysql_recreate_table(THD *thd, TABLE_LIST *table_list, } -int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) +bool mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) { TABLE_LIST *table; List<Item> field_list; @@ -3638,10 +3782,11 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) item->maybe_null= 1; field_list.push_back(item=new Item_int("Checksum",(longlong) 1,21)); item->maybe_null= 1; - if (protocol->send_fields(&field_list, 1)) - DBUG_RETURN(-1); + if (protocol->send_fields(&field_list, + Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)) + DBUG_RETURN(TRUE); - for (table= tables; table; table= table->next) + for (table= tables; table; table= table->next_local) { char table_name[NAME_LEN*2+2]; TABLE *t; @@ -3658,7 +3803,7 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) { /* Table didn't exist */ protocol->store_null(); - thd->net.last_error[0]=0; + thd->clear_error(); } else { @@ -3720,11 +3865,11 @@ int mysql_checksum_table(THD *thd, TABLE_LIST *tables, HA_CHECK_OPT *check_opt) } send_eof(thd); - DBUG_RETURN(0); + DBUG_RETURN(FALSE); err: close_thread_tables(thd); // Shouldn't be needed if (table) table->table=0; - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } diff --git a/sql/sql_test.cc b/sql/sql_test.cc index d992c93f8fc..6cffa9df2c6 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -181,9 +181,10 @@ TEST_join(JOIN *join) " quick select checked for each record (keys: %s)\n", tab->select->quick_keys.print(buf)); else if (tab->select->quick) - fprintf(DBUG_FILE," quick select used on key %s, length: %d\n", - form->key_info[tab->select->quick->index].name, - tab->select->quick->max_used_key_length); + { + fprintf(DBUG_FILE, " quick select used:\n"); + tab->select->quick->dbug_dump(18, false); + } else VOID(fputs(" select used\n",DBUG_FILE)); } @@ -202,6 +203,104 @@ TEST_join(JOIN *join) DBUG_VOID_RETURN; } + +/* + Print the current state during query optimization. + + SYNOPSIS + print_plan() + join pointer to the structure providing all context info for + the query + read_time the cost of the best partial plan + record_count estimate for the number of records returned by the best + partial plan + idx length of the partial QEP in 'join->positions'; + also an index in the array 'join->best_ref'; + info comment string to appear above the printout + + DESCRIPTION + This function prints to the log file DBUG_FILE the members of 'join' that + are used during query optimization (join->positions, join->best_positions, + and join->best_ref) and few other related variables (read_time, + record_count). + Useful to trace query optimizer functions. 
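A typical way to use this tracer is through the dbug facility; a hypothetical call site inside an optimizer loop, shown only to illustrate the signature defined below, would be:

    #ifndef DBUG_OFF
      DBUG_EXECUTE("opt",
                   print_plan(join, read_time, record_count, idx,
                              "considering extension"););
    #endif

When the server is started with --debug=d,opt this prints the current partial plan, the best complete plan found so far, and the remaining tables every time the call is reached.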
+ + RETURN + None +*/ + +void +print_plan(JOIN* join, double read_time, double record_count, + uint idx, const char *info) +{ + uint i; + POSITION pos; + JOIN_TAB *join_table; + JOIN_TAB **plan_nodes; + TABLE* table; + + if (info == 0) + info= ""; + + DBUG_LOCK_FILE; + if (join->best_read == DBL_MAX) + { + fprintf(DBUG_FILE,"%s; idx:%u, best: DBL_MAX, current:%g\n", + info, idx, read_time); + } + else + { + fprintf(DBUG_FILE,"%s; idx: %u, best: %g, current: %g\n", + info, idx, join->best_read, read_time); + } + + /* Print the tables in JOIN->positions */ + fputs(" POSITIONS: ", DBUG_FILE); + for (i= 0; i < idx ; i++) + { + pos = join->positions[i]; + table= pos.table->table; + if (table) + fputs(table->real_name, DBUG_FILE); + fputc(' ', DBUG_FILE); + } + fputc('\n', DBUG_FILE); + + /* + Print the tables in JOIN->best_positions only if at least one complete plan + has been found. An indicator for this is the value of 'join->best_read'. + */ + fputs("BEST_POSITIONS: ", DBUG_FILE); + if (join->best_read < DBL_MAX) + { + for (i= 0; i < idx ; i++) + { + pos= join->best_positions[i]; + table= pos.table->table; + if (table) + fputs(table->real_name, DBUG_FILE); + fputc(' ', DBUG_FILE); + } + } + fputc('\n', DBUG_FILE); + + /* Print the tables in JOIN->best_ref */ + fputs(" BEST_REF: ", DBUG_FILE); + for (plan_nodes= join->best_ref ; *plan_nodes ; plan_nodes++) + { + join_table= (*plan_nodes); + fputs(join_table->table->real_name, DBUG_FILE); + fprintf(DBUG_FILE, "(%lu,%lu,%lu)", + (ulong) join_table->found_records, + (ulong) join_table->records, + (ulong) join_table->read_time); + fputc(' ', DBUG_FILE); + } + fputc('\n', DBUG_FILE); + + DBUG_UNLOCK_FILE; +} + #endif typedef struct st_debug_lock @@ -369,16 +468,20 @@ read_first: %10lu\n\ write: %10lu\n\ delete %10lu\n\ update: %10lu\n", - ha_read_key_count, ha_read_next_count, - ha_read_rnd_count, ha_read_first_count, - ha_write_count, ha_delete_count, ha_update_count); + thd->status_var.ha_read_key_count, + thd->status_var.ha_read_next_count, + thd->status_var.ha_read_rnd_count, + thd->status_var.ha_read_first_count, + thd->status_var.ha_write_count, + thd->status_var.ha_delete_count, + thd->status_var.ha_update_count); pthread_mutex_unlock(&LOCK_status); printf("\nTable status:\n\ Opened tables: %10lu\n\ Open tables: %10lu\n\ Open files: %10lu\n\ Open streams: %10lu\n", - opened_tables, + thd->status_var.opened_tables, (ulong) cached_tables(), (ulong) my_file_opened, (ulong) my_stream_opened); diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc new file mode 100644 index 00000000000..96ede3fbe6b --- /dev/null +++ b/sql/sql_trigger.cc @@ -0,0 +1,481 @@ +#include "mysql_priv.h" +#include "sp_head.h" +#include "sql_trigger.h" +#include "parse_file.h" + +static const LEX_STRING triggers_file_type= {(char *)"TRIGGERS", 8}; +static const char * const triggers_file_ext= ".TRG"; + +/* + Table of .TRG file field descriptors. + We have here only one field now because in nearest future .TRG + files will be merged into .FRM files (so we don't need something + like md5 or created fields). +*/ +static File_option triggers_file_parameters[]= +{ + {{(char*)"triggers", 8}, offsetof(class Table_triggers_list, definitions_list), + FILE_OPTIONS_STRLIST}, + {{0, 0}, 0, FILE_OPTIONS_STRING} +}; + + +/* + Create or drop trigger for table. + + SYNOPSIS + mysql_create_or_drop_trigger() + thd - current thread context (including trigger definition in LEX) + tables - table list containing one table for which trigger is created. 
+    create - whether we create (true) or drop (false) a trigger
+
+  NOTE
+    This function is mainly responsible for opening and locking the table and
+    for invalidating all its instances in the table cache after trigger
+    creation. Real work on trigger creation/dropping is done inside the
+    Table_triggers_list methods.
+
+  RETURN VALUE
+    FALSE Success
+    TRUE  error
+*/
+bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create)
+{
+  TABLE *table;
+  bool result= 0;
+
+  DBUG_ENTER("mysql_create_or_drop_trigger");
+
+  /*
+    QQ: This function could be merged into mysql_alter_table(),
+    but do we want this?
+  */
+
+  if (open_and_lock_tables(thd, tables))
+    DBUG_RETURN(TRUE);
+
+  /*
+    TODO: We should check whether the user has the TRIGGER privilege here.
+    For now we just require the SUPER privilege for creating/dropping because
+    we don't have proper privilege checking for triggers in place yet.
+  */
+  if (check_global_access(thd, SUPER_ACL))
+    DBUG_RETURN(TRUE);
+
+  table= tables->table;
+
+  /*
+    We do not allow creation of triggers on views or temporary tables.
+    We have to do this check here and not in
+    Table_triggers_list::create_trigger() because we want to avoid messing
+    with the table cache for views and temporary tables.
+  */
+  if (tables->view || table->tmp_table != NO_TMP_TABLE)
+  {
+    my_error(ER_TRG_ON_VIEW_OR_TEMP_TABLE, MYF(0), tables->alias);
+    DBUG_RETURN(TRUE);
+  }
+
+  if (!table->triggers)
+  {
+    if (!create)
+    {
+      my_message(ER_TRG_DOES_NOT_EXIST, ER(ER_TRG_DOES_NOT_EXIST), MYF(0));
+      DBUG_RETURN(TRUE);
+    }
+
+    if (!(table->triggers= new (&table->mem_root) Table_triggers_list()))
+      DBUG_RETURN(TRUE);
+  }
+
+  /*
+    We don't want to perform our operations while a global read lock is held,
+    so we have to wait until it ends and then prevent it from occurring
+    again until we are done. (Acquiring LOCK_open is not enough because the
+    global read lock is held without holding LOCK_open.)
+  */
+  if (wait_if_global_read_lock(thd, 0, 0))
+    DBUG_RETURN(TRUE);
+
+  VOID(pthread_mutex_lock(&LOCK_open));
+  result= (create ?
+           table->triggers->create_trigger(thd, tables):
+           table->triggers->drop_trigger(thd, tables));
+
+  /* It is sensible to invalidate the table in any case */
+  close_cached_table(thd, table);
+  VOID(pthread_mutex_unlock(&LOCK_open));
+  start_waiting_global_read_lock(thd);
+
+  if (!result)
+    send_ok(thd);
+
+  DBUG_RETURN(result);
+}
+
+
+/*
+  Create trigger for table.
+
+  SYNOPSIS
+    create_trigger()
+    thd    - current thread context (including trigger definition in LEX)
+    tables - table list containing one open table for which the trigger is
+             created.
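The sp_head compiled from the CREATE TRIGGER statement ends up in the bodies[event][action_time] array declared in sql_trigger.h further down in this patch, and is meant to be fired from the DML code via process_triggers(); a hypothetical call site, for illustration only:

    /* e.g. just before writing a row during INSERT */
    if (table->triggers &&
        table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
                                          TRG_ACTION_BEFORE))
      DBUG_RETURN(TRUE);                  /* trigger body failed */

The 3x2 array is indexed by event and action time, so a BEFORE UPDATE trigger, for instance, occupies bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE].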
+ + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables) +{ + LEX *lex= thd->lex; + TABLE *table= tables->table; + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + LEX_STRING dir, file; + LEX_STRING *trg_def, *name; + Item_trigger_field *trg_field; + List_iterator_fast<LEX_STRING> it(names_list); + + /* We don't allow creation of several triggers of the same type yet */ + if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time]) + { + my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0)); + return 1; + } + + /* Let us check if trigger with the same name exists */ + while ((name= it++)) + { + if (my_strcasecmp(system_charset_info, lex->name_and_length.str, + name->str) == 0) + { + my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0)); + return 1; + } + } + + /* + Let us check if all references to fields in old/new versions of row in + this trigger are ok. + + NOTE: We do it here more from ease of use standpoint. We still have to + do some checks on each execution. E.g. we can catch privilege changes + only during execution. Also in near future, when we will allow access + to other tables from trigger we won't be able to catch changes in other + tables... + + To simplify code a bit we have to create Fields for accessing to old row + values if we have ON UPDATE trigger. + */ + if (!old_field && lex->trg_chistics.event == TRG_EVENT_UPDATE && + prepare_old_row_accessors(table)) + return 1; + + for (trg_field= (Item_trigger_field *)(lex->trg_table_fields.first); + trg_field; trg_field= trg_field->next_trg_field) + { + trg_field->setup_field(thd, table, lex->trg_chistics.event); + if (trg_field->fix_fields(thd, (TABLE_LIST *)0, (Item **)0)) + return 1; + } + + /* + Here we are creating file with triggers and save all triggers in it. + sql_create_definition_file() files handles renaming and backup of older + versions + */ + strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db, "/", NullS); + dir.length= unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + file.length= strxnmov(file_buff, FN_REFLEN, tables->real_name, + triggers_file_ext, NullS) - file_buff; + file.str= file_buff; + + /* + Soon we will invalidate table object and thus Table_triggers_list object + so don't care about place to which trg_def->ptr points and other + invariants (e.g. we don't bother to update names_list) + + QQ: Hmm... probably we should not care about setting up active thread + mem_root too. + */ + if (!(trg_def= (LEX_STRING *)alloc_root(&table->mem_root, + sizeof(LEX_STRING))) || + definitions_list.push_back(trg_def, &table->mem_root)) + return 1; + + trg_def->str= thd->query; + trg_def->length= thd->query_length; + + return sql_create_definition_file(&dir, &file, &triggers_file_type, + (gptr)this, triggers_file_parameters, 3); +} + + +/* + Drop trigger for table. + + SYNOPSIS + drop_trigger() + thd - current thread context (including trigger definition in LEX) + tables - table list containing one open table for which trigger is + dropped. 
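A short sketch of the OLD.* mechanism that create_trigger() above and prepare_old_row_accessors() further down rely on: each Field of the table is cloned and then shifted by the distance between record[0] and record[1], so the clone reads the before-image of the row. The calls are the same ones this patch uses; the standalone helper is only illustrative:

    static Field *clone_old_value_field(Field *fld, TABLE *table)
    {
      /* clone the field on the table's mem_root ... */
      Field *old_fld= fld->new_field(&table->mem_root, table);
      if (old_fld)
        /* ... and point it at record[1], the old row image */
        old_fld->move_field((my_ptrdiff_t) (table->record[1] -
                                            table->record[0]));
      return old_fld;
    }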
+ + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::drop_trigger(THD *thd, TABLE_LIST *tables) +{ + LEX *lex= thd->lex; + LEX_STRING *name; + List_iterator_fast<LEX_STRING> it_name(names_list); + List_iterator<LEX_STRING> it_def(definitions_list); + + while ((name= it_name++)) + { + it_def++; + + if (my_strcasecmp(system_charset_info, lex->name_and_length.str, + name->str) == 0) + { + /* + Again we don't care much about other things required for + clean trigger removing since table will be reopened anyway. + */ + it_def.remove(); + + if (definitions_list.is_empty()) + { + char path[FN_REFLEN]; + + /* + TODO: Probably instead of removing .TRG file we should move + to archive directory but this should be done as part of + parse_file.cc functionality (because we will need it + elsewhere). + */ + strxnmov(path, FN_REFLEN, mysql_data_home, "/", tables->db, "/", + tables->real_name, triggers_file_ext, NullS); + unpack_filename(path, path); + return my_delete(path, MYF(MY_WME)); + } + else + { + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + LEX_STRING dir, file; + + strxnmov(dir_buff, FN_REFLEN, mysql_data_home, "/", tables->db, + "/", NullS); + dir.length= unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + file.length= strxnmov(file_buff, FN_REFLEN, tables->real_name, + triggers_file_ext, NullS) - file_buff; + file.str= file_buff; + + return sql_create_definition_file(&dir, &file, &triggers_file_type, + (gptr)this, + triggers_file_parameters, 3); + } + } + } + + my_message(ER_TRG_DOES_NOT_EXIST, ER(ER_TRG_DOES_NOT_EXIST), MYF(0)); + return 1; +} + + +Table_triggers_list::~Table_triggers_list() +{ + for (int i= 0; i < 3; i++) + for (int j= 0; j < 2; j++) + delete bodies[i][j]; + + if (old_field) + for (Field **fld_ptr= old_field; *fld_ptr; fld_ptr++) + delete *fld_ptr; +} + + +/* + Prepare array of Field objects which will represent OLD.* row values in + ON UPDATE trigger (by referencing to record[1] instead of record[0]). + + SYNOPSIS + prepare_old_row_accessors() + table - pointer to TABLE object for which we are creating fields. + + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::prepare_old_row_accessors(TABLE *table) +{ + Field **fld, **old_fld; + + if (!(old_field= (Field **)alloc_root(&table->mem_root, + (table->fields + 1) * + sizeof(Field*)))) + return 1; + + for (fld= table->field, old_fld= old_field; *fld; fld++, old_fld++) + { + /* + QQ: it is supposed that it is ok to use this function for field + cloning... + */ + if (!(*old_fld= (*fld)->new_field(&table->mem_root, table))) + return 1; + (*old_fld)->move_field((my_ptrdiff_t)(table->record[1] - + table->record[0])); + } + *old_fld= 0; + + return 0; +} + + +/* + Check whenever .TRG file for table exist and load all triggers it contains. + + SYNOPSIS + check_n_load() + thd - current thread context + db - table's database name + table_name - table's name + table - pointer to table object + + RETURN VALUE + False - success + True - error +*/ +bool Table_triggers_list::check_n_load(THD *thd, const char *db, + const char *table_name, TABLE *table) +{ + char path_buff[FN_REFLEN]; + LEX_STRING path; + File_parser *parser; + + DBUG_ENTER("Table_triggers_list::check_n_load"); + + strxnmov(path_buff, FN_REFLEN, mysql_data_home, "/", db, "/", table_name, + triggers_file_ext, NullS); + path.length= unpack_filename(path_buff, path_buff); + path.str= path_buff; + + // QQ: should we analyze errno somehow ? 
+ if (access(path_buff, F_OK)) + DBUG_RETURN(0); + + /* + File exists so we got to load triggers + FIXME: A lot of things to do here e.g. how about other funcs and being + more paranoical ? + */ + + if ((parser= sql_parse_prepare(&path, &table->mem_root, 1))) + { + if (!strncmp(triggers_file_type.str, parser->type()->str, + parser->type()->length)) + { + Table_triggers_list *triggers= + new (&table->mem_root) Table_triggers_list(); + + if (!triggers) + DBUG_RETURN(1); + + if (parser->parse((gptr)triggers, &table->mem_root, + triggers_file_parameters, 1)) + DBUG_RETURN(1); + + table->triggers= triggers; + + /* TODO: This could be avoided if there is no ON UPDATE trigger. */ + if (triggers->prepare_old_row_accessors(table)) + DBUG_RETURN(1); + + List_iterator_fast<LEX_STRING> it(triggers->definitions_list); + LEX_STRING *trg_create_str, *trg_name_str; + char *trg_name_buff; + LEX *old_lex= thd->lex, lex; + + thd->lex= &lex; + + while ((trg_create_str= it++)) + { + lex_start(thd, (uchar*)trg_create_str->str, trg_create_str->length); + + if (yyparse((void *)thd) || thd->is_fatal_error) + { + /* + Free lex associated resources + QQ: Do we really need all this stuff here ? + */ + if (lex.sphead) + { + if (&lex != thd->lex) + thd->lex->sphead->restore_lex(thd); + delete lex.sphead; + } + goto err_with_lex_cleanup; + } + + triggers->bodies[lex.trg_chistics.event] + [lex.trg_chistics.action_time]= lex.sphead; + lex.sphead= 0; + + if (!(trg_name_buff= alloc_root(&table->mem_root, + sizeof(LEX_STRING) + + lex.name_and_length.length + 1))) + goto err_with_lex_cleanup; + + trg_name_str= (LEX_STRING *)trg_name_buff; + trg_name_buff+= sizeof(LEX_STRING); + memcpy(trg_name_buff, lex.name_and_length.str, + lex.name_and_length.length + 1); + trg_name_str->str= trg_name_buff; + trg_name_str->length= lex.name_and_length.length; + + if (triggers->names_list.push_back(trg_name_str, &table->mem_root)) + goto err_with_lex_cleanup; + + /* + Let us bind Item_trigger_field objects representing access to fields + in old/new versions of row in trigger to Field objects in table being + opened. + + We ignore errors here, because if even something is wrong we still will + be willing to open table to perform some operations (e.g. SELECT)... + Anyway some things can be checked only during trigger execution. + */ + for (Item_trigger_field *trg_field= + (Item_trigger_field *)(lex.trg_table_fields.first); + trg_field; + trg_field= trg_field->next_trg_field) + trg_field->setup_field(thd, table, lex.trg_chistics.event); + + lex_end(&lex); + } + thd->lex= old_lex; + + DBUG_RETURN(0); + +err_with_lex_cleanup: + // QQ: anything else ? + lex_end(&lex); + thd->lex= old_lex; + DBUG_RETURN(1); + } + + /* + We don't care about this error message much because .TRG files will + be merged into .FRM anyway. + */ + my_error(ER_WRONG_OBJECT, MYF(0), + table_name, triggers_file_ext, "TRIGGER"); + DBUG_RETURN(1); + } + + DBUG_RETURN(1); +} diff --git a/sql/sql_trigger.h b/sql/sql_trigger.h new file mode 100644 index 00000000000..82e7c1ce023 --- /dev/null +++ b/sql/sql_trigger.h @@ -0,0 +1,71 @@ +/* + This class holds all information about triggers of table. + + QQ: Will it be merged into TABLE in future ? +*/ +class Table_triggers_list: public Sql_alloc +{ + /* Triggers as SPs grouped by event, action_time */ + sp_head *bodies[3][2]; + /* + Copy of TABLE::Field array with field pointers set to old version + of record, used for OLD values in trigger on UPDATE. + */ + Field **old_field; + /* + Names of triggers. 
+ Should correspond to order of triggers on definitions_list, + used in CREATE/DROP TRIGGER for looking up trigger by name. + */ + List<LEX_STRING> names_list; + +public: + /* + Field responsible for storing triggers definitions in file. + It have to be public because we are using it directly from parser. + */ + List<LEX_STRING> definitions_list; + + Table_triggers_list(): + old_field(0) + { + bzero((char *)bodies, sizeof(bodies)); + } + ~Table_triggers_list(); + + bool create_trigger(THD *thd, TABLE_LIST *table); + bool drop_trigger(THD *thd, TABLE_LIST *table); + bool process_triggers(THD *thd, trg_event_type event, + trg_action_time_type time_type) + { + int res= 0; + + if (bodies[event][time_type]) + { + /* + Similar to function invocation we don't need to surpress sending of + ok packets here because don't allow execute statements from trigger. + + FIXME: We should juggle with security context here (because trigger + should be invoked with creator rights). + */ + res= bodies[event][time_type]->execute_function(thd, 0, 0, 0); + } + + return res; + } + + static bool check_n_load(THD *thd, const char *db, const char *table_name, + TABLE *table); + + bool has_delete_triggers() + { + return (bodies[TRG_EVENT_DELETE][TRG_ACTION_BEFORE] || + bodies[TRG_EVENT_DELETE][TRG_ACTION_AFTER]); + } + + friend class Item_trigger_field; + +private: + bool prepare_old_row_accessors(TABLE *table); +}; diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 0bb8ac8a28b..7fd81f22e66 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -368,7 +368,7 @@ int mysql_create_function(THD *thd,udf_func *udf) if (!initialized) { - send_error(thd, ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES)); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } @@ -379,19 +379,19 @@ int mysql_create_function(THD *thd,udf_func *udf) */ if (strchr(udf->dl, '/')) { - send_error(thd, ER_UDF_NO_PATHS,ER(ER_UDF_NO_PATHS)); + my_message(ER_UDF_NO_PATHS, ER(ER_UDF_NO_PATHS), MYF(0)); DBUG_RETURN(1); } if (udf->name.length > NAME_LEN) { - net_printf(thd, ER_TOO_LONG_IDENT,udf->name); + my_error(ER_TOO_LONG_IDENT, MYF(0), udf->name); DBUG_RETURN(1); } rw_wrlock(&THR_LOCK_udf); if ((hash_search(&udf_hash,(byte*) udf->name.str, udf->name.length))) { - net_printf(thd, ER_UDF_EXISTS, udf->name); + my_error(ER_UDF_EXISTS, MYF(0), udf->name); goto err; } if (!(dl = find_udf_dl(udf->dl))) @@ -400,7 +400,8 @@ int mysql_create_function(THD *thd,udf_func *udf) { DBUG_PRINT("error",("dlopen of %s failed, error: %d (%s)", udf->dl,errno,dlerror())); - net_printf(thd, ER_CANT_OPEN_LIBRARY, udf->dl, errno, dlerror()); + my_error(ER_CANT_OPEN_LIBRARY, MYF(0), + udf->dl, errno, dlerror()); goto err; } new_dl=1; @@ -410,16 +411,13 @@ int mysql_create_function(THD *thd,udf_func *udf) if (udf->func == NULL) { - net_printf(thd, ER_CANT_FIND_DL_ENTRY, udf->name); + my_error(ER_CANT_FIND_DL_ENTRY, MYF(0), udf->name); goto err; } udf->name.str=strdup_root(&mem,udf->name.str); udf->dl=strdup_root(&mem,udf->dl); if (!(u_d=add_udf(&udf->name,udf->returns,udf->dl,udf->type))) - { - send_error(thd,0); // End of memory goto err; - } u_d->dlhandle = dl; u_d->func=udf->func; u_d->func_init=udf->func_init; @@ -447,7 +445,7 @@ int mysql_create_function(THD *thd,udf_func *udf) close_thread_tables(thd); if (error) { - net_printf(thd, ER_ERROR_ON_WRITE, "func@mysql",error); + my_error(ER_ERROR_ON_WRITE, MYF(0), "func@mysql", error); del_udf(u_d); goto err; } @@ -470,14 +468,14 @@ int mysql_drop_function(THD *thd,const LEX_STRING *udf_name) 
DBUG_ENTER("mysql_drop_function"); if (!initialized) { - send_error(thd, ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES)); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); DBUG_RETURN(1); } rw_wrlock(&THR_LOCK_udf); if (!(udf=(udf_func*) hash_search(&udf_hash,(byte*) udf_name->str, (uint) udf_name->length))) { - net_printf(thd, ER_FUNCTION_NOT_DEFINED, udf_name->str); + my_error(ER_FUNCTION_NOT_DEFINED, MYF(0), udf_name->str); goto err; } del_udf(udf); diff --git a/sql/sql_union.cc b/sql/sql_union.cc index f89b234f5b0..901cb0ba496 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -24,14 +24,20 @@ #include "mysql_priv.h" #include "sql_select.h" -int mysql_union(THD *thd, LEX *lex, select_result *result, - SELECT_LEX_UNIT *unit) +bool mysql_union(THD *thd, LEX *lex, select_result *result, + SELECT_LEX_UNIT *unit, ulong setup_tables_done_option) { DBUG_ENTER("mysql_union"); - int res= 0; - if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK))) + bool res; + if (!(res= unit->prepare(thd, result, SELECT_NO_UNLOCK | + setup_tables_done_option))) res= unit->exec(); - res|= unit->cleanup(); + if (!res && thd->cursor && thd->cursor->is_open()) + { + thd->cursor->set_unit(unit); + } + else + res|= unit->cleanup(); DBUG_RETURN(res); } @@ -70,8 +76,8 @@ bool select_union::send_data(List<Item> &values) unit->offset_limit_cnt--; return 0; } - fill_record(table->field, values, 1); - if (thd->net.report_error || write_record(table,&info)) + fill_record(thd, table->field, values, 1); + if (thd->net.report_error || write_record(thd, table,&info)) { if (thd->net.last_errno == ER_RECORD_FILE_FULL) { @@ -98,8 +104,7 @@ bool select_union::flush() int error; if ((error=table->file->extra(HA_EXTRA_NO_CACHE))) { - table->file->print_error(error,MYF(0)); - ::send_error(thd); + table->file->print_error(error, MYF(0)); return 1; } return 0; @@ -136,13 +141,13 @@ st_select_lex_unit::init_prepare_fake_select_lex(THD *thd) fake_select_lex->ftfunc_list= &fake_select_lex->ftfunc_list_alloc; fake_select_lex->table_list.link_in_list((byte *)&result_table_list, (byte **) - &result_table_list.next); + &result_table_list.next_local); return options_tmp; } -int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, - ulong additional_options) +bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, + ulong additional_options) { SELECT_LEX *lex_select_save= thd_arg->lex->current_select; SELECT_LEX *sl, *first_select; @@ -171,16 +176,16 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, if (!sl->join->procedure && result->prepare(sl->join->fields_list, this)) { - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } sl->join->select_options|= SELECT_DESCRIBE; sl->join->reinit(); } } - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } prepared= 1; - res= 0; + res= FALSE; thd_arg->lex->current_select= sl= first_select= first_select_in_union(); found_rows_for_union= first_select->options & OPTION_FOUND_ROWS; @@ -206,24 +211,28 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, JOIN *join= new JOIN(thd_arg, sl->item_list, sl->options | thd_arg->options | additional_options, tmp_result); + /* + setup_tables_done_option should be set only for very first SELECT, + because it protect from secont setup_tables call for select-like non + select commands (DELETE/INSERT/...) and they use only very first + SELECT (for union it can be only INSERT ... SELECT). 
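The comment above is the reason for the additional_options&= ~OPTION_SETUP_TABLES_DONE line that follows it: commands such as INSERT ... SELECT have already run setup_tables() for their first SELECT, so the "tables are set up" flag must reach only that first branch of the union, while later branches still do their own setup. A tiny stand-alone sketch of this one-shot flag pattern (the bit value and names are illustrative):

#include <cstdio>

static const unsigned long OPT_TABLES_DONE= 1UL << 0;   /* illustrative bit */

static void prepare_select(int n, unsigned long options)
{
  std::printf("SELECT #%d: tables already set up: %s\n",
              n, (options & OPT_TABLES_DONE) ? "yes" : "no");
}

int main()
{
  unsigned long additional_options= OPT_TABLES_DONE;  /* caller did the setup  */
  for (int sl= 1; sl <= 3; sl++)                      /* three union branches  */
  {
    prepare_select(sl, additional_options);
    additional_options&= ~OPT_TABLES_DONE;            /* one-shot: first branch only */
  }
  return 0;
}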
+ */ + additional_options&= ~OPTION_SETUP_TABLES_DONE; if (!join) goto err; thd_arg->lex->current_select= sl; - offset_limit_cnt= sl->offset_limit; - select_limit_cnt= sl->select_limit+sl->offset_limit; - if (select_limit_cnt < sl->select_limit) - select_limit_cnt= HA_POS_ERROR; // no limit - if (select_limit_cnt == HA_POS_ERROR || sl->braces) + set_limit(sl, sl); + if (sl->braces) sl->options&= ~OPTION_FOUND_ROWS; can_skip_order_by= is_union && (!sl->braces || select_limit_cnt == HA_POS_ERROR); - + res= join->prepare(&sl->ref_pointer_array, (TABLE_LIST*) sl->table_list.first, sl->with_wild, sl->where, - (can_skip_order_by ? 0 : sl->order_list.elements) + + (can_skip_order_by ? 0 : sl->order_list.elements) + sl->group_list.elements, can_skip_order_by ? (ORDER*) 0 : (ORDER *)sl->order_list.first, @@ -235,7 +244,8 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, /* There are no * in the statement anymore (for PS) */ sl->with_wild= 0; last_procedure= join->procedure; - if (res || thd_arg->is_fatal_error) + + if ((res= (res || thd_arg->is_fatal_error))) goto err; if (sl == first_select) { @@ -265,7 +275,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, while ((type= tp++, item_tmp= it++)) { if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp)) - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } } @@ -279,6 +289,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, List_iterator_fast<Item> tp(types); Item_arena *arena= thd->current_arena; Item *type; + while ((type= tp++)) { if (type->result_type() == STRING_RESULT && @@ -309,14 +320,10 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, thd_arg->lex->current_select= lex_select_save; if (!item_list.elements) { - /* - We're in statement prepare or in execution - of a conventional statement. - */ + Field **field; Item_arena *tmp_arena,backup; tmp_arena= thd->change_arena_if_needed(&backup); - Field **field; for (field= table->field; *field; field++) { Item_field *item= new Item_field(*field); @@ -324,7 +331,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, { if (tmp_arena) thd->restore_backup_item_arena(tmp_arena, &backup); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } } if (tmp_arena) @@ -333,11 +340,16 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, { /* prepare fake select to initialize it correctly */ ulong options_tmp= init_prepare_fake_select_lex(thd); + /* + it should be done only once (because item_list builds only onece + per statement) + */ + DBUG_ASSERT(fake_select_lex->join == 0); if (!(fake_select_lex->join= new JOIN(thd, item_list, thd->options, result))) { fake_select_lex->table_list.empty(); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } fake_select_lex->item_list= item_list; @@ -354,11 +366,11 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, fake_select_lex->table_list.empty(); } } - else if (arena->is_stmt_execute()) + else if (!arena->is_conventional()) { /* - We're in execution of a prepared statement: reset field items - to point at fields from the created temporary table. + We're in execution of a prepared statement or stored procedure: + reset field items to point at fields from the created temporary table. 
*/ List_iterator_fast<Item> it(item_list); for (Field **field= table->field; *field; field++) @@ -372,15 +384,15 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, thd_arg->lex->current_select= lex_select_save; - DBUG_RETURN(res || thd_arg->is_fatal_error ? 1 : 0); + DBUG_RETURN(res || thd_arg->is_fatal_error); err: thd_arg->lex->current_select= lex_select_save; - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } -int st_select_lex_unit::exec() +bool st_select_lex_unit::exec() { SELECT_LEX *lex_select_save= thd->lex->current_select; SELECT_LEX *select_cursor=first_select_in_union(); @@ -389,7 +401,7 @@ int st_select_lex_unit::exec() DBUG_ENTER("st_select_lex_unit::exec"); if (executed && !uncacheable && !describe) - DBUG_RETURN(0); + DBUG_RETURN(FALSE); executed= 1; if (uncacheable || !item || !item->assigned() || describe) @@ -406,7 +418,7 @@ int st_select_lex_unit::exec() } /* re-enabling indexes for next subselect iteration */ if (union_distinct && table->file->enable_indexes(HA_KEY_SWITCH_ALL)) - DBUG_ASSERT(1); + DBUG_ASSERT(TRUE); } for (SELECT_LEX *sl= select_cursor; sl; sl= sl->next_select()) { @@ -461,7 +473,7 @@ int st_select_lex_unit::exec() if (sl == union_distinct) { if (table->file->disable_indexes(HA_KEY_SWITCH_ALL)) - DBUG_RETURN(1); + DBUG_RETURN(TRUE); table->no_keyread=1; } res= sl->join->error; @@ -470,7 +482,7 @@ int st_select_lex_unit::exec() { examined_rows+= thd->examined_row_count; thd->lex->current_select= lex_select_save; - DBUG_RETURN(1); + DBUG_RETURN(TRUE); } } if (res) @@ -496,7 +508,7 @@ int st_select_lex_unit::exec() optimized= 1; /* Send result to 'result' */ - res= -1; + res= TRUE; { List<Item_func_match> empty_list; empty_list.empty(); @@ -511,11 +523,11 @@ int st_select_lex_unit::exec() allocate JOIN for fake select only once (prevent mysql_select automatic allocation) */ - if (!(fake_select_lex->join= new JOIN(thd, item_list, thd->options, - result))) + if (!(fake_select_lex->join= new JOIN(thd, item_list, + fake_select_lex->options, result))) { fake_select_lex->table_list.empty(); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } /* @@ -527,12 +539,14 @@ int st_select_lex_unit::exec() else { JOIN_TAB *tab,*end; - for (tab=join->join_tab,end=tab+join->tables ; tab != end ; tab++) + for (tab=join->join_tab, end=tab+join->tables ; + tab && tab != end ; + tab++) { delete tab->select; delete tab->quick; } - join->init(thd, item_list, thd->options, result); + join->init(thd, item_list, fake_select_lex->options, result); } res= mysql_select(thd, &fake_select_lex->ref_pointer_array, &result_table_list, @@ -540,7 +554,7 @@ int st_select_lex_unit::exec() global_parameters->order_list.elements, (ORDER*)global_parameters->order_list.first, (ORDER*) NULL, NULL, (ORDER*) NULL, - options_tmp | SELECT_NO_UNLOCK, + fake_select_lex->options | SELECT_NO_UNLOCK, result, this, fake_select_lex); fake_select_lex->table_list.empty(); @@ -560,14 +574,15 @@ int st_select_lex_unit::exec() } -int st_select_lex_unit::cleanup() +bool st_select_lex_unit::cleanup() { int error= 0; + JOIN *join; DBUG_ENTER("st_select_lex_unit::cleanup"); if (cleaned) { - DBUG_RETURN(0); + DBUG_RETURN(FALSE); } cleaned= 1; @@ -579,9 +594,8 @@ int st_select_lex_unit::cleanup() free_tmp_table(thd, table); table= 0; // Safety } - JOIN *join; - SELECT_LEX *sl= first_select_in_union(); - for (; sl; sl= sl->next_select()) + + for (SELECT_LEX *sl= first_select_in_union(); sl; sl= sl->next_select()) { if ((join= sl->join)) { @@ -606,6 +620,7 @@ int st_select_lex_unit::cleanup() error|= join->cleanup(); 
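In exec() above, union_distinct marks the last SELECT whose rows still have to be de-duplicated: while those branches run, the temporary result table keeps a unique index enabled so duplicate rows are rejected on insert, and once that branch is done the index is disabled and the remaining UNION ALL branches append rows unchecked. A rough stand-alone analogue of that switch, using std::set in place of the unique index (purely illustrative, not the handler interface):

#include <cstdio>
#include <set>
#include <vector>

int main()
{
  /* Branch results of: SELECT ... UNION SELECT ... UNION ALL SELECT ... */
  std::vector<std::vector<int> > branches= { {1, 2, 2}, {2, 3}, {3, 3} };
  size_t union_distinct= 1;        /* last branch that is still DISTINCT */

  std::set<int> unique_rows;       /* plays the role of the unique index */
  std::vector<int> result;

  for (size_t sl= 0; sl < branches.size(); sl++)
  {
    bool dedup= (sl <= union_distinct);        /* "index" still enabled? */
    for (int row : branches[sl])
      if (!dedup || unique_rows.insert(row).second)
        result.push_back(row);
  }

  for (int row : result)
    std::printf("%d ", row);       /* prints: 1 2 3 3 3 */
  std::printf("\n");
  return 0;
}

The point of the real switch is the same as in this toy: de-duplication cost is paid only for the branches that need it, and the trailing ALL branches degrade to plain appends.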
delete join; } + DBUG_RETURN(error); } @@ -641,19 +656,19 @@ void st_select_lex_unit::reinit_exec_mechanism() old_result old select_result object RETURN - 0 - OK - -1 - error + FALSE - OK + TRUE - error */ -int st_select_lex_unit::change_result(select_subselect *result, - select_subselect *old_result) +bool st_select_lex_unit::change_result(select_subselect *result, + select_subselect *old_result) { - int res= 0; + bool res= FALSE; for (SELECT_LEX *sl= first_select_in_union(); sl; sl= sl->next_select()) { if (sl->join && sl->join->result == old_result) - if ((res= sl->join->change_result(result))) - return (res); + if (sl->join->change_result(result)) + return TRUE; } if (fake_select_lex && fake_select_lex->join) res= fake_select_lex->join->change_result(result); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 761e8d6de8b..636d265d256 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -22,6 +22,8 @@ #include "mysql_priv.h" #include "sql_select.h" +#include "sp_head.h" +#include "sql_trigger.h" static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields); @@ -35,18 +37,75 @@ static bool compare_record(TABLE *table, ulong query_id) if (memcmp(table->null_flags, table->null_flags+table->rec_buff_length, table->null_bytes)) - return 1; // Diff in NULL value + return TRUE; // Diff in NULL value /* Compare updated fields */ for (Field **ptr=table->field ; *ptr ; ptr++) { if ((*ptr)->query_id == query_id && (*ptr)->cmp_binary_offset(table->rec_buff_length)) - return 1; + return TRUE; } - return 0; + return FALSE; } +/* + check that all fields are real fields + + SYNOPSIS + check_fields() + thd thread handler + items Items for check + + RETURN + TRUE Items can't be used in UPDATE + FALSE Items are OK +*/ + +static bool check_fields(THD *thd, List<Item> &items) +{ + List_iterator<Item> it(items); + Item *item; + Item_field *field; + while ((item= it++)) + { + if (!(field= item->filed_for_view_update())) + { + /* item has name, because it comes from VIEW SELECT list */ + my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name); + return TRUE; + } + /* + we make temporary copy of Item_field, to avoid influence of changing + result_field on Item_ref which refer on this field + */ + thd->change_item_tree(it.ref(), new Item_field(thd, field)); + } + return FALSE; +} + + +/* + Process usual UPDATE + + SYNOPSIS + mysql_update() + thd thread handler + fields fields for update + values values of fields for update + conds WHERE clause expression + order_num number of elemen in ORDER BY clause + order ORDER BY clause list + limit limit clause + handle_duplicates how to handle duplicates + + RETURN + 0 - OK + 2 - privilege check and openning table passed, but we need to convert to + multi-update because of view substitution + 1 - error +*/ + int mysql_update(THD *thd, TABLE_LIST *table_list, List<Item> &fields, @@ -54,32 +113,55 @@ int mysql_update(THD *thd, COND *conds, uint order_num, ORDER *order, ha_rows limit, - enum enum_duplicates handle_duplicates, - bool ignore) + enum enum_duplicates handle_duplicates, bool ignore) { - bool using_limit=limit != HA_POS_ERROR; + bool using_limit= limit != HA_POS_ERROR; bool safe_update= thd->options & OPTION_SAFE_UPDATES; bool used_key_is_modified, transactional_table, log_delayed; + bool ignore_err= (thd->lex->duplicates == DUP_IGNORE); + int res; int error=0; uint used_index; #ifndef NO_EMBEDDED_ACCESS_CHECKS uint want_privilege; #endif + uint table_count= 0; ulong query_id=thd->query_id, timestamp_query_id; ha_rows updated, found; key_map 
old_used_keys; TABLE *table; SQL_SELECT *select; READ_RECORD info; - TABLE_LIST *update_table_list= ((TABLE_LIST*) - thd->lex->select_lex.table_list.first); + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_update"); LINT_INIT(used_index); LINT_INIT(timestamp_query_id); - if ((open_and_lock_tables(thd, table_list))) - DBUG_RETURN(-1); + if (open_tables(thd, table_list, &table_count)) + DBUG_RETURN(1); + + if (table_list->ancestor && table_list->ancestor->next_local) + { + DBUG_ASSERT(table_list->view); + DBUG_PRINT("info", ("Switch to multi-update")); + /* pass counter value */ + thd->lex->table_count= table_count; + /* + give correct value to multi_lock_option, because it will be used + in multiupdate + */ + thd->lex->multi_lock_option= table_list->lock_type; + /* convert to multiupdate */ + return 2; + } + + if (lock_tables(thd, table_list, table_count) || + mysql_handle_derived(thd->lex, &mysql_derived_prepare) || + (thd->fill_derived_tables() && + mysql_handle_derived(thd->lex, &mysql_derived_filling))) + DBUG_RETURN(1); + thd->proc_info="init"; table= table_list->table; table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK); @@ -89,11 +171,13 @@ int mysql_update(THD *thd, table->quick_keys.clear_all(); #ifndef NO_EMBEDDED_ACCESS_CHECKS - want_privilege= table->grant.want_privilege; + /* In case of view TABLE_LIST contain right privilages request */ + want_privilege= (table_list->view ? + table_list->grant.want_privilege : + table->grant.want_privilege); #endif - if ((error= mysql_prepare_update(thd, table_list, update_table_list, - &conds, order_num, order))) - DBUG_RETURN(error); + if (mysql_prepare_update(thd, table_list, &conds, order_num, order)) + DBUG_RETURN(1); old_used_keys= table->used_keys; // Keys used in WHERE /* @@ -108,10 +192,24 @@ int mysql_update(THD *thd, /* Check the fields we are going to modify */ #ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.want_privilege=want_privilege; + table_list->grant.want_privilege= table->grant.want_privilege= want_privilege; #endif - if (setup_fields(thd, 0, update_table_list, fields, 1, 0, 0)) - DBUG_RETURN(-1); /* purecov: inspected */ + { + select_lex->no_wrap_view_item= 1; + res= setup_fields(thd, 0, table_list, fields, 1, 0, 0); + select_lex->no_wrap_view_item= 0; + if (res) + DBUG_RETURN(1); /* purecov: inspected */ + } + if (table_list->view && check_fields(thd, fields)) + { + DBUG_RETURN(1); + } + if (!table_list->updatable || check_key_in_view(thd, table_list)) + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE"); + DBUG_RETURN(1); + } if (table->timestamp_field) { // Don't set timestamp column if this is modified @@ -123,12 +221,13 @@ int mysql_update(THD *thd, #ifndef NO_EMBEDDED_ACCESS_CHECKS /* Check values */ - table->grant.want_privilege=(SELECT_ACL & ~table->grant.privilege); + table_list->grant.want_privilege= table->grant.want_privilege= + (SELECT_ACL & ~table->grant.privilege); #endif - if (setup_fields(thd, 0, update_table_list, values, 1, 0, 0)) + if (setup_fields(thd, 0, table_list, values, 1, 0, 0)) { - free_underlaid_joins(thd, &thd->lex->select_lex); - DBUG_RETURN(-1); /* purecov: inspected */ + free_underlaid_joins(thd, select_lex); + DBUG_RETURN(1); /* purecov: inspected */ } // Don't count on usage of 'only index' when calculating which key to use @@ -138,10 +237,10 @@ int mysql_update(THD *thd, (select && select->check_quick(thd, safe_update, limit)) || !limit) { delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); + free_underlaid_joins(thd, 
select_lex); if (error) { - DBUG_RETURN(-1); // Error in where + DBUG_RETURN(1); // Error in where } send_ok(thd); // No matching records DBUG_RETURN(0); @@ -157,13 +256,15 @@ int mysql_update(THD *thd, goto err; } } - init_ftfuncs(thd, &thd->lex->select_lex, 1); + init_ftfuncs(thd, select_lex, 1); /* Check if we are modifying a key that we are used to search with */ + if (select && select->quick) + { + used_index= select->quick->index; used_key_is_modified= (!select->quick->unique_key_range() && - check_if_key_used(table, - (used_index=select->quick->index), - fields)); + select->quick->check_if_keys_used(&fields)); + } else if ((used_index=table->file->key_used_on_scan) < MAX_KEY) used_key_is_modified=check_if_key_used(table, used_index, fields); else @@ -175,7 +276,7 @@ int mysql_update(THD *thd, matching rows before updating the table! */ table->file->extra(HA_EXTRA_RETRIEVE_ALL_COLS); - if (old_used_keys.is_set(used_index)) + if ( (used_index != MAX_KEY) && old_used_keys.is_set(used_index)) { table->key_read=1; table->file->extra(HA_EXTRA_KEYREAD); @@ -221,8 +322,12 @@ int mysql_update(THD *thd, if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME))) goto err; - + + /* If quick select is used, initialize it before retrieving rows. */ + if (select && select->quick && select->quick->reset()) + goto err; init_read_record(&info,thd,table,select,0,1); + thd->proc_info="Searching rows for update"; uint tmp_limit= limit; @@ -277,6 +382,9 @@ int mysql_update(THD *thd, if (ignore) table->file->extra(HA_EXTRA_IGNORE_DUP_KEY); + + if (select && select->quick && select->quick->reset()) + goto err; init_read_record(&info,thd,table,select,0,1); updated= found= 0; @@ -285,22 +393,47 @@ int mysql_update(THD *thd, thd->proc_info="Updating"; query_id=thd->query_id; + transactional_table= table->file->has_transactions(); + thd->no_trans_update= 0; + thd->abort_on_warning= test(handle_duplicates != DUP_IGNORE && + (thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES))); + while (!(error=info.read_record(&info)) && !thd->killed) { if (!(select && select->skip_record())) { store_record(table,record[1]); - if (fill_record(fields,values, 0) || thd->net.report_error) + if (fill_record(thd, fields, values, 0)) break; /* purecov: inspected */ + found++; + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_BEFORE); + if (compare_record(table, query_id)) { + if ((res= table_list->view_check_option(thd, ignore_err)) != + VIEW_CHECK_OK) + { + found--; + if (res == VIEW_CHECK_SKIP) + continue; + else if (res == VIEW_CHECK_ERROR) + { + error= 1; + break; + } + } if (!(error=table->file->update_row((byte*) table->record[1], (byte*) table->record[0]))) { updated++; + thd->no_trans_update= !transactional_table; } - else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) + else if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { thd->fatal_error(); // Force error message table->file->print_error(error,MYF(0)); @@ -308,6 +441,10 @@ int mysql_update(THD *thd, break; } } + + if (table->triggers) + table->triggers->process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_AFTER); + if (!--limit && using_limit) { error= -1; // Simulate end of file @@ -331,13 +468,13 @@ int mysql_update(THD *thd, This must be before binlog writing and ha_autocommit_... 
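The update loop above now wraps each row change in trigger calls: process_triggers(thd, TRG_EVENT_UPDATE, TRG_ACTION_BEFORE) before the row is written and the TRG_ACTION_AFTER counterpart once it has been, both resolved through the bodies[event][action_time] array declared in sql_trigger.h. A stripped-down stand-alone sketch of that two-dimensional dispatch (stub types and enum values standing in for sp_head execution):

#include <cstdio>

enum trg_event_type  { TRG_EVENT_INSERT= 0, TRG_EVENT_UPDATE, TRG_EVENT_DELETE };
enum trg_action_time { TRG_ACTION_BEFORE= 0, TRG_ACTION_AFTER };

typedef void (*trigger_body)();   /* stands in for sp_head::execute_function */

static void log_before_update() { std::printf("BEFORE UPDATE trigger fired\n"); }
static void log_after_update()  { std::printf("AFTER UPDATE trigger fired\n"); }

struct StubTriggers
{
  trigger_body bodies[3][2];      /* [event][action_time], as in sql_trigger.h */

  void process_triggers(trg_event_type event, trg_action_time time)
  {
    if (bodies[event][time])      /* fire only if such a trigger was defined */
      bodies[event][time]();
  }
};

int main()
{
  StubTriggers triggers= {};      /* all slots start out empty */
  triggers.bodies[TRG_EVENT_UPDATE][TRG_ACTION_BEFORE]= log_before_update;
  triggers.bodies[TRG_EVENT_UPDATE][TRG_ACTION_AFTER]=  log_after_update;

  triggers.process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_BEFORE);
  std::printf("... update_row() ...\n");
  triggers.process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER);
  return 0;
}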
*/ if (updated) + { query_cache_invalidate3(thd, table_list, 1); + } - transactional_table= table->file->has_transactions(); log_delayed= (transactional_table || table->tmp_table); if ((updated || (error < 0)) && (error <= 0 || !transactional_table)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (error <= 0) @@ -362,32 +499,33 @@ int mysql_update(THD *thd, thd->lock=0; } - free_underlaid_joins(thd, &thd->lex->select_lex); - if (error >= 0) - send_error(thd,thd->killed ? ER_SERVER_SHUTDOWN : 0); /* purecov: inspected */ - else + free_underlaid_joins(thd, select_lex); + if (error < 0) { char buff[80]; sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, (ulong) thd->cuted_fields); - send_ok(thd, - (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated, + thd->row_count_func= + (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; + send_ok(thd, (ulong) thd->row_count_func, thd->insert_id_used ? thd->insert_id() : 0L,buff); DBUG_PRINT("info",("%d records updated",updated)); } thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */ + thd->abort_on_warning= 0; free_io_cache(table); - DBUG_RETURN(0); + DBUG_RETURN((error >= 0 || thd->net.report_error) ? 1 : 0); err: delete select; - free_underlaid_joins(thd, &thd->lex->select_lex); + free_underlaid_joins(thd, select_lex); if (table->key_read) { table->key_read=0; table->file->extra(HA_EXTRA_NO_KEYREAD); } - DBUG_RETURN(-1); + thd->abort_on_warning= 0; + DBUG_RETURN(1); } /* @@ -396,51 +534,50 @@ err: SYNOPSIS mysql_prepare_update() thd - thread handler - table_list - global table list - update_table_list - local table list of UPDATE SELECT_LEX + table_list - global/local table list conds - conditions order_num - number of ORDER BY list entries order - ORDER BY clause list RETURN VALUE - 0 - OK - 1 - error (message is sent to user) - -1 - error (message is not sent to user) + FALSE OK + TRUE error */ -int mysql_prepare_update(THD *thd, TABLE_LIST *table_list, - TABLE_LIST *update_table_list, +bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list, Item **conds, uint order_num, ORDER *order) { TABLE *table= table_list->table; TABLE_LIST tables; List<Item> all_fields; + SELECT_LEX *select_lex= &thd->lex->select_lex; DBUG_ENTER("mysql_prepare_update"); #ifndef NO_EMBEDDED_ACCESS_CHECKS - table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); + table_list->grant.want_privilege= table->grant.want_privilege= + (SELECT_ACL & ~table->grant.privilege); #endif bzero((char*) &tables,sizeof(tables)); // For ORDER BY tables.table= table; tables.alias= table_list->alias; - if (setup_tables(update_table_list) || - setup_conds(thd, update_table_list, conds) || - thd->lex->select_lex.setup_ref_array(thd, order_num) || - setup_order(thd, thd->lex->select_lex.ref_pointer_array, - update_table_list, all_fields, all_fields, order) || - setup_ftfuncs(&thd->lex->select_lex)) - DBUG_RETURN(-1); + if (setup_tables(thd, table_list, conds, &select_lex->leaf_tables, + FALSE, FALSE) || + setup_conds(thd, table_list, select_lex->leaf_tables, conds) || + select_lex->setup_ref_array(thd, order_num) || + setup_order(thd, select_lex->ref_pointer_array, + table_list, all_fields, all_fields, order) || + setup_ftfuncs(select_lex)) + DBUG_RETURN(TRUE); /* Check that we are not using table that we are updating in a sub select */ - if (find_real_table_in_list(table_list->next, - table_list->db, table_list->real_name)) + if (unique_table(table_list, 
table_list->next_global)) { my_error(ER_UPDATE_TABLE_USED, MYF(0), table_list->real_name); - DBUG_RETURN(-1); + DBUG_RETURN(TRUE); } - - DBUG_RETURN(0); + select_lex->fix_prepare_information(thd, conds); + DBUG_RETURN(FALSE); } @@ -465,230 +602,260 @@ static table_map get_table_map(List<Item> *items) } - /* - Setup multi-update handling and call SELECT to do the join + make update specific preparation and checks after opening tables + + SYNOPSIS + mysql_multi_update_prepare() + thd thread handler + + RETURN + FALSE OK + TRUE Error */ -int mysql_multi_update(THD *thd, - TABLE_LIST *table_list, - List<Item> *fields, - List<Item> *values, - COND *conds, - ulong options, - enum enum_duplicates handle_duplicates, bool ignore, - SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) +bool mysql_multi_update_prepare(THD *thd) { + LEX *lex= thd->lex; + ulong opened_tables; + TABLE_LIST *table_list= lex->query_tables; + TABLE_LIST *tl, *leaves; + List<Item> *fields= &lex->select_lex.item_list; + table_map tables_for_update; int res; - multi_update *result; - TABLE_LIST *tl; - TABLE_LIST *update_list= (TABLE_LIST*) thd->lex->select_lex.table_list.first; - List<Item> total_list; + bool update_view= 0; + /* + if this multi-update was converted from usual update, here is table + counter else junk will be assigned here, but then replaced with real + count in open_tables() + */ + uint table_count= lex->table_count; const bool using_lock_tables= thd->locked_tables != 0; - bool initialized_dervied= 0; - DBUG_ENTER("mysql_multi_update"); + bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI); + DBUG_ENTER("mysql_multi_update_prepare"); - select_lex->select_limit= HA_POS_ERROR; + /* following need for prepared statements, to run next time multi-update */ + thd->lex->sql_command= SQLCOM_UPDATE_MULTI; + /* open tables and create derived ones, but do not lock and fill them */ + if ((original_multiupdate && open_tables(thd, table_list, & table_count)) || + mysql_handle_derived(lex, &mysql_derived_prepare)) + DBUG_RETURN(TRUE); /* - The following loop is here to to ensure that we only lock tables - that we are going to update with a write lock + setup_tables() need for VIEWs. JOIN::prepare() will call setup_tables() + second time, but this call will do nothing (there are check for second + call in setup_tables()). 
*/ - for (;;) - { - table_map update_tables, derived_tables=0; - uint tnr, table_count; - if ((res=open_tables(thd, table_list, &table_count))) - DBUG_RETURN(res); + if (setup_tables(thd, table_list, &lex->select_lex.where, + &lex->select_lex.leaf_tables, FALSE, FALSE)) + DBUG_RETURN(TRUE); + leaves= lex->select_lex.leaf_tables; - /* Only need to call lock_tables if we are not using LOCK TABLES */ - if (!using_lock_tables && - ((res= lock_tables(thd, table_list, table_count)))) - DBUG_RETURN(res); + if ((lex->select_lex.no_wrap_view_item= 1, + res= setup_fields(thd, 0, table_list, *fields, 1, 0, 0), + lex->select_lex.no_wrap_view_item= 0, + res)) + DBUG_RETURN(TRUE); - if (!initialized_dervied) - { - initialized_dervied= 1; - relink_tables_for_derived(thd); - if ((res= mysql_handle_derived(thd->lex))) - DBUG_RETURN(res); - } - - /* - Ensure that we have update privilege for all tables and columns in the - SET part - While we are here, initialize the table->map field to check which - tables are updated and updatability of derived tables - */ - for (tl= update_list, tnr=0 ; tl ; tl=tl->next) + for (tl= table_list; tl ; tl= tl->next_local) + { + if (tl->view) { - TABLE *table= tl->table; - /* - Update of derived tables is checked later - We don't check privileges here, becasue then we would get error - "UPDATE command denided .. for column N" instead of - "Target table ... is not updatable" - */ - if (!tl->derived) - table->grant.want_privilege= (UPDATE_ACL & ~table->grant.privilege); - table->map= (table_map) 1 << (tnr++); + update_view= 1; + break; } + } - if (setup_fields(thd, 0, update_list, *fields, 1, 0, 0)) - DBUG_RETURN(-1); + if (update_view && check_fields(thd, *fields)) + { + DBUG_RETURN(TRUE); + } - update_tables= get_table_map(fields); + tables_for_update= get_table_map(fields); - /* Unlock the tables in preparation for relocking */ - if (!using_lock_tables) - { - mysql_unlock_tables(thd, thd->lock); - thd->lock= 0; - } + /* + Setup timestamp handling and locking mode + */ + for (tl= leaves; tl; tl= tl->next_leaf) + { + TABLE *table= tl->table; + /* Only set timestamp column if this is not modified */ + if (table->timestamp_field && + table->timestamp_field->query_id == thd->query_id) + table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - /* - Count tables and setup timestamp handling - Set also the table locking strategy according to the update map - */ - for (tl= update_list; tl; tl= tl->next) + /* if table will be updated then check that it is unique */ + if (table->map & tables_for_update) { - TABLE_LIST *save= tl->next; - TABLE *table= tl->table; - uint wants; - /* if table will be updated then check that it is unique */ - if (table->map & update_tables) - { - /* - Multi-update can't be constructed over-union => we always have - single SELECT on top and have to check underlaying SELECTs of it - */ - if (select_lex->check_updateable_in_subqueries(tl->db, - tl->real_name)) - { - my_error(ER_UPDATE_TABLE_USED, MYF(0), - tl->real_name); - DBUG_RETURN(-1); - } - DBUG_PRINT("info",("setting table `%s` for update", tl->alias)); - tl->lock_type= thd->lex->multi_lock_option; - tl->updating= 1; - wants= UPDATE_ACL; - } - else + if (!tl->updatable || check_key_in_view(thd, tl)) { - DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias)); - tl->lock_type= TL_READ; - tl->updating= 0; - wants= SELECT_ACL; + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE"); + DBUG_RETURN(TRUE); } - if (tl->derived) - derived_tables|= table->map; - else + /* + Multi-update can't be 
constructed over-union => we always have + single SELECT on top and have to check underlying SELECTs of it + */ + if (lex->select_lex.check_updateable_in_subqueries(tl->db, + tl->real_name)) { - tl->next= 0; - if (!using_lock_tables) - tl->table->reginfo.lock_type= tl->lock_type; - if (check_access(thd, wants, tl->db, &tl->grant.privilege, 0, 0) || - (grant_option && check_grant(thd, wants, tl, 0, 0, 0))) - { - tl->next= save; - DBUG_RETURN(0); - } - tl->next= save; + my_error(ER_UPDATE_TABLE_USED, MYF(0), tl->real_name); + DBUG_RETURN(TRUE); } + DBUG_PRINT("info",("setting table `%s` for update", tl->alias)); + tl->lock_type= lex->multi_lock_option; + tl->updating= 1; } - - if (thd->lex->derived_tables && (update_tables & derived_tables)) + else { - // find derived table which cause error - for (tl= update_list; tl; tl= tl->next) - { - if (tl->derived && (update_tables & tl->table->map)) - { - my_printf_error(ER_NON_UPDATABLE_TABLE, ER(ER_NON_UPDATABLE_TABLE), - MYF(0), tl->alias, "UPDATE"); - DBUG_RETURN(-1); - } - } + DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias)); + tl->lock_type= TL_READ; + tl->updating= 0; } - /* Relock the tables with the correct modes */ - res= lock_tables(thd, table_list, table_count); - if (using_lock_tables) + /* Check access privileges for table */ + if (!tl->derived) { - if (res) - DBUG_RETURN(res); - break; // Don't have to do setup_field() + uint want_privilege= tl->updating ? UPDATE_ACL : SELECT_ACL; + if (!using_lock_tables) + tl->table->reginfo.lock_type= tl->lock_type; + + if (check_access(thd, want_privilege, + tl->db, &tl->grant.privilege, 0, 0) || + (grant_option && check_grant(thd, want_privilege, tl, 0, 1, 0))) + DBUG_RETURN(TRUE); } + } - /* - We must setup fields again as the file may have been reopened - during lock_tables - */ + /* check single table update for view compound from several tables */ + for (tl= table_list; tl; tl= tl->next_local) + { + if (tl->table == 0) { - List_iterator_fast<Item> field_it(*fields); - Item_field *item; - - while ((item= (Item_field *) field_it++)) + DBUG_ASSERT(tl->view && + tl->ancestor && tl->ancestor->next_local); + TABLE_LIST *for_update= 0; + if (tl->check_single_table(&for_update, tables_for_update)) { - item->field->query_id= 0; - item->cleanup(); + my_error(ER_VIEW_MULTIUPDATE, MYF(0), + tl->view_db.str, tl->view_name.str); + DBUG_RETURN(-1); } } - if (setup_fields(thd, 0, update_list, *fields, 1, 0, 0)) - DBUG_RETURN(-1); - /* - If lock succeded and the table map didn't change since the above lock - we can continue. - */ - if (!res && update_tables == get_table_map(fields)) - break; + } + opened_tables= thd->status_var.opened_tables; + /* now lock and fill tables */ + if (lock_tables(thd, table_list, table_count)) + DBUG_RETURN(TRUE); + + /* + we have to re-call fixfields for fixed items, because lock maybe + reopened tables + */ + if (opened_tables != thd->status_var.opened_tables) + { /* - There was some very unexpected changes in the table definition between - open tables and lock tables. Close tables and try again. + Fields items cleanup(). There are only Item_fields in the list, so we + do not do Item tree walking */ - close_thread_tables(thd); + List_iterator_fast<Item> it(*fields); + Item *item; + while ((item= it++)) + item->cleanup(); + + /* We have to cleanup translation tables of views. 
*/ + for (TABLE_LIST *tbl= table_list; tbl; tbl= tbl->next_global) + tbl->cleanup_items(); + + if (setup_tables(thd, table_list, &lex->select_lex.where, + &lex->select_lex.leaf_tables, FALSE, FALSE) || + (lex->select_lex.no_wrap_view_item= 1, + res= setup_fields(thd, 0, table_list, *fields, 1, 0, 0), + lex->select_lex.no_wrap_view_item= 0, + res)) + DBUG_RETURN(TRUE); } - /* Setup timestamp handling */ - for (tl= update_list; tl; tl= tl->next) + /* We only need SELECT privilege for columns in the values list */ + for (tl= leaves; tl; tl= tl->next_leaf) { TABLE *table= tl->table; - /* Only set timestamp column if this is not modified */ - if (table->timestamp_field && - table->timestamp_field->query_id == thd->query_id) - table->timestamp_field_type= TIMESTAMP_NO_AUTO_SET; - - /* We only need SELECT privilege for columns in the values list */ - table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); + TABLE_LIST *tlist; + if (!(tlist= tl->belong_to_view ? tl->belong_to_view : tl)->derived) + { + tlist->grant.want_privilege= + (SELECT_ACL & ~tlist->grant.privilege); + table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege); + } + DBUG_PRINT("info", ("table: %s want_privilege: %u", tl->alias, + (uint) table->grant.want_privilege)); } - if (!(result=new multi_update(thd, update_list, fields, values, - handle_duplicates, ignore))) - DBUG_RETURN(-1); + if (thd->fill_derived_tables() && + mysql_handle_derived(lex, &mysql_derived_filling)) + DBUG_RETURN(TRUE); + DBUG_RETURN (FALSE); +} + + +/* + Setup multi-update handling and call SELECT to do the join +*/ +bool mysql_multi_update(THD *thd, + TABLE_LIST *table_list, + List<Item> *fields, + List<Item> *values, + COND *conds, + ulong options, + enum enum_duplicates handle_duplicates, bool ignore, + SELECT_LEX_UNIT *unit, SELECT_LEX *select_lex) +{ + bool res= FALSE; + multi_update *result; + DBUG_ENTER("mysql_multi_update"); + + if (mysql_multi_update_prepare(thd)) + DBUG_RETURN(TRUE); + + if (!(result= new multi_update(thd, table_list, + thd->lex->select_lex.leaf_tables, + fields, values, + handle_duplicates, ignore))) + DBUG_RETURN(TRUE); + + thd->no_trans_update= 0; + thd->abort_on_warning= test(thd->variables.sql_mode & + (MODE_STRICT_TRANS_TABLES | + MODE_STRICT_ALL_TABLES)); + + List<Item> total_list; res= mysql_select(thd, &select_lex->ref_pointer_array, - select_lex->get_table_list(), select_lex->with_wild, + table_list, select_lex->with_wild, total_list, conds, 0, (ORDER *) NULL, (ORDER *)NULL, (Item *) NULL, (ORDER *)NULL, - options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK, + options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK | + OPTION_SETUP_TABLES_DONE, result, unit, select_lex); delete result; - DBUG_RETURN(res); + thd->abort_on_warning= 0; + DBUG_RETURN(TRUE); } multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list, + TABLE_LIST *leaves_list, List<Item> *field_list, List<Item> *value_list, enum enum_duplicates handle_duplicates_arg, bool ignore_arg) - :all_tables(table_list), update_tables(0), thd(thd_arg), tmp_tables(0), - updated(0), found(0), fields(field_list), values(value_list), - table_count(0), copy_field(0), handle_duplicates(handle_duplicates_arg), - do_update(1), trans_safe(0), transactional_tables(1), ignore(ignore_arg) + :all_tables(table_list), leaves(leaves_list), update_tables(0), + thd(thd_arg), tmp_tables(0), updated(0), found(0), fields(field_list), + values(value_list), table_count(0), copy_field(0), + handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(0), + 
transactional_tables(1), ignore(ignore_arg) {} @@ -716,7 +883,7 @@ int multi_update::prepare(List<Item> ¬_used_values, if (!tables_to_update) { - my_error(ER_NO_TABLES_USED, MYF(0)); + my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0)); DBUG_RETURN(1); } @@ -735,8 +902,9 @@ int multi_update::prepare(List<Item> ¬_used_values, */ update.empty(); - for (table_ref= all_tables; table_ref; table_ref=table_ref->next) + for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf) { + /* TODO: add support of view of join support */ TABLE *table=table_ref->table; if (tables_to_update & table->map) { @@ -744,7 +912,7 @@ int multi_update::prepare(List<Item> ¬_used_values, sizeof(*tl)); if (!tl) DBUG_RETURN(1); - update.link_in_list((byte*) tl, (byte**) &tl->next); + update.link_in_list((byte*) tl, (byte**) &tl->next_local); tl->shared= table_count++; table->no_keyread=1; table->used_keys.clear_all(); @@ -803,11 +971,11 @@ int multi_update::prepare(List<Item> ¬_used_values, which will cause an error when reading a row. (This issue is mostly relevent for MyISAM tables) */ - for (table_ref= all_tables; table_ref; table_ref=table_ref->next) + for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf) { TABLE *table=table_ref->table; - if (!(tables_to_update & table->map) && - find_real_table_in_list(update_tables, table_ref->db, + if (!(tables_to_update & table->map) && + find_table_in_local_list(update_tables, table_ref->db, table_ref->real_name)) table->no_cache= 1; // Disable row cache } @@ -838,7 +1006,7 @@ multi_update::initialize_tables(JOIN *join) table_to_update= 0; /* Create a temporary table for keys to all tables, except main table */ - for (table_ref= update_tables; table_ref; table_ref=table_ref->next) + for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local) { TABLE *table=table_ref->table; uint cnt= table_ref->shared; @@ -928,30 +1096,29 @@ static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields) case JT_SYSTEM: case JT_CONST: case JT_EQ_REF: - return 1; // At most one matching row + return TRUE; // At most one matching row case JT_REF: return !check_if_key_used(table, join_tab->ref.key, *fields); case JT_ALL: /* If range search on index */ if (join_tab->quick) - return !check_if_key_used(table, join_tab->quick->index, - *fields); + return !join_tab->quick->check_if_keys_used(fields); /* If scanning in clustered key */ if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) && table->primary_key < MAX_KEY) return !check_if_key_used(table, table->primary_key, *fields); - return 1; + return TRUE; default: break; // Avoid compler warning } - return 0; + return FALSE; } multi_update::~multi_update() { TABLE_LIST *table; - for (table= update_tables ; table; table= table->next) + for (table= update_tables ; table; table= table->next_local) table->table->no_keyread= table->table->no_cache= 0; if (tmp_tables) @@ -976,9 +1143,10 @@ multi_update::~multi_update() bool multi_update::send_data(List<Item> ¬_used_values) { TABLE_LIST *cur_table; + bool ignore_err= (thd->lex->duplicates == DUP_IGNORE); DBUG_ENTER("multi_update::send_data"); - for (cur_table= update_tables; cur_table ; cur_table= cur_table->next) + for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { TABLE *table= cur_table->table; /* @@ -1002,12 +1170,23 @@ bool multi_update::send_data(List<Item> ¬_used_values) { table->status|= STATUS_UPDATED; store_record(table,record[1]); - if (fill_record(*fields_for_table[offset], *values_for_table[offset], 
0)) + if (fill_record(thd, *fields_for_table[offset], + *values_for_table[offset], 0)) DBUG_RETURN(1); + found++; if (compare_record(table, thd->query_id)) { int error; + if ((error= cur_table->view_check_option(thd, ignore_err)) != + VIEW_CHECK_OK) + { + found--; + if (error == VIEW_CHECK_SKIP) + continue; + else if (error == VIEW_CHECK_ERROR) + DBUG_RETURN(1); + } if (!updated++) { /* @@ -1021,20 +1200,22 @@ bool multi_update::send_data(List<Item> ¬_used_values) table->record[0]))) { updated--; - if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) + if (!ignore || error != HA_ERR_FOUND_DUPP_KEY) { thd->fatal_error(); // Force error message table->file->print_error(error,MYF(0)); DBUG_RETURN(1); } } + else if (!table->file->has_transactions()) + thd->no_trans_update= 1; } } else { int error; TABLE *tmp_table= tmp_tables[offset]; - fill_record(tmp_table->field+1, *values_for_table[offset], 1); + fill_record(thd, tmp_table->field+1, *values_for_table[offset], 1); found++; /* Store pointer to row */ memcpy((char*) tmp_table->field[0]->ptr, @@ -1060,7 +1241,7 @@ bool multi_update::send_data(List<Item> ¬_used_values) void multi_update::send_error(uint errcode,const char *err) { /* First send error what ever it is ... */ - ::send_error(thd,errcode,err); + my_error(errcode, MYF(0), err); /* If nothing updated return */ if (!updated) @@ -1095,7 +1276,7 @@ int multi_update::do_updates(bool from_send_error) do_update= 0; // Don't retry this function if (!found) DBUG_RETURN(0); - for (cur_table= update_tables; cur_table ; cur_table= cur_table->next) + for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { byte *ref_pos; @@ -1228,7 +1409,6 @@ bool multi_update::send_eof() if (updated && (local_error <= 0 || !trans_safe)) { - mysql_update_log.write(thd,thd->query,thd->query_length); if (mysql_bin_log.is_open()) { if (local_error <= 0) @@ -1253,15 +1433,15 @@ bool multi_update::send_eof() /* Safety: If we haven't got an error before (should not happen) */ my_message(ER_UNKNOWN_ERROR, "An error occured in multi-table update", MYF(0)); - ::send_error(thd); - return 1; + return TRUE; } sprintf(buff, ER(ER_UPDATE_INFO), (ulong) found, (ulong) updated, (ulong) thd->cuted_fields); - ::send_ok(thd, - (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated, + thd->row_count_func= + (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated; + ::send_ok(thd, (ulong) thd->row_count_func, thd->insert_id_used ? thd->insert_id() : 0L,buff); - return 0; + return FALSE; } diff --git a/sql/sql_view.cc b/sql/sql_view.cc new file mode 100644 index 00000000000..bafb57c44b0 --- /dev/null +++ b/sql/sql_view.cc @@ -0,0 +1,1128 @@ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#include "mysql_priv.h" +#include "sql_select.h" +#include "parse_file.h" +#include "sp.h" + +#define MD5_BUFF_LENGTH 33 + +static int mysql_register_view(THD *thd, TABLE_LIST *view, + enum_view_create_mode mode); + +const char *updatable_views_with_limit_names[]= { "NO", "YES", NullS }; +TYPELIB updatable_views_with_limit_typelib= +{ + array_elements(updatable_views_with_limit_names)-1, "", + updatable_views_with_limit_names, + 0 +}; + + +/* + Creating/altering VIEW procedure + + SYNOPSIS + mysql_create_view() + thd - thread handler + mode - VIEW_CREATE_NEW, VIEW_ALTER, VIEW_CREATE_OR_REPLACE + + RETURN VALUE + FALSE OK + TRUE Error +*/ + +bool mysql_create_view(THD *thd, + enum_view_create_mode mode) +{ + LEX *lex= thd->lex; + bool link_to_local; + /* first table in list is target VIEW name => cut off it */ + TABLE_LIST *view= lex->unlink_first_table(&link_to_local); + TABLE_LIST *tables= lex->query_tables; + TABLE_LIST *tbl; + SELECT_LEX *select_lex= &lex->select_lex; +#ifndef NO_EMBEDDED_ACCESS_CHECKS + SELECT_LEX *sl; +#endif + SELECT_LEX_UNIT *unit= &lex->unit; + bool res= FALSE; + DBUG_ENTER("mysql_create_view"); + + if (lex->proc_list.first || + lex->result) + { + my_error(ER_VIEW_SELECT_CLAUSE, MYF(0), (lex->result ? + "INTO" : + "PROCEDURE")); + res= TRUE; + goto err; + } + if (lex->derived_tables || + lex->variables_used || lex->param_list.elements) + { + int err= (lex->derived_tables ? + ER_VIEW_SELECT_DERIVED : + ER_VIEW_SELECT_VARIABLE); + my_message(err, ER(err), MYF(0)); + res= TRUE; + goto err; + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Privilege check for view creation: + - user have CREATE VIEW privilege on view table + - user have DELETE privilege in case of ALTER VIEW or CREATE OR REPLACE + VIEW + - have some (SELECT/UPDATE/INSERT/DELETE) privileges on columns of + underlying tables used on top of SELECT list (because it can be + (theoretically) updated, so it is enough to have UPDATE privilege on + them, for example) + - have SELECT privilege on columns used in expressions of VIEW select + - for columns of underly tables used on top of SELECT list also will be + checked that we have not more privileges on correspondent column of view + table (i.e. 
user will not get some privileges by view creation) + */ + if ((check_access(thd, CREATE_VIEW_ACL, view->db, &view->grant.privilege, + 0, 0) || + grant_option && check_grant(thd, CREATE_VIEW_ACL, view, 0, 1, 0)) || + (mode != VIEW_CREATE_NEW && + (check_access(thd, DELETE_ACL, view->db, &view->grant.privilege, + 0, 0) || + grant_option && check_grant(thd, DELETE_ACL, view, 0, 1, 0)))) + DBUG_RETURN(TRUE); + for (sl= select_lex; sl; sl= sl->next_select()) + { + for (tbl= sl->get_table_list(); tbl; tbl= tbl->next_local) + { + /* + Ensure that we have some privileges on this table, more strict check + will be done on column level after preparation, + */ + if (check_some_access(thd, VIEW_ANY_ACL, tbl)) + { + my_error(ER_TABLEACCESS_DENIED_ERROR, MYF(0), + "ANY", thd->priv_user, thd->host_or_ip, tbl->real_name); + DBUG_RETURN(TRUE); + } + /* + Mark this table as a table which will be checked after the prepare + phase + */ + tbl->table_in_first_from_clause= 1; + + /* + We need to check only SELECT_ACL for all normal fields, fields for + which we need "any" (SELECT/UPDATE/INSERT/DELETE) privilege will be + checked later + */ + tbl->grant.want_privilege= SELECT_ACL; + /* + Make sure that all rights are loaded to the TABLE::grant field. + + tbl->real_name will be correct name of table because VIEWs are + not opened yet. + */ + fill_effective_table_privileges(thd, &tbl->grant, tbl->db, + tbl->real_name); + } + } + + if (&lex->select_lex != lex->all_selects_list) + { + /* check tables of subqueries */ + for (tbl= tables; tbl; tbl= tbl->next_global) + { + if (!tbl->table_in_first_from_clause) + { + if (check_access(thd, SELECT_ACL, tbl->db, + &tbl->grant.privilege, 0, 0) || + grant_option && check_grant(thd, SELECT_ACL, tbl, 0, 1, 0)) + { + res= TRUE; + goto err; + } + } + } + } + /* + Mark fields for special privilege check ("any" privilege) + */ + for (sl= select_lex; sl; sl= sl->next_select()) + { + List_iterator_fast<Item> it(sl->item_list); + Item *item; + while ((item= it++)) + { + Item_field *field; + if ((field= item->filed_for_view_update())) + field->any_privileges= 1; + } + } +#endif + + if (open_and_lock_tables(thd, tables)) + DBUG_RETURN(TRUE); + + /* + check that tables are not temporary and this VIEW do not used in query + (it is possible with ALTERing VIEW) + */ + for (tbl= tables; tbl; tbl= tbl->next_global) + { + /* is this table temporary and is not view? */ + if (tbl->table->tmp_table != NO_TMP_TABLE && !tbl->view && + !tbl->schema_table) + { + my_error(ER_VIEW_SELECT_TMPTABLE, MYF(0), tbl->alias); + res= TRUE; + goto err; + } + + /* is this table view and the same view which we creates now? 
*/ + if (tbl->view && + strcmp(tbl->view_db.str, view->db) == 0 && + strcmp(tbl->view_name.str, view->real_name) == 0) + { + my_error(ER_NO_SUCH_TABLE, MYF(0), tbl->view_db.str, tbl->view_name.str); + res= TRUE; + goto err; + } + + /* + Copy the privileges of the underlying VIEWs which were filled by + fill_effective_table_privileges + (they were not copied at derived tables processing) + */ + tbl->table->grant.privilege= tbl->grant.privilege; + } + + /* prepare select to resolve all fields */ + lex->view_prepare_mode= 1; + if (unit->prepare(thd, 0, 0)) + { + /* + some errors from prepare are reported to user, if is not then + it will be checked after err: label + */ + res= TRUE; + goto err; + } + + /* view list (list of view fields names) */ + if (lex->view_list.elements) + { + List_iterator_fast<Item> it(select_lex->item_list); + List_iterator_fast<LEX_STRING> nm(lex->view_list); + Item *item; + LEX_STRING *name; + + if (lex->view_list.elements != select_lex->item_list.elements) + { + my_message(ER_VIEW_WRONG_LIST, ER(ER_VIEW_WRONG_LIST), MYF(0)); + goto err; + } + while ((item= it++, name= nm++)) + item->set_name(name->str, name->length, system_charset_info); + } + + /* Test absence of duplicates names */ + { + Item *item; + List_iterator_fast<Item> it(select_lex->item_list); + it++; + while ((item= it++)) + { + Item *check; + List_iterator_fast<Item> itc(select_lex->item_list); + while ((check= itc++) && check != item) + { + if (strcmp(item->name, check->name) == 0) + { + my_error(ER_DUP_FIELDNAME, MYF(0), item->name); + DBUG_RETURN(TRUE); + } + } + } + } + +#ifndef NO_EMBEDDED_ACCESS_CHECKS + /* + Compare/check grants on view with grants of underlying tables + */ + for (sl= select_lex; sl; sl= sl->next_select()) + { + char *db= view->db ? view->db : thd->db; + List_iterator_fast<Item> it(sl->item_list); + Item *item; + fill_effective_table_privileges(thd, &view->grant, db, + view->real_name); + while ((item= it++)) + { + Item_field *fld; + uint priv= (get_column_grant(thd, &view->grant, db, + view->real_name, item->name) & + VIEW_ANY_ACL); + if ((fld= item->filed_for_view_update())) + { + /* + Do we have more privileges on view field then underlying table field? 
+ */ + if (!fld->field->table->tmp_table && (~fld->have_privileges & priv)) + { + /* VIEW column has more privileges */ + my_error(ER_COLUMNACCESS_DENIED_ERROR, MYF(0), + "create view", thd->priv_user, thd->host_or_ip, item->name, + view->real_name); + DBUG_RETURN(TRUE); + } + } + } + } +#endif + + if (wait_if_global_read_lock(thd, 0, 0)) + { + res= TRUE; + goto err; + } + VOID(pthread_mutex_lock(&LOCK_open)); + res= mysql_register_view(thd, view, mode); + VOID(pthread_mutex_unlock(&LOCK_open)); + start_waiting_global_read_lock(thd); + if (res) + goto err; + + send_ok(thd); + lex->link_first_table_back(view, link_to_local); + DBUG_RETURN(0); + +err: + thd->proc_info= "end"; + lex->link_first_table_back(view, link_to_local); + unit->cleanup(); + DBUG_RETURN(res || thd->net.report_error); +} + + +/* index of revision number in following table */ +static const int revision_number_position= 5; +/* index of last required parameter for making view */ +static const int required_view_parameters= 7; + +/* + table of VIEW .frm field descriptors + + Note that one should NOT change the order for this, as it's used by + parse() +*/ +static File_option view_parameters[]= +{{{(char*) "query", 5}, offsetof(TABLE_LIST, query), + FILE_OPTIONS_STRING}, + {{(char*) "md5", 3}, offsetof(TABLE_LIST, md5), + FILE_OPTIONS_STRING}, + {{(char*) "updatable", 9}, offsetof(TABLE_LIST, updatable_view), + FILE_OPTIONS_ULONGLONG}, + {{(char*) "algorithm", 9}, offsetof(TABLE_LIST, algorithm), + FILE_OPTIONS_ULONGLONG}, + {{(char*) "with_check_option", 17}, offsetof(TABLE_LIST, with_check), + FILE_OPTIONS_ULONGLONG}, + {{(char*) "revision", 8}, offsetof(TABLE_LIST, revision), + FILE_OPTIONS_REV}, + {{(char*) "timestamp", 9}, offsetof(TABLE_LIST, timestamp), + FILE_OPTIONS_TIMESTAMP}, + {{(char*)"create-version", 14},offsetof(TABLE_LIST, file_version), + FILE_OPTIONS_ULONGLONG}, + {{(char*) "source", 6}, offsetof(TABLE_LIST, source), + FILE_OPTIONS_ESTRING}, + {{NullS, 0}, 0, + FILE_OPTIONS_STRING} +}; + +static LEX_STRING view_file_type[]= {{(char*)"VIEW", 4}}; + + +/* + Register VIEW (write .frm & process .frm's history backups) + + SYNOPSIS + mysql_register_view() + thd - thread handler + view - view description + mode - VIEW_CREATE_NEW, VIEW_ALTER, VIEW_CREATE_OR_REPLACE + + RETURN + 0 OK + -1 Error + 1 Error and error message given +*/ + +static int mysql_register_view(THD *thd, TABLE_LIST *view, + enum_view_create_mode mode) +{ + LEX *lex= thd->lex; + char buff[4096]; + String str(buff,(uint32) sizeof(buff), system_charset_info); + char md5[MD5_BUFF_LENGTH]; + bool can_be_merged; + char dir_buff[FN_REFLEN], file_buff[FN_REFLEN]; + LEX_STRING dir, file; + DBUG_ENTER("mysql_register_view"); + + /* print query */ + str.length(0); + { + ulong sql_mode= thd->variables.sql_mode & MODE_ANSI_QUOTES; + thd->variables.sql_mode&= ~MODE_ANSI_QUOTES; + lex->unit.print(&str); + thd->variables.sql_mode|= sql_mode; + } + str.append('\0'); + DBUG_PRINT("VIEW", ("View: %s", str.ptr())); + + /* print file name */ + (void) my_snprintf(dir_buff, FN_REFLEN, "%s/%s/", + mysql_data_home, view->db); + unpack_filename(dir_buff, dir_buff); + dir.str= dir_buff; + dir.length= strlen(dir_buff); + + file.str= file_buff; + file.length= (strxnmov(file_buff, FN_REFLEN, view->real_name, reg_ext, + NullS) - file_buff); + /* init timestamp */ + if (!view->timestamp.str) + view->timestamp.str= view->timestamp_buffer; + + /* check old .frm */ + { + char path_buff[FN_REFLEN]; + LEX_STRING path; + File_parser *parser; + + path.str= path_buff; + 
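For orientation, the view_parameters[] table above describes a plain-text .frm that mysql_register_view() writes as <datadir>/<db>/<view>.frm. A hypothetical, abridged example of such a file (the exact quoting and escaping are done by the parse_file.cc helpers and are not shown in this patch, so treat every value as a placeholder):

    TYPE=VIEW
    query=select `a`,`b` from `test`.`t1`
    md5=<32 hex characters>
    updatable=1
    algorithm=0
    with_check_option=0
    revision=1
    timestamp=<creation time>
    create-version=1
    source=SELECT a, b FROM t1

The leading "TYPE=VIEW" line is the same 10-byte header that mysql_frm_type() further down reads to tell view .frm files from table .frm files.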
fn_format(path_buff, file.str, dir.str, 0, MY_UNPACK_FILENAME); + path.length= strlen(path_buff); + + if (!access(path.str, F_OK)) + { + if (mode == VIEW_CREATE_NEW) + { + my_error(ER_TABLE_EXISTS_ERROR, MYF(0), view->alias); + DBUG_RETURN(-1); + } + + if (!(parser= sql_parse_prepare(&path, thd->mem_root, 0))) + DBUG_RETURN(1); + + if (!parser->ok() || + strncmp("VIEW", parser->type()->str, parser->type()->length)) + { + my_error(ER_WRONG_OBJECT, MYF(0), + (view->db ? view->db : thd->db), view->real_name, "VIEW"); + DBUG_RETURN(-1); + } + + /* + read revision number + + TODO: read dependence list, too, to process cascade/restrict + TODO: special cascade/restrict procedure for alter? + */ + if (parser->parse((gptr)view, thd->mem_root, + view_parameters + revision_number_position, 1)) + { + DBUG_RETURN(thd->net.report_error? -1 : 0); + } + } + else + { + if (mode == VIEW_ALTER) + { + my_error(ER_NO_SUCH_TABLE, MYF(0), view->db, view->alias); + DBUG_RETURN(-1); + } + } + } + /* fill structure */ + view->query.str= (char*)str.ptr(); + view->query.length= str.length()-1; // we do not need last \0 + view->source.str= thd->query; + view->source.length= thd->query_length; + view->file_version= 1; + view->calc_md5(md5); + view->md5.str= md5; + view->md5.length= 32; + can_be_merged= lex->can_be_merged(); + if (lex->create_view_algorithm == VIEW_ALGORITHM_MERGE && + !lex->can_be_merged()) + { + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_VIEW_MERGE, + ER(ER_WARN_VIEW_MERGE)); + lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; + } + view->algorithm= lex->create_view_algorithm; + view->with_check= lex->create_view_check; + if ((view->updatable_view= (can_be_merged && + view->algorithm != VIEW_ALGORITHM_TMPTABLE))) + { + /* TODO: change here when we will support UNIONs */ + for (TABLE_LIST *tbl= (TABLE_LIST *)lex->select_lex.table_list.first; + tbl; + tbl= tbl->next_local) + { + if ((tbl->view && !tbl->updatable_view) || tbl->schema_table) + { + view->updatable_view= 0; + break; + } + for (TABLE_LIST *up= tbl; up; up= up->embedding) + { + if (up->outer_join) + { + view->updatable_view= 0; + goto loop_out; + } + } + } + } +loop_out: + /* + Check that table of main select do not used in subqueries. + + This test can catch only very simple cases of such non-updateable views, + all other will be detected before updating commands execution. + (it is more optimisation then real check) + + NOTE: this skip cases of using table via VIEWs, joined VIEWs, VIEWs with + UNION + */ + if (view->updatable_view && + !lex->select_lex.next_select() && + !((TABLE_LIST*)lex->select_lex.table_list.first)->next_local && + find_table_in_global_list(lex->query_tables->next_global, + lex->query_tables->db, + lex->query_tables->real_name)) + { + view->updatable_view= 0; + } + + if (view->with_check != VIEW_CHECK_NONE && + !view->updatable_view) + { + my_error(ER_VIEW_NONUPD_CHECK, MYF(0), view->db, view->real_name); + DBUG_RETURN(-1); + } + + if (sql_create_definition_file(&dir, &file, view_file_type, + (gptr)view, view_parameters, 3)) + { + DBUG_RETURN(thd->net.report_error? 
-1 : 1); + } + DBUG_RETURN(0); +} + + +/* + read VIEW .frm and create structures + + SYNOPSIS + mysql_make_view() + parser - parser object; + table - TABLE_LIST structure for filling + + RETURN + 0 ok + 1 error +*/ + +my_bool +mysql_make_view(File_parser *parser, TABLE_LIST *table) +{ + DBUG_ENTER("mysql_make_view"); + + if (table->view) + { + DBUG_PRINT("info", + ("VIEW %s.%s is already processed on previous PS/SP execution", + table->view_db.str, table->view_name.str)); + DBUG_RETURN(0); + } + + SELECT_LEX *end; + THD *thd= current_thd; + LEX *old_lex= thd->lex, *lex; + SELECT_LEX *view_select; + int res= 0; + + /* + For now we assume that tables will not be changed during PS life (it + will be TRUE as far as we make new table cache). + */ + Item_arena *arena= thd->current_arena, backup; + if (arena->is_conventional()) + arena= 0; + else + thd->set_n_backup_item_arena(arena, &backup); + + /* init timestamp */ + if (!table->timestamp.str) + table->timestamp.str= table->timestamp_buffer; + /* + TODO: when VIEWs will be stored in cache, table mem_root should + be used here + */ + if (parser->parse((gptr)table, thd->mem_root, view_parameters, + required_view_parameters)) + goto err; + + /* + Save VIEW parameters, which will be wiped out by derived table + processing + */ + table->view_db.str= table->db; + table->view_db.length= table->db_length; + table->view_name.str= table->real_name; + table->view_name.length= table->real_name_length; + + /*TODO: md5 test here and warning if it is differ */ + + /* + TODO: TABLE mem root should be used here when VIEW will be stored in + TABLE cache + + now Lex placed in statement memory + */ + table->view= lex= thd->lex= (LEX*) new(thd->mem_root) st_lex_local; + lex_start(thd, (uchar*)table->query.str, table->query.length); + view_select= &lex->select_lex; + view_select->select_number= ++thd->select_number; + old_lex->derived_tables|= DERIVED_VIEW; + { + ulong options= thd->options; + /* switch off modes which can prevent normal parsing of VIEW + - MODE_REAL_AS_FLOAT affect only CREATE TABLE parsing + + MODE_PIPES_AS_CONCAT affect expression parsing + + MODE_ANSI_QUOTES affect expression parsing + + MODE_IGNORE_SPACE affect expression parsing + - MODE_NOT_USED not used :) + * MODE_ONLY_FULL_GROUP_BY affect execution + * MODE_NO_UNSIGNED_SUBTRACTION affect execution + - MODE_NO_DIR_IN_CREATE affect table creation only + - MODE_POSTGRESQL compounded from other modes + - MODE_ORACLE compounded from other modes + - MODE_MSSQL compounded from other modes + - MODE_DB2 compounded from other modes + - MODE_MAXDB affect only CREATE TABLE parsing + - MODE_NO_KEY_OPTIONS affect only SHOW + - MODE_NO_TABLE_OPTIONS affect only SHOW + - MODE_NO_FIELD_OPTIONS affect only SHOW + - MODE_MYSQL323 affect only SHOW + - MODE_MYSQL40 affect only SHOW + - MODE_ANSI compounded from other modes + (+ transaction mode) + ? MODE_NO_AUTO_VALUE_ON_ZERO affect UPDATEs + + MODE_NO_BACKSLASH_ESCAPES affect expression parsing + */ + thd->options&= ~(MODE_PIPES_AS_CONCAT | MODE_ANSI_QUOTES | + MODE_IGNORE_SPACE | MODE_NO_BACKSLASH_ESCAPES); + CHARSET_INFO *save_cs= thd->variables.character_set_client; + thd->variables.character_set_client= system_charset_info; + res= yyparse((void *)thd); + thd->variables.character_set_client= save_cs; + thd->options= options; + } + if (!res && !thd->is_fatal_error) + { + TABLE_LIST *top_view= (table->belong_to_view ? 
+ table->belong_to_view : + table); + TABLE_LIST *view_tables= lex->query_tables; + TABLE_LIST *view_tables_tail= 0; + TABLE_LIST *tbl; + + if (lex->spfuns.records) + { + /* move SP to main LEX */ + sp_merge_funs(old_lex, lex); + /* open mysq.proc for functions which are not in cache */ + if (old_lex->proc_table == 0 && + (old_lex->proc_table= + (TABLE_LIST*)thd->calloc(sizeof(TABLE_LIST))) != 0) + { + TABLE_LIST *table= old_lex->proc_table; + table->db= (char*)"mysql"; + table->db_length= 5; + table->real_name= table->alias= (char*)"proc"; + table->real_name_length= 4; + table->cacheable_table= 1; + old_lex->add_to_query_tables(table); + } + } + /* cleanup LEX */ + if (lex->spfuns.array.buffer) + hash_free(&lex->spfuns); + + /* + mark to avoid temporary table using and put view reference and find + last view table + */ + for (tbl= view_tables; + tbl; + tbl= (view_tables_tail= tbl)->next_global) + { + tbl->skip_temporary= 1; + tbl->belong_to_view= top_view; + } + + /* + check rights to run commands (EXPLAIN SELECT & SHOW CREATE) which show + underlying tables + */ + if ((old_lex->sql_command == SQLCOM_SELECT && old_lex->describe)) + { + if (check_table_access(thd, SELECT_ACL, view_tables, 1) && + check_table_access(thd, SHOW_VIEW_ACL, table, 1)) + { + my_message(ER_VIEW_NO_EXPLAIN, ER(ER_VIEW_NO_EXPLAIN), MYF(0)); + goto err; + } + } + else if (old_lex->sql_command == SQLCOM_SHOW_CREATE) + { + if (check_table_access(thd, SHOW_VIEW_ACL, table, 0)) + goto err; + } + + /* move SQL_NO_CACHE & Co to whole query */ + old_lex->safe_to_cache_query= (old_lex->safe_to_cache_query && + lex->safe_to_cache_query); + /* move SQL_CACHE to whole query */ + if (view_select->options & OPTION_TO_QUERY_CACHE) + old_lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + + /* + Put tables of VIEW after VIEW TABLE_LIST + + NOTE: It is important for UPDATE/INSERT/DELETE checks to have this + tables just after VIEW instead of tail of list, to be able check that + table is unique. Also we store old next table for the same purpose. + */ + if (view_tables) + { + if (table->next_global) + { + view_tables_tail->next_global= table->next_global; + table->next_global->prev_global= &view_tables_tail->next_global; + } + else + { + lex->query_tables_last= &view_tables_tail->next_global; + } + view_tables->prev_global= &table->next_global; + table->next_global= view_tables; + } + + /* + check MERGE algorithm ability + - algorithm is not explicit TEMPORARY TABLE + - VIEW SELECT allow merging + - VIEW used in subquery or command support MERGE algorithm + */ + if (table->algorithm != VIEW_ALGORITHM_TMPTABLE && + lex->can_be_merged() && + (table->select_lex->master_unit() != &old_lex->unit || + old_lex->can_use_merged()) && + !old_lex->can_not_use_merged()) + { + /* lex should contain at least one table */ + DBUG_ASSERT(view_tables != 0); + + table->effective_algorithm= VIEW_ALGORITHM_MERGE; + DBUG_PRINT("info", ("algorithm: MERGE")); + table->updatable= (table->updatable_view != 0); + table->effective_with_check= (uint8)table->with_check; + + table->ancestor= view_tables; + + /* next table should include SELECT_LEX under this table SELECT_LEX */ + table->ancestor->select_lex= table->select_lex; + + /* + Process upper level tables of view. As far as we do noy suport union + here we can go through local tables of view most upper SELECT + */ + for(tbl= (TABLE_LIST*)view_select->table_list.first; + tbl; + tbl= tbl->next_local) + { + /* + move lock type (TODO: should we issue error in case of TMPTABLE + algorithm and non-read locking)? 
+ */ + tbl->lock_type= table->lock_type; + } + + /* multi table view */ + if (view_tables->next_local) + { + /* make nested join structure for view tables */ + NESTED_JOIN *nested_join; + if (!(nested_join= table->nested_join= + (NESTED_JOIN *) thd->calloc(sizeof(NESTED_JOIN)))) + goto err; + nested_join->join_list= view_select->top_join_list; + + /* re-nest tables of VIEW */ + { + List_iterator_fast<TABLE_LIST> ti(nested_join->join_list); + while ((tbl= ti++)) + { + tbl->join_list= &nested_join->join_list; + tbl->embedding= table; + } + } + } + + /* Store WHERE clause for post-processing in setup_ancestor */ + table->where= view_select->where; + + /* + Add subqueries units to SELECT in which we merging current view. + + NOTE: we do not support UNION here, so we take only one select + */ + for (SELECT_LEX_UNIT *unit= lex->select_lex.first_inner_unit(); + unit; + unit= unit->next_unit()) + { + SELECT_LEX_NODE *save_slave= unit->slave; + unit->include_down(table->select_lex); + unit->slave= save_slave; // fix include_down initialisation + } + + /* + This SELECT_LEX will be linked in global SELECT_LEX list + to make it processed by mysql_handle_derived(), + but it will not be included to SELECT_LEX tree, because it + will not be executed + */ + goto ok; + } + + table->effective_algorithm= VIEW_ALGORITHM_TMPTABLE; + DBUG_PRINT("info", ("algorithm: TEMPORARY TABLE")); + view_select->linkage= DERIVED_TABLE_TYPE; + table->updatable= 0; + table->effective_with_check= VIEW_CHECK_NONE; + old_lex->subqueries= TRUE; + + /* SELECT tree link */ + lex->unit.include_down(table->select_lex); + lex->unit.slave= view_select; // fix include_down initialisation + + table->derived= &lex->unit; + } + else + goto err; + +ok: + if (arena) + thd->restore_backup_item_arena(arena, &backup); + /* global SELECT list linking */ + end= view_select; // primary SELECT_LEX is always last + end->link_next= old_lex->all_selects_list; + old_lex->all_selects_list->link_prev= &end->link_next; + old_lex->all_selects_list= lex->all_selects_list; + lex->all_selects_list->link_prev= + (st_select_lex_node**)&old_lex->all_selects_list; + + thd->lex= old_lex; + DBUG_RETURN(0); + +err: + if (arena) + thd->restore_backup_item_arena(arena, &backup); + table->view= 0; // now it is not VIEW placeholder + thd->lex= old_lex; + DBUG_RETURN(1); +} + + +/* + drop view + + SYNOPSIS + mysql_drop_view() + thd - thread handler + views - views to delete + drop_mode - cascade/check + + RETURN VALUE + FALSE OK + TRUE Error +*/ + +bool mysql_drop_view(THD *thd, TABLE_LIST *views, enum_drop_mode drop_mode) +{ + DBUG_ENTER("mysql_drop_view"); + char path[FN_REFLEN]; + TABLE_LIST *view; + bool type= 0; + + for (view= views; view; view= view->next_local) + { + strxnmov(path, FN_REFLEN, mysql_data_home, "/", view->db, "/", + view->real_name, reg_ext, NullS); + (void) unpack_filename(path, path); + VOID(pthread_mutex_lock(&LOCK_open)); + if (access(path, F_OK) || (type= (mysql_frm_type(path) != FRMTYPE_VIEW))) + { + char name[FN_REFLEN]; + my_snprintf(name, sizeof(name), "%s.%s", view->db, view->real_name); + if (thd->lex->drop_if_exists) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_BAD_TABLE_ERROR, ER(ER_BAD_TABLE_ERROR), + name); + VOID(pthread_mutex_unlock(&LOCK_open)); + continue; + } + if (type) + my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->real_name, "VIEW"); + else + my_error(ER_BAD_TABLE_ERROR, MYF(0), name); + goto err; + } + if (my_delete(path, MYF(MY_WME))) + goto err; + VOID(pthread_mutex_unlock(&LOCK_open)); + } + 
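The loop above gives DROP VIEW the following behaviour (the statements are an illustration derived from the error paths shown, not part of the patch):

    DROP VIEW v1;                 -- unlinks <datadir>/<db>/v1.frm
    DROP VIEW t1;                 -- t1 is a base table -> ER_WRONG_OBJECT
    DROP VIEW no_such;            -- no .frm found      -> ER_BAD_TABLE_ERROR
    DROP VIEW IF EXISTS no_such;  -- no .frm found      -> note pushed, loop
                                  --                       continues with the
                                  --                       next listed view

Since a view has no data files, dropping one amounts to a single my_delete() of its .frm under LOCK_open.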
send_ok(thd); + DBUG_RETURN(FALSE); + +err: + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(TRUE); + +} + + +/* + Check type of .frm if we are not going to parse it + + SYNOPSIS + mysql_frm_type() + path path to file + + RETURN + FRMTYPE_ERROR error + FRMTYPE_TABLE table + FRMTYPE_VIEW view +*/ + +frm_type_enum mysql_frm_type(char *path) +{ + File file; + char header[10]; //"TYPE=VIEW\n" it is 10 characters + int length; + DBUG_ENTER("mysql_frm_type"); + + if ((file= my_open(path, O_RDONLY | O_SHARE, MYF(MY_WME))) < 0) + { + DBUG_RETURN(FRMTYPE_ERROR); + } + length= my_read(file, (byte*) header, 10, MYF(MY_WME)); + my_close(file, MYF(MY_WME)); + if (length == (int) MY_FILE_ERROR) + DBUG_RETURN(FRMTYPE_ERROR); + if (!strncmp(header, "TYPE=VIEW\n", 10)) + DBUG_RETURN(FRMTYPE_VIEW); + DBUG_RETURN(FRMTYPE_TABLE); // Is probably a .frm table +} + + +/* + check of key (primary or unique) presence in updatable view + + SYNOPSIS + check_key_in_view() + thd thread handler + view view for check with opened table + + DESCRIPTION + If it is VIEW and query have LIMIT clause then check that undertlying + table of viey contain one of following: + 1) primary key of underlying table + 2) unique key underlying table with fields for which NULL value is + impossible + 3) all fields of underlying table + + RETURN + FALSE OK + TRUE view do not contain key or all fields +*/ + +bool check_key_in_view(THD *thd, TABLE_LIST *view) +{ + TABLE *table; + Field_translator *trans; + KEY *key_info, *key_info_end; + uint i, elements_in_view; + DBUG_ENTER("check_key_in_view"); + + /* + we do not support updatable UNIONs in VIEW, so we can check just limit of + LEX::select_lex + */ + if ((!view->view && !view->belong_to_view) || thd->lex->sql_command == SQLCOM_INSERT || + thd->lex->select_lex.select_limit == HA_POS_ERROR) + DBUG_RETURN(FALSE); /* it is normal table or query without LIMIT */ + table= view->table; + if (view->belong_to_view) + view= view->belong_to_view; + trans= view->field_translation; + key_info_end= (key_info= table->key_info)+ table->keys; + + elements_in_view= view->view->select_lex.item_list.elements; + DBUG_ASSERT(table != 0 && view->field_translation != 0); + + /* Loop over all keys to see if a unique-not-null key is used */ + for (;key_info != key_info_end ; key_info++) + { + if ((key_info->flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME) + { + KEY_PART_INFO *key_part= key_info->key_part; + KEY_PART_INFO *key_part_end= key_part + key_info->key_parts; + + /* check that all key parts are used */ + for (;;) + { + uint k; + for (k= 0; k < elements_in_view; k++) + { + Item_field *field; + if ((field= trans[k].item->filed_for_view_update()) && + field->field == key_part->field) + break; + } + if (k == elements_in_view) + break; // Key is not possible + if (++key_part == key_part_end) + DBUG_RETURN(FALSE); // Found usable key + } + } + } + + DBUG_PRINT("info", ("checking if all fields of table are used")); + /* check all fields presence */ + { + Field **field_ptr; + for (field_ptr= table->field; *field_ptr; field_ptr++) + { + for (i= 0; i < elements_in_view; i++) + { + Item_field *field; + if ((field= trans[i].item->filed_for_view_update()) && + field->field == *field_ptr) + break; + } + if (i == elements_in_view) // If field didn't exists + { + /* + Keys or all fields of underlying tables are not foud => we have + to check variable updatable_views_with_limit to decide should we + issue an error or just a warning + */ + if (thd->variables.updatable_views_with_limit) + { + /* update allowed, but 
issue warning */ + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_NOTE, + ER_WARN_VIEW_WITHOUT_KEY, ER(ER_WARN_VIEW_WITHOUT_KEY)); + DBUG_RETURN(FALSE); + } + /* prohibit update */ + DBUG_RETURN(TRUE); + } + } + } + DBUG_RETURN(FALSE); +} + + +/* + insert fields from VIEW (MERGE algorithm) into given list + + SYNOPSIS + insert_view_fields() + list list for insertion + view view for processing + + RETURN + FALSE OK + TRUE error (is not sent to cliet) +*/ + +bool insert_view_fields(List<Item> *list, TABLE_LIST *view) +{ + uint elements_in_view= view->view->select_lex.item_list.elements; + Field_translator *trans; + DBUG_ENTER("insert_view_fields"); + + if (!(trans= view->field_translation)) + DBUG_RETURN(FALSE); + + for (uint i= 0; i < elements_in_view; i++) + { + Item_field *fld; + if ((fld= trans[i].item->filed_for_view_update())) + list->push_back(fld); + else + { + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), view->alias, "INSERT"); + DBUG_RETURN(TRUE); + } + } + DBUG_RETURN(FALSE); +} + +/* + checking view md5 check suum + + SINOPSYS + view_checksum() + thd threar handler + view view for check + + RETUIRN + HA_ADMIN_OK OK + HA_ADMIN_NOT_IMPLEMENTED it is not VIEW + HA_ADMIN_WRONG_CHECKSUM check sum is wrong +*/ + +int view_checksum(THD *thd, TABLE_LIST *view) +{ + char md5[MD5_BUFF_LENGTH]; + if (!view->view || view->md5.length != 32) + return HA_ADMIN_NOT_IMPLEMENTED; + view->calc_md5(md5); + return (strncmp(md5, view->md5.str, 32) ? + HA_ADMIN_WRONG_CHECKSUM : + HA_ADMIN_OK); +} diff --git a/sql/sql_view.h b/sql/sql_view.h new file mode 100644 index 00000000000..4e6aaf7f477 --- /dev/null +++ b/sql/sql_view.h @@ -0,0 +1,37 @@ +/* -*- C++ -*- */ +/* Copyright (C) 2004 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +bool mysql_create_view(THD *thd, + enum_view_create_mode mode); + +my_bool mysql_make_view(File_parser *parser, TABLE_LIST *table); + +bool mysql_drop_view(THD *thd, TABLE_LIST *view, enum_drop_mode drop_mode); + +bool check_key_in_view(THD *thd, TABLE_LIST * view); + +bool insert_view_fields(List<Item> *list, TABLE_LIST *view); + +frm_type_enum mysql_frm_type(char *path); + +int view_checksum(THD *thd, TABLE_LIST *view); + +extern TYPELIB updatable_views_with_limit_typelib; + +#define VIEW_ANY_ACL (SELECT_ACL | UPDATE_ACL | INSERT_ACL | DELETE_ACL) + diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 9235ce1fb6d..289b96dff51 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -34,6 +34,10 @@ #include "slave.h" #include "lex_symbol.h" #include "item_create.h" +#include "sp_head.h" +#include "sp_pcontext.h" +#include "sp_rcontext.h" +#include "sp.h" #include <myisam.h> #include <myisammrg.h> @@ -46,10 +50,13 @@ int yylex(void *yylval, void *yythd); ER_WARN_DEPRECATED_SYNTAX, \ ER(ER_WARN_DEPRECATED_SYNTAX), (A), (B)); -inline Item *or_or_concat(THD *thd, Item* A, Item* B) +/* Helper for parsing "IS [NOT] truth_value" */ +inline Item *is_truth_value(Item *A, bool v1, bool v2) { - return (thd->variables.sql_mode & MODE_PIPES_AS_CONCAT ? - (Item*) new Item_func_concat(A,B) : (Item*) new Item_cond_or(A,B)); + return new Item_func_if(create_func_ifnull(A, + new Item_int((char *) (v2 ? "TRUE" : "FALSE"), v2, 1)), + new Item_int((char *) (v1 ? "TRUE" : "FALSE"), v1, 1), + new Item_int((char *) (v1 ? "FALSE" : "TRUE"),!v1, 1)); } %} @@ -82,10 +89,14 @@ inline Item *or_or_concat(THD *thd, Item* A, Item* B) enum Item_udftype udf_type; CHARSET_INFO *charset; thr_lock_type lock_type; - interval_type interval; + interval_type interval, interval_time_st; timestamp_type date_time_type; st_select_lex *select_lex; chooser_compare_func_creator boolfunc2creator; + struct sp_cond_type *spcondtype; + struct { int vars, conds, hndlrs, curs; } spblock; + sp_name *spname; + struct st_lex *lex; } %{ @@ -126,6 +137,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token AVG_SYM %token BEGIN_SYM %token BINLOG_SYM +%token CALL_SYM %token CHANGE %token CLIENT_SYM %token COMMENT_SYM @@ -135,7 +147,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token CREATE %token CROSS %token CUBE_SYM +%token DEFINER_SYM %token DELETE_SYM +%token DETERMINISTIC_SYM %token DUAL_SYM %token DO_SYM %token DROP @@ -166,6 +180,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token SHOW %token SLAVE %token SNAPSHOT_SYM +%token SQL_SYM %token SQL_THREAD %token START_SYM %token STD_SYM @@ -181,8 +196,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token ACTION %token AGGREGATE_SYM +%token ALGORITHM_SYM %token ALL %token AND_SYM +%token AND_AND_SYM %token AS %token ASC %token AUTO_INC @@ -190,6 +207,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token BACKUP_SYM %token BERKELEY_DB_SYM %token BINARY +%token BIN_NUM %token BIT_SYM %token BOOL_SYM %token BOOLEAN_SYM @@ -199,6 +217,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token BYTE_SYM %token CACHE_SYM %token CASCADE +%token CASCADED %token CAST_SYM %token CHARSET %token CHECKSUM_SYM @@ -209,11 +228,16 @@ bool my_yyoverflow(short **a, YYSTYPE **b, 
ulong *yystacksize); %token COLUMNS %token COLUMN_SYM %token CONCURRENT +%token CONDITION_SYM +%token CONNECTION_SYM %token CONSTRAINT +%token CONTAINS_SYM +%token CONTINUE_SYM %token CONVERT_SYM %token CURRENT_USER %token DATABASES %token DATA_SYM +%token DECLARE_SYM %token DEFAULT %token DELAYED_SYM %token DELAY_KEY_WRITE_SYM @@ -225,20 +249,24 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token DISTINCT %token DUPLICATE_SYM %token DYNAMIC_SYM +%token EACH_SYM %token ENABLE_SYM %token ENCLOSED %token ESCAPED %token DIRECTORY_SYM %token ESCAPE_SYM %token EXISTS +%token EXIT_SYM %token EXTENDED_SYM %token FALSE_SYM +%token FETCH_SYM %token FILE_SYM %token FIRST_SYM %token FIXED_SYM %token FLOAT_NUM %token FORCE_SYM %token FOREIGN +%token FOUND_SYM %token FROM %token FULL %token FULLTEXT_SYM @@ -261,8 +289,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token INFILE %token INNER_SYM %token INNOBASE_SYM +%token INOUT_SYM %token INTO %token IN_SYM +%token INVOKER_SYM %token ISOLATION %token JOIN_SYM %token KEYS @@ -272,14 +302,17 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token LEAVES %token LEVEL_SYM %token LEX_HOSTNAME +%token LANGUAGE_SYM %token LIKE %token LINES %token LOCAL_SYM +%token LOCATOR_SYM %token LOG_SYM %token LOGS_SYM %token LONG_NUM %token LONG_SYM %token LOW_PRIORITY +%token MERGE_SYM %token MASTER_HOST_SYM %token MASTER_USER_SYM %token MASTER_LOG_FILE_SYM @@ -301,9 +334,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token MAX_CONNECTIONS_PER_HOUR %token MAX_QUERIES_PER_HOUR %token MAX_UPDATES_PER_HOUR +%token MAX_USER_CONNECTIONS_SYM %token MEDIUM_SYM %token MIN_ROWS +%token MUTEX_SYM %token NAMES_SYM +%token NAME_SYM %token NATIONAL_SYM %token NATURAL %token NDBCLUSTER_SYM @@ -311,7 +347,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token NCHAR_SYM %token NCHAR_STRING %token NVARCHAR_SYM -%token NOT +%token NOT_SYM +%token NOT2_SYM %token NO_SYM %token NULL_SYM %token NUM @@ -322,8 +359,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token OPTION %token OPTIONALLY %token OR_SYM -%token OR_OR_CONCAT +%token OR2_SYM +%token OR_OR_SYM %token ORDER_SYM +%token OUT_SYM %token OUTER %token OUTFILE %token DUMPFILE @@ -340,6 +379,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token RAID_CHUNKS %token RAID_CHUNKSIZE %token READ_SYM +%token READS_SYM %token REAL_NUM %token REFERENCES %token REGEXP @@ -351,10 +391,12 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token RESTORE_SYM %token RESTRICT %token REVOKE +%token ROUTINE_SYM %token ROWS_SYM %token ROW_FORMAT_SYM %token ROW_SYM %token RTREE_SYM +%token SECURITY_SYM %token SET %token SEPARATOR_SYM %token SERIAL_SYM @@ -363,6 +405,10 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token SIMPLE_SYM %token SHUTDOWN %token SPATIAL_SYM +%token SPECIFIC_SYM +%token SQLEXCEPTION_SYM +%token SQLSTATE_SYM +%token SQLWARNING_SYM %token SSL_SYM %token STARTING %token STATUS_SYM @@ -373,11 +419,13 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token TABLE_SYM %token TABLESPACE %token TEMPORARY +%token TEMPTABLE_SYM %token TERMINATED %token TEXT_STRING %token TO_SYM %token TRAILING %token TRANSACTION_SYM +%token TRIGGER_SYM %token TRUE_SYM %token TYPE_SYM %token TYPES_SYM @@ -385,14 +433,19 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token FUNC_ARG1 %token FUNC_ARG2 %token FUNC_ARG3 -%token 
UDF_RETURNS_SYM +%token RETURN_SYM +%token RETURNS_SYM %token UDF_SONAME_SYM -%token UDF_SYM +%token UDF_RETURNS_SYM +%token FUNCTION_SYM %token UNCOMMITTED_SYM +%token UNDEFINED_SYM %token UNDERSCORE_CHARSET +%token UNDO_SYM %token UNICODE_SYM %token UNION_SYM %token UNIQUE_SYM +%token UNKNOWN_SYM %token USAGE %token USE_FRM %token USE_SYM @@ -400,6 +453,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token VALUE_SYM %token VALUES %token VARIABLES +%token VIEW_SYM %token WHERE %token WITH %token WRITE_SYM @@ -407,6 +461,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token X509_SYM %token XOR %token COMPRESSED_SYM +%token ROW_COUNT_SYM %token ERRORS %token WARNINGS @@ -443,6 +498,8 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token STRING_SYM %token TEXT_SYM %token TIMESTAMP +%token TIMESTAMP_ADD +%token TIMESTAMP_DIFF %token TIME_SYM %token TINYBLOB %token TINYINT @@ -489,6 +546,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token FIELD_FUNC %token FORMAT_SYM %token FOR_SYM +%token FRAC_SECOND_SYM %token FROM_UNIXTIME %token GEOMCOLLFROMTEXT %token GEOMFROMTEXT @@ -517,6 +575,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token MINUTE_SECOND_SYM %token MINUTE_SYM %token MODE_SYM +%token MODIFIES_SYM %token MODIFY_SYM %token MONTH_SYM %token MLINEFROMTEXT @@ -534,6 +593,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token POLYGON %token POSITION_SYM %token PROCEDURE +%token QUARTER_SYM %token RAND %token REPLACE %token RIGHT @@ -545,12 +605,6 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token SUBSTRING %token SUBSTRING_INDEX %token TRIM -%token UDA_CHAR_SUM -%token UDA_FLOAT_SUM -%token UDA_INT_SUM -%token UDF_CHAR_FUNC -%token UDF_FLOAT_FUNC -%token UDF_INT_FUNC %token UNIQUE_USERS %token UNIX_TIMESTAMP %token USER @@ -574,14 +628,28 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token SQL_SMALL_RESULT %token SQL_BUFFER_RESULT +%token CURSOR_SYM +%token ELSEIF_SYM +%token ITERATE_SYM +%token GOTO_SYM +%token LABEL_SYM +%token LEAVE_SYM +%token LOOP_SYM +%token REPEAT_SYM +%token UNTIL_SYM +%token WHILE_SYM +%token ASENSITIVE_SYM +%token INSENSITIVE_SYM +%token SENSITIVE_SYM + %token ISSUER_SYM %token SUBJECT_SYM %token CIPHER_SYM %token BEFORE_SYM %left SET_VAR -%left OR_OR_CONCAT OR_SYM XOR -%left AND_SYM +%left OR_OR_SYM OR_SYM OR2_SYM XOR +%left AND_SYM AND_AND_SYM %left BETWEEN_SYM CASE_SYM WHEN_SYM THEN_SYM ELSE %left EQ EQUAL_SYM GE GT_SYM LE LT NE IS LIKE REGEXP IN_SYM %left '|' @@ -591,7 +659,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %left '*' '/' '%' DIV_SYM MOD_SYM %left '^' %left NEG '~' -%right NOT +%right NOT_SYM NOT2_SYM %right BINARY COLLATE_SYM %type <lex_str> @@ -599,6 +667,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal NCHAR_STRING opt_component key_cache_name + sp_opt_label BIN_NUM %type <lex_str_ptr> opt_table_alias @@ -633,17 +702,22 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <item> literal text_literal insert_ident order_ident simple_ident select_item2 expr opt_expr opt_else sum_expr in_sum_expr - table_wild no_in_expr expr_expr simple_expr no_and_expr + bool_term bool_factor bool_test bool_pri + predicate bit_expr bit_term bit_factor value_expr term factor + table_wild 
simple_expr udf_expr using_list expr_or_default set_expr_or_default interval_expr param_marker singlerow_subselect singlerow_subselect_init exists_subselect exists_subselect_init geometry_function signed_literal now_or_signed_literal opt_escape + sp_opt_default + simple_ident_nospvar simple_ident_q %type <item_num> NUM_literal %type <item_list> - expr_list udf_expr_list when_list ident_list ident_list_arg + expr_list udf_expr_list udf_expr_list2 when_list + ident_list ident_list_arg %type <key_type> key_type opt_unique_or_fulltext constraint_key_type @@ -659,14 +733,13 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type <table_list> join_table_list join_table - -%type <udf> - UDF_CHAR_FUNC UDF_FLOAT_FUNC UDF_INT_FUNC - UDA_CHAR_SUM UDA_FLOAT_SUM UDA_INT_SUM + table_factor table_ref %type <date_time_type> date_time_type; %type <interval> interval +%type <interval_time_st> interval_time_st + %type <db_type> storage_engines %type <row_type> row_types @@ -710,7 +783,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); select_item_list select_item values_list no_braces opt_limit_clause delete_limit_clause fields opt_values values procedure_list procedure_list2 procedure_item - when_list2 expr_list2 handler + when_list2 expr_list2 udf_expr_list3 handler opt_precision opt_ignore opt_column opt_restrict grant revoke set lock unlock string_list field_options field_option field_opt_list opt_binary table_lock_list table_lock @@ -718,9 +791,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); opt_delete_options opt_delete_option varchar nchar nvarchar opt_outer table_list table_name opt_option opt_place opt_attribute opt_attribute_list attribute column_list column_list_id - opt_column_list grant_privileges opt_table user_list grant_option - grant_privilege grant_privilege_list - flush_options flush_option + opt_column_list grant_privileges opt_table grant_list grant_option + object_privilege object_privilege_list user_list rename_list + clear_privileges flush_options flush_option equal optional_braces opt_key_definition key_usage_list2 opt_mi_check_type opt_to mi_check_types normal_join table_to_table_list table_to_table opt_table_list opt_as @@ -731,11 +804,20 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); subselect_end select_var_list select_var_list_init help opt_len opt_extended_describe prepare prepare_src execute deallocate + statement sp_suid opt_view_list view_list or_replace algorithm + sp_c_chistics sp_a_chistics sp_chistic sp_c_chistic END_OF_INPUT +%type <NONE> call sp_proc_stmts sp_proc_stmts1 sp_proc_stmt +%type <num> sp_decl_idents sp_opt_inout sp_handler_type sp_hcond_list +%type <spcondtype> sp_cond sp_hcond +%type <spblock> sp_decls sp_decl +%type <lex> sp_cursor_stmt +%type <spname> sp_name + %type <NONE> '-' '+' '*' '/' '%' '(' ')' - ',' '!' '{' '}' '&' '|' AND_SYM OR_SYM OR_OR_CONCAT BETWEEN_SYM CASE_SYM + ',' '!' 
'{' '}' '&' '|' AND_SYM OR_SYM OR_OR_SYM BETWEEN_SYM CASE_SYM THEN_SYM WHEN_SYM DIV_SYM MOD_SYM %% @@ -747,7 +829,7 @@ query: if (!thd->bootstrap && (!(thd->lex->select_lex.options & OPTION_FOUND_COMMENT))) { - send_error(thd,ER_EMPTY_QUERY); + my_message(ER_EMPTY_QUERY, ER(ER_EMPTY_QUERY), MYF(0)); YYABORT; } else @@ -758,10 +840,16 @@ query: | verb_clause END_OF_INPUT {}; verb_clause: + statement + | begin + ; + +/* Verb clauses, except begin */ +statement: alter | analyze | backup - | begin + | call | change | check | checksum @@ -1062,19 +1150,1291 @@ create: lex->name=$4.str; lex->create_info.options=$3; } - | CREATE udf_func_type UDF_SYM IDENT_sys + | CREATE udf_func_type FUNCTION_SYM sp_name { LEX *lex=Lex; - lex->sql_command = SQLCOM_CREATE_FUNCTION; - lex->udf.name = $4; + lex->spname= $4; lex->udf.type= $2; } - UDF_RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING_sys + create_function_tail + {} + | CREATE PROCEDURE sp_name + { + LEX *lex= Lex; + sp_head *sp; + + if (lex->sphead) + { + my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "PROCEDURE"); + YYABORT; + } + /* Order is important here: new - reset - init */ + sp= new sp_head(); + sp->reset_thd_mem_root(YYTHD); + sp->init(lex); + + sp->m_type= TYPE_ENUM_PROCEDURE; + lex->sphead= sp; + /* + * We have to turn of CLIENT_MULTI_QUERIES while parsing a + * stored procedure, otherwise yylex will chop it into pieces + * at each ';'. + */ + sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES; + YYTHD->client_capabilities &= (~CLIENT_MULTI_QUERIES); + } + '(' + { + LEX *lex= Lex; + + lex->sphead->m_param_begin= lex->tok_start+1; + } + sp_pdparam_list + ')' + { + LEX *lex= Lex; + + lex->sphead->m_param_end= lex->tok_start; + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + } + sp_c_chistics + { + LEX *lex= Lex; + + lex->sphead->m_chistics= &lex->sp_chistics; + lex->sphead->m_body_begin= lex->tok_start; + } + sp_proc_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + if (sp->check_backpatch(YYTHD)) + YYABORT; + sp->init_strings(YYTHD, lex, $3); + lex->sql_command= SQLCOM_CREATE_PROCEDURE; + /* Restore flag if it was cleared above */ + if (sp->m_old_cmq) + YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES; + sp->restore_thd_mem_root(YYTHD); + } + | CREATE or_replace algorithm VIEW_SYM table_ident + { + THD *thd= YYTHD; + LEX *lex= thd->lex; + lex->sql_command= SQLCOM_CREATE_VIEW; + lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; + /* first table in list is target VIEW name */ + if (!lex->select_lex.add_table_to_list(thd, $5, NULL, 0)) + YYABORT; + } + opt_view_list AS select_init check_option + {} + | CREATE TRIGGER_SYM ident trg_action_time trg_event + ON table_ident FOR_SYM EACH_SYM ROW_SYM + { + LEX *lex= Lex; + sp_head *sp; + + if (lex->sphead) + { + my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "TRIGGER"); + YYABORT; + } + + if (!(sp= new sp_head())) + YYABORT; + sp->reset_thd_mem_root(YYTHD); + sp->init(lex); + + sp->m_type= TYPE_ENUM_TRIGGER; + lex->sphead= sp; + /* + We have to turn of CLIENT_MULTI_QUERIES while parsing a + stored procedure, otherwise yylex will chop it into pieces + at each ';'. 
+ */ + sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES; + YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES; + + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + lex->sphead->m_chistics= &lex->sp_chistics; + lex->sphead->m_body_begin= lex->tok_start; + } + sp_proc_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + lex->sql_command= SQLCOM_CREATE_TRIGGER; + sp->init_strings(YYTHD, lex, NULL); + /* Restore flag if it was cleared above */ + if (sp->m_old_cmq) + YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES; + sp->restore_thd_mem_root(YYTHD); + + lex->name_and_length= $3; + + /* + We have to do it after parsing trigger body, because some of + sp_proc_stmt alternatives are not saving/restoring LEX, so + lex->query_tables can be wiped out. + + QQ: What are other consequences of this? + + QQ: Could we loosen lock type in certain cases ? + */ + if (!lex->select_lex.add_table_to_list(YYTHD, $7, + (LEX_STRING*) 0, + TL_OPTION_UPDATING, + TL_WRITE)) + YYABORT; + } + | CREATE USER clear_privileges grant_list + { + Lex->sql_command = SQLCOM_CREATE_USER; + } + ; + +clear_privileges: + /* Nothing */ + { + LEX *lex=Lex; + lex->users_list.empty(); + lex->columns.empty(); + lex->grant= lex->grant_tot_col= 0; + lex->all_privileges= 0; + lex->select_lex.db= 0; + lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; + lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0; + bzero((char *)&(lex->mqh),sizeof(lex->mqh)); + } + ; + +sp_name: + IDENT_sys '.' IDENT_sys + { + $$= new sp_name($1, $3); + $$->init_qname(YYTHD); + } + | IDENT_sys + { + $$= sp_name_current_db_new(YYTHD, $1); + } + ; + +create_function_tail: + RETURNS_SYM udf_type UDF_SONAME_SYM TEXT_STRING_sys { LEX *lex=Lex; - lex->udf.returns=(Item_result) $7; - lex->udf.dl=$9.str; + lex->sql_command = SQLCOM_CREATE_FUNCTION; + lex->udf.name = lex->spname->m_name; + lex->udf.returns=(Item_result) $2; + lex->udf.dl=$4.str; + } + | '(' + { + LEX *lex= Lex; + sp_head *sp; + + if (lex->sphead) + { + my_error(ER_SP_NO_RECURSIVE_CREATE, MYF(0), "FUNCTION"); + YYABORT; + } + /* Order is important here: new - reset - init */ + sp= new sp_head(); + sp->reset_thd_mem_root(YYTHD); + sp->init(lex); + + sp->m_type= TYPE_ENUM_FUNCTION; + lex->sphead= sp; + /* + * We have to turn of CLIENT_MULTI_QUERIES while parsing a + * stored procedure, otherwise yylex will chop it into pieces + * at each ';'. 
+ */ + sp->m_old_cmq= YYTHD->client_capabilities & CLIENT_MULTI_QUERIES; + YYTHD->client_capabilities &= ~CLIENT_MULTI_QUERIES; + lex->sphead->m_param_begin= lex->tok_start+1; + } + sp_fdparam_list ')' + { + LEX *lex= Lex; + + lex->sphead->m_param_end= lex->tok_start; + } + RETURNS_SYM + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + sp->m_returns_begin= lex->tok_start; + sp->m_returns_cs= lex->charset= NULL; + } + type + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + sp->m_returns_end= lex->tok_start; + sp->m_returns= (enum enum_field_types)$8; + sp->m_returns_cs= lex->charset; + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + } + sp_c_chistics + { + LEX *lex= Lex; + + lex->sphead->m_chistics= &lex->sp_chistics; + lex->sphead->m_body_begin= lex->tok_start; + } + sp_proc_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + if (sp->check_backpatch(YYTHD)) + YYABORT; + lex->sql_command= SQLCOM_CREATE_SPFUNCTION; + sp->init_strings(YYTHD, lex, lex->spname); + /* Restore flag if it was cleared above */ + if (sp->m_old_cmq) + YYTHD->client_capabilities |= CLIENT_MULTI_QUERIES; + sp->restore_thd_mem_root(YYTHD); + } + ; + +sp_a_chistics: + /* Empty */ {} + | sp_a_chistics sp_chistic {} + ; + +sp_c_chistics: + /* Empty */ {} + | sp_c_chistics sp_c_chistic {} + ; + +/* Characteristics for both create and alter */ +sp_chistic: + COMMENT_SYM TEXT_STRING_sys + { Lex->sp_chistics.comment= $2; } + | LANGUAGE_SYM SQL_SYM + { /* Just parse it, we only have one language for now. */ } + | NO_SYM SQL_SYM + { Lex->sp_chistics.daccess= SP_NO_SQL; } + | CONTAINS_SYM SQL_SYM + { Lex->sp_chistics.daccess= SP_CONTAINS_SQL; } + | READS_SYM SQL_SYM DATA_SYM + { Lex->sp_chistics.daccess= SP_READS_SQL_DATA; } + | MODIFIES_SYM SQL_SYM DATA_SYM + { Lex->sp_chistics.daccess= SP_MODIFIES_SQL_DATA; } + | sp_suid + { } + ; + +/* Create characteristics */ +sp_c_chistic: + sp_chistic { } + | DETERMINISTIC_SYM { Lex->sp_chistics.detistic= TRUE; } + | not DETERMINISTIC_SYM { Lex->sp_chistics.detistic= FALSE; } + ; + +sp_suid: + SQL_SYM SECURITY_SYM DEFINER_SYM + { + Lex->sp_chistics.suid= SP_IS_SUID; + } + | SQL_SYM SECURITY_SYM INVOKER_SYM + { + Lex->sp_chistics.suid= SP_IS_NOT_SUID; + } + ; + +call: + CALL_SYM sp_name + { + LEX *lex = Lex; + + lex->sql_command= SQLCOM_CALL; + lex->spname= $2; + lex->value_list.empty(); + } + '(' sp_cparam_list ')' {} + ; + +/* CALL parameters */ +sp_cparam_list: + /* Empty */ + | sp_cparams + ; + +sp_cparams: + sp_cparams ',' expr + { + Lex->value_list.push_back($3); + } + | expr + { + Lex->value_list.push_back($1); + } + ; + +/* Stored FUNCTION parameter declaration list */ +sp_fdparam_list: + /* Empty */ + | sp_fdparams + ; + +sp_fdparams: + sp_fdparams ',' sp_fdparam + | sp_fdparam + ; + +sp_fdparam: + ident type + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_pvar(&$1, TRUE)) + { + my_error(ER_SP_DUP_PARAM, MYF(0), $1.str); + YYABORT; + } + spc->push_pvar(&$1, (enum enum_field_types)$2, sp_param_in); + } + ; + +/* Stored PROCEDURE parameter declaration list */ +sp_pdparam_list: + /* Empty */ + | sp_pdparams + ; + +sp_pdparams: + sp_pdparams ',' sp_pdparam + | sp_pdparam + ; + +sp_pdparam: + sp_opt_inout ident type + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_pvar(&$2, TRUE)) + { + my_error(ER_SP_DUP_PARAM, MYF(0), $2.str); + YYABORT; + } + spc->push_pvar(&$2, (enum enum_field_types)$3, + (sp_param_mode_t)$1); + } + ; + +sp_opt_inout: + /* Empty */ { $$= sp_param_in; } + | IN_SYM { $$= sp_param_in; } + | OUT_SYM { 
$$= sp_param_out; } + | INOUT_SYM { $$= sp_param_inout; } + ; + +sp_proc_stmts: + /* Empty */ {} + | sp_proc_stmts { Lex->query_tables= 0; } sp_proc_stmt ';' + ; + +sp_proc_stmts1: + sp_proc_stmt ';' {} + | sp_proc_stmts1 { Lex->query_tables= 0; } sp_proc_stmt ';' + ; + +sp_decls: + /* Empty */ + { + $$.vars= $$.conds= $$.hndlrs= $$.curs= 0; + } + | sp_decls sp_decl ';' + { + /* We check for declarations out of (standard) order this way + because letting the grammar rules reflect it caused tricky + shift/reduce conflicts with the wrong result. (And we get + better error handling this way.) */ + if (($2.vars || $2.conds) && ($1.curs || $1.hndlrs)) + { /* Variable or condition following cursor or handler */ + my_message(ER_SP_VARCOND_AFTER_CURSHNDLR, + ER(ER_SP_VARCOND_AFTER_CURSHNDLR), MYF(0)); + YYABORT; + } + if ($2.curs && $1.hndlrs) + { /* Cursor following handler */ + my_message(ER_SP_CURSOR_AFTER_HANDLER, + ER(ER_SP_CURSOR_AFTER_HANDLER), MYF(0)); + YYABORT; + } + $$.vars= $1.vars + $2.vars; + $$.conds= $1.conds + $2.conds; + $$.hndlrs= $1.hndlrs + $2.hndlrs; + $$.curs= $1.curs + $2.curs; + } + ; + +sp_decl: + DECLARE_SYM sp_decl_idents type sp_opt_default + { + LEX *lex= Lex; + sp_pcontext *ctx= lex->spcont; + uint max= ctx->context_pvars(); + enum enum_field_types type= (enum enum_field_types)$3; + Item *it= $4; + + for (uint i = max-$2 ; i < max ; i++) + { + ctx->set_type(i, type); + if (! it) + ctx->set_isset(i, FALSE); + else + { + sp_instr_set *in= new sp_instr_set(lex->sphead->instructions(), + ctx, + ctx->pvar_context2index(i), + it, type); + + in->tables= lex->query_tables; + lex->query_tables= 0; + lex->sphead->add_instr(in); + ctx->set_isset(i, TRUE); + ctx->set_default(i, it); + } + } + $$.vars= $2; + $$.conds= $$.hndlrs= $$.curs= 0; + } + | DECLARE_SYM ident CONDITION_SYM FOR_SYM sp_cond + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_cond(&$2, TRUE)) + { + my_error(ER_SP_DUP_COND, MYF(0), $2.str); + YYABORT; + } + YYTHD->lex->spcont->push_cond(&$2, $5); + $$.vars= $$.hndlrs= $$.curs= 0; + $$.conds= 1; + } + | DECLARE_SYM sp_handler_type HANDLER_SYM FOR_SYM + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + sp_instr_hpush_jump *i= + new sp_instr_hpush_jump(sp->instructions(), ctx, $2, + ctx->current_pvars()); + + sp->add_instr(i); + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + ctx->add_handler(); + sp->m_in_handler= TRUE; + } + sp_hcond_list sp_proc_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + sp_label_t *hlab= lex->spcont->pop_label(); /* After this hdlr */ + sp_instr_hreturn *i; + + if ($2 == SP_HANDLER_CONTINUE) + { + i= new sp_instr_hreturn(sp->instructions(), ctx, + ctx->current_pvars()); + sp->add_instr(i); + } + else + { /* EXIT or UNDO handler, just jump to the end of the block */ + i= new sp_instr_hreturn(sp->instructions(), ctx, 0); + + sp->add_instr(i); + sp->push_backpatch(i, lex->spcont->last_label()); /* Block end */ + } + lex->sphead->backpatch(hlab); + sp->m_in_handler= FALSE; + $$.vars= $$.conds= $$.curs= 0; + $$.hndlrs= $6; + } + | DECLARE_SYM ident CURSOR_SYM FOR_SYM sp_cursor_stmt + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + uint offp; + sp_instr_cpush *i; + + if (ctx->find_cursor(&$2, &offp, TRUE)) + { + my_error(ER_SP_DUP_CURS, MYF(0), $2.str); + delete $5; + YYABORT; + } + i= new sp_instr_cpush(sp->instructions(), ctx, $5); + sp->add_instr(i); + ctx->push_cursor(&$2); + $$.vars= $$.conds= $$.hndlrs= 
0; + $$.curs= 1; + } + ; + +sp_cursor_stmt: + { + Lex->sphead->reset_lex(YYTHD); + + /* We use statement here just be able to get a better + error message. Using 'select' works too, but will then + result in a generic "syntax error" if a non-select + statement is given. */ + } + statement + { + LEX *lex= Lex; + + if (lex->sql_command != SQLCOM_SELECT) + { + my_message(ER_SP_BAD_CURSOR_QUERY, ER(ER_SP_BAD_CURSOR_QUERY), + MYF(0)); + YYABORT; + } + if (lex->result) + { + my_message(ER_SP_BAD_CURSOR_SELECT, ER(ER_SP_BAD_CURSOR_SELECT), + MYF(0)); + YYABORT; + } + lex->sp_lex_in_use= TRUE; + $$= lex; + lex->sphead->restore_lex(YYTHD); + } + ; + +sp_handler_type: + EXIT_SYM { $$= SP_HANDLER_EXIT; } + | CONTINUE_SYM { $$= SP_HANDLER_CONTINUE; } +/* | UNDO_SYM { QQ No yet } */ + ; + +sp_hcond_list: + sp_hcond + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_instr_hpush_jump *i= (sp_instr_hpush_jump *)sp->last_instruction(); + + i->add_condition($1); + $$= 1; + } + | sp_hcond_list ',' sp_hcond + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_instr_hpush_jump *i= (sp_instr_hpush_jump *)sp->last_instruction(); + + i->add_condition($3); + $$= $1 + 1; + } + ; + +sp_cond: + ULONG_NUM + { /* mysql errno */ + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::number; + $$->mysqlerr= $1; + } + | SQLSTATE_SYM opt_value TEXT_STRING_literal + { /* SQLSTATE */ + uint len= ($3.length < sizeof($$->sqlstate)-1 ? + $3.length : sizeof($$->sqlstate)-1); + + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::state; + memcpy($$->sqlstate, $3.str, len); + $$->sqlstate[len]= '\0'; + } + ; + +opt_value: + /* Empty */ {} + | VALUE_SYM {} + ; + +sp_hcond: + sp_cond + { + $$= $1; } + | ident /* CONDITION name */ + { + $$= Lex->spcont->find_cond(&$1); + if ($$ == NULL) + { + my_error(ER_SP_COND_MISMATCH, MYF(0), $1.str); + YYABORT; + } + } + | SQLWARNING_SYM /* SQLSTATEs 01??? */ + { + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::warning; + } + | not FOUND_SYM /* SQLSTATEs 02??? 
*/ + { + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::notfound; + } + | SQLEXCEPTION_SYM /* All other SQLSTATEs */ + { + $$= (sp_cond_type_t *)YYTHD->alloc(sizeof(sp_cond_type_t)); + $$->type= sp_cond_type_t::exception; + } + ; + +sp_decl_idents: + ident + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_pvar(&$1, TRUE)) + { + my_error(ER_SP_DUP_VAR, MYF(0), $1.str); + YYABORT; + } + spc->push_pvar(&$1, (enum_field_types)0, sp_param_in); + $$= 1; + } + | sp_decl_idents ',' ident + { + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + + if (spc->find_pvar(&$3, TRUE)) + { + my_error(ER_SP_DUP_VAR, MYF(0), $3.str); + YYABORT; + } + spc->push_pvar(&$3, (enum_field_types)0, sp_param_in); + $$= $1 + 1; + } + ; + +sp_opt_default: + /* Empty */ { $$ = NULL; } + | DEFAULT expr { $$ = $2; } + ; + +sp_proc_stmt: + { + LEX *lex= Lex; + + lex->sphead->reset_lex(YYTHD); + lex->sphead->m_tmp_query= lex->tok_start; + } + statement + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + + if ((lex->sql_command == SQLCOM_SELECT && !lex->result) || + sp_multi_results_command(lex->sql_command)) + { + /* We maybe have one or more SELECT without INTO */ + sp->m_multi_results= TRUE; + } + if (lex->sql_command == SQLCOM_CHANGE_DB) + { /* "USE db" doesn't work in a procedure */ + my_message(ER_SP_NO_USE, ER(ER_SP_NO_USE), MYF(0)); + YYABORT; + } + /* Don't add an instruction for empty SET statements. + ** (This happens if the SET only contained local variables, + ** which get their set instructions generated separately.) + */ + if (lex->sql_command != SQLCOM_SET_OPTION || + ! lex->var_list.is_empty()) + { + /* + Currently we can't handle queries inside a FUNCTION or + TRIGGER, because of the way table locking works. This is + unfortunate, and limits the usefulness of functions and + especially triggers a tremendously, but it's nothing we + can do about this at the moment. + */ + if (sp->m_type != TYPE_ENUM_PROCEDURE) + { + my_message(ER_SP_BADSTATEMENT, ER(ER_SP_BADSTATEMENT), MYF(0)); + YYABORT; + } + else + { + sp_instr_stmt *i=new sp_instr_stmt(sp->instructions(), + lex->spcont); + + /* Extract the query statement from the tokenizer: + The end is either lex->tok_end or tok->ptr. */ + if (lex->ptr - lex->tok_end > 1) + i->m_query.length= lex->ptr - sp->m_tmp_query; + else + i->m_query.length= lex->tok_end - sp->m_tmp_query; + i->m_query.str= strmake_root(YYTHD->mem_root, + (char *)sp->m_tmp_query, + i->m_query.length); + i->set_lex(lex); + sp->add_instr(i); + lex->sp_lex_in_use= TRUE; + } + } + sp->restore_lex(YYTHD); + } + | RETURN_SYM expr + { + LEX *lex= Lex; + + if (lex->sphead->m_type == TYPE_ENUM_PROCEDURE) + { + my_message(ER_SP_BADRETURN, ER(ER_SP_BADRETURN), MYF(0)); + YYABORT; + } + else + { + sp_instr_freturn *i; + + if ($2->type() == Item::SUBSELECT_ITEM) + { /* QQ For now, just disallow subselects as values */ + my_message(ER_SP_BADSTATEMENT, ER(ER_SP_BADSTATEMENT), MYF(0)); + YYABORT; + } + i= new sp_instr_freturn(lex->sphead->instructions(), + lex->spcont, + $2, lex->sphead->m_returns); + lex->sphead->add_instr(i); + lex->sphead->m_has_return= TRUE; + } + } + | IF sp_if END IF {} + | CASE_SYM WHEN_SYM + { + Lex->sphead->m_simple_case= FALSE; + } + sp_case END CASE_SYM {} + | CASE_SYM expr WHEN_SYM + { + /* We "fake" this by using an anonymous variable which we + set to the expression. Note that all WHENs are evaluate + at the same frame level, so we then know that it's the + top-most variable in the frame. 
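             Illustration: for CASE x WHEN 1 THEN ... END CASE, the
             sp_instr_set created below assigns x to this anonymous
             variable, the WHEN branches are then compiled against it
             because m_simple_case is set, and the variable is popped
             again by the pop_pvar() call that follows END CASE.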
*/ + LEX *lex= Lex; + uint offset= lex->spcont->current_pvars(); + sp_instr_set *i = new sp_instr_set(lex->sphead->instructions(), + lex->spcont, + offset, $2, MYSQL_TYPE_STRING); + LEX_STRING dummy; + + dummy.str= (char *)""; + dummy.length= 0; + lex->spcont->push_pvar(&dummy, MYSQL_TYPE_STRING, sp_param_in); + i->tables= lex->query_tables; + lex->query_tables= 0; + lex->sphead->add_instr(i); + lex->sphead->m_simple_case= TRUE; + } + sp_case END CASE_SYM + { + Lex->spcont->pop_pvar(); + } + | sp_labeled_control + {} + | { /* Unlabeled controls get a secret label. */ + LEX *lex= Lex; + + lex->spcont->push_label((char *)"", lex->sphead->instructions()); + } + sp_unlabeled_control + { + LEX *lex= Lex; + + lex->sphead->backpatch(lex->spcont->pop_label()); + } + | LEAVE_SYM IDENT + { + LEX *lex= Lex; + sp_head *sp = lex->sphead; + sp_pcontext *ctx= lex->spcont; + sp_label_t *lab= ctx->find_label($2.str); + + if (! lab) + { + my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "LEAVE", $2.str); + YYABORT; + } + else + { + uint ip= sp->instructions(); + sp_instr_jump *i; + sp_instr_hpop *ih; + sp_instr_cpop *ic; + + ih= new sp_instr_hpop(ip++, ctx, 0); + sp->push_backpatch(ih, lab); + sp->add_instr(ih); + ic= new sp_instr_cpop(ip++, ctx, 0); + sp->push_backpatch(ic, lab); + sp->add_instr(ic); + i= new sp_instr_jump(ip, ctx); + sp->push_backpatch(i, lab); /* Jumping forward */ + sp->add_instr(i); + } + } + | ITERATE_SYM IDENT + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + sp_label_t *lab= ctx->find_label($2.str); + + if (! lab || lab->type != SP_LAB_ITER) + { + my_error(ER_SP_LILABEL_MISMATCH, MYF(0), "ITERATE", $2.str); + YYABORT; + } + else + { + sp_instr_jump *i; + uint ip= sp->instructions(); + uint n; + + n= ctx->diff_handlers(lab->ctx); + if (n) + sp->add_instr(new sp_instr_hpop(ip++, ctx, n)); + n= ctx->diff_cursors(lab->ctx); + if (n) + sp->add_instr(new sp_instr_cpop(ip++, ctx, n)); + i= new sp_instr_jump(ip, ctx, lab->ip); /* Jump back */ + sp->add_instr(i); + } + } + | LABEL_SYM IDENT + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + sp_label_t *lab= ctx->find_label($2.str); + + if (lab) + { + my_error(ER_SP_LABEL_REDEFINE, MYF(0), $2.str); + YYABORT; + } + else + { + lab= ctx->push_label($2.str, sp->instructions()); + lab->type= SP_LAB_GOTO; + lab->ctx= ctx; + sp->backpatch(lab); + } + } + | GOTO_SYM IDENT + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + uint ip= lex->sphead->instructions(); + sp_label_t *lab; + sp_instr_jump *i; + sp_instr_hpop *ih; + sp_instr_cpop *ic; + + if (sp->m_in_handler) + { + my_message(ER_SP_GOTO_IN_HNDLR, ER(ER_SP_GOTO_IN_HNDLR), MYF(0)); + YYABORT; + } + lab= ctx->find_label($2.str); + if (! 
lab) + { + lab= (sp_label_t *)YYTHD->alloc(sizeof(sp_label_t)); + lab->name= $2.str; + lab->ip= 0; + lab->type= SP_LAB_REF; + lab->ctx= ctx; + + ih= new sp_instr_hpop(ip++, ctx, 0); + sp->push_backpatch(ih, lab); + sp->add_instr(ih); + ic= new sp_instr_cpop(ip++, ctx, 0); + sp->add_instr(ic); + sp->push_backpatch(ic, lab); + i= new sp_instr_jump(ip, ctx); + sp->push_backpatch(i, lab); /* Jumping forward */ + sp->add_instr(i); + } + else + { + uint n; + + n= ctx->diff_handlers(lab->ctx); + if (n) + { + ih= new sp_instr_hpop(ip++, ctx, n); + sp->add_instr(ih); + } + n= ctx->diff_cursors(lab->ctx); + if (n) + { + ic= new sp_instr_cpop(ip++, ctx, n); + sp->add_instr(ic); + } + i= new sp_instr_jump(ip, ctx, lab->ip); /* Jump back */ + sp->add_instr(i); + } + } + | OPEN_SYM ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint offset; + sp_instr_copen *i; + + if (! lex->spcont->find_cursor(&$2, &offset)) + { + my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str); + YYABORT; + } + i= new sp_instr_copen(sp->instructions(), lex->spcont, offset); + sp->add_instr(i); + } + | FETCH_SYM sp_opt_fetch_noise ident INTO + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint offset; + sp_instr_cfetch *i; + + if (! lex->spcont->find_cursor(&$3, &offset)) + { + my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $3.str); + YYABORT; + } + i= new sp_instr_cfetch(sp->instructions(), lex->spcont, offset); + sp->add_instr(i); + } + sp_fetch_list + { } + | CLOSE_SYM ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint offset; + sp_instr_cclose *i; + + if (! lex->spcont->find_cursor(&$2, &offset)) + { + my_error(ER_SP_CURSOR_MISMATCH, MYF(0), $2.str); + YYABORT; + } + i= new sp_instr_cclose(sp->instructions(), lex->spcont, offset); + sp->add_instr(i); + } + ; + +sp_opt_fetch_noise: + /* Empty */ + | NEXT_SYM FROM + | FROM + ; + +sp_fetch_list: + ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *spc= lex->spcont; + sp_pvar_t *spv; + + if (!spc || !(spv = spc->find_pvar(&$1))) + { + my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str); + YYABORT; + } + else + { + /* An SP local variable */ + sp_instr_cfetch *i= (sp_instr_cfetch *)sp->last_instruction(); + + i->add_to_varlist(spv); + spv->isset= TRUE; + } + } + | + sp_fetch_list ',' ident + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *spc= lex->spcont; + sp_pvar_t *spv; + + if (!spc || !(spv = spc->find_pvar(&$3))) + { + my_error(ER_SP_UNDECLARED_VAR, MYF(0), $3.str); + YYABORT; + } + else + { + /* An SP local variable */ + sp_instr_cfetch *i= (sp_instr_cfetch *)sp->last_instruction(); + + i->add_to_varlist(spv); + spv->isset= TRUE; + } + } + ; + +sp_if: + expr THEN_SYM + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + uint ip= sp->instructions(); + sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, ctx, $1); + + i->tables= lex->query_tables; + lex->query_tables= 0; + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + sp->add_instr(i); + } + sp_proc_stmts1 + { + sp_head *sp= Lex->sphead; + sp_pcontext *ctx= Lex->spcont; + uint ip= sp->instructions(); + sp_instr_jump *i = new sp_instr_jump(ip, ctx); + + sp->add_instr(i); + sp->backpatch(ctx->pop_label()); + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + } + sp_elseifs + { + LEX *lex= Lex; + + lex->sphead->backpatch(lex->spcont->pop_label()); + } + ; + +sp_elseifs: + /* Empty */ + | ELSEIF_SYM sp_if + | ELSE sp_proc_stmts1 + ; + +sp_case: + expr THEN_SYM + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= Lex->spcont; + 
uint ip= sp->instructions(); + sp_instr_jump_if_not *i; + + if (! sp->m_simple_case) + i= new sp_instr_jump_if_not(ip, ctx, $1); + else + { /* Simple case: <caseval> = <whenval> */ + LEX_STRING ivar; + + ivar.str= (char *)"_tmp_"; + ivar.length= 5; + Item *var= (Item*) new Item_splocal(ivar, + ctx->current_pvars()-1); + Item *expr= new Item_func_eq(var, $1); + + i= new sp_instr_jump_if_not(ip, ctx, expr); + lex->variables_used= 1; + } + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + i->tables= lex->query_tables; + lex->query_tables= 0; + sp->add_instr(i); + } + sp_proc_stmts1 + { + sp_head *sp= Lex->sphead; + sp_pcontext *ctx= Lex->spcont; + uint ip= sp->instructions(); + sp_instr_jump *i = new sp_instr_jump(ip, ctx); + + sp->add_instr(i); + sp->backpatch(ctx->pop_label()); + sp->push_backpatch(i, ctx->push_label((char *)"", 0)); + } + sp_whens + { + LEX *lex= Lex; + + lex->sphead->backpatch(lex->spcont->pop_label()); + } + ; + +sp_whens: + /* Empty */ + { + sp_head *sp= Lex->sphead; + uint ip= sp->instructions(); + sp_instr_error *i= new sp_instr_error(ip, Lex->spcont, + ER_SP_CASE_NOT_FOUND); + + sp->add_instr(i); + } + | ELSE sp_proc_stmts1 {} + | WHEN_SYM sp_case {} + ; + +sp_labeled_control: + IDENT ':' + { + LEX *lex= Lex; + sp_pcontext *ctx= lex->spcont; + sp_label_t *lab= ctx->find_label($1.str); + + if (lab) + { + my_error(ER_SP_LABEL_REDEFINE, MYF(0), $1.str); + YYABORT; + } + else + { + lab= lex->spcont->push_label($1.str, + lex->sphead->instructions()); + lab->type= SP_LAB_ITER; + } + } + sp_unlabeled_control sp_opt_label + { + LEX *lex= Lex; + + if ($5.str) + { + sp_label_t *lab= lex->spcont->find_label($5.str); + + if (!lab || + my_strcasecmp(system_charset_info, $5.str, lab->name) != 0) + { + my_error(ER_SP_LABEL_MISMATCH, MYF(0), $5.str); + YYABORT; + } + } + lex->sphead->backpatch(lex->spcont->pop_label()); + } + ; + +sp_opt_label: + /* Empty */ + { $$.str= NULL; $$.length= 0; } + | IDENT + { $$= $1; } + ; + +sp_unlabeled_control: + BEGIN_SYM + { /* QQ This is just a dummy for grouping declarations and statements + together. No [[NOT] ATOMIC] yet, and we need to figure out how + make it coexist with the existing BEGIN COMMIT/ROLLBACK. 
*/ + LEX *lex= Lex; + sp_label_t *lab= lex->spcont->last_label(); + + lab->type= SP_LAB_BEGIN; + lex->spcont= lex->spcont->push_context(); + } + sp_decls + sp_proc_stmts + END + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + sp_pcontext *ctx= lex->spcont; + + sp->backpatch(ctx->last_label()); /* We always have a label */ + if ($3.hndlrs) + sp->add_instr(new sp_instr_hpop(sp->instructions(), ctx, + $3.hndlrs)); + if ($3.curs) + sp->add_instr(new sp_instr_cpop(sp->instructions(), ctx, + $3.curs)); + lex->spcont= ctx->pop_context(); + } + | LOOP_SYM + sp_proc_stmts1 END LOOP_SYM + { + LEX *lex= Lex; + uint ip= lex->sphead->instructions(); + sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip); + + lex->sphead->add_instr(i); + } + | WHILE_SYM expr DO_SYM + { + LEX *lex= Lex; + sp_head *sp= lex->sphead; + uint ip= sp->instructions(); + sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont, + $2); + + /* Jumping forward */ + sp->push_backpatch(i, lex->spcont->last_label()); + i->tables= lex->query_tables; + lex->query_tables= 0; + sp->add_instr(i); + } + sp_proc_stmts1 END WHILE_SYM + { + LEX *lex= Lex; + uint ip= lex->sphead->instructions(); + sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_instr_jump *i = new sp_instr_jump(ip, lex->spcont, lab->ip); + + lex->sphead->add_instr(i); + } + | REPEAT_SYM sp_proc_stmts1 UNTIL_SYM expr END REPEAT_SYM + { + LEX *lex= Lex; + uint ip= lex->sphead->instructions(); + sp_label_t *lab= lex->spcont->last_label(); /* Jumping back */ + sp_instr_jump_if_not *i = new sp_instr_jump_if_not(ip, lex->spcont, + $4, lab->ip); + + i->tables= lex->query_tables; + lex->query_tables= 0; + lex->sphead->add_instr(i); + } + ; + +trg_action_time: + BEFORE_SYM + { Lex->trg_chistics.action_time= TRG_ACTION_BEFORE; } + | AFTER_SYM + { Lex->trg_chistics.action_time= TRG_ACTION_AFTER; } + ; + +trg_event: + INSERT + { Lex->trg_chistics.event= TRG_EVENT_INSERT; } + | UPDATE_SYM + { Lex->trg_chistics.event= TRG_EVENT_UPDATE; } + | DELETE_SYM + { Lex->trg_chistics.event= TRG_EVENT_DELETE; } ; create2: @@ -1116,6 +2476,10 @@ create_select: lex->sql_command= SQLCOM_INSERT_SELECT; else if (lex->sql_command == SQLCOM_REPLACE) lex->sql_command= SQLCOM_REPLACE_SELECT; + /* + The following work only with the local list, the global list + is created correctly in this case + */ lex->current_select->table_list.save_and_clear(&lex->save_list); mysql_init_select(lex); lex->current_select->parsing_place= SELECT_LIST; @@ -1125,7 +2489,13 @@ create_select: Select->parsing_place= NO_MATTER; } opt_select_from - { Lex->current_select->table_list.push_front(&Lex->save_list); } + { + /* + The following work only with the local list, the global list + is created correctly in this case + */ + Lex->current_select->table_list.push_front(&Lex->save_list); + } ; opt_as: @@ -1157,7 +2527,7 @@ table_option: opt_if_not_exists: /* empty */ { $$= 0; } - | IF NOT EXISTS { $$=HA_LEX_CREATE_IF_NOT_EXISTS; }; + | IF not EXISTS { $$=HA_LEX_CREATE_IF_NOT_EXISTS; }; opt_create_table_options: /* empty */ @@ -1173,19 +2543,19 @@ create_table_options: | create_table_option ',' create_table_options; create_table_option: - ENGINE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; } - | TYPE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; WARN_DEPRECATED("TYPE=storage_engine","ENGINE=storage_engine"); } + ENGINE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; 
Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; } + | TYPE_SYM opt_equal storage_engines { Lex->create_info.db_type= $3; WARN_DEPRECATED("TYPE=storage_engine","ENGINE=storage_engine"); Lex->create_info.used_fields|= HA_CREATE_USED_ENGINE; } | MAX_ROWS opt_equal ulonglong_num { Lex->create_info.max_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MAX_ROWS;} | MIN_ROWS opt_equal ulonglong_num { Lex->create_info.min_rows= $3; Lex->create_info.used_fields|= HA_CREATE_USED_MIN_ROWS;} | AVG_ROW_LENGTH opt_equal ULONG_NUM { Lex->create_info.avg_row_length=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AVG_ROW_LENGTH;} - | PASSWORD opt_equal TEXT_STRING_sys { Lex->create_info.password=$3.str; } - | COMMENT_SYM opt_equal TEXT_STRING_sys { Lex->create_info.comment=$3.str; } + | PASSWORD opt_equal TEXT_STRING_sys { Lex->create_info.password=$3.str; Lex->create_info.used_fields|= HA_CREATE_USED_PASSWORD; } + | COMMENT_SYM opt_equal TEXT_STRING_sys { Lex->create_info.comment=$3.str; Lex->create_info.used_fields|= HA_CREATE_USED_COMMENT; } | AUTO_INC opt_equal ulonglong_num { Lex->create_info.auto_increment_value=$3; Lex->create_info.used_fields|= HA_CREATE_USED_AUTO;} | PACK_KEYS_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_PACK_KEYS : HA_OPTION_NO_PACK_KEYS; Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;} | PACK_KEYS_SYM opt_equal DEFAULT { Lex->create_info.table_options&= ~(HA_OPTION_PACK_KEYS | HA_OPTION_NO_PACK_KEYS); Lex->create_info.used_fields|= HA_CREATE_USED_PACK_KEYS;} - | CHECKSUM_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; } - | DELAY_KEY_WRITE_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_DELAY_KEY_WRITE : HA_OPTION_NO_DELAY_KEY_WRITE; } - | ROW_FORMAT_SYM opt_equal row_types { Lex->create_info.row_type= $3; } + | CHECKSUM_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? HA_OPTION_CHECKSUM : HA_OPTION_NO_CHECKSUM; Lex->create_info.used_fields|= HA_CREATE_USED_CHECKSUM; } + | DELAY_KEY_WRITE_SYM opt_equal ULONG_NUM { Lex->create_info.table_options|= $3 ? 
HA_OPTION_DELAY_KEY_WRITE : HA_OPTION_NO_DELAY_KEY_WRITE; Lex->create_info.used_fields|= HA_CREATE_USED_DELAY_KEY_WRITE; } + | ROW_FORMAT_SYM opt_equal row_types { Lex->create_info.row_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_ROW_FORMAT; } | RAID_TYPE opt_equal raid_types { Lex->create_info.raid_type= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} | RAID_CHUNKS opt_equal ULONG_NUM { Lex->create_info.raid_chunks= $3; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} | RAID_CHUNKSIZE opt_equal ULONG_NUM { Lex->create_info.raid_chunksize= $3*RAID_BLOCK_SIZE; Lex->create_info.used_fields|= HA_CREATE_USED_RAID;} @@ -1196,18 +2566,20 @@ create_table_option: TABLE_LIST *table_list= lex->select_lex.get_table_list(); lex->create_info.merge_list= lex->select_lex.table_list; lex->create_info.merge_list.elements--; - lex->create_info.merge_list.first= (byte*) (table_list->next); + lex->create_info.merge_list.first= + (byte*) (table_list->next_local); lex->select_lex.table_list.elements=1; - lex->select_lex.table_list.next= (byte**) &(table_list->next); - table_list->next=0; + lex->select_lex.table_list.next= + (byte**) &(table_list->next_local); + table_list->next_local= 0; lex->create_info.used_fields|= HA_CREATE_USED_UNION; } | default_charset | default_collation | INSERT_METHOD opt_equal merge_insert_types { Lex->create_info.merge_insert_method= $3; Lex->create_info.used_fields|= HA_CREATE_USED_INSERT_METHOD;} - | DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys - { Lex->create_info.data_file_name= $4.str; } - | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; }; + | DATA_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.data_file_name= $4.str; Lex->create_info.used_fields|= HA_CREATE_USED_DATADIR; } + | INDEX_SYM DIRECTORY_SYM opt_equal TEXT_STRING_sys { Lex->create_info.index_file_name= $4.str; Lex->create_info.used_fields|= HA_CREATE_USED_INDEXDIR; } + ; default_charset: opt_default charset opt_equal charset_name_or_default @@ -1217,9 +2589,9 @@ default_charset: cinfo->default_table_charset && $4 && !my_charset_same(cinfo->default_table_charset,$4)) { - net_printf(YYTHD, ER_CONFLICTING_DECLARATIONS, - "CHARACTER SET ", cinfo->default_table_charset->csname, - "CHARACTER SET ", $4->csname); + my_error(ER_CONFLICTING_DECLARATIONS, MYF(0), + "CHARACTER SET ", cinfo->default_table_charset->csname, + "CHARACTER SET ", $4->csname); YYABORT; } Lex->create_info.default_table_charset= $4; @@ -1234,8 +2606,8 @@ default_collation: cinfo->default_table_charset && $4 && !my_charset_same(cinfo->default_table_charset,$4)) { - net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, - $4->name, cinfo->default_table_charset->csname); + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + $4->name, cinfo->default_table_charset->csname); YYABORT; } Lex->create_info.default_table_charset= $4; @@ -1247,7 +2619,7 @@ storage_engines: { $$ = ha_resolve_by_name($1.str,$1.length); if ($$ == DB_TYPE_UNKNOWN) { - net_printf(YYTHD, ER_UNKNOWN_STORAGE_ENGINE, $1.str); + my_error(ER_UNKNOWN_STORAGE_ENGINE, MYF(0), $1.str); YYABORT; } }; @@ -1382,8 +2754,10 @@ type: int_type opt_len field_options { $$=$1; } | real_type opt_precision field_options { $$=$1; } | FLOAT_SYM float_options field_options { $$=FIELD_TYPE_FLOAT; } - | BIT_SYM opt_len { Lex->length=(char*) "1"; - $$=FIELD_TYPE_TINY; } + | BIT_SYM { Lex->length= (char*) "1"; + $$=FIELD_TYPE_BIT; } + | BIT_SYM '(' NUM ')' { Lex->length= $3.str; + $$=FIELD_TYPE_BIT; } | BOOL_SYM { Lex->length=(char*) 
"1"; $$=FIELD_TYPE_TINY; } | BOOLEAN_SYM { Lex->length=(char*) "1"; @@ -1405,13 +2779,13 @@ type: Lex->charset=&my_charset_bin; $$=FIELD_TYPE_STRING; } | varchar '(' NUM ')' opt_binary { Lex->length=$3.str; - $$=FIELD_TYPE_VAR_STRING; } + $$= MYSQL_TYPE_VARCHAR; } | nvarchar '(' NUM ')' { Lex->length=$3.str; - $$=FIELD_TYPE_VAR_STRING; + $$= MYSQL_TYPE_VARCHAR; Lex->charset=national_charset_info; } | VARBINARY '(' NUM ')' { Lex->length=$3.str; Lex->charset=&my_charset_bin; - $$=FIELD_TYPE_VAR_STRING; } + $$= MYSQL_TYPE_VARCHAR; } | YEAR_SYM opt_len field_options { $$=FIELD_TYPE_YEAR; } | DATE_SYM { $$=FIELD_TYPE_DATE; } | TIME_SYM { $$=FIELD_TYPE_TIME; } @@ -1440,18 +2814,18 @@ type: $$=FIELD_TYPE_TINY_BLOB; } | BLOB_SYM opt_len { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_BLOB; } - | spatial_type { + | spatial_type + { #ifdef HAVE_SPATIAL - Lex->charset=&my_charset_bin; - Lex->uint_geom_type= (uint)$1; - $$=FIELD_TYPE_GEOMETRY; + Lex->charset=&my_charset_bin; + Lex->uint_geom_type= (uint)$1; + $$=FIELD_TYPE_GEOMETRY; #else - net_printf(Lex->thd, ER_FEATURE_DISABLED, - sym_group_geom.name, - sym_group_geom.needed_define); - YYABORT; + my_error(ER_FEATURE_DISABLED, MYF(0) + sym_group_geom.name, sym_group_geom.needed_define); + YYABORT; #endif - } + } | MEDIUMBLOB { Lex->charset=&my_charset_bin; $$=FIELD_TYPE_MEDIUM_BLOB; } | LONGBLOB { Lex->charset=&my_charset_bin; @@ -1572,7 +2946,7 @@ opt_attribute_list: attribute: NULL_SYM { Lex->type&= ~ NOT_NULL_FLAG; } - | NOT NULL_SYM { Lex->type|= NOT_NULL_FLAG; } + | not NULL_SYM { Lex->type|= NOT_NULL_FLAG; } | DEFAULT now_or_signed_literal { Lex->default_value=$2; } | ON UPDATE_SYM NOW_SYM optional_braces { Lex->on_update_value= new Item_func_now_local(); } @@ -1607,8 +2981,8 @@ attribute: { if (Lex->charset && !my_charset_same(Lex->charset,$2)) { - net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, - $2->name,Lex->charset->csname); + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + $2->name,Lex->charset->csname); YYABORT; } else @@ -1633,7 +3007,7 @@ charset_name: { if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0)))) { - net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,$1.str); + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str); YYABORT; } } @@ -1651,7 +3025,7 @@ old_or_new_charset_name: if (!($$=get_charset_by_csname($1.str,MY_CS_PRIMARY,MYF(0))) && !($$=get_old_charset_by_name($1.str))) { - net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,$1.str); + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), $1.str); YYABORT; } } @@ -1667,7 +3041,7 @@ collation_name: { if (!($$=get_charset_by_name($1.str,MYF(0)))) { - net_printf(YYTHD,ER_UNKNOWN_COLLATION,$1.str); + my_error(ER_UNKNOWN_COLLATION, MYF(0), $1.str); YYABORT; } }; @@ -1691,9 +3065,10 @@ opt_binary: | BYTE_SYM { Lex->charset=&my_charset_bin; } | UNICODE_SYM { - if (!(Lex->charset=get_charset_by_csname("ucs2",MY_CS_PRIMARY,MYF(0)))) + if (!(Lex->charset=get_charset_by_csname("ucs2", + MY_CS_PRIMARY,MYF(0)))) { - net_printf(YYTHD,ER_UNKNOWN_CHARACTER_SET,"ucs2"); + my_error(ER_UNKNOWN_CHARACTER_SET, MYF(0), "ucs2"); YYABORT; } } @@ -1755,8 +3130,8 @@ key_type: #ifdef HAVE_SPATIAL $$= Key::SPATIAL; #else - net_printf(Lex->thd, ER_FEATURE_DISABLED, - sym_group_geom.name, sym_group_geom.needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); YYABORT; #endif }; @@ -1788,8 +3163,8 @@ opt_unique_or_fulltext: #ifdef HAVE_SPATIAL $$= Key::SPATIAL; #else - net_printf(Lex->thd, ER_FEATURE_DISABLED, - sym_group_geom.name, sym_group_geom.needed_define); + 
my_message(ER_FEATURE_DISABLED, ER(ER_FEATURE_DISABLED), MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); YYABORT; #endif } @@ -1817,14 +3192,10 @@ key_part: | ident '(' NUM ')' { int key_part_len= atoi($3.str); -#if MYSQL_VERSION_ID < 50000 if (!key_part_len) { - my_printf_error(ER_UNKNOWN_ERROR, - "Key part '%s' length cannot be 0", - MYF(0), $1.str); + my_error(ER_KEY_PART_0, MYF(0), $1.str); } -#endif $$=new key_part_spec($1.str,(uint) key_part_len); }; @@ -1864,8 +3235,7 @@ alter: lex->create_info.db_type= DB_TYPE_DEFAULT; lex->create_info.default_table_charset= NULL; lex->create_info.row_type= ROW_TYPE_NOT_USED; - lex->alter_info.reset(); - lex->alter_info.is_simple= 1; + lex->alter_info.reset(); lex->alter_info.flags= 0; } alter_list @@ -1880,14 +3250,53 @@ alter: LEX *lex=Lex; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; - }; + } + | ALTER PROCEDURE sp_name + { + LEX *lex= Lex; + + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + } + sp_a_chistics + { + THD *thd= YYTHD; + LEX *lex=Lex; + + lex->sql_command= SQLCOM_ALTER_PROCEDURE; + lex->spname= $3; + } + | ALTER FUNCTION_SYM sp_name + { + LEX *lex= Lex; + + bzero((char *)&lex->sp_chistics, sizeof(st_sp_chistics)); + } + sp_a_chistics + { + THD *thd= YYTHD; + LEX *lex=Lex; + lex->sql_command= SQLCOM_ALTER_FUNCTION; + lex->spname= $3; + } + | ALTER algorithm VIEW_SYM table_ident + { + THD *thd= YYTHD; + LEX *lex= thd->lex; + lex->sql_command= SQLCOM_CREATE_VIEW; + lex->create_view_mode= VIEW_ALTER; + lex->select_lex.resolve_mode= SELECT_LEX::SELECT_MODE; + /* first table in list is target VIEW name */ + lex->select_lex.add_table_to_list(thd, $4, NULL, 0); + } + opt_view_list AS select_init check_option + {} + ; ident_or_empty: /* empty */ { $$= 0; } | ident { $$= $1.str; }; - alter_list: | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; } | IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; } @@ -1895,27 +3304,27 @@ alter_list: | alter_list ',' alter_list_item; add_column: - ADD opt_column + ADD opt_column { LEX *lex=Lex; - lex->change=0; - lex->alter_info.flags|= ALTER_ADD_COLUMN; + lex->change=0; + lex->alter_info.flags|= ALTER_ADD_COLUMN; }; alter_list_item: - add_column column_def opt_place { Lex->alter_info.is_simple= 0; } - | ADD key_def - { - LEX *lex=Lex; - lex->alter_info.is_simple= 0; - lex->alter_info.flags|= ALTER_ADD_INDEX; + add_column column_def opt_place { } + | ADD key_def + { + Lex->alter_info.flags|= ALTER_ADD_INDEX; } - | add_column '(' field_list ')' { Lex->alter_info.is_simple= 0; } + | add_column '(' field_list ')' + { + Lex->alter_info.flags|= ALTER_ADD_COLUMN | ALTER_ADD_INDEX; + } | CHANGE opt_column field_ident { LEX *lex=Lex; - lex->change= $3.str; - lex->alter_info.is_simple= 0; + lex->change= $3.str; lex->alter_info.flags|= ALTER_CHANGE_COLUMN; } field_spec opt_place @@ -1926,7 +3335,6 @@ alter_list_item: lex->default_value= lex->on_update_value= 0; lex->comment=0; lex->charset= NULL; - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_CHANGE_COLUMN; } type opt_attribute @@ -1946,17 +3354,18 @@ alter_list_item: { LEX *lex=Lex; lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::COLUMN, - $3.str)); - lex->alter_info.is_simple= 0; + $3.str)); lex->alter_info.flags|= ALTER_DROP_COLUMN; } - | DROP FOREIGN KEY_SYM opt_ident { Lex->alter_info.is_simple= 0; } + | DROP FOREIGN KEY_SYM opt_ident + { + Lex->alter_info.flags|= ALTER_DROP_INDEX; + } | DROP PRIMARY_SYM KEY_SYM { LEX *lex=Lex; 
lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY, primary_key_name)); - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_DROP_INDEX; } | DROP key_or_index field_ident @@ -1964,25 +3373,32 @@ alter_list_item: LEX *lex=Lex; lex->alter_info.drop_list.push_back(new Alter_drop(Alter_drop::KEY, $3.str)); - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_DROP_INDEX; } - | DISABLE_SYM KEYS { Lex->alter_info.keys_onoff= DISABLE; } - | ENABLE_SYM KEYS { Lex->alter_info.keys_onoff= ENABLE; } + | DISABLE_SYM KEYS + { + LEX *lex=Lex; + lex->alter_info.keys_onoff= DISABLE; + lex->alter_info.flags|= ALTER_KEYS_ONOFF; + } + | ENABLE_SYM KEYS + { + LEX *lex=Lex; + lex->alter_info.keys_onoff= ENABLE; + lex->alter_info.flags|= ALTER_KEYS_ONOFF; + } | ALTER opt_column field_ident SET DEFAULT signed_literal { LEX *lex=Lex; lex->alter_info.alter_list.push_back(new Alter_column($3.str,$6)); - lex->alter_info.is_simple= 0; - lex->alter_info.flags|= ALTER_CHANGE_COLUMN; + lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT; } | ALTER opt_column field_ident DROP DEFAULT { LEX *lex=Lex; lex->alter_info.alter_list.push_back(new Alter_column($3.str, (Item*) 0)); - lex->alter_info.is_simple= 0; - lex->alter_info.flags|= ALTER_CHANGE_COLUMN; + lex->alter_info.flags|= ALTER_CHANGE_COLUMN_DEFAULT; } | RENAME opt_to table_ident { @@ -1992,7 +3408,7 @@ alter_list_item: if (check_table_name($3->table.str,$3->table.length) || $3->db.str && check_db_name($3->db.str)) { - net_printf(lex->thd,ER_WRONG_TABLE_NAME,$3->table.str); + my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str); YYABORT; } lex->alter_info.flags|= ALTER_RENAME; @@ -2007,27 +3423,25 @@ alter_list_item: $5= $5 ? $5 : $4; if (!my_charset_same($4,$5)) { - net_printf(YYTHD,ER_COLLATION_CHARSET_MISMATCH, - $5->name,$4->csname); + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + $5->name, $4->csname); YYABORT; } LEX *lex= Lex; - lex->create_info.table_charset= + lex->create_info.table_charset= lex->create_info.default_table_charset= $5; lex->create_info.used_fields|= (HA_CREATE_USED_CHARSET | HA_CREATE_USED_DEFAULT_CHARSET); - lex->alter_info.is_simple= 0; + lex->alter_info.flags|= ALTER_CONVERT; } - | create_table_options_space_separated + | create_table_options_space_separated { LEX *lex=Lex; - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_OPTIONS; } - | order_clause + | order_clause { LEX *lex=Lex; - lex->alter_info.is_simple= 0; lex->alter_info.flags|= ALTER_ORDER; }; @@ -2041,9 +3455,10 @@ opt_ignore: ; opt_restrict: - /* empty */ {} - | RESTRICT {} - | CASCADE {}; + /* empty */ { Lex->drop_mode= DROP_DEFAULT; } + | RESTRICT { Lex->drop_mode= DROP_RESTRICT; } + | CASCADE { Lex->drop_mode= DROP_CASCADE; } + ; opt_place: /* empty */ {} @@ -2061,7 +3476,7 @@ opt_to: */ slave: - START_SYM SLAVE slave_thread_opts + START_SYM SLAVE slave_thread_opts { LEX *lex=Lex; lex->sql_command = SQLCOM_SLAVE_START; @@ -2141,7 +3556,8 @@ slave_until: !((lex->mi.log_file_name && lex->mi.pos) || (lex->mi.relay_log_name && lex->mi.relay_log_pos))) { - send_error(lex->thd, ER_BAD_SLAVE_UNTIL_COND); + my_message(ER_BAD_SLAVE_UNTIL_COND, + ER(ER_BAD_SLAVE_UNTIL_COND), MYF(0)); YYABORT; } @@ -2277,8 +3693,25 @@ rename: } table_to_table_list {} + | RENAME USER clear_privileges rename_list + { + Lex->sql_command = SQLCOM_RENAME_USER; + } ; +rename_list: + user TO_SYM user + { + if (Lex->users_list.push_back($1) || Lex->users_list.push_back($3)) + YYABORT; + } + | rename_list ',' user TO_SYM user + { + if 
(Lex->users_list.push_back($3) || Lex->users_list.push_back($5)) + YYABORT; + } + ; + table_to_table_list: table_to_table | table_to_table_list ',' table_to_table; @@ -2543,8 +3976,12 @@ select_item: YYABORT; if ($4.str) $2->set_name($4.str,$4.length,system_charset_info); - else if (!$2->name) - $2->set_name($1,(uint) ($3 - $1), YYTHD->charset()); + else if (!$2->name) { + char *str = $1; + if (str[-1] == '`') + str--; + $2->set_name(str,(uint) ($3 - str), YYTHD->charset()); + } }; remember_name: @@ -2571,9 +4008,102 @@ optional_braces: /* all possible expressions */ expr: - expr_expr { $$= $1; } - | simple_expr { $$= $1; } - ; + expr or bool_term { $$= new Item_cond_or($1,$3); } + | expr XOR bool_term { $$= new Item_cond_xor($1,$3); } + | bool_term ; + +bool_term: + bool_term and bool_factor { $$= new Item_cond_and($1,$3); } + | bool_factor ; + +bool_factor: + NOT_SYM bool_factor { $$= negate_expression(YYTHD, $2); } + | bool_test ; + +bool_test: + bool_pri IS TRUE_SYM { $$= is_truth_value($1,1,0); } + | bool_pri IS not TRUE_SYM { $$= is_truth_value($1,0,0); } + | bool_pri IS FALSE_SYM { $$= is_truth_value($1,0,1); } + | bool_pri IS not FALSE_SYM { $$= is_truth_value($1,1,1); } + | bool_pri IS UNKNOWN_SYM { $$= new Item_func_isnull($1); } + | bool_pri IS not UNKNOWN_SYM { $$= new Item_func_isnotnull($1); } + | bool_pri ; + +bool_pri: + bool_pri IS NULL_SYM { $$= new Item_func_isnull($1); } + | bool_pri IS not NULL_SYM { $$= new Item_func_isnotnull($1); } + | predicate BETWEEN_SYM bit_expr AND_SYM bool_pri + { $$= new Item_func_between($1,$3,$5); } + | predicate not BETWEEN_SYM bit_expr AND_SYM bool_pri + { $$= negate_expression(YYTHD, new Item_func_between($1,$4,$6)); } + | predicate ; + +predicate: + bit_expr IN_SYM '(' expr_list ')' + { $4->push_front($1); $$= new Item_func_in(*$4); } + | bit_expr not IN_SYM '(' expr_list ')' + { $5->push_front($1); $$= negate_expression(YYTHD, new Item_func_in(*$5)); } + | bit_expr IN_SYM in_subselect + { $$= new Item_in_subselect($1, $3); } + | bit_expr not IN_SYM in_subselect + { $$= negate_expression(YYTHD, new Item_in_subselect($1, $4)); } + | bit_expr SOUNDS_SYM LIKE bit_expr + { $$= new Item_func_eq(new Item_func_soundex($1), + new Item_func_soundex($4)); } + | bit_expr LIKE simple_expr opt_escape + { $$= new Item_func_like($1,$3,$4); } + | bit_expr not LIKE simple_expr opt_escape + { $$= new Item_func_not(new Item_func_like($1,$4,$5)); } + | bit_expr REGEXP bit_expr { $$= new Item_func_regex($1,$3); } + | bit_expr not REGEXP bit_expr + { $$= negate_expression(YYTHD, new Item_func_regex($1,$4)); } + | bit_expr EQUAL_SYM bit_expr { $$= new Item_func_equal($1,$3); } + | bit_expr comp_op bit_expr %prec EQ + { $$= (*$2)(0)->create($1,$3); } + | bit_expr comp_op all_or_any in_subselect %prec EQ + { $$= all_any_subquery_creator($1, $2, $3, $4); } + | bit_expr ; + +bit_expr: + bit_expr '|' bit_term { $$= new Item_func_bit_or($1,$3); } + | bit_term ; + +bit_term: + bit_term '&' bit_factor { $$= new Item_func_bit_and($1,$3); } + | bit_factor ; + +bit_factor: + bit_factor SHIFT_LEFT value_expr + { $$= new Item_func_shift_left($1,$3); } + | bit_factor SHIFT_RIGHT value_expr + { $$= new Item_func_shift_right($1,$3); } + | value_expr ; + +value_expr: + value_expr '+' term { $$= new Item_func_plus($1,$3); } + | value_expr '-' term { $$= new Item_func_minus($1,$3); } + | value_expr '+' interval_expr interval + { $$= new Item_date_add_interval($1,$3,$4,0); } + | value_expr '-' interval_expr interval + { $$= new Item_date_add_interval($1,$3,$4,1); } + | term ; + 
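The block above is part of a larger rewrite of the expression grammar: the old monolithic expr rule (and its no_in_expr / no_and_expr clones, removed a little further down in the diff) is replaced by a ladder of nonterminals, one per precedence level (expr, bool_term, bool_factor, bool_test, bool_pri, predicate, bit_expr, bit_term, bit_factor, value_expr, term, factor, simple_expr). Each of the binary-operator levels is left-recursive with the next tighter level as its right-hand operand, so precedence and left-associativity come from the shape of the grammar rather than from duplicated rule variants. As a rough, hypothetical sketch of the same layering technique (standalone illustration, not MySQL code; MiniParser and its method names are invented), a recursive-descent evaluator for a three-level subset looks like this:

// Hypothetical sketch: one function per precedence level, mirroring how the
// rewritten grammar layers value_expr -> term -> factor above.
#include <cctype>
#include <cstddef>
#include <cstdio>
#include <string>
#include <utility>

class MiniParser {
public:
  explicit MiniParser(std::string s) : src(std::move(s)) {}
  long parse() { return parse_sum(); }               // loosest level, like value_expr
private:
  long parse_sum() {                                 // '+' / '-', left-associative loop
    long v = parse_term();
    while (peek() == '+' || peek() == '-') {
      char op = next();
      long rhs = parse_term();
      v = (op == '+') ? v + rhs : v - rhs;
    }
    return v;
  }
  long parse_term() {                                // '*' / '/', binds tighter than sum
    long v = parse_factor();
    while (peek() == '*' || peek() == '/') {
      char op = next();
      long rhs = parse_factor();
      v = (op == '*') ? v * rhs : v / rhs;
    }
    return v;
  }
  long parse_factor() {                              // tightest level: unary '-' and numbers
    if (peek() == '-') { next(); return -parse_factor(); }
    long v = 0;
    while (std::isdigit(static_cast<unsigned char>(peek()))) v = v * 10 + (next() - '0');
    return v;
  }
  char peek() { skip(); return pos < src.size() ? src[pos] : '\0'; }
  char next() { char c = peek(); ++pos; return c; }
  void skip() { while (pos < src.size() && src[pos] == ' ') ++pos; }
  std::string src;
  std::size_t pos = 0;
};

int main() {
  // 2 + 3 * 4 groups as 2 + (3 * 4) because parse_term binds tighter than parse_sum.
  std::printf("%ld\n", MiniParser("2 + 3 * 4 - -5").parse());  // prints 19
  return 0;
}

In the yacc rules the same effect is obtained declaratively, which is why the duplicated no_in_expr and no_and_expr sets (kept before only to avoid ambiguities around IN and AND) can be deleted, and why '||' can be handled as a plain simple_expr OR_OR_SYM simple_expr concat instead of the old OR_OR_CONCAT special case.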
+term: + term '*' factor { $$= new Item_func_mul($1,$3); } + | term '/' factor { $$= new Item_func_div($1,$3); } + | term '%' factor { $$= new Item_func_mod($1,$3); } + | term DIV_SYM factor { $$= new Item_func_int_div($1,$3); } + | term MOD_SYM factor { $$= new Item_func_mod($1,$3); } + | factor ; + +factor: + factor '^' simple_expr { $$= new Item_func_bit_xor($1,$3); } + | simple_expr ; + +or: OR_SYM | OR2_SYM; +and: AND_SYM | AND_AND_SYM; +not: NOT_SYM | NOT2_SYM; +not2: '!' | NOT2_SYM; comp_op: EQ { $$ = &comp_eq_creator; } | GE { $$ = &comp_ge_creator; } @@ -2587,169 +4117,6 @@ all_or_any: ALL { $$ = 1; } | ANY_SYM { $$ = 0; } ; -/* expressions that begin with 'expr' */ -expr_expr: - expr IN_SYM '(' expr_list ')' - { $4->push_front($1); $$= new Item_func_in(*$4); } - | expr NOT IN_SYM '(' expr_list ')' - { $5->push_front($1); $$= new Item_func_not(new Item_func_in(*$5)); } - | expr IN_SYM in_subselect - { $$= new Item_in_subselect($1, $3); } - | expr NOT IN_SYM in_subselect - { - $$= new Item_func_not(new Item_in_subselect($1, $4)); - } - | expr BETWEEN_SYM no_and_expr AND_SYM expr - { $$= new Item_func_between($1,$3,$5); } - | expr NOT BETWEEN_SYM no_and_expr AND_SYM expr - { $$= new Item_func_not(new Item_func_between($1,$4,$6)); } - | expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } - | expr OR_SYM expr { $$= new Item_cond_or($1,$3); } - | expr XOR expr { $$= new Item_cond_xor($1,$3); } - | expr AND_SYM expr { $$= new Item_cond_and($1,$3); } - | expr SOUNDS_SYM LIKE expr - { - $$= new Item_func_eq(new Item_func_soundex($1), - new Item_func_soundex($4)); - } - | expr LIKE simple_expr opt_escape - { $$= new Item_func_like($1,$3,$4); } - | expr NOT LIKE simple_expr opt_escape - { $$= new Item_func_not(new Item_func_like($1,$4,$5));} - | expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | expr NOT REGEXP expr - { $$= new Item_func_not(new Item_func_regex($1,$4)); } - | expr IS NULL_SYM { $$= new Item_func_isnull($1); } - | expr IS NOT NULL_SYM { $$= new Item_func_isnotnull($1); } - | expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } - | expr comp_op all_or_any in_subselect %prec EQ - { - $$= all_any_subquery_creator($1, $2, $3, $4); - } - | expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } - | expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } - | expr '+' expr { $$= new Item_func_plus($1,$3); } - | expr '-' expr { $$= new Item_func_minus($1,$3); } - | expr '*' expr { $$= new Item_func_mul($1,$3); } - | expr '/' expr { $$= new Item_func_div($1,$3); } - | expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } - | expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } - | expr '|' expr { $$= new Item_func_bit_or($1,$3); } - | expr '^' expr { $$= new Item_func_bit_xor($1,$3); } - | expr '&' expr { $$= new Item_func_bit_and($1,$3); } - | expr '%' expr { $$= new Item_func_mod($1,$3); } - | expr '+' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,0); } - | expr '-' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,1); } - ; - -/* expressions that begin with 'expr' that do NOT follow IN_SYM */ -no_in_expr: - no_in_expr BETWEEN_SYM no_and_expr AND_SYM expr - { $$= new Item_func_between($1,$3,$5); } - | no_in_expr NOT BETWEEN_SYM no_and_expr AND_SYM expr - { $$= new Item_func_not(new Item_func_between($1,$4,$6)); } - | no_in_expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } - | no_in_expr OR_SYM expr { $$= new Item_cond_or($1,$3); } - | 
no_in_expr XOR expr { $$= new Item_cond_xor($1,$3); } - | no_in_expr AND_SYM expr { $$= new Item_cond_and($1,$3); } - | no_in_expr SOUNDS_SYM LIKE expr - { - $$= new Item_func_eq(new Item_func_soundex($1), - new Item_func_soundex($4)); - } - | no_in_expr LIKE simple_expr opt_escape - { $$= new Item_func_like($1,$3,$4); } - | no_in_expr NOT LIKE simple_expr opt_escape - { $$= new Item_func_not(new Item_func_like($1,$4,$5)); } - | no_in_expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | no_in_expr NOT REGEXP expr - { $$= new Item_func_not(new Item_func_regex($1,$4)); } - | no_in_expr IS NULL_SYM { $$= new Item_func_isnull($1); } - | no_in_expr IS NOT NULL_SYM { $$= new Item_func_isnotnull($1); } - | no_in_expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | no_in_expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } - | no_in_expr comp_op all_or_any in_subselect %prec EQ - { - all_any_subquery_creator($1, $2, $3, $4); - } - | no_in_expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } - | no_in_expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } - | no_in_expr '+' expr { $$= new Item_func_plus($1,$3); } - | no_in_expr '-' expr { $$= new Item_func_minus($1,$3); } - | no_in_expr '*' expr { $$= new Item_func_mul($1,$3); } - | no_in_expr '/' expr { $$= new Item_func_div($1,$3); } - | no_in_expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } - | no_in_expr '|' expr { $$= new Item_func_bit_or($1,$3); } - | no_in_expr '^' expr { $$= new Item_func_bit_xor($1,$3); } - | no_in_expr '&' expr { $$= new Item_func_bit_and($1,$3); } - | no_in_expr '%' expr { $$= new Item_func_mod($1,$3); } - | no_in_expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } - | no_in_expr '+' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,0); } - | no_in_expr '-' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,1); } - | simple_expr; - -/* expressions that begin with 'expr' that does NOT follow AND */ -no_and_expr: - no_and_expr IN_SYM '(' expr_list ')' - { $4->push_front($1); $$= new Item_func_in(*$4); } - | no_and_expr NOT IN_SYM '(' expr_list ')' - { $5->push_front($1); $$= new Item_func_not(new Item_func_in(*$5)); } - | no_and_expr IN_SYM in_subselect - { $$= new Item_in_subselect($1, $3); } - | no_and_expr NOT IN_SYM in_subselect - { - $$= new Item_func_not(new Item_in_subselect($1, $4)); - } - | no_and_expr BETWEEN_SYM no_and_expr AND_SYM expr - { $$= new Item_func_between($1,$3,$5); } - | no_and_expr NOT BETWEEN_SYM no_and_expr AND_SYM expr - { $$= new Item_func_not(new Item_func_between($1,$4,$6)); } - | no_and_expr OR_OR_CONCAT expr { $$= or_or_concat(YYTHD, $1,$3); } - | no_and_expr OR_SYM expr { $$= new Item_cond_or($1,$3); } - | no_and_expr XOR expr { $$= new Item_cond_xor($1,$3); } - | no_and_expr SOUNDS_SYM LIKE expr - { - $$= new Item_func_eq(new Item_func_soundex($1), - new Item_func_soundex($4)); - } - | no_and_expr LIKE simple_expr opt_escape - { $$= new Item_func_like($1,$3,$4); } - | no_and_expr NOT LIKE simple_expr opt_escape - { $$= new Item_func_not(new Item_func_like($1,$4,$5)); } - | no_and_expr REGEXP expr { $$= new Item_func_regex($1,$3); } - | no_and_expr NOT REGEXP expr - { $$= new Item_func_not(new Item_func_regex($1,$4)); } - | no_and_expr IS NULL_SYM { $$= new Item_func_isnull($1); } - | no_and_expr IS NOT NULL_SYM { $$= new Item_func_isnotnull($1); } - | no_and_expr EQUAL_SYM expr { $$= new Item_func_equal($1,$3); } - | no_and_expr comp_op expr %prec EQ { $$= (*$2)(0)->create($1,$3); } - | no_and_expr comp_op 
all_or_any in_subselect %prec EQ - { - all_any_subquery_creator($1, $2, $3, $4); - } - | no_and_expr SHIFT_LEFT expr { $$= new Item_func_shift_left($1,$3); } - | no_and_expr SHIFT_RIGHT expr { $$= new Item_func_shift_right($1,$3); } - | no_and_expr '+' expr { $$= new Item_func_plus($1,$3); } - | no_and_expr '-' expr { $$= new Item_func_minus($1,$3); } - | no_and_expr '*' expr { $$= new Item_func_mul($1,$3); } - | no_and_expr '/' expr { $$= new Item_func_div($1,$3); } - | no_and_expr DIV_SYM expr { $$= new Item_func_int_div($1,$3); } - | no_and_expr '|' expr { $$= new Item_func_bit_or($1,$3); } - | no_and_expr '^' expr { $$= new Item_func_bit_xor($1,$3); } - | no_and_expr '&' expr { $$= new Item_func_bit_and($1,$3); } - | no_and_expr '%' expr { $$= new Item_func_mod($1,$3); } - | no_and_expr MOD_SYM expr { $$= new Item_func_mod($1,$3); } - | no_and_expr '+' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,0); } - | no_and_expr '-' interval_expr interval - { $$= new Item_date_add_interval($1,$3,$4,1); } - | simple_expr; - interval_expr: INTERVAL_SYM expr { $$=$2; } ; @@ -2768,12 +4135,16 @@ simple_expr: | '@' ident_or_text SET_VAR expr { $$= new Item_func_set_user_var($2,$4); - Lex->uncacheable(UNCACHEABLE_RAND); + LEX *lex= Lex; + lex->uncacheable(UNCACHEABLE_RAND); + lex->variables_used= 1; } | '@' ident_or_text { $$= new Item_func_get_user_var($2); - Lex->uncacheable(UNCACHEABLE_RAND); + LEX *lex= Lex; + lex->uncacheable(UNCACHEABLE_RAND); + lex->variables_used= 1; } | '@' '@' opt_var_ident_type ident_or_text opt_component { @@ -2785,19 +4156,15 @@ simple_expr: } if (!($$= get_system_var(YYTHD, (enum_var_type) $3, $4, $5))) YYABORT; + Lex->variables_used= 1; } | sum_expr - | '+' expr %prec NEG { $$= $2; } - | '-' expr %prec NEG { $$= new Item_func_neg($2); } - | '~' expr %prec NEG { $$= new Item_func_bit_neg($2); } - | NOT expr %prec NEG - { - $$= negate_expression(YYTHD, $2); - } - | '!' 
expr %prec NEG - { - $$= negate_expression(YYTHD, $2); - } + | simple_expr OR_OR_SYM simple_expr + { $$= new Item_func_concat($1, $3); } + | '+' simple_expr %prec NEG { $$= $2; } + | '-' simple_expr %prec NEG { $$= new Item_func_neg($2); } + | '~' simple_expr %prec NEG { $$= new Item_func_bit_neg($2); } + | not2 simple_expr %prec NEG { $$= negate_expression(YYTHD, $2); } | '(' expr ')' { $$= $2; } | '(' expr ',' expr_list ')' { @@ -2812,12 +4179,12 @@ simple_expr: | EXISTS exists_subselect { $$= $2; } | singlerow_subselect { $$= $1; } | '{' ident expr '}' { $$= $3; } - | MATCH ident_list_arg AGAINST '(' expr fulltext_options ')' + | MATCH ident_list_arg AGAINST '(' bit_expr fulltext_options ')' { $2->push_front($5); Select->add_ftfunc_to_list((Item_func_match*) ($$=new Item_func_match(*$2,$6))); } | ASCII_SYM '(' expr ')' { $$= new Item_func_ascii($3); } - | BINARY expr %prec NEG + | BINARY simple_expr %prec NEG { $$= create_func_cast($2, ITEM_CAST_CHAR, -1, &my_charset_bin); } @@ -2845,9 +4212,9 @@ simple_expr: { if (!$1.symbol->create_func) { - net_printf(Lex->thd, ER_FEATURE_DISABLED, - $1.symbol->group->name, - $1.symbol->group->needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + $1.symbol->group->name, + $1.symbol->group->needed_define); YYABORT; } $$= ((Item*(*)(void))($1.symbol->create_func))(); @@ -2856,9 +4223,9 @@ simple_expr: { if (!$1.symbol->create_func) { - net_printf(Lex->thd, ER_FEATURE_DISABLED, - $1.symbol->group->name, - $1.symbol->group->needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + $1.symbol->group->name, + $1.symbol->group->needed_define); YYABORT; } $$= ((Item*(*)(Item*))($1.symbol->create_func))($3); @@ -2867,9 +4234,9 @@ simple_expr: { if (!$1.symbol->create_func) { - net_printf(Lex->thd, ER_FEATURE_DISABLED, - $1.symbol->group->name, - $1.symbol->group->needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + $1.symbol->group->name, + $1.symbol->group->needed_define); YYABORT; } $$= ((Item*(*)(Item*,Item*))($1.symbol->create_func))($3,$5); @@ -2878,9 +4245,9 @@ simple_expr: { if (!$1.symbol->create_func) { - net_printf(Lex->thd, ER_FEATURE_DISABLED, - $1.symbol->group->name, - $1.symbol->group->needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + $1.symbol->group->name, + $1.symbol->group->needed_define); YYABORT; } $$= ((Item*(*)(Item*,Item*,Item*))($1.symbol->create_func))($3,$5,$7); @@ -2889,6 +4256,8 @@ simple_expr: { $$= new Item_date_add_interval($3, $5, INTERVAL_DAY, 0);} | ADDDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' { $$= new Item_date_add_interval($3, $6, $7, 0); } + | REPEAT_SYM '(' expr ',' expr ')' + { $$= new Item_func_repeat($3,$5); } | ATAN '(' expr ')' { $$= new Item_func_atan($3); } | ATAN '(' expr ',' expr ')' @@ -2905,9 +4274,12 @@ simple_expr: { $$= new Item_func_concat(* $3); } | CONCAT_WS '(' expr ',' expr_list ')' { $5->push_front($3); $$= new Item_func_concat_ws(*$5); } + | CONTAINS_SYM '(' expr ',' expr ')' + { $$= create_func_contains($3, $5); } | CONVERT_TZ_SYM '(' expr ',' expr ',' expr ')' { - Lex->time_zone_tables_used= &fake_time_zone_tables_list; + if (Lex->add_time_zone_tables_to_query_tables(YYTHD)) + YYABORT; $$= new Item_func_convert_tz($3, $5, $7); } | CURDATE optional_braces @@ -2979,8 +4351,8 @@ simple_expr: #ifdef HAVE_SPATIAL $$= $1; #else - net_printf(Lex->thd, ER_FEATURE_DISABLED, - sym_group_geom.name, sym_group_geom.needed_define); + my_error(ER_FEATURE_DISABLED, MYF(0), + sym_group_geom.name, sym_group_geom.needed_define); YYABORT; #endif } @@ -3058,8 +4430,10 @@ simple_expr: } | 
OLD_PASSWORD '(' expr ')' { $$= new Item_func_old_password($3); } - | POSITION_SYM '(' no_in_expr IN_SYM expr ')' + | POSITION_SYM '(' bit_expr IN_SYM expr ')' { $$ = new Item_func_locate($5,$3); } + | QUARTER_SYM '(' expr ')' + { $$ = new Item_func_quarter($3); } | RAND '(' expr ')' { $$= new Item_func_rand($3); Lex->uncacheable(UNCACHEABLE_RAND);} | RAND '(' ')' @@ -3071,6 +4445,11 @@ simple_expr: | ROUND '(' expr ')' { $$= new Item_func_round($3, new Item_int((char*)"0",0,1),0); } | ROUND '(' expr ',' expr ')' { $$= new Item_func_round($3,$5,0); } + | ROW_COUNT_SYM '(' ')' + { + $$= new Item_func_row_count(); + Lex->safe_to_cache_query= 0; + } | SUBDATE_SYM '(' expr ',' expr ')' { $$= new Item_date_add_interval($3, $5, INTERVAL_DAY, 1);} | SUBDATE_SYM '(' expr ',' INTERVAL_SYM expr interval ')' @@ -3093,6 +4472,10 @@ simple_expr: { $$= new Item_datetime_typecast($3); } | TIMESTAMP '(' expr ',' expr ')' { $$= new Item_func_add_time($3, $5, 1, 0); } + | TIMESTAMP_ADD '(' interval_time_st ',' expr ',' expr ')' + { $$= new Item_date_add_interval($7,$5,$3,0); } + | TIMESTAMP_DIFF '(' interval_time_st ',' expr ',' expr ')' + { $$= new Item_func_timestamp_diff($5,$7,$3); } | TRIM '(' expr ')' { $$= new Item_func_trim($3); } | TRIM '(' LEADING expr FROM expr ')' @@ -3113,48 +4496,90 @@ simple_expr: { $$= new Item_func_round($3,$5,1); } | TRUE_SYM { $$= new Item_int((char*) "TRUE",1,1); } - | UDA_CHAR_SUM '(' udf_expr_list ')' + | ident '.' ident '(' udf_expr_list ')' { - if ($3 != NULL) - $$ = new Item_sum_udf_str($1, *$3); - else - $$ = new Item_sum_udf_str($1); - } - | UDA_FLOAT_SUM '(' udf_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_sum_udf_float($1, *$3); - else - $$ = new Item_sum_udf_float($1); - } - | UDA_INT_SUM '(' udf_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_sum_udf_int($1, *$3); - else - $$ = new Item_sum_udf_int($1); - } - | UDF_CHAR_FUNC '(' udf_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_func_udf_str($1, *$3); - else - $$ = new Item_func_udf_str($1); - } - | UDF_FLOAT_FUNC '(' udf_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_func_udf_float($1, *$3); - else - $$ = new Item_func_udf_float($1); - } - | UDF_INT_FUNC '(' udf_expr_list ')' - { - if ($3 != NULL) - $$ = new Item_func_udf_int($1, *$3); + LEX *lex= Lex; + sp_name *name= new sp_name($1, $3); + + name->init_qname(YYTHD); + sp_add_fun_to_lex(Lex, name); + if ($5) + $$= new Item_func_sp(name, *$5); else - $$ = new Item_func_udf_int($1); + $$= new Item_func_sp(name); } + | IDENT_sys '(' udf_expr_list ')' + { +#ifdef HAVE_DLOPEN + udf_func *udf; + + if (using_udf_functions && (udf=find_udf($1.str, $1.length))) + { + switch (udf->returns) { + case STRING_RESULT: + if (udf->type == UDFTYPE_FUNCTION) + { + if ($3 != NULL) + $$ = new Item_func_udf_str(udf, *$3); + else + $$ = new Item_func_udf_str(udf); + } + else + { + if ($3 != NULL) + $$ = new Item_sum_udf_str(udf, *$3); + else + $$ = new Item_sum_udf_str(udf); + } + break; + case REAL_RESULT: + if (udf->type == UDFTYPE_FUNCTION) + { + if ($3 != NULL) + $$ = new Item_func_udf_float(udf, *$3); + else + $$ = new Item_func_udf_float(udf); + } + else + { + if ($3 != NULL) + $$ = new Item_sum_udf_float(udf, *$3); + else + $$ = new Item_sum_udf_float(udf); + } + break; + case INT_RESULT: + if (udf->type == UDFTYPE_FUNCTION) + { + if ($3 != NULL) + $$ = new Item_func_udf_int(udf, *$3); + else + $$ = new Item_func_udf_int(udf); + } + else + { + if ($3 != NULL) + $$ = new Item_sum_udf_int(udf, *$3); + else + $$ = new Item_sum_udf_int(udf); + } + break; + 
default: + YYABORT; + } + } + else +#endif /* HAVE_DLOPEN */ + { + sp_name *name= sp_name_current_db_new(YYTHD, $1); + + sp_add_fun_to_lex(Lex, name); + if ($3) + $$= new Item_func_sp(name, *$3); + else + $$= new Item_func_sp(name); + } + } | UNIQUE_USERS '(' text_literal ',' NUM ',' NUM ',' expr_list ')' { $$= new Item_func_unique_users($3,atoi($5.str),atoi($7.str), * $9); @@ -3262,8 +4687,37 @@ fulltext_options: ; udf_expr_list: - /* empty */ { $$= NULL; } - | expr_list { $$= $1;}; + /* empty */ { $$= NULL; } + | udf_expr_list2 { $$= $1;} + ; + +udf_expr_list2: + { Select->expr_list.push_front(new List<Item>); } + udf_expr_list3 + { $$= Select->expr_list.pop(); } + ; + +udf_expr_list3: + udf_expr + { + Select->expr_list.head()->push_back($1); + } + | udf_expr_list3 ',' udf_expr + { + Select->expr_list.head()->push_back($3); + } + ; + +udf_expr: + remember_name expr remember_end select_alias + { + if ($4.str) + $2->set_name($4.str,$4.length,system_charset_info); + else + $2->set_name($1,(uint) ($3 - $1), YYTHD->charset()); + $$= $2; + } + ; sum_expr: AVG_SYM '(' in_sum_expr ')' @@ -3288,14 +4742,25 @@ sum_expr: { $$= new Item_sum_unique_users($3,atoi($5.str),atoi($7.str),$9); } | MIN_SYM '(' in_sum_expr ')' { $$=new Item_sum_min($3); } +/* + According to ANSI SQL, DISTINCT is allowed and has + no sence inside MIN and MAX grouping functions; so MIN|MAX(DISTINCT ...) + is processed like an ordinary MIN | MAX() + */ + | MIN_SYM '(' DISTINCT in_sum_expr ')' + { $$=new Item_sum_min($4); } | MAX_SYM '(' in_sum_expr ')' { $$=new Item_sum_max($3); } + | MAX_SYM '(' DISTINCT in_sum_expr ')' + { $$=new Item_sum_max($4); } | STD_SYM '(' in_sum_expr ')' { $$=new Item_sum_std($3); } | VARIANCE_SYM '(' in_sum_expr ')' { $$=new Item_sum_variance($3); } | SUM_SYM '(' in_sum_expr ')' { $$=new Item_sum_sum($3); } + | SUM_SYM '(' DISTINCT in_sum_expr ')' + { $$=new Item_sum_sum_distinct($4); } | GROUP_CONCAT_SYM '(' opt_distinct { Select->in_sum_expr++; } expr_list opt_gorder_clause @@ -3409,59 +4874,80 @@ when_list2: sel->when_list.head()->push_back($5); }; +table_ref: + table_factor { $$=$1; } + | join_table { $$=$1; } + { + LEX *lex= Lex; + if (!($$= lex->current_select->nest_last_join(lex->thd))) + YYABORT; + } + ; + join_table_list: - '(' join_table_list ')' { $$=$2; } - | join_table { $$=$1; } - | join_table_list ',' join_table_list { $$=$3; } - | join_table_list normal_join join_table_list { $$=$3; } - | join_table_list STRAIGHT_JOIN join_table_list - { $$=$3 ; $1->next->straight=1; } - | join_table_list normal_join join_table_list ON expr + table_ref { $$=$1; } + | join_table_list ',' table_ref { $$=$3; } + ; + +join_table: + table_ref normal_join table_ref { $$=$3; } + | table_ref STRAIGHT_JOIN table_factor + { $3->straight=1; $$=$3 ; } + | table_ref normal_join table_ref ON expr { add_join_on($3,$5); $$=$3; } - | join_table_list normal_join join_table_list + | table_ref normal_join table_ref USING { SELECT_LEX *sel= Select; - sel->db1=$1->db; sel->table1=$1->alias; - sel->db2=$3->db; sel->table2=$3->alias; + sel->save_names_for_using_list($1, $3); } '(' using_list ')' { add_join_on($3,$7); $$=$3; } - | join_table_list LEFT opt_outer JOIN_SYM join_table_list ON expr + | table_ref LEFT opt_outer JOIN_SYM table_ref ON expr { add_join_on($5,$7); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; } - | join_table_list LEFT opt_outer JOIN_SYM join_table_list + | table_ref LEFT opt_outer JOIN_SYM table_factor { SELECT_LEX *sel= Select; - sel->db1=$1->db; sel->table1=$1->alias; - sel->db2=$5->db; 
sel->table2=$5->alias; + sel->save_names_for_using_list($1, $5); } USING '(' using_list ')' { add_join_on($5,$9); $5->outer_join|=JOIN_TYPE_LEFT; $$=$5; } - | join_table_list NATURAL LEFT opt_outer JOIN_SYM join_table_list + | table_ref NATURAL LEFT opt_outer JOIN_SYM table_factor { - add_join_natural($1,$1->next); - $1->next->outer_join|=JOIN_TYPE_LEFT; + add_join_natural($1,$6); + $6->outer_join|=JOIN_TYPE_LEFT; $$=$6; } - | join_table_list RIGHT opt_outer JOIN_SYM join_table_list ON expr - { add_join_on($1,$7); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$5; } - | join_table_list RIGHT opt_outer JOIN_SYM join_table_list + | table_ref RIGHT opt_outer JOIN_SYM table_ref ON expr + { + LEX *lex= Lex; + if (!($$= lex->current_select->convert_right_join())) + YYABORT; + add_join_on($$, $7); + } + | table_ref RIGHT opt_outer JOIN_SYM table_factor { SELECT_LEX *sel= Select; - sel->db1=$1->db; sel->table1=$1->alias; - sel->db2=$5->db; sel->table2=$5->alias; + sel->save_names_for_using_list($1, $5); } USING '(' using_list ')' - { add_join_on($1,$9); $1->outer_join|=JOIN_TYPE_RIGHT; $$=$5; } - | join_table_list NATURAL RIGHT opt_outer JOIN_SYM join_table_list + { + LEX *lex= Lex; + if (!($$= lex->current_select->convert_right_join())) + YYABORT; + add_join_on($$, $9); + } + | table_ref NATURAL RIGHT opt_outer JOIN_SYM table_factor { - add_join_natural($1->next,$1); - $1->outer_join|=JOIN_TYPE_RIGHT; - $$=$6; + add_join_natural($6,$1); + LEX *lex= Lex; + if (!($$= lex->current_select->convert_right_join())) + YYABORT; } - | join_table_list NATURAL JOIN_SYM join_table_list - { add_join_natural($1,$1->next); $$=$4; }; + | table_ref NATURAL JOIN_SYM table_factor + { add_join_natural($1,$4); $$=$4; }; + normal_join: JOIN_SYM {} @@ -3469,7 +4955,7 @@ normal_join: | CROSS JOIN_SYM {} ; -join_table: +table_factor: { SELECT_LEX *sel= Select; sel->use_index_ptr=sel->ignore_index_ptr=0; @@ -3485,8 +4971,21 @@ join_table: sel->get_use_index(), sel->get_ignore_index()))) YYABORT; + sel->add_joined_table($$); } - | '{' ident join_table LEFT OUTER JOIN_SYM join_table ON expr '}' + | '(' + { + LEX *lex= Lex; + if (lex->current_select->init_nested_join(lex->thd)) + YYABORT; + } + join_table_list ')' + { + LEX *lex= Lex; + if (!($$= lex->current_select->end_nested_join(lex->thd))) + YYABORT; + } + | '{' ident table_ref LEFT OUTER JOIN_SYM table_ref ON expr '}' { add_join_on($7,$9); $7->outer_join|=JOIN_TYPE_LEFT; $$=$7; } | '(' SELECT_SYM select_derived ')' opt_table_alias { @@ -3499,12 +4998,13 @@ join_table: (List<String> *)0))) YYABORT; + lex->current_select->add_joined_table($$); }; select_derived: { LEX *lex= Lex; - lex->derived_tables= 1; + lex->derived_tables|= DERIVED_SUBQUERY; if (((int)lex->sql_command >= (int)SQLCOM_HA_OPEN && lex->sql_command <= (int)SQLCOM_HA_READ) || lex->sql_command == (int)SQLCOM_KILL) @@ -3595,23 +5095,29 @@ using_list: }; interval: - DAY_HOUR_SYM { $$=INTERVAL_DAY_HOUR; } + interval_time_st {} + | DAY_HOUR_SYM { $$=INTERVAL_DAY_HOUR; } | DAY_MICROSECOND_SYM { $$=INTERVAL_DAY_MICROSECOND; } | DAY_MINUTE_SYM { $$=INTERVAL_DAY_MINUTE; } | DAY_SECOND_SYM { $$=INTERVAL_DAY_SECOND; } - | DAY_SYM { $$=INTERVAL_DAY; } | HOUR_MICROSECOND_SYM { $$=INTERVAL_HOUR_MICROSECOND; } | HOUR_MINUTE_SYM { $$=INTERVAL_HOUR_MINUTE; } | HOUR_SECOND_SYM { $$=INTERVAL_HOUR_SECOND; } - | HOUR_SYM { $$=INTERVAL_HOUR; } | MICROSECOND_SYM { $$=INTERVAL_MICROSECOND; } | MINUTE_MICROSECOND_SYM { $$=INTERVAL_MINUTE_MICROSECOND; } | MINUTE_SECOND_SYM { $$=INTERVAL_MINUTE_SECOND; } + | SECOND_MICROSECOND_SYM { 
$$=INTERVAL_SECOND_MICROSECOND; } + | YEAR_MONTH_SYM { $$=INTERVAL_YEAR_MONTH; }; + +interval_time_st: + DAY_SYM { $$=INTERVAL_DAY; } + | WEEK_SYM { $$=INTERVAL_WEEK; } + | HOUR_SYM { $$=INTERVAL_HOUR; } + | FRAC_SECOND_SYM { $$=INTERVAL_MICROSECOND; } | MINUTE_SYM { $$=INTERVAL_MINUTE; } | MONTH_SYM { $$=INTERVAL_MONTH; } - | SECOND_MICROSECOND_SYM { $$=INTERVAL_SECOND_MICROSECOND; } + | QUARTER_SYM { $$=INTERVAL_QUARTER; } | SECOND_SYM { $$=INTERVAL_SECOND; } - | YEAR_MONTH_SYM { $$=INTERVAL_YEAR_MONTH; } | YEAR_SYM { $$=INTERVAL_YEAR; } ; @@ -3672,8 +5178,11 @@ having_clause: opt_escape: ESCAPE_SYM simple_expr { $$= $2; } | /* empty */ - { - $$= new Item_string("\\", 1, &my_charset_latin1); + { + + $$= ((YYTHD->variables.sql_mode & MODE_NO_BACKSLASH_ESCAPES) ? + new Item_string("", 0, &my_charset_latin1) : + new Item_string("\\", 1, &my_charset_latin1)); } ; @@ -3699,12 +5208,12 @@ olap_opt: LEX *lex=Lex; if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) { - net_printf(lex->thd, ER_WRONG_USAGE, "WITH CUBE", + my_error(ER_WRONG_USAGE, MYF(0), "WITH CUBE", "global union parameters"); YYABORT; } lex->current_select->olap= CUBE_TYPE; - net_printf(lex->thd, ER_NOT_SUPPORTED_YET, "CUBE"); + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "CUBE"); YYABORT; /* To be deleted in 5.1 */ } | WITH ROLLUP_SYM @@ -3712,7 +5221,7 @@ olap_opt: LEX *lex= Lex; if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) { - net_printf(lex->thd, ER_WRONG_USAGE, "WITH ROLLUP", + my_error(ER_WRONG_USAGE, MYF(0), "WITH ROLLUP", "global union parameters"); YYABORT; } @@ -3736,9 +5245,8 @@ order_clause: lex->current_select->olap != UNSPECIFIED_OLAP_TYPE) { - net_printf(lex->thd, ER_WRONG_USAGE, - "CUBE/ROLLUP", - "ORDER BY"); + my_error(ER_WRONG_USAGE, MYF(0), + "CUBE/ROLLUP", "ORDER BY"); YYABORT; } } order_list; @@ -3836,9 +5344,7 @@ procedure_clause: LEX *lex=Lex; if (&lex->select_lex != lex->current_select) { - net_printf(lex->thd, ER_WRONG_USAGE, - "PROCEDURE", - "subquery"); + my_error(ER_WRONG_USAGE, MYF(0), "PROCEDURE", "subquery"); YYABORT; } lex->proc_list.elements=0; @@ -3886,12 +5392,33 @@ select_var_list: | select_var_ident {} ; -select_var_ident: '@' ident_or_text +select_var_ident: + '@' ident_or_text { LEX *lex=Lex; - if (lex->result && ((select_dumpvar *)lex->result)->var_list.push_back((LEX_STRING*) sql_memdup(&$2,sizeof(LEX_STRING)))) + if (lex->result) + ((select_dumpvar *)lex->result)->var_list.push_back( new my_var($2,0,0,(enum_field_types)0)); + else YYABORT; } + | ident_or_text + { + LEX *lex=Lex; + sp_pvar_t *t; + + if (!lex->spcont || !(t=lex->spcont->find_pvar(&$1))) + { + my_error(ER_SP_UNDECLARED_VAR, MYF(0), $1.str); + YYABORT; + } + if (! 
lex->result) + YYABORT; + else + { + ((select_dumpvar *)lex->result)->var_list.push_back( new my_var($1,1,t->offset,t->type)); + t->isset= TRUE; + } + } ; into: @@ -3968,22 +5495,56 @@ drop: lex->drop_if_exists=$3; lex->name=$4.str; } - | DROP UDF_SYM IDENT_sys + | DROP FUNCTION_SYM if_exists sp_name { LEX *lex=Lex; + if (lex->sphead) + { + my_error(ER_SP_NO_DROP_SP, MYF(0), "FUNCTION"); + YYABORT; + } lex->sql_command = SQLCOM_DROP_FUNCTION; - lex->udf.name = $3; + lex->drop_if_exists= $3; + lex->spname= $4; } - | DROP USER + | DROP PROCEDURE if_exists sp_name { LEX *lex=Lex; - lex->sql_command = SQLCOM_DROP_USER; - lex->users_list.empty(); + if (lex->sphead) + { + my_error(ER_SP_NO_DROP_SP, MYF(0), "PROCEDURE"); + YYABORT; + } + lex->sql_command = SQLCOM_DROP_PROCEDURE; + lex->drop_if_exists= $3; + lex->spname= $4; } - user_list - {} - ; + | DROP USER clear_privileges user_list + { + Lex->sql_command = SQLCOM_DROP_USER; + } + | DROP VIEW_SYM if_exists table_list opt_restrict + { + THD *thd= YYTHD; + LEX *lex= thd->lex; + lex->sql_command= SQLCOM_DROP_VIEW; + lex->drop_if_exists= $3; + } + | DROP TRIGGER_SYM ident '.' ident + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_DROP_TRIGGER; + /* QQ: Could we loosen lock type in certain cases ? */ + if (!lex->select_lex.add_table_to_list(YYTHD, + new Table_ident($3), + (LEX_STRING*) 0, + TL_OPTION_UPDATING, + TL_WRITE)) + YYABORT; + lex->name_and_length= $5; + } + ; table_list: table_name @@ -4044,7 +5605,6 @@ replace: } insert_field_spec {} - {} ; insert_lock_option: @@ -4110,7 +5670,7 @@ ident_eq_list: ident_eq_value; ident_eq_value: - simple_ident equal expr_or_default + simple_ident_nospvar equal expr_or_default { LEX *lex=Lex; if (lex->field_list.push_back($1) || @@ -4191,8 +5751,8 @@ update: else if (lex->select_lex.get_table_list()->derived) { /* it is single table update and it is update of derived table */ - net_printf(lex->thd, ER_NON_UPDATABLE_TABLE, - lex->select_lex.get_table_list()->alias, "UPDATE"); + my_error(ER_NON_UPDATABLE_TABLE, MYF(0), + lex->select_lex.get_table_list()->alias, "UPDATE"); YYABORT; } else @@ -4206,7 +5766,7 @@ update_list: | update_elem; update_elem: - simple_ident equal expr_or_default + simple_ident_nospvar equal expr_or_default { if (add_item_to_list(YYTHD, $1) || add_value_to_list(YYTHD, $3)) YYABORT; @@ -4217,7 +5777,7 @@ insert_update_list: | insert_update_elem; insert_update_elem: - simple_ident equal expr_or_default + simple_ident_nospvar equal expr_or_default { LEX *lex= Lex; if (lex->update_list.push_back($1) || @@ -4323,36 +5883,52 @@ show: SHOW ; show_param: - DATABASES wild - { Lex->sql_command= SQLCOM_SHOW_DATABASES; } - | TABLES opt_db wild - { - LEX *lex= Lex; - lex->sql_command= SQLCOM_SHOW_TABLES; - lex->select_lex.db= $2; - } - | TABLE_SYM STATUS_SYM opt_db wild - { - LEX *lex= Lex; - lex->sql_command= SQLCOM_SHOW_TABLES; - lex->describe= DESCRIBE_EXTENDED; - lex->select_lex.db= $3; - } - | OPEN_SYM TABLES opt_db wild - { - LEX *lex= Lex; - lex->sql_command= SQLCOM_SHOW_OPEN_TABLES; - lex->select_lex.db= $3; - } + DATABASES ext_select_item_list wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_DATABASES; + if (prepare_schema_table(YYTHD, lex, 0, SCH_SCHEMATA)) + YYABORT; + } + | opt_full TABLES ext_select_item_list opt_db wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_TABLES; + lex->select_lex.db= $4; + if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLE_NAMES)) + YYABORT; + 
} + | TABLE_SYM STATUS_SYM ext_select_item_list opt_db wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_TABLE_STATUS; + lex->select_lex.db= $4; + if (prepare_schema_table(YYTHD, lex, 0, SCH_TABLES)) + YYABORT; + } + | OPEN_SYM TABLES ext_select_item_list opt_db wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_OPEN_TABLES; + lex->select_lex.db= $4; + if (prepare_schema_table(YYTHD, lex, 0, SCH_OPEN_TABLES)) + YYABORT; + } | ENGINE_SYM storage_engines { Lex->create_info.db_type= $2; } show_engine_param - | opt_full COLUMNS from_or_in table_ident opt_db wild - { - Lex->sql_command= SQLCOM_SHOW_FIELDS; - if ($5) - $4->change_db($5); - if (!Select->add_table_to_list(YYTHD, $4, NULL, 0)) + | opt_full COLUMNS ext_select_item_list from_or_in table_ident opt_db wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_FIELDS; + if ($6) + $5->change_db($6); + if (prepare_schema_table(YYTHD, lex, $5, SCH_COLUMNS)) YYABORT; } | NEW_SYM MASTER_SYM FOR_SYM SLAVE WITH MASTER_LOG_FILE_SYM EQ @@ -4378,13 +5954,15 @@ show_param: LEX *lex= Lex; lex->sql_command= SQLCOM_SHOW_BINLOG_EVENTS; } opt_limit_clause_init - | keys_or_index from_or_in table_ident opt_db - { - Lex->sql_command= SQLCOM_SHOW_KEYS; - if ($4) - $3->change_db($4); - if (!Select->add_table_to_list(YYTHD, $3, NULL, 0)) - YYABORT; + | keys_or_index ext_select_item_list from_or_in table_ident opt_db where_clause + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_KEYS; + if ($5) + $4->change_db($5); + if (prepare_schema_table(YYTHD, lex, $4, SCH_STATISTICS)) + YYABORT; } | COLUMN_SYM TYPES_SYM { @@ -4415,22 +5993,46 @@ show_param: { Lex->sql_command = SQLCOM_SHOW_WARNS;} | ERRORS opt_limit_clause_init { Lex->sql_command = SQLCOM_SHOW_ERRORS;} - | STATUS_SYM wild - { Lex->sql_command= SQLCOM_SHOW_STATUS; } + | opt_var_type STATUS_SYM ext_select_item_list wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_STATUS; + lex->option_type= (enum_var_type) $1; + if (prepare_schema_table(YYTHD, lex, 0, SCH_STATUS)) + YYABORT; + } | INNOBASE_SYM STATUS_SYM { Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; WARN_DEPRECATED("SHOW INNODB STATUS", "SHOW ENGINE INNODB STATUS"); } + | MUTEX_SYM STATUS_SYM + { Lex->sql_command = SQLCOM_SHOW_MUTEX_STATUS; } | opt_full PROCESSLIST_SYM { Lex->sql_command= SQLCOM_SHOW_PROCESSLIST;} - | opt_var_type VARIABLES wild - { - THD *thd= YYTHD; - thd->lex->sql_command= SQLCOM_SHOW_VARIABLES; - thd->lex->option_type= (enum_var_type) $1; - } - | charset wild - { Lex->sql_command= SQLCOM_SHOW_CHARSETS; } - | COLLATION_SYM wild - { Lex->sql_command= SQLCOM_SHOW_COLLATIONS; } + | opt_var_type VARIABLES ext_select_item_list wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_VARIABLES; + lex->option_type= (enum_var_type) $1; + if (prepare_schema_table(YYTHD, lex, 0, SCH_VARIABLES)) + YYABORT; + } + | charset ext_select_item_list wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_CHARSETS; + if (prepare_schema_table(YYTHD, lex, 0, SCH_CHARSETS)) + YYABORT; + } + | COLLATION_SYM ext_select_item_list wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_COLLATIONS; + if (prepare_schema_table(YYTHD, lex, 0, 
SCH_COLLATIONS)) + YYABORT; + } | BERKELEY_DB_SYM LOGS_SYM { Lex->sql_command= SQLCOM_SHOW_LOGS; WARN_DEPRECATED("SHOW BDB LOGS", "SHOW ENGINE BDB LOGS"); } | LOGS_SYM @@ -4473,9 +6075,19 @@ show_param: } | CREATE TABLE_SYM table_ident { - Lex->sql_command = SQLCOM_SHOW_CREATE; - if (!Select->add_table_to_list(YYTHD, $3, NULL,0)) + LEX *lex= Lex; + lex->sql_command = SQLCOM_SHOW_CREATE; + if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL,0)) + YYABORT; + lex->only_view= 0; + } + | CREATE VIEW_SYM table_ident + { + LEX *lex= Lex; + lex->sql_command = SQLCOM_SHOW_CREATE; + if (!lex->select_lex.add_table_to_list(YYTHD, $3, NULL, 0)) YYABORT; + lex->only_view= 1; } | MASTER_SYM STATUS_SYM { @@ -4484,7 +6096,37 @@ show_param: | SLAVE STATUS_SYM { Lex->sql_command = SQLCOM_SHOW_SLAVE_STAT; - }; + } + | CREATE PROCEDURE sp_name + { + LEX *lex= Lex; + + lex->sql_command = SQLCOM_SHOW_CREATE_PROC; + lex->spname= $3; + } + | CREATE FUNCTION_SYM sp_name + { + LEX *lex= Lex; + + lex->sql_command = SQLCOM_SHOW_CREATE_FUNC; + lex->spname= $3; + } + | PROCEDURE STATUS_SYM ext_select_item_list wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_STATUS_PROC; + if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES)) + YYABORT; + } + | FUNCTION_SYM STATUS_SYM ext_select_item_list wild_and_where + { + LEX *lex= Lex; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_STATUS_FUNC; + if (prepare_schema_table(YYTHD, lex, 0, SCH_PROCEDURES)) + YYABORT; + }; show_engine_param: STATUS_SYM @@ -4494,7 +6136,7 @@ show_engine_param: Lex->sql_command = SQLCOM_SHOW_INNODB_STATUS; break; default: - net_printf(YYTHD, ER_NOT_SUPPORTED_YET, "STATUS"); + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "STATUS"); YYABORT; } } @@ -4505,7 +6147,7 @@ show_engine_param: Lex->sql_command = SQLCOM_SHOW_LOGS; break; default: - net_printf(YYTHD, ER_NOT_SUPPORTED_YET, "LOGS"); + my_error(ER_NOT_SUPPORTED_YET, MYF(0), "LOGS"); YYABORT; } }; @@ -4522,12 +6164,6 @@ opt_db: /* empty */ { $$= 0; } | from_or_in ident { $$= $2.str; }; -wild: - /* empty */ - | LIKE TEXT_STRING_sys - { Lex->wild= new (YYTHD->mem_root) String($2.str, $2.length, - system_charset_info); }; - opt_full: /* empty */ { Lex->verbose=0; } | FULL { Lex->verbose=1; }; @@ -4544,16 +6180,47 @@ binlog_from: /* empty */ { Lex->mi.pos = 4; /* skip magic number */ } | FROM ulonglong_num { Lex->mi.pos = $2; }; +wild_and_where: + /* empty */ + | LIKE TEXT_STRING_sys + { Lex->wild= new (YYTHD->mem_root) String($2.str, $2.length, + system_charset_info); } + | WHERE expr + { + Select->where= $2; + if ($2) + $2->top_level_item(); + } + ; + +ext_select_item_list: + { + LEX *lex=Lex; + SELECT_LEX *sel= lex->current_select; + lex->lock_option= TL_READ; + mysql_init_select(lex); + lex->current_select->parsing_place= SELECT_LIST; + } + ext_select_item_list2; + +ext_select_item_list2: + /* empty */ {} + | select_item_list {}; + /* A Oracle compatible synonym for show */ describe: describe_command table_ident { - LEX *lex=Lex; - lex->wild=0; - lex->verbose=0; - lex->sql_command=SQLCOM_SHOW_FIELDS; - if (!Select->add_table_to_list(lex->thd, $2, NULL,0)) + LEX *lex= Lex; + lex->lock_option= TL_READ; + mysql_init_select(lex); + lex->current_select->parsing_place= SELECT_LIST; + lex->sql_command= SQLCOM_SELECT; + lex->orig_sql_command= SQLCOM_SHOW_FIELDS; + lex->select_lex.db= 0; + lex->verbose= 0; + if (prepare_schema_table(YYTHD, lex, $2, SCH_COLUMNS)) YYABORT; } opt_describe_column {} @@ -4655,37 +6322,29 @@ 
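The purge_option and kill rules in the following hunks defer evaluation of the expression to execution time (it is pushed onto value_list instead of being fix_fields()'ed in the parser) and add an optional CONNECTION/QUERY modifier to KILL. Statements of this shape are what they parse; the thread id 42 is arbitrary:

  PURGE MASTER LOGS BEFORE NOW() - INTERVAL 3 DAY;
  KILL QUERY 42;
  KILL CONNECTION 42;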
purge_option: } | BEFORE_SYM expr { - if ($2->check_cols(1) || $2->fix_fields(Lex->thd, 0, &$2)) - { - net_printf(Lex->thd, ER_WRONG_ARGUMENTS, "PURGE LOGS BEFORE"); - YYABORT; - } - Item *tmp= new Item_func_unix_timestamp($2); - /* - it is OK only emulate fix_fieds, because we need only - value of constant - */ - tmp->quick_fix_field(); - Lex->sql_command = SQLCOM_PURGE_BEFORE; - Lex->purge_time= (ulong) tmp->val_int(); + LEX *lex= Lex; + lex->value_list.empty(); + lex->value_list.push_front($2); + lex->sql_command= SQLCOM_PURGE_BEFORE; } ; /* kill threads */ kill: - KILL_SYM expr + KILL_SYM kill_option expr { LEX *lex=Lex; - if ($2->fix_fields(lex->thd, 0, &$2) || $2->check_cols(1)) - { - send_error(lex->thd, ER_SET_CONSTANTS_ONLY); - YYABORT; - } - lex->sql_command=SQLCOM_KILL; - lex->thread_id= (ulong) $2->val_int(); + lex->value_list.empty(); + lex->value_list.push_front($3); + lex->sql_command= SQLCOM_KILL; }; +kill_option: + /* empty */ { Lex->type= 0; } + | CONNECTION_SYM { Lex->type= 0; } + | QUERY_SYM { Lex->type= ONLY_KILL_QUERY; }; + /* change database */ use: USE_SYM ident @@ -4825,15 +6484,25 @@ text_string: { $$= new (YYTHD->mem_root) String($1.str,$1.length,YYTHD->variables.collation_connection); } | HEX_NUM { - Item *tmp = new Item_varbinary($1.str,$1.length); + Item *tmp= new Item_hex_string($1.str, $1.length); /* - it is OK only emulate fix_fieds, because we need only + it is OK only emulate fix_fields, because we need only value of constant */ $$= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) : (String*) 0; } + | BIN_NUM + { + Item *tmp= new Item_bin_string($1.str, $1.length); + /* + it is OK only emulate fix_fields, because we need only + value of constant + */ + $$= tmp ? tmp->quick_fix_field(), tmp->val_str((String*) 0) : + (String*) 0; + } ; param_marker: @@ -4847,7 +6516,7 @@ param_marker: (uchar *) thd->query)); if (!($$= item) || lex->param_list.push_back(item)) { - send_error(thd, ER_OUT_OF_RESOURCES); + my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); YYABORT; } } @@ -4875,10 +6544,11 @@ literal: | NUM_literal { $$ = $1; } | NULL_SYM { $$ = new Item_null(); Lex->next_state=MY_LEX_OPERATOR_OR_IDENT;} - | HEX_NUM { $$ = new Item_varbinary($1.str,$1.length);} + | HEX_NUM { $$ = new Item_hex_string($1.str, $1.length);} + | BIN_NUM { $$= new Item_bin_string($1.str, $1.length); } | UNDERSCORE_CHARSET HEX_NUM { - Item *tmp= new Item_varbinary($2.str,$2.length); + Item *tmp= new Item_hex_string($2.str, $2.length); /* it is OK only emulate fix_fieds, because we need only value of constant @@ -4890,6 +6560,20 @@ literal: str ? str->length() : 0, Lex->charset); } + | UNDERSCORE_CHARSET BIN_NUM + { + Item *tmp= new Item_bin_string($2.str, $2.length); + /* + it is OK only emulate fix_fieds, because we need only + value of constant + */ + String *str= tmp ? + tmp->quick_fix_field(), tmp->val_str((String*) 0) : + (String*) 0; + $$= new Item_string(str ? str->ptr() : "", + str ? 
str->length() : 0, + Lex->charset); + } | DATE_SYM text_literal { $$ = $2; } | TIME_SYM text_literal { $$ = $2; } | TIMESTAMP text_literal { $$ = $2; }; @@ -4898,8 +6582,22 @@ NUM_literal: NUM { int error; $$ = new Item_int($1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); } | LONG_NUM { int error; $$ = new Item_int($1.str, (longlong) my_strtoll10($1.str, NULL, &error), $1.length); } | ULONGLONG_NUM { $$ = new Item_uint($1.str, $1.length); } - | REAL_NUM { $$ = new Item_real($1.str, $1.length); } - | FLOAT_NUM { $$ = new Item_float($1.str, $1.length); } + | REAL_NUM + { + $$= new Item_real($1.str, $1.length); + if (YYTHD->net.report_error) + { + YYABORT; + } + } + | FLOAT_NUM + { + $$ = new Item_float($1.str, $1.length); + if (YYTHD->net.report_error) + { + YYABORT; + } + } ; /********************************************************************** @@ -4907,7 +6605,7 @@ NUM_literal: **********************************************************************/ insert_ident: - simple_ident { $$=$1; } + simple_ident_nospvar { $$=$1; } | table_wild { $$=$1; }; table_wild: @@ -4931,28 +6629,101 @@ order_ident: simple_ident: ident { + sp_pvar_t *spv; + LEX *lex = Lex; + sp_pcontext *spc = lex->spcont; + + if (spc && (spv = spc->find_pvar(&$1))) + { /* We're compiling a stored procedure and found a variable */ + $$ = (Item*) new Item_splocal($1, spv->offset); + lex->variables_used= 1; + lex->safe_to_cache_query=0; + } + else + { + SELECT_LEX *sel=Select; + $$= (sel->parsing_place != IN_HAVING || + sel->get_in_sum_expr() > 0) ? + (Item*) new Item_field(NullS,NullS,$1.str) : + (Item*) new Item_ref(NullS,NullS,$1.str); + } + } + | simple_ident_q { $$= $1; } + ; + +simple_ident_nospvar: + ident + { SELECT_LEX *sel=Select; $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? (Item*) new Item_field(NullS,NullS,$1.str) : - (Item*) new Item_ref(NullS, NullS, $1.str); + (Item*) new Item_ref(NullS,NullS,$1.str); } - | ident '.' ident + | simple_ident_q { $$= $1; } + ; + +simple_ident_q: + ident '.' ident { THD *thd= YYTHD; LEX *lex= thd->lex; - SELECT_LEX *sel= lex->current_select; - if (sel->no_table_names_allowed) - { - my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, - ER(ER_TABLENAME_NOT_ALLOWED_HERE), - MYF(0), $1.str, thd->where); - } - $$= (sel->parsing_place != IN_HAVING || - sel->get_in_sum_expr() > 0) ? - (Item*) new Item_field(NullS,$1.str,$3.str) : - (Item*) new Item_ref(NullS, $1.str, $3.str); - } + + /* + FIXME This will work ok in simple_ident_nospvar case because + we can't meet simple_ident_nospvar in trigger now. But it + should be changed in future. + */ + if (lex->sphead && lex->sphead->m_type == TYPE_ENUM_TRIGGER && + (!my_strcasecmp(system_charset_info, $1.str, "NEW") || + !my_strcasecmp(system_charset_info, $1.str, "OLD"))) + { + Item_trigger_field *trg_fld; + bool new_row= ($1.str[0]=='N' || $1.str[0]=='n'); + + if (lex->trg_chistics.event == TRG_EVENT_INSERT && + !new_row) + { + my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "OLD", "on INSERT"); + YYABORT; + } + + if (lex->trg_chistics.event == TRG_EVENT_DELETE && + new_row) + { + my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), "NEW", "on DELETE"); + YYABORT; + } + + if (!(trg_fld= new Item_trigger_field(new_row ? + Item_trigger_field::NEW_ROW: + Item_trigger_field::OLD_ROW, + $3.str))) + YYABORT; + + /* + Let us add this item to list of all Item_trigger_field objects + in trigger. 
+ */ + lex->trg_table_fields.link_in_list((byte *)trg_fld, + (byte**)&trg_fld->next_trg_field); + + $$= (Item *)trg_fld; + } + else + { + SELECT_LEX *sel= lex->current_select; + if (sel->no_table_names_allowed) + { + my_error(ER_TABLENAME_NOT_ALLOWED_HERE, + MYF(0), $1.str, thd->where); + } + $$= (sel->parsing_place != IN_HAVING || + sel->get_in_sum_expr() > 0) ? + (Item*) new Item_field(NullS,$1.str,$3.str) : + (Item*) new Item_ref(NullS,$1.str,$3.str); + } + } | '.' ident '.' ident { THD *thd= YYTHD; @@ -4960,9 +6731,8 @@ simple_ident: SELECT_LEX *sel= lex->current_select; if (sel->no_table_names_allowed) { - my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, - ER(ER_TABLENAME_NOT_ALLOWED_HERE), - MYF(0), $2.str, thd->where); + my_error(ER_TABLENAME_NOT_ALLOWED_HERE, + MYF(0), $2.str, thd->where); } $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? @@ -4976,9 +6746,8 @@ simple_ident: SELECT_LEX *sel= lex->current_select; if (sel->no_table_names_allowed) { - my_printf_error(ER_TABLENAME_NOT_ALLOWED_HERE, - ER(ER_TABLENAME_NOT_ALLOWED_HERE), - MYF(0), $3.str, thd->where); + my_error(ER_TABLENAME_NOT_ALLOWED_HERE, + MYF(0), $3.str, thd->where); } $$= (sel->parsing_place != IN_HAVING || sel->get_in_sum_expr() > 0) ? @@ -5019,8 +6788,8 @@ IDENT_sys: $1.length); if (wlen < $1.length) { - net_printf(YYTHD, ER_INVALID_CHARACTER_STRING, cs->csname, - $1.str + wlen); + my_error(ER_INVALID_CHARACTER_STRING, MYF(0), + cs->csname, $1.str + wlen); YYABORT; } $$= $1; @@ -5115,6 +6884,7 @@ keyword: | AFTER_SYM {} | AGAINST {} | AGGREGATE_SYM {} + | ALGORITHM_SYM {} | ANY_SYM {} | ASCII_SYM {} | AUTO_INC {} @@ -5130,6 +6900,7 @@ keyword: | BYTE_SYM {} | BTREE_SYM {} | CACHE_SYM {} + | CASCADED {} | CHANGED {} | CHARSET {} | CHECKSUM_SYM {} @@ -5137,18 +6908,21 @@ keyword: | CLIENT_SYM {} | CLOSE_SYM {} | COLLATION_SYM {} + | COLUMNS {} | COMMENT_SYM {} | COMMITTED_SYM {} | COMMIT_SYM {} | COMPRESSED_SYM {} | CONCURRENT {} | CONSISTENT_SYM {} + | CONTAINS_SYM {} | CUBE_SYM {} | DATA_SYM {} | DATETIME {} | DATE_SYM {} | DAY_SYM {} | DEALLOCATE_SYM {} + | DEFINER_SYM {} | DELAY_KEY_WRITE_SYM {} | DES_KEY_FILE {} | DIRECTORY_SYM {} @@ -5168,6 +6942,7 @@ keyword: | EXPANSION_SYM {} | EXTENDED_SYM {} | FAST_SYM {} + | FOUND_SYM {} | DISABLE_SYM {} | ENABLE_SYM {} | FULL {} @@ -5175,6 +6950,7 @@ keyword: | FIRST_SYM {} | FIXED_SYM {} | FLUSH_SYM {} + | FRAC_SECOND_SYM {} | GEOMETRY_SYM {} | GEOMETRYCOLLECTION {} | GET_FORMAT {} @@ -5186,6 +6962,7 @@ keyword: | HOSTS_SYM {} | HOUR_SYM {} | IDENTIFIED_SYM {} + | INVOKER_SYM {} | IMPORT {} | INDEXES {} | ISOLATION {} @@ -5193,6 +6970,8 @@ keyword: | INNOBASE_SYM {} | INSERT_METHOD {} | RELAY_THREAD {} + | LABEL_SYM {} + | LANGUAGE_SYM {} | LAST_SYM {} | LEAVES {} | LEVEL_SYM {} @@ -5219,7 +6998,9 @@ keyword: | MAX_CONNECTIONS_PER_HOUR {} | MAX_QUERIES_PER_HOUR {} | MAX_UPDATES_PER_HOUR {} + | MAX_USER_CONNECTIONS_SYM {} | MEDIUM_SYM {} + | MERGE_SYM {} | MICROSECOND_SYM {} | MINUTE_SYM {} | MIN_ROWS {} @@ -5229,6 +7010,8 @@ keyword: | MULTILINESTRING {} | MULTIPOINT {} | MULTIPOLYGON {} + | MUTEX_SYM {} + | NAME_SYM {} | NAMES_SYM {} | NATIONAL_SYM {} | NCHAR_SYM {} @@ -5249,8 +7032,10 @@ keyword: | POLYGON {} | PREPARE_SYM {} | PREV_SYM {} + | PRIVILEGES {} | PROCESS {} | PROCESSLIST_SYM {} + | QUARTER_SYM {} | QUERY_SYM {} | QUICK {} | RAID_0_SYM {} @@ -5267,14 +7052,17 @@ keyword: | RESET_SYM {} | RESOURCES {} | RESTORE_SYM {} + | RETURNS_SYM {} | ROLLBACK_SYM {} | ROLLUP_SYM {} + | ROUTINE_SYM {} | ROWS_SYM {} | ROW_FORMAT_SYM {} | ROW_SYM {} | 
RTREE_SYM {} | SAVEPOINT_SYM {} | SECOND_SYM {} + | SECURITY_SYM {} | SERIAL_SYM {} | SERIALIZABLE_SYM {} | SESSION_SYM {} @@ -5297,25 +7085,33 @@ keyword: | SUBDATE_SYM {} | SUBJECT_SYM {} | SUPER_SYM {} + | TABLES {} | TABLESPACE {} | TEMPORARY {} + | TEMPTABLE_SYM {} | TEXT_SYM {} | TRANSACTION_SYM {} | TRUNCATE_SYM {} | TIMESTAMP {} + | TIMESTAMP_ADD {} + | TIMESTAMP_DIFF {} | TIME_SYM {} | TYPE_SYM {} | TYPES_SYM {} | UDF_RETURNS_SYM {} - | UDF_SYM {} + | FUNCTION_SYM {} | UNCOMMITTED_SYM {} + | UNDEFINED_SYM {} | UNICODE_SYM {} + | UNKNOWN_SYM {} | UNTIL_SYM {} | USER {} | USE_FRM {} | VARIABLES {} + | VIEW_SYM {} | VALUE_SYM {} | WARNINGS {} + | WEEK_SYM {} | WORK_SYM {} | X509_SYM {} | YEAR_SYM {} @@ -5367,15 +7163,100 @@ opt_var_ident_type: ; option_value: - '@' ident_or_text equal expr - { - Lex->var_list.push_back(new set_var_user(new Item_func_set_user_var($2,$4))); - } + '@' ident_or_text equal expr + { + LEX *lex= Lex; + + if (lex->sphead && lex->sphead->m_type != TYPE_ENUM_PROCEDURE) + { + /* + We have to use special instruction in functions and triggers + because sp_instr_stmt will close all tables and thus ruin + execution of statement invoking function or trigger. + + We also do not want to allow expression with subselects in + this case. + */ + if (lex->query_tables) + { + my_message(ER_SP_SUBSELECT_NYI, ER(ER_SP_SUBSELECT_NYI), + MYF(0)); + YYABORT; + } + sp_instr_set_user_var *i= + new sp_instr_set_user_var(lex->sphead->instructions(), + lex->spcont, $2, $4); + lex->sphead->add_instr(i); + } + else + lex->var_list.push_back(new set_var_user(new Item_func_set_user_var($2,$4))); + + } | internal_variable_name equal set_expr_or_default { LEX *lex=Lex; - lex->var_list.push_back(new set_var(lex->option_type, $1.var, - &$1.base_name, $3)); + + if ($1.var == &trg_new_row_fake_var) + { + /* We are in trigger and assigning value to field of new row */ + Item *it; + sp_instr_set_trigger_field *i; + if (lex->query_tables) + { + my_message(ER_SP_SUBSELECT_NYI, ER(ER_SP_SUBSELECT_NYI), + MYF(0)); + YYABORT; + } + if ($3) + it= $3; + else + { + /* QQ: Shouldn't this be field's default value ? */ + it= new Item_null(); + } + + if (!(i= new sp_instr_set_trigger_field( + lex->sphead->instructions(), lex->spcont, + $1.base_name, it))) + YYABORT; + + /* + Let us add this item to list of all Item_trigger_field + objects in trigger. + */ + lex->trg_table_fields.link_in_list((byte *)&i->trigger_field, + (byte **)&i->trigger_field.next_trg_field); + + lex->sphead->add_instr(i); + } + else if ($1.var) + { /* System variable */ + lex->var_list.push_back(new set_var(lex->option_type, $1.var, + &$1.base_name, $3)); + } + else + { + /* An SP local variable */ + sp_pcontext *ctx= lex->spcont; + sp_pvar_t *spv; + sp_instr_set *i; + Item *it; + + spv= ctx->find_pvar(&$1.base_name); + + if ($3) + it= $3; + else if (spv->dflt) + it= spv->dflt; + else + it= new Item_null(); + i= new sp_instr_set(lex->sphead->instructions(), ctx, + spv->offset, it, spv->type); + i->tables= lex->query_tables; + lex->query_tables= 0; + lex->sphead->add_instr(i); + spv->isset= TRUE; + } } | '@' '@' opt_var_ident_type internal_variable_name equal set_expr_or_default { @@ -5409,7 +7290,8 @@ option_value: $3= $3 ? 
$3 : $2; if (!my_charset_same($2,$3)) { - net_printf(thd,ER_COLLATION_CHARSET_MISMATCH,$3->name,$2->csname); + my_error(ER_COLLATION_CHARSET_MISMATCH, MYF(0), + $3->name, $2->csname); YYABORT; } lex->var_list.push_back(new set_var_collation_client($3,$3,$3)); @@ -5433,33 +7315,77 @@ option_value: internal_variable_name: ident { - sys_var *tmp=find_sys_var($1.str, $1.length); - if (!tmp) - YYABORT; - $$.var= tmp; - $$.base_name.str=0; - $$.base_name.length=0; - /* - If this is time_zone variable we should open time zone - describing tables - */ - if (tmp == &sys_time_zone) - Lex->time_zone_tables_used= &fake_time_zone_tables_list; + LEX *lex= Lex; + sp_pcontext *spc= lex->spcont; + sp_pvar_t *spv; + + /* We have to lookup here since local vars can shadow sysvars */ + if (!spc || !(spv = spc->find_pvar(&$1))) + { + /* Not an SP local variable */ + sys_var *tmp=find_sys_var($1.str, $1.length); + if (!tmp) + YYABORT; + $$.var= tmp; + $$.base_name.str=0; + $$.base_name.length=0; + /* + If this is time_zone variable we should open time zone + describing tables + */ + if (tmp == &sys_time_zone && + lex->add_time_zone_tables_to_query_tables(YYTHD)) + YYABORT; + } + else + { + /* An SP local variable */ + $$.var= NULL; + $$.base_name= $1; + } } | ident '.' ident { + LEX *lex= Lex; if (check_reserved_words(&$1)) { yyerror(ER(ER_SYNTAX_ERROR)); YYABORT; } - sys_var *tmp=find_sys_var($3.str, $3.length); - if (!tmp) - YYABORT; - if (!tmp->is_struct()) - net_printf(YYTHD, ER_VARIABLE_IS_NOT_STRUCT, $3.str); - $$.var= tmp; - $$.base_name= $1; + if (lex->sphead && lex->sphead->m_type == TYPE_ENUM_TRIGGER && + (!my_strcasecmp(system_charset_info, $1.str, "NEW") || + !my_strcasecmp(system_charset_info, $1.str, "OLD"))) + { + if ($1.str[0]=='O' || $1.str[0]=='o') + { + my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "OLD", ""); + YYABORT; + } + if (lex->trg_chistics.event == TRG_EVENT_DELETE) + { + my_error(ER_TRG_NO_SUCH_ROW_IN_TRG, MYF(0), + "NEW", "on DELETE"); + YYABORT; + } + if (lex->trg_chistics.action_time == TRG_ACTION_AFTER) + { + my_error(ER_TRG_CANT_CHANGE_ROW, MYF(0), "NEW", "after "); + YYABORT; + } + /* This special combination will denote field of NEW row */ + $$.var= &trg_new_row_fake_var; + $$.base_name= $3; + } + else + { + sys_var *tmp=find_sys_var($3.str, $3.length); + if (!tmp) + YYABORT; + if (!tmp->is_struct()) + my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), $3.str); + $$.var= tmp; + $$.base_name= $1; + } } | DEFAULT '.' 
ident { @@ -5467,7 +7393,7 @@ internal_variable_name: if (!tmp) YYABORT; if (!tmp->is_struct()) - net_printf(YYTHD, ER_VARIABLE_IS_NOT_STRUCT, $3.str); + my_error(ER_VARIABLE_IS_NOT_STRUCT, MYF(0), $3.str); $$.var= tmp; $$.base_name.str= (char*) "default"; $$.base_name.length= 7; @@ -5614,53 +7540,35 @@ handler_rkey_mode: /* GRANT / REVOKE */ revoke: - REVOKE - { - LEX *lex=Lex; - lex->sql_command = SQLCOM_REVOKE; - lex->users_list.empty(); - lex->columns.empty(); - lex->grant= lex->grant_tot_col=0; - lex->select_lex.db=0; - lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; - lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0; - bzero((char*) &lex->mqh, sizeof(lex->mqh)); - } - revoke_command + REVOKE clear_privileges revoke_command {} ; revoke_command: - grant_privileges ON opt_table FROM user_list - {} + grant_privileges ON opt_table FROM grant_list + { + Lex->sql_command = SQLCOM_REVOKE; + } | - ALL opt_privileges ',' GRANT OPTION FROM user_list + ALL opt_privileges ',' GRANT OPTION FROM grant_list { Lex->sql_command = SQLCOM_REVOKE_ALL; } ; grant: - GRANT - { - LEX *lex=Lex; - lex->users_list.empty(); - lex->columns.empty(); - lex->sql_command = SQLCOM_GRANT; - lex->grant= lex->grant_tot_col= 0; - lex->select_lex.db= 0; - lex->ssl_type= SSL_TYPE_NOT_SPECIFIED; - lex->ssl_cipher= lex->x509_subject= lex->x509_issuer= 0; - bzero((char *)&(lex->mqh),sizeof(lex->mqh)); - } - grant_privileges ON opt_table TO_SYM user_list + GRANT clear_privileges grant_privileges ON opt_table TO_SYM grant_list require_clause grant_options - {} + { Lex->sql_command= SQLCOM_GRANT; } ; grant_privileges: - grant_privilege_list {} - | ALL opt_privileges { Lex->grant = GLOBAL_ACLS;} + object_privilege_list { } + | ALL opt_privileges + { + Lex->all_privileges= 1; + Lex->grant= GLOBAL_ACLS; + } ; opt_privileges: @@ -5668,11 +7576,11 @@ opt_privileges: | PRIVILEGES ; -grant_privilege_list: - grant_privilege - | grant_privilege_list ',' grant_privilege; +object_privilege_list: + object_privilege + | object_privilege_list ',' object_privilege; -grant_privilege: +object_privilege: SELECT_SYM { Lex->which_columns = SELECT_ACL;} opt_column_list {} | INSERT { Lex->which_columns = INSERT_ACL;} opt_column_list {} | UPDATE_SYM { Lex->which_columns = UPDATE_ACL; } opt_column_list {} @@ -5693,8 +7601,12 @@ grant_privilege: | SUPER_SYM { Lex->grant |= SUPER_ACL;} | CREATE TEMPORARY TABLES { Lex->grant |= CREATE_TMP_ACL;} | LOCK_SYM TABLES { Lex->grant |= LOCK_TABLES_ACL; } - | REPLICATION SLAVE { Lex->grant |= REPL_SLAVE_ACL;} - | REPLICATION CLIENT_SYM { Lex->grant |= REPL_CLIENT_ACL;} + | REPLICATION SLAVE { Lex->grant |= REPL_SLAVE_ACL; } + | REPLICATION CLIENT_SYM { Lex->grant |= REPL_CLIENT_ACL; } + | CREATE VIEW_SYM { Lex->grant |= CREATE_VIEW_ACL; } + | SHOW VIEW_SYM { Lex->grant |= SHOW_VIEW_ACL; } + | CREATE ROUTINE_SYM { Lex->grant |= CREATE_PROC_ACL; } + | ALTER ROUTINE_SYM { Lex->grant |= ALTER_PROC_ACL; } ; @@ -5714,7 +7626,7 @@ require_list_element: LEX *lex=Lex; if (lex->x509_subject) { - net_printf(lex->thd,ER_DUP_ARGUMENT, "SUBJECT"); + my_error(ER_DUP_ARGUMENT, MYF(0), "SUBJECT"); YYABORT; } lex->x509_subject=$2.str; @@ -5724,7 +7636,7 @@ require_list_element: LEX *lex=Lex; if (lex->x509_issuer) { - net_printf(lex->thd,ER_DUP_ARGUMENT, "ISSUER"); + my_error(ER_DUP_ARGUMENT, MYF(0), "ISSUER"); YYABORT; } lex->x509_issuer=$2.str; @@ -5734,7 +7646,7 @@ require_list_element: LEX *lex=Lex; if (lex->ssl_cipher) { - net_printf(lex->thd,ER_DUP_ARGUMENT, "CIPHER"); + my_error(ER_DUP_ARGUMENT, MYF(0), "CIPHER"); YYABORT; } 
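The option_value and internal_variable_name changes above are what allow trigger bodies to read OLD.<column> and assign to NEW.<column>, subject to the checks shown (no OLD row on INSERT, no NEW row on DELETE, no assignment to NEW in AFTER triggers). A minimal sketch of the accepted syntax, where table t1 and its columns are hypothetical:

  CREATE TRIGGER t1_bu BEFORE UPDATE ON t1 FOR EACH ROW
    SET NEW.total = OLD.total + NEW.delta;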
lex->ssl_cipher=$2.str; @@ -5750,7 +7662,8 @@ opt_table: lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, + ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); YYABORT; } } @@ -5762,7 +7675,8 @@ opt_table: lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, + ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); YYABORT; } } @@ -5774,7 +7688,8 @@ opt_table: lex->grant= GLOBAL_ACLS & ~GRANT_ACL; else if (lex->columns.elements) { - send_error(lex->thd,ER_ILLEGAL_GRANT_FOR_TABLE); + my_message(ER_ILLEGAL_GRANT_FOR_TABLE, + ER(ER_ILLEGAL_GRANT_FOR_TABLE), MYF(0)); YYABORT; } } @@ -5790,8 +7705,18 @@ opt_table: user_list: + user { if (Lex->users_list.push_back($1)) YYABORT;} + | user_list ',' user + { + if (Lex->users_list.push_back($3)) + YYABORT; + } + ; + + +grant_list: grant_user { if (Lex->users_list.push_back($1)) YYABORT;} - | user_list ',' grant_user + | grant_list ',' grant_user { if (Lex->users_list.push_back($3)) YYABORT; @@ -5899,18 +7824,23 @@ grant_option: | MAX_QUERIES_PER_HOUR ULONG_NUM { Lex->mqh.questions=$2; - Lex->mqh.bits |= 1; + Lex->mqh.specified_limits|= USER_RESOURCES::QUERIES_PER_HOUR; } | MAX_UPDATES_PER_HOUR ULONG_NUM { Lex->mqh.updates=$2; - Lex->mqh.bits |= 2; + Lex->mqh.specified_limits|= USER_RESOURCES::UPDATES_PER_HOUR; } | MAX_CONNECTIONS_PER_HOUR ULONG_NUM { - Lex->mqh.connections=$2; - Lex->mqh.bits |= 4; + Lex->mqh.conn_per_hour= $2; + Lex->mqh.specified_limits|= USER_RESOURCES::CONNECTIONS_PER_HOUR; } + | MAX_USER_CONNECTIONS_SYM ULONG_NUM + { + Lex->mqh.user_conn= $2; + Lex->mqh.specified_limits|= USER_RESOURCES::USER_CONNECTIONS; + } ; begin: @@ -5959,7 +7889,7 @@ union_list: if (lex->exchange) { /* Only the last SELECT can have INTO...... 
*/ - net_printf(lex->thd, ER_WRONG_USAGE, "UNION", "INTO"); + my_error(ER_WRONG_USAGE, MYF(0), "UNION", "INTO"); YYABORT; } if (lex->current_select->linkage == GLOBAL_OPTIONS_TYPE) @@ -6083,3 +8013,47 @@ subselect_end: lex->current_select = lex->current_select->return_after_parsing(); }; +opt_view_list: + /* empty */ {} + | '(' view_list ')' + ; + +view_list: + ident + { + Lex->view_list.push_back((LEX_STRING*) + sql_memdup(&$1, sizeof(LEX_STRING))); + } + | view_list ',' ident + { + Lex->view_list.push_back((LEX_STRING*) + sql_memdup(&$3, sizeof(LEX_STRING))); + } + ; + +or_replace: + /* empty */ { Lex->create_view_mode= VIEW_CREATE_NEW; } + | OR_SYM REPLACE { Lex->create_view_mode= VIEW_CREATE_OR_REPLACE; } + ; + +algorithm: + /* empty */ + { Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; } + | ALGORITHM_SYM EQ UNDEFINED_SYM + { Lex->create_view_algorithm= VIEW_ALGORITHM_UNDEFINED; } + | ALGORITHM_SYM EQ MERGE_SYM + { Lex->create_view_algorithm= VIEW_ALGORITHM_MERGE; } + | ALGORITHM_SYM EQ TEMPTABLE_SYM + { Lex->create_view_algorithm= VIEW_ALGORITHM_TMPTABLE; } + ; +check_option: + /* empty */ + { Lex->create_view_check= VIEW_CHECK_NONE; } + | WITH CHECK_SYM OPTION + { Lex->create_view_check= VIEW_CHECK_CASCADED; } + | WITH CASCADED CHECK_SYM OPTION + { Lex->create_view_check= VIEW_CHECK_CASCADED; } + | WITH LOCAL_SYM CHECK_SYM OPTION + { Lex->create_view_check= VIEW_CHECK_LOCAL; } + ; + diff --git a/sql/strfunc.cc b/sql/strfunc.cc index 3ad6b1155d1..824aa15097e 100644 --- a/sql/strfunc.cc +++ b/sql/strfunc.cc @@ -150,7 +150,7 @@ uint find_type2(TYPELIB *typelib, const char *x, uint length, CHARSET_INFO *cs) int find,pos,findpos; const char *j; DBUG_ENTER("find_type2"); - DBUG_PRINT("enter",("x: '%s' lib: 0x%lx",x,typelib)); + DBUG_PRINT("enter",("x: '%.*s' lib: 0x%lx", length, x, typelib)); if (!typelib->count) { diff --git a/sql/structs.h b/sql/structs.h index 846b3400fab..cbc7161ee20 100644 --- a/sql/structs.h +++ b/sql/structs.h @@ -74,7 +74,7 @@ typedef struct st_key_part_info { /* Info about a key part */ uint16 store_length; uint16 key_type; uint16 fieldnr; /* Fieldnum in UNIREG */ - uint8 key_part_flag; /* 0 or HA_REVERSE_SORT */ + uint16 key_part_flag; /* 0 or HA_REVERSE_SORT */ uint8 type; uint8 null_bit; /* Position to null_bit */ } KEY_PART_INFO ; @@ -98,6 +98,7 @@ typedef struct st_key { union { int bdb_return_if_eq; } handler; + struct st_table *table; } KEY; @@ -163,9 +164,10 @@ typedef struct st_known_date_time_format { enum SHOW_TYPE { SHOW_UNDEF, - SHOW_LONG, SHOW_LONGLONG, SHOW_INT, SHOW_CHAR, SHOW_CHAR_PTR, SHOW_BOOL, - SHOW_MY_BOOL, SHOW_OPENTABLES, SHOW_STARTTIME, SHOW_QUESTION, + SHOW_LONG, SHOW_LONGLONG, SHOW_INT, SHOW_CHAR, SHOW_CHAR_PTR, SHOW_DOUBLE, + SHOW_BOOL, SHOW_MY_BOOL, SHOW_OPENTABLES, SHOW_STARTTIME, SHOW_QUESTION, SHOW_LONG_CONST, SHOW_INT_CONST, SHOW_HAVE, SHOW_SYS, SHOW_HA_ROWS, + SHOW_VARS, #ifdef HAVE_OPENSSL SHOW_SSL_CTX_SESS_ACCEPT, SHOW_SSL_CTX_SESS_ACCEPT_GOOD, SHOW_SSL_GET_VERSION, SHOW_SSL_CTX_GET_SESSION_CACHE_MODE, @@ -181,7 +183,8 @@ enum SHOW_TYPE SHOW_SSL_GET_CIPHER_LIST, #endif /* HAVE_OPENSSL */ SHOW_RPL_STATUS, SHOW_SLAVE_RUNNING, - SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG + SHOW_KEY_CACHE_LONG, SHOW_KEY_CACHE_CONST_LONG, + SHOW_LONG_STATUS, SHOW_LONG_CONST_STATUS }; enum SHOW_COMP_OPTION { SHOW_OPTION_YES, SHOW_OPTION_NO, SHOW_OPTION_DISABLED}; @@ -203,16 +206,65 @@ typedef struct st_lex_user { } LEX_USER; +/* + This structure specifies the maximum amount of resources which + can be consumed by each account. 
Zero value of a member means + there is no limit. +*/ typedef struct user_resources { - uint questions, updates, connections, bits; + /* Maximum number of queries/statements per hour. */ + uint questions; + /* + Maximum number of updating statements per hour (which statements are + updating is defined by uc_update_queries array). + */ + uint updates; + /* Maximum number of connections established per hour. */ + uint conn_per_hour; + /* Maximum number of concurrent connections. */ + uint user_conn; + /* + Values of this enum and specified_limits member are used by the + parser to store which user limits were specified in GRANT statement. + */ + enum {QUERIES_PER_HOUR= 1, UPDATES_PER_HOUR= 2, CONNECTIONS_PER_HOUR= 4, + USER_CONNECTIONS= 8}; + uint specified_limits; } USER_RESOURCES; + +/* + This structure is used for counting resources consumed and for checking + them against specified user limits. +*/ typedef struct user_conn { - char *user, *host; - uint len, connections, conn_per_hour, updates, questions, user_len; + /* + Pointer to user+host key (pair separated by '\0') defining the entity + for which resources are counted (By default it is user account thus + priv_user/priv_host pair is used. If --old-style-user-limits option + is enabled, resources are counted for each user+host separately). + */ + char *user; + /* Pointer to host part of the key. */ + char *host; + /* Total length of the key. */ + uint len; + /* Current amount of concurrent connections for this account. */ + uint connections; + /* + Current number of connections per hour, number of updating statements + per hour and total number of statements per hour for this account. + */ + uint conn_per_hour, updates, questions; + /* Maximum amount of resources which account is allowed to consume. */ USER_RESOURCES user_resources; + /* + The moment of time when per hour counters were reset last time + (i.e. start of "hour" for conn_per_hour, updates, questions counters). 
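These per-account limits are filled in by the reworked grant_option rules earlier in the patch, which also add the MAX_USER_CONNECTIONS limit and record what was given via the specified_limits bitmask. For illustration, a statement that sets them (the account name is made up):

  GRANT USAGE ON *.* TO 'app'@'localhost'
    WITH MAX_QUERIES_PER_HOUR 1000 MAX_USER_CONNECTIONS 10;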
+ */ time_t intime; } USER_CONN; + /* Bits in form->update */ #define REG_MAKE_DUPP 1 /* Make a copy of record when read */ #define REG_NEW_RECORD 2 /* Write a new record if not found */ diff --git a/sql/table.cc b/sql/table.cc index 5ede9c1e43d..e9b89d6b124 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -20,7 +20,7 @@ #include "mysql_priv.h" #include <errno.h> #include <m_ctype.h> - +#include "md5.h" /* Functions defined in this file */ @@ -61,8 +61,8 @@ static byte* get_field_name(Field **buff,uint *length, 5 Error (see frm_error: charset unavailable) */ -int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, - uint ha_open_flags, TABLE *outparam) +int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, + uint prgflag, uint ha_open_flags, TABLE *outparam) { reg1 uint i; reg2 uchar *strpos; @@ -76,26 +76,55 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, my_string record; const char **int_array; bool use_hash, null_field_first; + bool error_reported= FALSE; File file; Field **field_ptr,*reg_field; KEY *keyinfo; KEY_PART_INFO *key_part; uchar *null_pos; - uint null_bit, new_frm_ver, field_pack_length; + uint null_bit_pos, new_frm_ver, field_pack_length; SQL_CRYPT *crypted=0; MEM_ROOT **root_ptr, *old_root; DBUG_ENTER("openfrm"); - DBUG_PRINT("enter",("name: '%s' form: %lx",name,outparam)); + DBUG_PRINT("enter",("name: '%s' form: 0x%lx",name,outparam)); + + error=1; + disk_buff=NULL; + root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); + old_root= *root_ptr; + + if ((file=my_open(fn_format(index_file, name, "", reg_ext, + MY_UNPACK_FILENAME), + O_RDONLY | O_SHARE, + MYF(0))) + < 0) + { + goto err_w_init; + } + + if (my_read(file,(byte*) head,64,MYF(MY_NABP))) + { + goto err_w_init; + } + + if (memcmp(head, "TYPE=", 5) == 0) + { + // new .frm + my_close(file,MYF(MY_WME)); + + if (db_stat & NO_ERR_ON_NEW_FRM) + DBUG_RETURN(5); + + // caller can't process new .frm + error= 4; + goto err_w_init; + } bzero((char*) outparam,sizeof(*outparam)); + outparam->in_use= thd; outparam->blob_ptr_size=sizeof(char*); - disk_buff=NULL; record= NULL; keynames=NullS; outparam->db_stat = db_stat; - error=1; - init_sql_alloc(&outparam->mem_root, TABLE_ALLOC_BLOCK_SIZE, 0); - root_ptr= my_pthread_getspecific_ptr(MEM_ROOT**, THR_MALLOC); - old_root= *root_ptr; *root_ptr= &outparam->mem_root; outparam->real_name=strdup_root(&outparam->mem_root, @@ -105,21 +134,14 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (!outparam->real_name || !outparam->table_name) goto err_end; - if ((file=my_open(fn_format(index_file,name,"",reg_ext,MY_UNPACK_FILENAME), - O_RDONLY | O_SHARE, - MYF(0))) - < 0) - { - goto err_end; /* purecov: inspected */ - } error=4; if (!(outparam->path= strdup_root(&outparam->mem_root,name))) goto err_not_open; *fn_ext(outparam->path)='\0'; // Remove extension - if (my_read(file,(byte*) head,64,MYF(MY_NABP))) goto err_not_open; if (head[0] != (uchar) 254 || head[1] != 1 || - (head[2] != FRM_VER && head[2] != FRM_VER+1 && head[2] != FRM_VER+3)) + (head[2] != FRM_VER && head[2] != FRM_VER+1 && + ! 
(head[2] >= FRM_VER+3 && head[2] <= FRM_VER+4))) goto err_not_open; /* purecov: inspected */ new_field_pack_flag=head[27]; new_frm_ver= (head[2] - FRM_VER); @@ -150,7 +172,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->db_record_offset=1; if (db_create_options & HA_OPTION_LONG_BLOB_PTR) outparam->blob_ptr_size=portable_sizeof_char_ptr; - /* Set temporaryly a good value for db_low_byte_first */ + /* Set temporarily a good value for db_low_byte_first */ outparam->db_low_byte_first=test(outparam->db_type != DB_TYPE_ISAM); error=4; outparam->max_rows=uint4korr(head+18); @@ -195,7 +217,8 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, for (i=0 ; i < keys ; i++, keyinfo++) { - if (new_frm_ver == 3) + keyinfo->table= outparam; + if (new_frm_ver >= 3) { keyinfo->flags= (uint) uint2korr(strpos) ^ HA_NOSAME; keyinfo->key_length= (uint) uint2korr(strpos+2); @@ -240,11 +263,6 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, } key_part->store_length=key_part->length; } - set_if_bigger(outparam->max_key_length,keyinfo->key_length+ - keyinfo->key_parts); - outparam->total_key_length+= keyinfo->key_length; - if (keyinfo->flags & HA_NOSAME) - set_if_bigger(outparam->max_unique_length,keyinfo->key_length); } keynames=(char*) key_part; strpos+= (strmov(keynames, (char *) strpos) - keynames)+1; @@ -387,15 +405,15 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (null_field_first) { outparam->null_flags=null_pos=(uchar*) record+1; - null_bit= (db_create_options & HA_OPTION_PACK_RECORD) ? 1 : 2; - outparam->null_bytes=(outparam->null_fields+null_bit+6)/8; + null_bit_pos= (db_create_options & HA_OPTION_PACK_RECORD) ? 0 : 1; + outparam->null_bytes= (outparam->null_fields + null_bit_pos + 7) / 8; } else { outparam->null_bytes=(outparam->null_fields+7)/8; outparam->null_flags=null_pos= (uchar*) (record+1+outparam->reclength-outparam->null_bytes); - null_bit=1; + null_bit_pos= 0; } use_hash= outparam->fields >= MAX_FIELDS_BEFORE_HASH; @@ -413,7 +431,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, Field::geometry_type geom_type= Field::GEOM_GEOMETRY; LEX_STRING comment; - if (new_frm_ver == 3) + if (new_frm_ver >= 3) { /* new frm file in 4.1 */ field_length= uint2korr(strpos+3); @@ -421,11 +439,10 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, pack_flag= uint2korr(strpos+8); unireg_type= (uint) strpos[10]; interval_nr= (uint) strpos[12]; - uint comment_length=uint2korr(strpos+15); field_type=(enum_field_types) (uint) strpos[13]; - // charset and geometry_type share the same byte in frm + /* charset and geometry_type share the same byte in frm */ if (field_type == FIELD_TYPE_GEOMETRY) { #ifdef HAVE_SPATIAL @@ -464,6 +481,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, field_length= (uint) strpos[3]; recpos= uint2korr(strpos+4), pack_flag= uint2korr(strpos+6); + pack_flag&= ~FIELDFLAG_NO_DEFAULT; // Safety for old files unireg_type= (uint) strpos[8]; interval_nr= (uint) strpos[10]; @@ -502,7 +520,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, *field_ptr=reg_field= make_field(record+recpos, (uint32) field_length, - null_pos,null_bit, + null_pos, null_bit_pos, pack_flag, field_type, charset, @@ -519,14 +537,21 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, goto err_not_open; /* purecov: inspected */ } 
reg_field->comment=comment; - if (!(reg_field->flags & NOT_NULL_FLAG)) + if (field_type == FIELD_TYPE_BIT) { - if ((null_bit<<=1) == 256) + if ((null_bit_pos+= field_length & 7) > 7) { - null_pos++; - null_bit=1; + null_pos++; + null_bit_pos-= 8; } } + if (!(reg_field->flags & NOT_NULL_FLAG)) + { + if (!(null_bit_pos= (null_bit_pos + 1) & 7)) + null_pos++; + } + if (f_no_default(pack_flag)) + reg_field->flags|= NO_DEFAULT_VALUE_FLAG; if (reg_field->unireg_check == Field::NEXT_NUMBER) outparam->found_next_number_field= reg_field; if (outparam->timestamp_field == reg_field) @@ -598,10 +623,12 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, keyinfo->key_length+= HA_KEY_NULL_LENGTH; } if (field->type() == FIELD_TYPE_BLOB || - field->real_type() == FIELD_TYPE_VAR_STRING) + field->real_type() == MYSQL_TYPE_VARCHAR) { if (field->type() == FIELD_TYPE_BLOB) key_part->key_part_flag|= HA_BLOB_PART; + else + key_part->key_part_flag|= HA_VAR_LENGTH_PART; keyinfo->extra_length+=HA_KEY_BLOB_LENGTH; key_part->store_length+=HA_KEY_BLOB_LENGTH; keyinfo->key_length+= HA_KEY_BLOB_LENGTH; @@ -612,6 +639,9 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (!(field->flags & BINARY_FLAG)) keyinfo->flags|= HA_END_SPACE_KEY; } + if (field->type() == MYSQL_TYPE_BIT) + key_part->key_part_flag|= HA_BIT_PART; + if (i == 0 && key != primary_key) field->flags |= ((keyinfo->flags & HA_NOSAME) && @@ -670,6 +700,12 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, } } keyinfo->usable_key_parts=usable_parts; // Filesort + + set_if_bigger(outparam->max_key_length,keyinfo->key_length+ + keyinfo->key_parts); + outparam->total_key_length+= keyinfo->key_length; + if (keyinfo->flags & HA_NOSAME) + set_if_bigger(outparam->max_unique_length,keyinfo->key_length); } if (primary_key < MAX_KEY && (outparam->keys_in_use.is_set(primary_key))) @@ -737,7 +773,7 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, outparam->blob_field= (Field_blob**) (outparam->field+outparam->fields); // Point at null ptr - /* The table struct is now initialzed; Open the table */ + /* The table struct is now initialized; Open the table */ error=2; if (db_stat) { @@ -766,19 +802,29 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, error= 1; my_errno= ENOENT; } + else + { + outparam->file->print_error(err, MYF(0)); + error_reported= TRUE; + } goto err_not_open; /* purecov: inspected */ } } outparam->db_low_byte_first=outparam->file->low_byte_first(); *root_ptr= old_root; - opened_tables++; + thd->status_var.opened_tables++; #ifndef DBUG_OFF if (use_hash) (void) hash_check(&outparam->name_hash); #endif DBUG_RETURN (0); + err_w_init: + /* Avoid problem with uninitialized data */ + bzero((char*) outparam,sizeof(*outparam)); + outparam->real_name= (char*)name+dirname_length(name); + err_not_open: x_free((gptr) disk_buff); if (file > 0) @@ -787,9 +833,10 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, err_end: /* Here when no file */ delete crypted; *root_ptr= old_root; - frm_error(error, outparam, name, ME_ERROR + ME_WAITTANG, errarg); + if (! 
error_reported) + frm_error(error,outparam,name,ME_ERROR+ME_WAITTANG, errarg); delete outparam->file; - outparam->file=0; // For easyer errorchecking + outparam->file=0; // For easier errorchecking outparam->db_stat=0; hash_free(&outparam->name_hash); free_root(&outparam->mem_root,MYF(0)); @@ -818,7 +865,7 @@ int closefrm(register TABLE *table) table->fields=0; } delete table->file; - table->file=0; /* For easyer errorchecking */ + table->file=0; /* For easier errorchecking */ hash_free(&table->name_hash); free_root(&table->mem_root,MYF(0)); DBUG_RETURN(error); @@ -988,11 +1035,11 @@ static void frm_error(int error, TABLE *form, const char *name, uint length=dirname_part(buff,name); buff[length-1]=0; db=buff+dirname_length(buff); - my_error(ER_NO_SUCH_TABLE,MYF(0),db,form->real_name); + my_error(ER_NO_SUCH_TABLE, MYF(0), db, form->real_name); } else - my_error(ER_FILE_NOT_FOUND,errortype, - fn_format(buff,name,form_dev,reg_ext,0),my_errno); + my_error(ER_FILE_NOT_FOUND, errortype, + fn_format(buff, name, form_dev, reg_ext, 0), my_errno); break; case 2: { @@ -1020,8 +1067,8 @@ static void frm_error(int error, TABLE *form, const char *name, } default: /* Better wrong error than none */ case 4: - my_error(ER_NOT_FORM_FILE,errortype, - fn_format(buff,name,form_dev,reg_ext,0)); + my_error(ER_NOT_FORM_FILE, errortype, + fn_format(buff, name, form_dev, reg_ext, 0)); break; } DBUG_VOID_RETURN; @@ -1030,7 +1077,7 @@ static void frm_error(int error, TABLE *form, const char *name, /* ** fix a str_type to a array type - ** typeparts sepearated with some char. differents types are separated + ** typeparts separated with some char. differents types are separated ** with a '\0' */ @@ -1122,7 +1169,7 @@ static uint find_field(TABLE *form,uint start,uint length) } - /* Check that the integer is in the internvall */ + /* Check that the integer is in the internal */ int set_zone(register int nr, int min_zone, int max_zone) { @@ -1186,7 +1233,7 @@ void append_unescaped(String *res, const char *pos, uint length) res->append('n'); break; case '\r': - res->append('\\'); /* This gives better readbility */ + res->append('\\'); /* This gives better readability */ res->append('r'); break; case '\\': @@ -1231,7 +1278,11 @@ File create_frm(register my_string name, uint reclength, uchar *fileinfo, if ((file=my_create(name,CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0) { bzero((char*) fileinfo,64); - fileinfo[0]=(uchar) 254; fileinfo[1]= 1; fileinfo[2]= FRM_VER+3; // Header + /* header */ + fileinfo[0]=(uchar) 254; + fileinfo[1]= 1; + fileinfo[2]= FRM_VER+3+ test(create_info->varchar); + fileinfo[3]= (uchar) ha_checktype(create_info->db_type); fileinfo[4]=1; int2store(fileinfo+6,IO_SIZE); /* Next block starts here */ @@ -1384,7 +1435,7 @@ bool check_db_name(char *name) if (use_mb(system_charset_info)) { int len=my_ismbchar(system_charset_info, name, - name+system_charset_info->mbmaxlen); + name+system_charset_info->mbmaxlen); if (len) { name += len; @@ -1461,7 +1512,7 @@ bool check_column_name(const char *name) if (use_mb(system_charset_info)) { int len=my_ismbchar(system_charset_info, name, - name+system_charset_info->mbmaxlen); + name+system_charset_info->mbmaxlen); if (len) { name += len; @@ -1496,12 +1547,510 @@ db_type get_table_type(const char *name) error=my_read(file,(byte*) head,4,MYF(MY_NABP)); my_close(file,MYF(0)); if (error || head[0] != (uchar) 254 || head[1] != 1 || - (head[2] != FRM_VER && head[2] != FRM_VER+1 && head[2] != FRM_VER+3)) + (head[2] != FRM_VER && head[2] != FRM_VER+1 && + (head[2] < 
FRM_VER+3 || head[2] > FRM_VER+4))) DBUG_RETURN(DB_TYPE_UNKNOWN); DBUG_RETURN(ha_checktype((enum db_type) (uint) *(head+3))); } +/* + calculate md5 of query + + SYNOPSIS + st_table_list::calc_md5() + buffer buffer for md5 writing +*/ + +void st_table_list::calc_md5(char *buffer) +{ + my_MD5_CTX context; + uchar digest[16]; + my_MD5Init(&context); + my_MD5Update(&context,(uchar *) query.str, query.length); + my_MD5Final(digest, &context); + sprintf((char *) buffer, + "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", + digest[0], digest[1], digest[2], digest[3], + digest[4], digest[5], digest[6], digest[7], + digest[8], digest[9], digest[10], digest[11], + digest[12], digest[13], digest[14], digest[15]); +} + + +/* + set ancestor TABLE for table place holder of VIEW + + SYNOPSIS + st_table_list::set_ancestor() +*/ + +void st_table_list::set_ancestor() +{ + /* process all tables of view */ + for (TABLE_LIST *tbl= ancestor; tbl; tbl= tbl->next_local) + { + if (tbl->ancestor) + ancestor->set_ancestor(); + tbl->table->grant= grant; + } + /* if view contain only one table, substitute TABLE of it */ + if (!ancestor->next_local) + { + table= ancestor->table; + schema_table= ancestor->schema_table; + } +} + + +/* + Save old want_privilege and clear want_privilege + + SYNOPSIS + save_and_clear_want_privilege() +*/ + +void st_table_list::save_and_clear_want_privilege() +{ + for (TABLE_LIST *tbl= ancestor; tbl; tbl= tbl->next_local) + { + if (tbl->table) + { + privilege_backup= tbl->table->grant.want_privilege; + tbl->table->grant.want_privilege= 0; + } + else + { + DBUG_ASSERT(tbl->view && tbl->ancestor && + tbl->ancestor->next_local); + tbl->save_and_clear_want_privilege(); + } + } +} + + +/* + restore want_privilege saved by save_and_clear_want_privilege + + SYNOPSIS + restore_want_privilege() +*/ + +void st_table_list::restore_want_privilege() +{ + for (TABLE_LIST *tbl= ancestor; tbl; tbl= tbl->next_local) + { + if (tbl->table) + tbl->table->grant.want_privilege= privilege_backup; + else + { + DBUG_ASSERT(tbl->view && tbl->ancestor && + tbl->ancestor->next_local); + tbl->restore_want_privilege(); + } + } +} + + +/* + setup fields of placeholder of merged VIEW + + SYNOPSIS + st_table_list::setup_ancestor() + thd - thread handler + conds - condition of this JOIN + check_opt_type - WHITH CHECK OPTION type (VIEW_CHECK_NONE, + VIEW_CHECK_LOCAL, VIEW_CHECK_CASCADED) + + DESCRIPTION + It is: + - preparing translation table for view columns (fix_fields() for every + call and creation for first call) + - preparing WHERE, ON and CHECK OPTION condition (fix_fields() for every + call and merging for first call). + If there are underlying view(s) procedure first will be called for them. + + RETURN + 0 - OK + 1 - error +*/ + +bool st_table_list::setup_ancestor(THD *thd, Item **conds, + uint8 check_opt_type) +{ + Field_translator *transl; + SELECT_LEX *select= &view->select_lex; + SELECT_LEX *current_select_save= thd->lex->current_select; + Item *item; + TABLE_LIST *tbl; + List_iterator_fast<Item> it(select->item_list); + uint i= 0; + bool save_set_query_id= thd->set_query_id; + bool save_wrapper= thd->lex->select_lex.no_wrap_view_item; + bool save_allow_sum_func= thd->allow_sum_func; + DBUG_ENTER("st_table_list::setup_ancestor"); + + for (tbl= ancestor; tbl; tbl= tbl->next_local) + { + if (tbl->ancestor && + tbl->setup_ancestor(thd, conds, + (check_opt_type == VIEW_CHECK_CASCADED ? 
+ VIEW_CHECK_CASCADED : + VIEW_CHECK_NONE))) + DBUG_RETURN(1); + } + + if (field_translation) + { + /* prevent look up in SELECTs tree */ + thd->lex->current_select= &thd->lex->select_lex; + thd->lex->select_lex.no_wrap_view_item= 1; + thd->set_query_id= 1; + /* this view was prepared already on previous PS/SP execution */ + Field_translator *end= field_translation + select->item_list.elements; + /* real rights will be checked in VIEW field */ + save_and_clear_want_privilege(); + /* aggregate function are allowed */ + thd->allow_sum_func= 1; + for (transl= field_translation; transl < end; transl++) + { + if (!transl->item->fixed && + transl->item->fix_fields(thd, ancestor, &transl->item)) + goto err; + } + for (tbl= ancestor; tbl; tbl= tbl->next_local) + { + if (tbl->on_expr && !tbl->on_expr->fixed && + tbl->on_expr->fix_fields(thd, ancestor, &tbl->on_expr)) + goto err; + } + if (where && !where->fixed && where->fix_fields(thd, ancestor, &where)) + goto err; + restore_want_privilege(); + + /* WHERE/ON resolved => we can rename fields */ + for (transl= field_translation; transl < end; transl++) + { + transl->item->rename((char *)transl->name); + } + goto ok; + } + + /* view fields translation table */ + if (!(transl= + (Field_translator*)(thd->current_arena-> + alloc(select->item_list.elements * + sizeof(Field_translator))))) + { + DBUG_RETURN(1); + } + + /* prevent look up in SELECTs tree */ + thd->lex->current_select= &thd->lex->select_lex; + thd->lex->select_lex.no_wrap_view_item= 1; + + /* + Resolve all view items against ancestor table. + + TODO: do it only for real used fields "on demand" to mark really + used fields correctly. + */ + thd->set_query_id= 1; + /* real rights will be checked in VIEW field */ + save_and_clear_want_privilege(); + /* aggregate function are allowed */ + thd->allow_sum_func= 1; + while ((item= it++)) + { + /* save original name of view column */ + char *name= item->name; + if (!item->fixed && item->fix_fields(thd, ancestor, &item)) + goto err; + /* set new item get in fix fields and original column name */ + transl[i].name= name; + transl[i++].item= item; + } + field_translation= transl; + /* TODO: sort this list? Use hash for big number of fields */ + + for (tbl= ancestor; tbl; tbl= tbl->next_local) + { + if (tbl->on_expr && !tbl->on_expr->fixed && + tbl->on_expr->fix_fields(thd, ancestor, &tbl->on_expr)) + goto err; + } + if (where || + (check_opt_type == VIEW_CHECK_CASCADED && + ancestor->check_option)) + { + Item_arena *arena= thd->current_arena, backup; + TABLE_LIST *tbl= this; + if (arena->is_conventional()) + arena= 0; // For easier test + + if (where && !where->fixed && where->fix_fields(thd, ancestor, &where)) + goto err; + + if (arena) + thd->set_n_backup_item_arena(arena, &backup); + + if (check_opt_type) + { + if (where) + check_option= where->copy_andor_structure(thd); + if (check_opt_type == VIEW_CHECK_CASCADED) + { + check_option= and_conds(check_option, ancestor->check_option); + } + } + + /* + check that it is not VIEW in which we insert with INSERT SELECT + (in this case we can't add view WHERE condition to main SELECT_LEX) + */ + if (where && !no_where_clause) + { + /* Go up to join tree and try to find left join */ + for (; tbl; tbl= tbl->embedding) + { + if (tbl->outer_join) + { + /* + Store WHERE condition to ON expression for outer join, because + we can't use WHERE to correctly execute left joins on VIEWs and + this expression will not be moved to WHERE condition (i.e. 
will + be clean correctly for PS/SP) + */ + tbl->on_expr= and_conds(tbl->on_expr, where); + break; + } + } + if (tbl == 0) + { + if (outer_join) + { + /* + Store WHERE condition to ON expression for outer join, because + we can't use WHERE to correctly execute left joins on VIEWs and + this expression will not be moved to WHERE condition (i.e. will + be clean correctly for PS/SP) + */ + on_expr= and_conds(on_expr, where); + } + else + { + /* + It is conds of JOIN, but it will be stored in + st_select_lex::prep_where for next reexecution + */ + *conds= and_conds(*conds, where); + } + } + } + + if (arena) + thd->restore_backup_item_arena(arena, &backup); + } + restore_want_privilege(); + + /* + fix_fields do not need tables, because new are only AND operation and we + just need recollect statistics + */ + if (check_option && !check_option->fixed && + check_option->fix_fields(thd, 0, &check_option)) + goto err; + + /* WHERE/ON resolved => we can rename fields */ + { + Field_translator *end= field_translation + select->item_list.elements; + for (transl= field_translation; transl < end; transl++) + { + transl->item->rename((char *)transl->name); + } + } + + /* full text function moving to current select */ + if (view->select_lex.ftfunc_list->elements) + { + Item_func_match *ifm; + List_iterator_fast<Item_func_match> + li(*(view->select_lex.ftfunc_list)); + while ((ifm= li++)) + current_select_save->ftfunc_list->push_front(ifm); + } + +ok: + thd->lex->select_lex.no_wrap_view_item= save_wrapper; + thd->lex->current_select= current_select_save; + thd->set_query_id= save_set_query_id; + thd->allow_sum_func= save_allow_sum_func; + DBUG_RETURN(0); + +err: + /* Hide "Unknown column" or "Unknown function" error */ + if (thd->net.last_errno == ER_BAD_FIELD_ERROR || + thd->net.last_errno == ER_SP_DOES_NOT_EXIST) + { + thd->clear_error(); + my_error(ER_VIEW_INVALID, MYF(0), view_db.str, view_name.str); + } + thd->lex->select_lex.no_wrap_view_item= save_wrapper; + thd->lex->current_select= current_select_save; + thd->set_query_id= save_set_query_id; + thd->allow_sum_func= save_allow_sum_func; + DBUG_RETURN(1); +} + + +/* + cleunup items belonged to view fields translation table + + SYNOPSIS + st_table_list::cleanup_items() +*/ + +void st_table_list::cleanup_items() +{ + if (!field_translation) + return; + + Field_translator *end= (field_translation + + view->select_lex.item_list.elements); + for (Field_translator *transl= field_translation; transl < end; transl++) + transl->item->walk(&Item::cleanup_processor, 0); +} + + +/* + check CHECK OPTION condition + + SYNOPSIS + check_option() + ignore_failure ignore check option fail + + RETURN + VIEW_CHECK_OK OK + VIEW_CHECK_ERROR FAILED + VIEW_CHECK_SKIP FAILED, but continue +*/ + +int st_table_list::view_check_option(THD *thd, bool ignore_failure) +{ + if (check_option && check_option->val_int() == 0) + { + if (ignore_failure) + { + push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, + ER_VIEW_CHECK_FAILED, ER(ER_VIEW_CHECK_FAILED), + view_db.str, view_name.str); + return(VIEW_CHECK_SKIP); + } + else + { + my_error(ER_VIEW_CHECK_FAILED, MYF(0), view_db.str, view_name.str); + return(VIEW_CHECK_ERROR); + } + } + return(VIEW_CHECK_OK); +} + + +/* + Find table in underlying tables by mask and check that only this + table belong to given mask + + SYNOPSIS + st_table_list::check_single_table() + table reference on variable where to store found table + (should be 0 on call, to find table, or point to table for + unique test) + map bit mask of tables + + RETURN + 
FALSE table not found or found only one + TRUE found several tables +*/ + +bool st_table_list::check_single_table(st_table_list **table, table_map map) +{ + for (TABLE_LIST *tbl= ancestor; tbl; tbl= tbl->next_local) + { + if (tbl->table) + { + if (tbl->table->map & map) + { + if (*table) + return TRUE; + else + *table= tbl; + } + } + else + if (tbl->check_single_table(table, map)) + return TRUE; + } + return FALSE; +} + + +/* + Set insert_values buffer + + SYNOPSIS + set_insert_values() + mem_root memory pool for allocating + + RETURN + FALSE - OK + TRUE - out of memory +*/ + +bool st_table_list::set_insert_values(MEM_ROOT *mem_root) +{ + if (table) + { + if (!table->insert_values && + !(table->insert_values= (byte *)alloc_root(mem_root, + table->rec_buff_length))) + return TRUE; + } + else + { + DBUG_ASSERT(view && ancestor && ancestor->next_local); + for (TABLE_LIST *tbl= ancestor; tbl; tbl= tbl->next_local) + if (tbl->set_insert_values(mem_root)) + return TRUE; + } + return FALSE; +} + + +void Field_iterator_view::set(TABLE_LIST *table) +{ + ptr= table->field_translation; + array_end= ptr + table->view->select_lex.item_list.elements; +} + + +const char *Field_iterator_table::name() +{ + return (*ptr)->field_name; +} + + +Item *Field_iterator_table::item(THD *thd) +{ + return new Item_field(thd, *ptr); +} + + +const char *Field_iterator_view::name() +{ + return ptr->name; +} + + /***************************************************************************** ** Instansiate templates *****************************************************************************/ diff --git a/sql/table.h b/sql/table.h index eed9969dac8..391b4908f96 100644 --- a/sql/table.h +++ b/sql/table.h @@ -20,6 +20,8 @@ class Item; /* Needed by ORDER */ class GRANT_TABLE; class st_select_lex_unit; +class st_select_lex; +class COND_EQUAL; /* Order clause list element */ @@ -27,9 +29,13 @@ typedef struct st_order { struct st_order *next; Item **item; /* Point at item in select fields */ Item *item_ptr; /* Storage for initial item */ + Item **item_copy; /* For SPs; the original item ptr */ + int counter; /* position in SELECT list, correct + only if counter_used is true*/ bool asc; /* true if ascending */ bool free_me; /* true if item isn't shared */ bool in_field_list; /* true if in select field list */ + bool counter_used; /* parameter was counter of columns */ Field *field; /* If tmp-table group */ char *buff; /* If tmp-table group */ table_map used,depend_map; @@ -45,6 +51,13 @@ typedef struct st_grant_info enum tmp_table_type {NO_TMP_TABLE=0, TMP_TABLE=1, TRANSACTIONAL_TMP_TABLE=2}; +enum frm_type_enum +{ + FRMTYPE_ERROR= 0, + FRMTYPE_TABLE, + FRMTYPE_VIEW +}; + typedef struct st_filesort_info { IO_CACHE *io_cache; /* If sorted through filebyte */ @@ -71,6 +84,7 @@ enum timestamp_auto_set_type class Field_timestamp; class Field_blob; +class Table_triggers_list; struct st_table { handler *file; @@ -79,7 +93,7 @@ struct st_table { /* hash of field names (contains pointers to elements of field array) */ HASH name_hash; byte *record[2]; /* Pointer to records */ - byte *default_values; /* Default values for INSERT */ + byte *default_values; /* Default values for INSERT */ byte *insert_values; /* used by INSERT ... 
UPDATE */ uint fields; /* field count */ uint reclength; /* Recordlength */ @@ -152,6 +166,8 @@ struct st_table { my_bool no_keyread, no_cache; my_bool clear_query_id; /* To reset query_id for tables and cols */ my_bool auto_increment_field_not_null; + my_bool insert_or_update; /* Can be used by the handler */ + my_bool alias_name_used; /* true if table_name is alias */ Field *next_number_field, /* Set if next_number is activated */ *found_next_number_field, /* Set on open */ *rowid_field; @@ -169,6 +185,8 @@ struct st_table { REGINFO reginfo; /* field connections */ MEM_ROOT mem_root; GRANT_INFO grant; + /* Table's triggers, 0 if there are no of them */ + Table_triggers_list *triggers; /* A pair "database_name\0table_name\0", widely used as simply a db name */ char *table_cache_key; @@ -185,12 +203,8 @@ struct st_table { key_part_map const_key_parts[MAX_KEY]; ulong query_id; uchar frm_version; - - union /* Temporary variables */ - { - uint temp_pool_slot; /* Used by intern temp tables */ - struct st_table_list *pos_in_table_list; - }; + uint temp_pool_slot; /* Used by intern temp tables */ + struct st_table_list *pos_in_table_list;/* Element referring to this table */ /* number of select if it is derived table */ uint derived_select_number; THD *in_use; /* Which thread uses this */ @@ -198,35 +212,234 @@ struct st_table { }; +typedef struct st_foreign_key_info +{ + LEX_STRING *forein_id; + LEX_STRING *referenced_db; + LEX_STRING *referenced_table; + LEX_STRING *constraint_method; + List<LEX_STRING> foreign_fields; + List<LEX_STRING> referenced_fields; +} FOREIGN_KEY_INFO; + + +enum enum_schema_tables +{ + SCH_SCHEMATA= 0, SCH_TABLES, SCH_COLUMNS, SCH_CHARSETS, SCH_COLLATIONS, + SCH_COLLATION_CHARACTER_SET_APPLICABILITY, SCH_PROCEDURES, SCH_STATISTICS, + SCH_VIEWS, SCH_USER_PRIVILEGES, SCH_SCHEMA_PRIVILEGES, SCH_TABLE_PRIVILEGES, + SCH_COLUMN_PRIVILEGES, SCH_TABLE_CONSTRAINTS, SCH_KEY_COLUMN_USAGE, + SCH_TABLE_NAMES, SCH_OPEN_TABLES, SCH_STATUS, SCH_VARIABLES +}; + + +typedef struct st_field_info +{ + const char* field_name; + uint field_length; + enum enum_field_types field_type; + int value; + bool maybe_null; + const char* old_name; +} ST_FIELD_INFO; + +struct st_table_list; +typedef class Item COND; + +typedef struct st_schema_table +{ + const char* table_name; + ST_FIELD_INFO *fields_info; + /* Create information_schema table */ + TABLE *(*create_table) (THD *thd, struct st_table_list *table_list); + /* Fill table with data */ + int (*fill_table) (THD *thd, struct st_table_list *tables, COND *cond); + /* Handle fileds for old SHOW */ + int (*old_format) (THD *thd, struct st_schema_table *schema_table); + int (*process_table) (THD *thd, struct st_table_list *tables, + TABLE *table, bool res, const char *base_name, + const char *file_name); + int idx_field1, idx_field2; + bool hidden; +} ST_SCHEMA_TABLE; + + #define JOIN_TYPE_LEFT 1 #define JOIN_TYPE_RIGHT 2 +#define VIEW_ALGORITHM_UNDEFINED 0 +#define VIEW_ALGORITHM_TMPTABLE 1 +#define VIEW_ALGORITHM_MERGE 2 + +/* view WITH CHECK OPTION parameter options */ +#define VIEW_CHECK_NONE 0 +#define VIEW_CHECK_LOCAL 1 +#define VIEW_CHECK_CASCADED 2 + +/* result of view WITH CHECK OPTION parameter check */ +#define VIEW_CHECK_OK 0 +#define VIEW_CHECK_ERROR 1 +#define VIEW_CHECK_SKIP 2 + +struct st_lex; +class select_union; +struct Field_translator +{ + Item *item; + const char *name; +}; + typedef struct st_table_list { - struct st_table_list *next; - char *db, *alias, *real_name; - char *option; /* Used by cache index */ + /* link in a local 
table list (used by SQL_LIST) */ + struct st_table_list *next_local; + /* link in a global list of all queries tables */ + struct st_table_list *next_global, **prev_global; + char *db, *alias, *real_name, *schema_table_name; + char *option; /* Used by cache index */ Item *on_expr; /* Used with outer join */ + COND_EQUAL *cond_equal; /* Used with outer join */ struct st_table_list *natural_join; /* natural join on this table*/ /* ... join ... USE INDEX ... IGNORE INDEX */ - List<String> *use_index, *ignore_index; - TABLE *table; /* opened table */ - st_table_list *table_list; /* pointer to node of list of all tables */ - class st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ - GRANT_INFO grant; + List<String> *use_index, *ignore_index; + TABLE *table; /* opened table */ + /* + select_result for derived table to pass it from table creation to table + filling procedure + */ + select_union *derived_result; + /* + Reference from aux_tables to local list entry of main select of + multi-delete statement: + delete t1 from t2,t1 where t1.a<'B' and t2.b=t1.b; + here it will be reference of first occurrence of t1 to second (as you + can see this lists can't be merged) + */ + st_table_list *correspondent_table; + st_select_lex_unit *derived; /* SELECT_LEX_UNIT of derived table */ + ST_SCHEMA_TABLE *schema_table; /* Information_schema table */ + st_select_lex *schema_select_lex; + /* link to select_lex where this table was used */ + st_select_lex *select_lex; + st_lex *view; /* link on VIEW lex for merging */ + Field_translator *field_translation; /* array of VIEW fields */ + /* ancestor of this table (VIEW merge algorithm) */ + st_table_list *ancestor; + /* most upper view this table belongs to */ + st_table_list *belong_to_view; + /* list of join table tree leaves */ + st_table_list *next_leaf; + Item *where; /* VIEW WHERE clause condition */ + Item *check_option; /* WITH CHECK OPTION condition */ + LEX_STRING query; /* text of (CRETE/SELECT) statement */ + LEX_STRING md5; /* md5 of query text */ + LEX_STRING source; /* source of CREATE VIEW */ + LEX_STRING view_db; /* saved view database */ + LEX_STRING view_name; /* saved view name */ + LEX_STRING timestamp; /* GMT time stamp of last operation */ + ulonglong file_version; /* version of file's field set */ + ulonglong updatable_view; /* VIEW can be updated */ + ulonglong revision; /* revision control number */ + ulonglong algorithm; /* 0 any, 1 tmp tables , 2 merging */ + ulonglong with_check; /* WITH CHECK OPTION */ + /* + effective value of WITH CHECK OPTION (differ for temporary table + algorithm) + */ + uint8 effective_with_check; + uint effective_algorithm; /* which algorithm was really used */ + uint privilege_backup; /* place for saving privileges */ + GRANT_INFO grant; thr_lock_type lock_type; uint outer_join; /* Which join type */ uint shared; /* Used in multi-upd */ uint32 db_length, real_name_length; + bool updatable; /* VIEW/TABLE can be updated now */ bool straight; /* optimize with prev table */ bool updating; /* for replicate-do/ignore table */ - bool force_index; /* Prefer index over table scan */ - bool ignore_leaves; /* Preload only non-leaf nodes */ + bool force_index; /* prefer index over table scan */ + bool ignore_leaves; /* preload only non-leaf nodes */ + bool no_where_clause; /* do not attach WHERE to SELECT */ + table_map dep_tables; /* tables the table depends on */ + table_map on_expr_dep_tables; /* tables on expression depends on */ + struct st_nested_join *nested_join; /* if the element is a nested 
join */ + st_table_list *embedding; /* nested join containing the table */ + List<struct st_table_list> *join_list;/* join list the table belongs to */ bool cacheable_table; /* stop PS caching */ - /* used in multi-upd privelege check */ - bool table_in_update_from_clause; + /* used in multi-upd/views privilege check */ + bool table_in_first_from_clause; + bool skip_temporary; /* this table shouldn't be temporary */ + /* TRUE if this merged view contain auto_increment field */ + bool contain_auto_increment; + /* FRMTYPE_ERROR if any type is acceptable */ + enum frm_type_enum required_type; + char timestamp_buffer[20]; /* buffer for timestamp (19+1) */ + + void calc_md5(char *buffer); + void set_ancestor(); + int view_check_option(THD *thd, bool ignore_failure); + bool setup_ancestor(THD *thd, Item **conds, uint8 check_option); + void cleanup_items(); + bool placeholder() {return derived || view; } + void print(THD *thd, String *str); + void save_and_clear_want_privilege(); + void restore_want_privilege(); + bool check_single_table(st_table_list **table, table_map map); + bool set_insert_values(MEM_ROOT *mem_root); } TABLE_LIST; +class Item; + +class Field_iterator: public Sql_alloc +{ +public: + virtual ~Field_iterator() {} + virtual void set(TABLE_LIST *)= 0; + virtual void next()= 0; + virtual bool end_of_fields()= 0; /* Return 1 at end of list */ + virtual const char *name()= 0; + virtual Item *item(THD *)= 0; + virtual Field *field()= 0; +}; + + +class Field_iterator_table: public Field_iterator +{ + Field **ptr; +public: + Field_iterator_table() :ptr(0) {} + void set(TABLE_LIST *table) { ptr= table->table->field; } + void set_table(TABLE *table) { ptr= table->field; } + void next() { ptr++; } + bool end_of_fields() { return *ptr == 0; } + const char *name(); + Item *item(THD *thd); + Field *field() { return *ptr; } +}; + + +class Field_iterator_view: public Field_iterator +{ + Field_translator *ptr, *array_end; +public: + Field_iterator_view() :ptr(0), array_end(0) {} + void set(TABLE_LIST *table); + void next() { ptr++; } + bool end_of_fields() { return ptr == array_end; } + const char *name(); + Item *item(THD *thd) { return ptr->item; } + Field *field() { return 0; } +}; + +typedef struct st_nested_join +{ + List<TABLE_LIST> join_list; /* list of elements in the nested join */ + table_map used_tables; /* bitmap of tables in the nested join */ + table_map not_null_tables; /* tables that rejects nulls */ + struct st_join_table *first_nested;/* the first nested table in the plan */ + uint counter; /* to count tables in the nested join */ +} NESTED_JOIN; + typedef struct st_changed_table_list { struct st_changed_table_list *next; @@ -234,8 +447,7 @@ typedef struct st_changed_table_list uint32 key_length; } CHANGED_TABLE_LIST; -typedef struct st_open_table_list -{ +typedef struct st_open_table_list{ struct st_open_table_list *next; char *db,*table; uint32 in_use,locked; diff --git a/sql/time.cc b/sql/time.cc index e76b169b336..f1d21915c23 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -33,15 +33,6 @@ int calc_weekday(long daynr,bool sunday_first_day_of_week) DBUG_RETURN ((int) ((daynr + 5L + (sunday_first_day_of_week ? 1L : 0L)) % 7)); } - /* Calc days in one year. works with 0 <= year <= 99 */ - -uint calc_days_in_year(uint year) -{ - return (year & 3) == 0 && (year%100 || (year%400 == 0 && year)) ? 
- 366 : 365; -} - - /* The bits in week_format has the following meaning: WEEK_MONDAY_FIRST (0) If not set Sunday is first day of week @@ -200,9 +191,16 @@ str_to_datetime_with_warn(const char *str, uint length, TIME *l_time, uint flags) { int was_cut; - timestamp_type ts_type= str_to_datetime(str, length, l_time, flags, &was_cut); + THD *thd= current_thd; + timestamp_type ts_type; + + ts_type= str_to_datetime(str, length, l_time, + (flags | (thd->variables.sql_mode & + (MODE_INVALID_DATES | + MODE_NO_ZERO_DATE))), + &was_cut); if (was_cut) - make_truncated_value_warning(current_thd, str, length, ts_type); + make_truncated_value_warning(current_thd, str, length, ts_type, NullS); return ts_type; } @@ -258,101 +256,13 @@ str_to_time_with_warn(const char *str, uint length, TIME *l_time) int was_cut; bool ret_val= str_to_time(str, length, l_time, &was_cut); if (was_cut) - make_truncated_value_warning(current_thd, str, length, MYSQL_TIMESTAMP_TIME); + make_truncated_value_warning(current_thd, str, length, + MYSQL_TIMESTAMP_TIME, NullS); return ret_val; } /* - Convert datetime value specified as number to broken-down TIME - representation and form value of DATETIME type as side-effect. - - SYNOPSIS - number_to_TIME() - nr - datetime value as number - time_res - pointer for structure for broken-down representation - fuzzy_date - indicates whenever we allow fuzzy dates - was_cut - set ot 1 if there was some kind of error during - conversion or to 0 if everything was OK. - - DESCRIPTION - Convert a datetime value of formats YYMMDD, YYYYMMDD, YYMMDDHHMSS, - YYYYMMDDHHMMSS to broken-down TIME representation. Return value in - YYYYMMDDHHMMSS format as side-effect. - - This function also checks if datetime value fits in DATETIME range. - - RETURN VALUE - Datetime value in YYYYMMDDHHMMSS format. - If input value is not valid datetime value then 0 is returned. 
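The conversion the comment above describes reduces to normalising the input to a full YYYYMMDDHHMMSS value and then peeling the fields off with integer division and modulo. A minimal standalone sketch of that arithmetic, covering only the YYMMDDHHMMSS (2000-2069) and YYYYMMDDHHMMSS forms and skipping the date-only inputs, the 1970-1999 range and the validation the server performs (BrokenDownTime and split_datetime are illustrative names, not server types):

#include <cstdio>

struct BrokenDownTime { int year, month, day, hour, minute, second; };

/* Split a normalised YYYYMMDDHHMMSS value into its components. */
static BrokenDownTime split_datetime(long long nr)
{
  BrokenDownTime t;
  long long date_part= nr / 1000000LL;          /* YYYYMMDD */
  long long time_part= nr % 1000000LL;          /* HHMMSS   */
  t.year=   (int) (date_part / 10000);
  t.month=  (int) (date_part / 100 % 100);
  t.day=    (int) (date_part % 100);
  t.hour=   (int) (time_part / 10000);
  t.minute= (int) (time_part / 100 % 100);
  t.second= (int) (time_part % 100);
  return t;
}

int main()
{
  /* 2004-09-07 23:15:59 given in the short YYMMDDHHMMSS form. */
  long long nr= 40907231559LL;
  if (nr < 10000101000000LL)        /* short form: add the century          */
    nr+= 20000000000000LL;          /* 2000-2069 only, in this simple sketch */
  BrokenDownTime t= split_datetime(nr);
  printf("%04d-%02d-%02d %02d:%02d:%02d\n",
         t.year, t.month, t.day, t.hour, t.minute, t.second);
  return 0;
}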
-*/ - -longlong number_to_TIME(longlong nr, TIME *time_res, bool fuzzy_date, - int *was_cut) -{ - long part1,part2; - - *was_cut= 0; - - if (nr == LL(0) || nr >= LL(10000101000000)) - goto ok; - if (nr < 101) - goto err; - if (nr <= (YY_PART_YEAR-1)*10000L+1231L) - { - nr= (nr+20000000L)*1000000L; // YYMMDD, year: 2000-2069 - goto ok; - } - if (nr < (YY_PART_YEAR)*10000L+101L) - goto err; - if (nr <= 991231L) - { - nr= (nr+19000000L)*1000000L; // YYMMDD, year: 1970-1999 - goto ok; - } - if (nr < 10000101L) - goto err; - if (nr <= 99991231L) - { - nr= nr*1000000L; - goto ok; - } - if (nr < 101000000L) - goto err; - if (nr <= (YY_PART_YEAR-1)*LL(10000000000)+LL(1231235959)) - { - nr= nr+LL(20000000000000); // YYMMDDHHMMSS, 2000-2069 - goto ok; - } - if (nr < YY_PART_YEAR*LL(10000000000)+ LL(101000000)) - goto err; - if (nr <= LL(991231235959)) - nr= nr+LL(19000000000000); // YYMMDDHHMMSS, 1970-1999 - - ok: - part1=(long) (nr/LL(1000000)); - part2=(long) (nr - (longlong) part1*LL(1000000)); - time_res->year= (int) (part1/10000L); part1%=10000L; - time_res->month= (int) part1 / 100; - time_res->day= (int) part1 % 100; - time_res->hour= (int) (part2/10000L); part2%=10000L; - time_res->minute=(int) part2 / 100; - time_res->second=(int) part2 % 100; - - if (time_res->year <= 9999 && time_res->month <= 12 && - time_res->day <= 31 && time_res->hour <= 23 && - time_res->minute <= 59 && time_res->second <= 59 && - (fuzzy_date || (time_res->month != 0 && time_res->day != 0) || nr==0)) - return nr; - - err: - - *was_cut= 1; - return LL(0); -} - - -/* Convert a system time structure to TIME */ @@ -772,16 +682,15 @@ void make_datetime(const DATE_TIME_FORMAT *format __attribute__((unused)), void make_truncated_value_warning(THD *thd, const char *str_val, - uint str_length, timestamp_type time_type) + uint str_length, timestamp_type time_type, + const char *field_name) { char warn_buff[MYSQL_ERRMSG_SIZE]; const char *type_str; - + CHARSET_INFO *cs= &my_charset_latin1; char buff[128]; String str(buff,(uint32) sizeof(buff), system_charset_info); - str.length(0); - str.append(str_val, str_length); - str.append('\0'); + str.copy(str_val, str_length, system_charset_info); switch (time_type) { case MYSQL_TIMESTAMP_DATE: @@ -795,84 +704,18 @@ void make_truncated_value_warning(THD *thd, const char *str_val, type_str= "datetime"; break; } - sprintf(warn_buff, ER(ER_TRUNCATED_WRONG_VALUE), - type_str, str.ptr()); + if (field_name) + cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff), + ER(ER_TRUNCATED_WRONG_VALUE_FOR_FIELD), + type_str, str.c_ptr(), field_name, + (ulong) thd->row_count); + else + cs->cset->snprintf(cs, warn_buff, sizeof(warn_buff), + ER(ER_TRUNCATED_WRONG_VALUE), + type_str, str.ptr()); push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_TRUNCATED_WRONG_VALUE, warn_buff); } -/* Convert time value to integer in YYYYMMDDHHMMSS format */ - -ulonglong TIME_to_ulonglong_datetime(const TIME *time) -{ - return ((ulonglong) (time->year * 10000UL + - time->month * 100UL + - time->day) * ULL(1000000) + - (ulonglong) (time->hour * 10000UL + - time->minute * 100UL + - time->second)); -} - - -/* Convert TIME value to integer in YYYYMMDD format */ - -ulonglong TIME_to_ulonglong_date(const TIME *time) -{ - return (ulonglong) (time->year * 10000UL + time->month * 100UL + time->day); -} - - -/* - Convert TIME value to integer in HHMMSS format. - This function doesn't take into account time->day member: - it's assumed that days have been converted to hours already. 
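Going the other way, the removed TIME_to_ulonglong_* helpers pack the components into a decimal positional number: YYYYMMDD is year*10000 + month*100 + day, and the DATETIME form multiplies that by 1000000 and appends HHMMSS. A compact sketch of the same arithmetic, with no range checking, in line with the note above (pack_date and pack_datetime are illustrative names, not the server API):

#include <cstdio>

/* year*10000 + month*100 + day, e.g. 2004-09-07 -> 20040907 */
static unsigned long long pack_date(unsigned y, unsigned m, unsigned d)
{
  return (unsigned long long) y * 10000ULL + m * 100ULL + d;
}

/* Date part shifted by six decimal digits, time part appended. */
static unsigned long long pack_datetime(unsigned y, unsigned mo, unsigned d,
                                        unsigned h, unsigned mi, unsigned s)
{
  return pack_date(y, mo, d) * 1000000ULL +
         (unsigned long long) h * 10000ULL + mi * 100ULL + s;
}

int main()
{
  printf("%llu\n", pack_date(2004, 9, 7));                 /* 20040907       */
  printf("%llu\n", pack_datetime(2004, 9, 7, 23, 15, 59)); /* 20040907231559 */
  return 0;
}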
-*/ - -ulonglong TIME_to_ulonglong_time(const TIME *time) -{ - return (ulonglong) (time->hour * 10000UL + - time->minute * 100UL + - time->second); -} - - -/* - Convert struct TIME (date and time split into year/month/day/hour/... - to a number in format YYYYMMDDHHMMSS (DATETIME), - YYYYMMDD (DATE) or HHMMSS (TIME). - - SYNOPSIS - TIME_to_ulonglong() - - DESCRIPTION - The function is used when we need to convert value of time item - to a number if it's used in numeric context, i. e.: - SELECT NOW()+1, CURDATE()+0, CURTIMIE()+0; - SELECT ?+1; - - NOTE - This function doesn't check that given TIME structure members are - in valid range. If they are not, return value won't reflect any - valid date either. -*/ - -ulonglong TIME_to_ulonglong(const TIME *time) -{ - switch (time->time_type) { - case MYSQL_TIMESTAMP_DATETIME: - return TIME_to_ulonglong_datetime(time); - case MYSQL_TIMESTAMP_DATE: - return TIME_to_ulonglong_date(time); - case MYSQL_TIMESTAMP_TIME: - return TIME_to_ulonglong_time(time); - case MYSQL_TIMESTAMP_NONE: - case MYSQL_TIMESTAMP_ERROR: - return ULL(0); - default: - DBUG_ASSERT(0); - } - return 0; -} - #endif diff --git a/sql/tztime.cc b/sql/tztime.cc index c2143b0d5dd..dc38580f3b6 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1395,44 +1395,72 @@ extern "C" byte* my_offset_tzs_get_key(Time_zone_offset *entry, uint *length, /* - Prepare table list with time zone related tables from preallocated array. + Prepare table list with time zone related tables from preallocated array + and add to global table list. SYNOPSIS tz_init_table_list() - tz_tabs - pointer to preallocated array of 4 TABLE_LIST objects. + tz_tabs - pointer to preallocated array of 4 TABLE_LIST objects + global_next_ptr - pointer to variable which points to global_next member + of last element of global table list (or list root + then list is empty) (in/out). DESCRIPTION This function prepares list of TABLE_LIST objects which can be used - for opening of time zone tables from preallocated array. + for opening of time zone tables from preallocated array. It also links + this list to the end of global table list (it will read and update + accordingly variable pointed by global_next_ptr for this). */ -void -tz_init_table_list(TABLE_LIST *tz_tabs) +static void +tz_init_table_list(TABLE_LIST *tz_tabs, TABLE_LIST ***global_next_ptr) { bzero(tz_tabs, sizeof(TABLE_LIST) * 4); tz_tabs[0].alias= tz_tabs[0].real_name= (char*)"time_zone_name"; tz_tabs[1].alias= tz_tabs[1].real_name= (char*)"time_zone"; tz_tabs[2].alias= tz_tabs[2].real_name= (char*)"time_zone_transition_type"; tz_tabs[3].alias= tz_tabs[3].real_name= (char*)"time_zone_transition"; - tz_tabs[0].next= tz_tabs+1; - tz_tabs[1].next= tz_tabs+2; - tz_tabs[2].next= tz_tabs+3; + tz_tabs[0].next_global= tz_tabs[0].next_local= tz_tabs+1; + tz_tabs[1].next_global= tz_tabs[1].next_local= tz_tabs+2; + tz_tabs[2].next_global= tz_tabs[2].next_local= tz_tabs+3; tz_tabs[0].lock_type= tz_tabs[1].lock_type= tz_tabs[2].lock_type= tz_tabs[3].lock_type= TL_READ; tz_tabs[0].db= tz_tabs[1].db= tz_tabs[2].db= tz_tabs[3].db= (char *)"mysql"; + + /* Link into global list */ + tz_tabs[0].prev_global= *global_next_ptr; + tz_tabs[1].prev_global= &tz_tabs[0].next_global; + tz_tabs[2].prev_global= &tz_tabs[1].next_global; + tz_tabs[3].prev_global= &tz_tabs[2].next_global; + + **global_next_ptr= tz_tabs; + /* Update last-global-pointer to point to pointer in last table */ + *global_next_ptr= &tz_tabs[3].next_global; } /* - Create table list with time zone related tables. 
+ Fake table list object, pointer to which is returned by + my_tz_get_tables_list() as indication of error. +*/ +TABLE_LIST fake_time_zone_tables_list; + +/* + Create table list with time zone related tables and add it to the end + of global table list. SYNOPSIS my_tz_get_table_list() - thd - current thread object + thd - current thread object + global_next_ptr - pointer to variable which points to global_next member + of last element of global table list (or list root + then list is empty) (in/out). DESCRIPTION This function creates list of TABLE_LIST objects allocated in thd's - memroot, which can be used for opening of time zone tables. + memroot, which can be used for opening of time zone tables. It will also + link this list to the end of global table list (it will read and update + accordingly variable pointed by global_next_ptr for this). NOTE my_tz_check_n_skip_implicit_tables() function depends on fact that @@ -1444,19 +1472,20 @@ tz_init_table_list(TABLE_LIST *tz_tabs) */ TABLE_LIST * -my_tz_get_table_list(THD *thd) +my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr) { TABLE_LIST *tz_tabs; + DBUG_ENTER("my_tz_get_table_list"); if (!time_zone_tables_exist) - return 0; + DBUG_RETURN(0); if (!(tz_tabs= (TABLE_LIST *)thd->alloc(sizeof(TABLE_LIST) * 4))) - return &fake_time_zone_tables_list; + DBUG_RETURN(&fake_time_zone_tables_list); - tz_init_table_list(tz_tabs); + tz_init_table_list(tz_tabs, global_next_ptr); - return tz_tabs; + DBUG_RETURN(tz_tabs); } @@ -1490,7 +1519,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) { THD *thd; TABLE_LIST *tables= 0; - TABLE_LIST tables_buff[5]; + TABLE_LIST tables_buff[5], **last_global_next_ptr; TABLE *table; TZ_NAMES_ENTRY *tmp_tzname; my_bool return_val= 1; @@ -1557,9 +1586,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) (char*)"time_zone_leap_second"; tables_buff[0].lock_type= TL_READ; tables_buff[0].db= thd->db; - tables_buff[0].next= tables_buff + 1; - /* Fill TABLE_LIST for rest of the time zone describing tables */ - tz_init_table_list(tables_buff + 1); + /* + Fill TABLE_LIST for the rest of the time zone describing tables + and link it to first one. + */ + last_global_next_ptr= &(tables_buff[0].next_global); + tz_init_table_list(tables_buff + 1, &last_global_next_ptr); if (open_tables(thd, tables_buff, &counter) || lock_tables(thd, tables_buff, counter)) @@ -1761,8 +1793,9 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) and it is specifically for this purpose). */ table= tz_tables->table; - tz_tables= tz_tables->next; - table->field[0]->store(tz_name->ptr(), tz_name->length(), &my_charset_latin1); + tz_tables= tz_tables->next_local; + table->field[0]->store(tz_name->ptr(), tz_name->length(), + &my_charset_latin1); /* It is OK to ignore ha_index_init()/ha_index_end() return values since mysql.time_zone* tables are MyISAM and these operations always succeed @@ -1773,7 +1806,10 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, 0, HA_READ_KEY_EXACT)) { - sql_print_error("Can't find description of time zone."); +#ifdef EXTRA_DEBUG + sql_print_error("Can't find description of time zone '%.*s'", tz_name->length(), + tz_name->ptr()); +#endif goto end; } @@ -1787,14 +1823,14 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) using the only index in this table). 
*/ table= tz_tables->table; - tz_tables= tz_tables->next; + tz_tables= tz_tables->next_local; table->field[0]->store((longlong)tzid); (void)table->file->ha_index_init(0); if (table->file->index_read(table->record[0], (byte*)table->field[0]->ptr, 0, HA_READ_KEY_EXACT)) { - sql_print_error("Can't find description of time zone."); + sql_print_error("Can't find description of time zone '%u'", tzid); goto end; } @@ -1814,7 +1850,7 @@ tz_load_from_open_tables(const String *tz_name, TABLE_LIST *tz_tables) Right - using special index. */ table= tz_tables->table; - tz_tables= tz_tables->next; + tz_tables= tz_tables->next_local; table->field[0]->store((longlong)tzid); (void)table->file->ha_index_init(0); diff --git a/sql/tztime.h b/sql/tztime.h index 2214c1b29d6..caf663fc8cb 100644 --- a/sql/tztime.h +++ b/sql/tztime.h @@ -59,11 +59,12 @@ public: extern Time_zone * my_tz_UTC; extern Time_zone * my_tz_SYSTEM; -extern TABLE_LIST * my_tz_get_table_list(THD *thd); +extern TABLE_LIST * my_tz_get_table_list(THD *thd, TABLE_LIST ***global_next_ptr); extern Time_zone * my_tz_find(const String *name, TABLE_LIST *tz_tables); extern my_bool my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap); extern void my_tz_free(); +extern TABLE_LIST fake_time_zone_tables_list; /* Check if we have pointer to the begining of list of implicitly used time diff --git a/sql/udf_example.cc b/sql/udf_example.cc index 50de0f187fe..a186b4fbf6c 100644 --- a/sql/udf_example.cc +++ b/sql/udf_example.cc @@ -56,7 +56,9 @@ ** ** Function 'myfunc_int' returns summary length of all its arguments. ** -** Function 'sequence' returns an sequence starting from a certain number +** Function 'sequence' returns an sequence starting from a certain number. +** +** Function 'myfunc_argument_name' returns name of argument. ** ** On the end is a couple of functions that converts hostnames to ip and ** vice versa. @@ -82,6 +84,7 @@ ** CREATE FUNCTION lookup RETURNS STRING SONAME "udf_example.so"; ** CREATE FUNCTION reverse_lookup RETURNS STRING SONAME "udf_example.so"; ** CREATE AGGREGATE FUNCTION avgcost RETURNS REAL SONAME "udf_example.so"; +** CREATE FUNCTION myfunc_argument_name RETURNS STRING SONAME "udf_example.so"; ** ** After this the functions will work exactly like native MySQL functions. ** Functions should be created only once. @@ -94,6 +97,7 @@ ** DROP FUNCTION lookup; ** DROP FUNCTION reverse_lookup; ** DROP FUNCTION avgcost; +** DROP FUNCTION myfunc_argument_name; ** ** The CREATE FUNCTION and DROP FUNCTION update the func@mysql table. 
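The global_next_ptr parameter threaded through tz_init_table_list() and my_tz_get_table_list() above is the classic append-through-a-tail-pointer idiom: the caller passes the address of whatever pointer currently terminates the list, the callee stores its first new element there and leaves behind the address of its own last next pointer, so successive callers keep appending in constant time without re-walking the list. A singly linked sketch of the idiom (the server's global list additionally maintains prev_global back-pointers; Node and append_run are hypothetical names):

#include <cstdio>

struct Node
{
  int value;
  Node *next;
};

/*
  Append 'count' nodes from 'nodes' to the list whose current end is
  *tail_ptr; afterwards *tail_ptr points at the new last node's 'next'.
*/
static void append_run(Node *nodes, int count, Node ***tail_ptr)
{
  for (int i= 0; i < count; i++)
  {
    nodes[i].next= 0;
    **tail_ptr= &nodes[i];        /* hook the node into the list         */
    *tail_ptr= &nodes[i].next;    /* remember where the next hook goes   */
  }
}

int main()
{
  Node *head= 0;
  Node **tail= &head;             /* empty list: the tail is the head slot */
  Node a[2]= { {1, 0}, {2, 0} };
  Node b[3]= { {3, 0}, {4, 0}, {5, 0} };

  append_run(a, 2, &tail);        /* list: 1 2       */
  append_run(b, 3, &tail);        /* list: 1 2 3 4 5 */

  for (Node *n= head; n; n= n->next)
    printf("%d ", n->value);
  printf("\n");
  return 0;
}

Starting from tail= &head makes the empty list and the non-empty list look identical to append_run(), which is exactly what lets the time zone code link its preallocated array onto whatever global list the statement has built so far.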
All ** Active function will be reloaded on every restart of server @@ -987,4 +991,43 @@ avgcost( UDF_INIT* initid, UDF_ARGS* args, char* is_null, char* error ) return data->totalprice/double(data->totalquantity); } +extern "C" { +my_bool myfunc_argument_name_init(UDF_INIT *initid, UDF_ARGS *args, + char *message); +char *myfunc_argument_name(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *length, char *null_value, + char *error); +} + +my_bool myfunc_argument_name_init(UDF_INIT *initid, UDF_ARGS *args, + char *message) +{ + if (args->arg_count != 1) + { + strmov(message,"myfunc_argument_name_init accepts only one argument"); + return 1; + } + initid->max_length= args->attribute_lengths[0]; + initid->maybe_null= 1; + initid->const_item= 1; + return 0; +} + +char *myfunc_argument_name(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *length, char *null_value, + char *error) +{ + if (!args->attributes[0]) + { + null_value= 0; + return 0; + } + (*length)--; // space for ending \0 (for debugging purposes) + if (*length > args->attribute_lengths[0]) + *length= args->attribute_lengths[0]; + memcpy(result, args->attributes[0], *length); + result[*length]= 0; + return result; +} + #endif /* HAVE_DLOPEN */ diff --git a/sql/uniques.cc b/sql/uniques.cc index d060965aa66..b08727705e4 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -63,12 +63,255 @@ Unique::Unique(qsort_cmp2 comp_func, void * comp_func_fixed_arg, comp_func_fixed_arg); /* If the following fail's the next add will also fail */ my_init_dynamic_array(&file_ptrs, sizeof(BUFFPEK), 16, 16); + /* + If you change the following, change it in get_max_elements function, too. + */ max_elements= max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+size); open_cached_file(&file, mysql_tmpdir,TEMP_PREFIX, DISK_BUFFER_SIZE, MYF(MY_WME)); } +/* + Calculate log2(n!) + + NOTES + Stirling's approximate formula is used: + + n! ~= sqrt(2*M_PI*n) * (n/M_E)^n + + Derivation of formula used for calculations is as follows: + + log2(n!) = log(n!)/log(2) = log(sqrt(2*M_PI*n)*(n/M_E)^n) / log(2) = + + = (log(2*M_PI*n)/2 + n*log(n/M_E)) / log(2). +*/ + +inline double log2_n_fact(double x) +{ + return (log(2*M_PI*x)/2 + x*log(x/M_E)) / M_LN2; +} + + +/* + Calculate cost of merge_buffers function call for given sequence of + input stream lengths and store the number of rows in result stream in *last. + + SYNOPSIS + get_merge_buffers_cost() + buff_elems Array of #s of elements in buffers + elem_size Size of element stored in buffer + first Pointer to first merged element size + last Pointer to last merged element size + + RETURN + Cost of merge_buffers operation in disk seeks. + + NOTES + It is assumed that no rows are eliminated during merge. + The cost is calculated as + + cost(read_and_write) + cost(merge_comparisons). + + All bytes in the sequences is read and written back during merge so cost + of disk io is 2*elem_size*total_buf_elems/IO_SIZE (2 is for read + write) + + For comparisons cost calculations we assume that all merged sequences have + the same length, so each of total_buf_size elements will be added to a sort + heap with (n_buffers-1) elements. 
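log2_n_fact() above relies on Stirling's formula, log2(n!) ~ (log(2*pi*n)/2 + n*log(n/e)) / log(2), which is cheap to evaluate and already very close for the tree sizes Unique deals with. A quick standalone accuracy check against the exact sum log2(2) + ... + log2(n), purely as a throwaway test (log2_n_fact_approx and log2_n_fact_exact are illustrative names):

#include <cmath>
#include <cstdio>

/* Stirling-based approximation, same formula as log2_n_fact() above. */
static double log2_n_fact_approx(double n)
{
  return (log(2 * M_PI * n) / 2 + n * log(n / M_E)) / M_LN2;
}

/* Exact value: log2(n!) = sum of log2(k) for k = 2..n. */
static double log2_n_fact_exact(unsigned n)
{
  double sum= 0.0;
  for (unsigned k= 2; k <= n; k++)
    sum+= log((double) k) / M_LN2;
  return sum;
}

int main()
{
  for (unsigned n= 10; n <= 100000; n*= 10)
    printf("n=%6u  exact=%12.2f  stirling=%12.2f\n",
           n, log2_n_fact_exact(n), log2_n_fact_approx((double) n));
  return 0;
}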
This gives the comparison cost: + + total_buf_elems* log2(n_buffers) / TIME_FOR_COMPARE_ROWID; +*/ + +static double get_merge_buffers_cost(uint *buff_elems, uint elem_size, + uint *first, uint *last) +{ + uint total_buf_elems= 0; + for (uint *pbuf= first; pbuf <= last; pbuf++) + total_buf_elems+= *pbuf; + *last= total_buf_elems; + + int n_buffers= last - first + 1; + + /* Using log2(n)=log(n)/log(2) formula */ + return 2*((double)total_buf_elems*elem_size) / IO_SIZE + + total_buf_elems*log((double) n_buffers) / (TIME_FOR_COMPARE_ROWID * M_LN2); +} + + +/* + Calculate cost of merging buffers into one in Unique::get, i.e. calculate + how long (in terms of disk seeks) the two calls + merge_many_buffs(...); + merge_buffers(...); + will take. + + SYNOPSIS + get_merge_many_buffs_cost() + buffer buffer space for temporary data, at least + Unique::get_cost_calc_buff_size bytes + maxbuffer # of full buffers + max_n_elems # of elements in first maxbuffer buffers + last_n_elems # of elements in last buffer + elem_size size of buffer element + + NOTES + maxbuffer+1 buffers are merged, where first maxbuffer buffers contain + max_n_elems elements each and last buffer contains last_n_elems elements. + + The current implementation does a dumb simulation of merge_many_buffs + function actions. + + RETURN + Cost of merge in disk seeks. +*/ + +static double get_merge_many_buffs_cost(uint *buffer, + uint maxbuffer, uint max_n_elems, + uint last_n_elems, int elem_size) +{ + register int i; + double total_cost= 0.0; + uint *buff_elems= buffer; /* #s of elements in each of merged sequences */ + + /* + Set initial state: first maxbuffer sequences contain max_n_elems elements + each, last sequence contains last_n_elems elements. + */ + for(i = 0; i < (int)maxbuffer; i++) + buff_elems[i]= max_n_elems; + buff_elems[maxbuffer]= last_n_elems; + + /* + Do it exactly as merge_many_buff function does, calling + get_merge_buffers_cost to get cost of merge_buffers. + */ + if (maxbuffer >= MERGEBUFF2) + { + while (maxbuffer >= MERGEBUFF2) + { + uint lastbuff= 0; + for (i = 0; i <= (int) maxbuffer - MERGEBUFF*3/2; i += MERGEBUFF) + { + total_cost+=get_merge_buffers_cost(buff_elems, elem_size, + buff_elems + i, + buff_elems + i + MERGEBUFF-1); + lastbuff++; + } + total_cost+=get_merge_buffers_cost(buff_elems, elem_size, + buff_elems + i, + buff_elems + maxbuffer); + maxbuffer= lastbuff; + } + } + + /* Simulate final merge_buff call. */ + total_cost += get_merge_buffers_cost(buff_elems, elem_size, + buff_elems, buff_elems + maxbuffer); + return total_cost; +} + + +/* + Calculate cost of using Unique for processing nkeys elements of size + key_size using max_in_memory_size memory. + + SYNOPSIS + Unique::get_use_cost() + buffer space for temporary data, use Unique::get_cost_calc_buff_size + to get # bytes needed. + nkeys #of elements in Unique + key_size size of each elements in bytes + max_in_memory_size amount of memory Unique will be allowed to use + + RETURN + Cost in disk seeks. + + NOTES + cost(using_unqiue) = + cost(create_trees) + (see #1) + cost(merge) + (see #2) + cost(read_result) (see #3) + + 1. Cost of trees creation + For each Unique::put operation there will be 2*log2(n+1) elements + comparisons, where n runs from 1 tree_size (we assume that all added + elements are different). Together this gives: + + n_compares = 2*(log2(2) + log2(3) + ... + log2(N+1)) = 2*log2((N+1)!) 
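To make the cost formula above concrete, here is a sketch that mirrors a single merge pass of get_merge_buffers_cost() for a toy case. IO_SIZE and TIME_FOR_COMPARE_ROWID are replaced by stand-in values (4096 and 10), chosen purely for illustration; the real constants are defined in the server headers:

#include <cmath>
#include <cstdio>

/* Stand-in values, for illustration only. */
static const double IO_SIZE_STAND_IN= 4096.0;
static const double TIME_FOR_COMPARE_ROWID_STAND_IN= 10.0;

/*
  Cost of one n-way merge pass in "disk seek" units: read and write all
  bytes once, plus one heap comparison per element against a heap of
  n_buffers entries (log2(n_buffers) work each).
*/
static double merge_pass_cost(double total_elems, double elem_size,
                              double n_buffers)
{
  double io_cost=  2.0 * total_elems * elem_size / IO_SIZE_STAND_IN;
  double cmp_cost= total_elems * (log(n_buffers) / M_LN2) /
                   TIME_FOR_COMPARE_ROWID_STAND_IN;
  return io_cost + cmp_cost;
}

int main()
{
  /* 8 sorted runs of 100000 rowids of 8 bytes each. */
  double cost= merge_pass_cost(8 * 100000.0, 8.0, 8.0);
  printf("estimated merge cost: %.1f seeks\n", cost);
  return 0;
}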
+ + then cost(tree_creation) = n_compares*ROWID_COMPARE_COST; + + Total cost of creating trees: + (n_trees - 1)*max_size_tree_cost + non_max_size_tree_cost. + + Approximate value of log2(N!) is calculated by log2_n_fact function. + + 2. Cost of merging. + If only one tree is created by Unique no merging will be necessary. + Otherwise, we model execution of merge_many_buff function and count + #of merges. (The reason behind this is that number of buffers is small, + while size of buffers is big and we don't want to loose precision with + O(x)-style formula) + + 3. If only one tree is created by Unique no disk io will happen. + Otherwise, ceil(key_len*n_keys) disk seeks are necessary. We assume + these will be random seeks. +*/ + +double Unique::get_use_cost(uint *buffer, uint nkeys, uint key_size, + ulong max_in_memory_size) +{ + ulong max_elements_in_tree; + ulong last_tree_elems; + int n_full_trees; /* number of trees in unique - 1 */ + double result; + + max_elements_in_tree= + max_in_memory_size / ALIGN_SIZE(sizeof(TREE_ELEMENT)+key_size); + + n_full_trees= nkeys / max_elements_in_tree; + last_tree_elems= nkeys % max_elements_in_tree; + + /* Calculate cost of creating trees */ + result= 2*log2_n_fact(last_tree_elems + 1.0); + if (n_full_trees) + result+= n_full_trees * log2_n_fact(max_elements_in_tree + 1.0); + result /= TIME_FOR_COMPARE_ROWID; + + DBUG_PRINT("info",("unique trees sizes: %u=%u*%lu + %lu", nkeys, + n_full_trees, n_full_trees?max_elements_in_tree:0, + last_tree_elems)); + + if (!n_full_trees) + return result; + + /* + There is more then one tree and merging is necessary. + First, add cost of writing all trees to disk, assuming that all disk + writes are sequential. + */ + result += DISK_SEEK_BASE_COST * n_full_trees * + ceil(((double) key_size)*max_elements_in_tree / IO_SIZE); + result += DISK_SEEK_BASE_COST * ceil(((double) key_size)*last_tree_elems / IO_SIZE); + + /* Cost of merge */ + double merge_cost= get_merge_many_buffs_cost(buffer, n_full_trees, + max_elements_in_tree, + last_tree_elems, key_size); + if (merge_cost < 0.0) + return merge_cost; + + result += merge_cost; + /* + Add cost of reading the resulting sequence, assuming there were no + duplicate elements. + */ + result += ceil((double)key_size*nkeys/IO_SIZE); + + return result; +} + Unique::~Unique() { close_cached_file(&file); @@ -84,6 +327,7 @@ bool Unique::flush() elements+= tree.elements_in_tree; file_ptr.count=tree.elements_in_tree; file_ptr.file_pos=my_b_tell(&file); + if (tree_walk(&tree, (tree_walk_action) unique_write_to_file, (void*) this, left_root_right) || insert_dynamic(&file_ptrs, (gptr) &file_ptr)) @@ -94,6 +338,237 @@ bool Unique::flush() /* + Clear the tree and the file. + You must call reset() if you want to reuse Unique after walk(). +*/ + +void +Unique::reset() +{ + reset_tree(&tree); + /* + If elements != 0, some trees were stored in the file (see how + flush() works). Note, that we can not count on my_b_tell(&file) == 0 + here, because it can return 0 right after walk(), and walk() does not + reset any Unique member. + */ + if (elements) + { + reset_dynamic(&file_ptrs); + reinit_io_cache(&file, WRITE_CACHE, 0L, 0, 1); + } + elements= 0; +} + +/* + The comparison function, passed to queue_init() in merge_walk() must + use comparison function of Uniques::tree, but compare members of struct + BUFFPEK. 
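The BUFFPEK comparison wrapper introduced above is a small adapter: the queue compares its own slots, so the callback dereferences each slot to reach the current key and then delegates to the original key comparator together with its fixed argument. A generic version of that adapter (CompareContext, cmp_fixed_key and compare_slots are illustrative names, not the server's types):

#include <cstring>
#include <cstdio>

typedef int (*key_cmp_func)(void *arg, const void *a, const void *b);

/* The real comparator: fixed-size binary keys, length passed as context. */
static int cmp_fixed_key(void *arg, const void *a, const void *b)
{
  size_t len= *(size_t *) arg;
  return memcmp(a, b, len);
}

/* Bundle the comparator with its fixed argument. */
struct CompareContext
{
  key_cmp_func key_compare;
  void *key_compare_arg;
};

/*
  Adapter used by the queue: its elements are slots holding a pointer to
  the current key, so dereference once and delegate.
*/
static int compare_slots(void *arg, const void *slot1, const void *slot2)
{
  CompareContext *ctx= (CompareContext *) arg;
  const void *key1= *(const void *const *) slot1;
  const void *key2= *(const void *const *) slot2;
  return ctx->key_compare(ctx->key_compare_arg, key1, key2);
}

int main()
{
  size_t key_len= 4;
  CompareContext ctx= { cmp_fixed_key, &key_len };

  const char *ka= "abcd", *kb= "abce";
  const void *slot_a= ka, *slot_b= kb;      /* queue slots point at keys */
  printf("%d\n", compare_slots(&ctx, &slot_a, &slot_b));  /* negative */
  return 0;
}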
+*/ + +struct BUFFPEK_COMPARE_CONTEXT +{ + qsort_cmp2 key_compare; + void *key_compare_arg; +}; + +C_MODE_START + +static int buffpek_compare(void *arg, byte *key_ptr1, byte *key_ptr2) +{ + BUFFPEK_COMPARE_CONTEXT *ctx= (BUFFPEK_COMPARE_CONTEXT *) arg; + return ctx->key_compare(ctx->key_compare_arg, + *((byte **) key_ptr1), *((byte **)key_ptr2)); +} + +C_MODE_END + + +/* + DESCRIPTION + Function is very similar to merge_buffers, but instead of writing sorted + unique keys to the output file, it invokes walk_action for each key. + This saves I/O if you need to pass through all unique keys only once. + SYNOPSIS + merge_walk() + All params are 'IN' (but see comment for begin, end): + merge_buffer buffer to perform cached piece-by-piece loading + of trees; initially the buffer is empty + merge_buffer_size size of merge_buffer. Must be aligned with + key_length + key_length size of tree element; key_length * (end - begin) + must be less or equal than merge_buffer_size. + begin pointer to BUFFPEK struct for the first tree. + end pointer to BUFFPEK struct for the last tree; + end > begin and [begin, end) form a consecutive + range. BUFFPEKs structs in that range are used and + overwritten in merge_walk(). + walk_action element visitor. Action is called for each unique + key. + walk_action_arg argument to walk action. Passed to it on each call. + compare elements comparison function + compare_arg comparison function argument + file file with all trees dumped. Trees in the file + must contain sorted unique values. Cache must be + initialized in read mode. + RETURN VALUE + 0 ok + <> 0 error +*/ + +static bool merge_walk(uchar *merge_buffer, uint merge_buffer_size, + uint key_length, BUFFPEK *begin, BUFFPEK *end, + tree_walk_action walk_action, void *walk_action_arg, + qsort_cmp2 compare, void *compare_arg, + IO_CACHE *file) +{ + BUFFPEK_COMPARE_CONTEXT compare_context = { compare, compare_arg }; + QUEUE queue; + if (end <= begin || + merge_buffer_size < key_length * (end - begin + 1) || + init_queue(&queue, end - begin, offsetof(BUFFPEK, key), 0, + buffpek_compare, &compare_context)) + return 1; + /* we need space for one key when a piece of merge buffer is re-read */ + merge_buffer_size-= key_length; + uchar *save_key_buff= merge_buffer + merge_buffer_size; + uint max_key_count_per_piece= merge_buffer_size/(end-begin)/key_length; + /* if piece_size is aligned reuse_freed_buffer will always hit */ + uint piece_size= max_key_count_per_piece * key_length; + uint bytes_read; /* to hold return value of read_to_buffer */ + BUFFPEK *top; + int res= 1; + /* + Invariant: queue must contain top element from each tree, until a tree + is not completely walked through. + Here we're forcing the invariant, inserting one element from each tree + to the queue. + */ + for (top= begin; top != end; ++top) + { + top->base= merge_buffer + (top - begin) * piece_size; + top->max_keys= max_key_count_per_piece; + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + DBUG_ASSERT(bytes_read); + queue_insert(&queue, (byte *) top); + } + top= (BUFFPEK *) queue_top(&queue); + while (queue.elements > 1) + { + /* + Every iteration one element is removed from the queue, and one is + inserted by the rules of the invariant. If two adjacent elements on + the top of the queue are not equal, biggest one is unique, because all + elements in each tree are unique. Action is applied only to unique + elements. 
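merge_walk() above is an N-way merge over sorted runs with on-the-fly duplicate suppression: keep the head of every run in a priority queue, pop the smallest key, and invoke the action only for keys that have not been emitted yet. The same idea over in-memory sorted vectors, using the standard library instead of the server's QUEUE/BUFFPEK machinery (merge_unique_walk and Cursor are illustrative names):

#include <cstdio>
#include <functional>
#include <queue>
#include <vector>

/* One cursor per sorted run: current position and end. */
struct Cursor
{
  const int *pos, *end;
};

struct CursorGreater          /* min-heap on the current key of each run */
{
  bool operator()(const Cursor &a, const Cursor &b) const
  { return *a.pos > *b.pos; }
};

/* Call action(key) once per distinct key across all sorted runs. */
static void merge_unique_walk(const std::vector<std::vector<int> > &runs,
                              const std::function<void(int)> &action)
{
  std::priority_queue<Cursor, std::vector<Cursor>, CursorGreater> queue;
  for (size_t i= 0; i < runs.size(); i++)
    if (!runs[i].empty())
      queue.push(Cursor{ runs[i].data(), runs[i].data() + runs[i].size() });

  bool have_last= false;
  int last= 0;
  while (!queue.empty())
  {
    Cursor top= queue.top();
    queue.pop();
    int key= *top.pos;
    if (!have_last || key != last)      /* emit only distinct keys */
    {
      action(key);
      last= key;
      have_last= true;
    }
    if (++top.pos != top.end)           /* keep the run's next key in play */
      queue.push(top);
  }
}

int main()
{
  std::vector<std::vector<int> > runs= { {1, 3, 3, 7}, {2, 3, 8}, {3, 9} };
  merge_unique_walk(runs, [](int key) { printf("%d ", key); });
  printf("\n");                          /* prints: 1 2 3 7 8 9 */
  return 0;
}

Tracking the last emitted key, as done here, yields the same set of distinct keys as merge_walk()'s compare-old-key-with-new-top approach; the server version additionally refills each run piecewise from disk instead of holding whole runs in memory.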
+ */ + void *old_key= top->key; + /* + read next key from the cache or from the file and push it to the + queue; this gives new top. + */ + top->key+= key_length; + if (--top->mem_count) + queue_replaced(&queue); + else /* next piece should be read */ + { + /* save old_key not to overwrite it in read_to_buffer */ + memcpy(save_key_buff, old_key, key_length); + old_key= save_key_buff; + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + else if (bytes_read > 0) /* top->key, top->mem_count are reset */ + queue_replaced(&queue); /* in read_to_buffer */ + else + { + /* + Tree for old 'top' element is empty: remove it from the queue and + give all its memory to the nearest tree. + */ + queue_remove(&queue, 0); + reuse_freed_buff(&queue, top, key_length); + } + } + top= (BUFFPEK *) queue_top(&queue); + /* new top has been obtained; if old top is unique, apply the action */ + if (compare(compare_arg, old_key, top->key)) + { + if (walk_action(old_key, 1, walk_action_arg)) + goto end; + } + } + /* + Applying walk_action to the tail of the last tree: this is safe because + either we had only one tree in the beginning, either we work with the + last tree in the queue. + */ + do + { + do + { + if (walk_action(top->key, 1, walk_action_arg)) + goto end; + top->key+= key_length; + } + while (--top->mem_count); + bytes_read= read_to_buffer(file, top, key_length); + if (bytes_read == (uint) (-1)) + goto end; + } + while (bytes_read); + res= 0; +end: + delete_queue(&queue); + return res; +} + + +/* + DESCRIPTION + Walks consecutively through all unique elements: + if all elements are in memory, then it simply invokes 'tree_walk', else + all flushed trees are loaded to memory piece-by-piece, pieces are + sorted, and action is called for each unique value. + Note: so as merging resets file_ptrs state, this method can change + internal Unique state to undefined: if you want to reuse Unique after + walk() you must call reset() first! + SYNOPSIS + Unique:walk() + All params are 'IN': + action function-visitor, typed in include/my_tree.h + function is called for each unique element + arg argument for visitor, which is passed to it on each call + RETURN VALUE + 0 OK + <> 0 error + */ + +bool Unique::walk(tree_walk_action action, void *walk_action_arg) +{ + if (elements == 0) /* the whole tree is in memory */ + return tree_walk(&tree, action, walk_action_arg, left_root_right); + + /* flush current tree to the file to have some memory for merge buffer */ + if (flush()) + return 1; + if (flush_io_cache(&file) || reinit_io_cache(&file, READ_CACHE, 0L, 0, 0)) + return 1; + uchar *merge_buffer= (uchar *) my_malloc(max_in_memory_size, MYF(0)); + if (merge_buffer == 0) + return 1; + int res= merge_walk(merge_buffer, max_in_memory_size, size, + (BUFFPEK *) file_ptrs.buffer, + (BUFFPEK *) file_ptrs.buffer + file_ptrs.elements, + action, walk_action_arg, + tree.compare, tree.custom_arg, &file); + x_free(merge_buffer); + return res; +} + +/* Modify the TABLE element so that when one calls init_records() the rows will be read in priority order. 
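Unique::walk() above merges only after the current tree has been flushed and the IO_CACHE reopened for reading; the spill file is a sequence of sorted runs, and file_ptrs records where each run starts and how many keys it holds, which is also why reset() is required before the object can be reused. A portable sketch of that spill-then-reread bookkeeping, using standard file streams instead of IO_CACHE (the file name and RunInfo are illustrative, and error handling is omitted):

#include <algorithm>
#include <cstdio>
#include <fstream>
#include <vector>

/* Per-run bookkeeping, in the spirit of BUFFPEK: start offset and key count. */
struct RunInfo
{
  std::streamoff file_pos;
  size_t count;
};

int main()
{
  const char *spill_name= "unique_spill.tmp";   /* illustrative temp file */
  std::vector<RunInfo> runs;

  /* Phase 1: sort each in-memory batch and append it to the spill file. */
  {
    std::ofstream spill(spill_name, std::ios::binary | std::ios::trunc);
    std::vector<std::vector<int> > batches= { {5, 1, 3}, {2, 2, 9, 4} };
    for (size_t i= 0; i < batches.size(); i++)
    {
      std::sort(batches[i].begin(), batches[i].end());
      RunInfo info;
      info.file_pos= spill.tellp();
      info.count= batches[i].size();
      spill.write((const char *) batches[i].data(),
                  batches[i].size() * sizeof(int));
      runs.push_back(info);
    }
  }                                             /* flush and close */

  /* Phase 2: reopen for reading and stream every run back. */
  std::ifstream in(spill_name, std::ios::binary);
  for (size_t i= 0; i < runs.size(); i++)
  {
    std::vector<int> keys(runs[i].count);
    in.seekg(runs[i].file_pos);
    in.read((char *) keys.data(), keys.size() * sizeof(int));
    printf("run %zu:", i);
    for (size_t j= 0; j < keys.size(); j++)
      printf(" %d", keys[j]);
    printf("\n");
  }
  std::remove(spill_name);
  return 0;
}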
*/ @@ -114,7 +589,7 @@ bool Unique::get(TABLE *table) return 0; } } - /* Not enough memory; Save the result to file */ + /* Not enough memory; Save the result to file && free memory used by tree */ if (flush()) return 1; diff --git a/sql/unireg.cc b/sql/unireg.cc index a550b06a466..ee036ed113d 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -210,7 +210,7 @@ err3: keys number of keys to create key_info Keys to create db_file Handler to use. May be zero, in which case we use - create_info->db_type + create_info->db_type RETURN 0 ok 1 error @@ -224,11 +224,11 @@ int rea_create_table(THD *thd, my_string file_name, DBUG_ENTER("rea_create_table"); if (mysql_create_frm(thd, file_name, create_info, - create_fields, keys, key_info, NULL)) + create_fields, keys, key_info, NULL)) DBUG_RETURN(1); - if (ha_create_table(file_name,create_info,0)) + if (!create_info->frm_only && ha_create_table(file_name,create_info,0)) { - my_delete(file_name,MYF(0)); + my_delete(file_name,MYF(0)); DBUG_RETURN(1); } DBUG_RETURN(0); @@ -332,7 +332,7 @@ static uint pack_keys(uchar *keybuff,uint key_count,KEY *keyinfo) pos[6]=pos[7]=0; // For the future pos+=8; key_parts+=key->key_parts; - DBUG_PRINT("loop",("flags: %d key_parts: %d at %lx", + DBUG_PRINT("loop",("flags: %d key_parts: %d at 0x%lx", key->flags,key->key_parts, key->key_part)); for (key_part=key->key_part,key_part_end=key_part+key->key_parts ; @@ -394,7 +394,7 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, if (create_fields.elements > MAX_FIELDS) { - my_error(ER_TOO_MANY_FIELDS,MYF(0)); + my_message(ER_TOO_MANY_FIELDS, ER(ER_TOO_MANY_FIELDS), MYF(0)); DBUG_RETURN(1); } @@ -481,7 +481,7 @@ static bool pack_header(uchar *forminfo, enum db_type table_type, if (info_length+(ulong) create_fields.elements*FCOMP+288+ n_length+int_length+com_length > 65535L || int_count > 255) { - my_error(ER_TOO_MANY_FIELDS,MYF(0)); + my_message(ER_TOO_MANY_FIELDS, ER(ER_TOO_MANY_FIELDS), MYF(0)); DBUG_RETURN(1); } @@ -685,7 +685,7 @@ static bool make_empty_rec(File file,enum db_type table_type, Field *regfield=make_field((char*) buff+field->offset,field->length, field->flags & NOT_NULL_FLAG ? 0: null_pos+null_count/8, - 1 << (null_count & 7), + null_count & 7, field->pack_flag, field->sql_type, field->charset, diff --git a/sql/unireg.h b/sql/unireg.h index 4ab2ba26b15..053ca393ad0 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -37,8 +37,8 @@ #define SHAREDIR "share/" #endif -#define ER(X) errmesg[(X)-1000] -#define ER_SAFE(X) (((X) >= 1000 && (X) < ER_ERROR_MESSAGES + 1000) ? ER(X) : "Invalid error code") +#define ER(X) errmesg[(X) - ER_ERROR_FIRST] +#define ER_SAFE(X) (((X) >= ER_ERROR_FIRST && (X) <= ER_ERROR_LAST) ? ER(X) : "Invalid error code") #define ERRMAPP 1 /* Errormap f|r my_error */ @@ -60,9 +60,14 @@ #define MAX_MBWIDTH 3 /* Max multibyte sequence */ #define MAX_FIELD_CHARLENGTH 255 +#define MAX_FIELD_VARCHARLENGTH 65535 +#define CONVERT_IF_BIGGER_TO_BLOB 512 /* Used for CREATE ... 
SELECT */ + /* Max column width +1 */ #define MAX_FIELD_WIDTH (MAX_FIELD_CHARLENGTH*MAX_MBWIDTH+1) +#define MAX_BIT_FIELD_LENGTH 64 /* Max length in bits for bit fields */ + #define MAX_DATE_WIDTH 10 /* YYYY-MM-DD */ #define MAX_TIME_WIDTH 23 /* -DDDDDD HH:MM:SS.###### */ #define MAX_DATETIME_FULL_WIDTH 29 /* YYYY-MM-DD HH:MM:SS.###### AM */ @@ -139,11 +144,13 @@ #define DONT_GIVE_ERROR 256 /* Don't do frm_error on openfrm */ #define READ_SCREENS 1024 /* Read screens, info and helpfile */ #define DELAYED_OPEN 4096 /* Open table later */ +#define NO_ERR_ON_NEW_FRM 8192 /* stop error sending on new format */ #define SC_INFO_LENGTH 4 /* Form format constant */ #define TE_INFO_LENGTH 3 #define MTYP_NOEMPTY_BIT 128 +#define FRM_VER_TRUE_VARCHAR (FRM_VER+4) /* Minimum length pattern before Turbo Boyer-Moore is used for SELECT "text" LIKE "%pattern%", excluding the two |