author     unknown <monty@narttu.mysql.fi>  2003-03-19 00:45:44 +0200
committer  unknown <monty@narttu.mysql.fi>  2003-03-19 00:45:44 +0200
commit     f45236de394523115dfd160a00a42072b6536fc0 (patch)
tree       cb3c86ac4e063681514b1a8ffa9db31b639b9c46 /sql
parent     21b0873a3ce5ee079b120199eb0423bdcff78c22 (diff)
download   mariadb-git-f45236de394523115dfd160a00a42072b6536fc0.tar.gz
Allow optimization of multi-table-update also for InnoDB tables
MEMORY is an alias for HEAP in CREATE TABLE ... TYPE=MEMORY
Fixed a bug in multi-table-update where a row could be updated several times
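In SQL terms, the two user-visible changes look roughly like this (a minimal sketch; table names and values are made up for illustration and not taken from the tests added below):

    # MEMORY is now accepted as an alias for the HEAP engine
    CREATE TABLE t1 (a INT NOT NULL) TYPE=MEMORY;
    CREATE TABLE t2 (a INT NOT NULL) TYPE=INNODB;
    INSERT INTO t1 VALUES (1);
    INSERT INTO t2 VALUES (1), (1);
    # t1's single row matches both t2 rows, so the join visits it twice;
    # before the fix it could also be updated twice (a = 3 instead of 2)
    UPDATE t1,t2 SET t1.a=t1.a+1 WHERE t1.a=t2.a;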
mysql-test/r/heap.result:
Test of CREATE TABLE ... type=MEMORY
mysql-test/r/innodb.result:
Added multi-update test
mysql-test/r/multi_update.result:
Added multi-update test
mysql-test/t/heap.test:
Test of CREATE TABLE ... type=MEMORY
mysql-test/t/innodb.test:
Added multi-update test
mysql-test/t/multi_update.test:
Added multi-update test
sql/ha_innodb.h:
Allow optimization of multi-table-update also for InnoDB tables
sql/handler.h:
Allow optimization of multi-update also for InnoDB tables
sql/key.cc:
After merge fix
sql/lex.h:
MEMORY is an alias for HEAP
sql/sql_test.cc:
Fixed wrong printf format
sql/sql_update.cc:
Fixed bug in multi-table-update where a row could be updated several times
sql/sql_yacc.yy:
MEMORY is an alias for HEAP
Diffstat (limited to 'sql')
-rw-r--r--   sql/ha_innodb.h     3
-rw-r--r--   sql/handler.h       1
-rw-r--r--   sql/key.cc          2
-rw-r--r--   sql/lex.h           1
-rw-r--r--   sql/sql_test.cc     2
-rw-r--r--   sql/sql_update.cc 146
-rw-r--r--   sql/sql_yacc.yy     3
7 files changed, 112 insertions, 46 deletions
diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h
index 94154d9f05f..8031fa0aa29 100644
--- a/sql/ha_innodb.h
+++ b/sql/ha_innodb.h
@@ -82,8 +82,7 @@ class ha_innobase: public handler
 		  HA_PRIMARY_KEY_IN_READ_INDEX |
 		  HA_DROP_BEFORE_CREATE |
 		  HA_NO_PREFIX_CHAR_KEYS |
-		  HA_TABLE_SCAN_ON_INDEX |
-		  HA_NOT_MULTI_UPDATE),
+		  HA_TABLE_SCAN_ON_INDEX),
 	  last_dup_key((uint) -1),
 	  start_of_scan(0)
   {
diff --git a/sql/handler.h b/sql/handler.h
index 81724bd3abe..8f1d00f64b5 100644
--- a/sql/handler.h
+++ b/sql/handler.h
@@ -67,7 +67,6 @@
 #define HA_CAN_FULLTEXT		(HA_NO_PREFIX_CHAR_KEYS*2)
 #define HA_CAN_SQL_HANDLER	(HA_CAN_FULLTEXT*2)
 #define HA_NO_AUTO_INCREMENT	(HA_CAN_SQL_HANDLER*2)
-#define HA_NOT_MULTI_UPDATE	(HA_NO_AUTO_INCREMENT*2)
 
 /*
   Next record gives next record according last record read (even
diff --git a/sql/key.cc b/sql/key.cc
index 8520dee215e..809c5a164b9 100644
--- a/sql/key.cc
+++ b/sql/key.cc
@@ -275,7 +275,7 @@ bool check_if_key_used(TABLE *table, uint idx, List<Item> &fields)
     key is not updated
   */
   if (idx != table->primary_key && table->primary_key < MAX_KEY &&
-      (table->file->option_flag() & HA_PRIMARY_KEY_IN_READ_INDEX))
+      (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
     return check_if_key_used(table, table->primary_key, fields);
   return 0;
 }
diff --git a/sql/lex.h b/sql/lex.h
index a9e44e034d0..d9a84dd25b4 100644
--- a/sql/lex.h
+++ b/sql/lex.h
@@ -240,6 +240,7 @@ static SYMBOL symbols[] = {
   { "MEDIUMINT",	SYM(MEDIUMINT),0,0},
   { "MERGE",		SYM(MERGE_SYM),0,0},
   { "MEDIUM",		SYM(MEDIUM_SYM),0,0},
+  { "MEMORY",		SYM(MEMORY_SYM),0,0},
   { "MIDDLEINT",	SYM(MEDIUMINT),0,0},	/* For powerbuilder */
   { "MIN_ROWS",		SYM(MIN_ROWS),0,0},
   { "MINUTE",		SYM(MINUTE_SYM),0,0},
diff --git a/sql/sql_test.cc b/sql/sql_test.cc
index b226bc1300a..6816bb62047 100644
--- a/sql/sql_test.cc
+++ b/sql/sql_test.cc
@@ -131,7 +131,7 @@ void TEST_filesort(SORT_FIELD *sortorder,uint s_length, ha_rows special)
   DBUG_LOCK_FILE;
   VOID(fputs("\nInfo about FILESORT\n",DBUG_FILE));
   if (special)
-    fprintf(DBUG_FILE,"Records to sort: %ld\n",special);
+    fprintf(DBUG_FILE,"Records to sort: %lu\n",(ulong) special);
   fprintf(DBUG_FILE,"Sortorder: %s\n",out.ptr());
   DBUG_UNLOCK_FILE;
   DBUG_VOID_RETURN;
diff --git a/sql/sql_update.cc b/sql/sql_update.cc
index d8842855093..709f88726de 100644
--- a/sql/sql_update.cc
+++ b/sql/sql_update.cc
@@ -23,6 +23,8 @@
 #include "sql_acl.h"
 #include "sql_select.h"
 
+static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields);
+
 /* Return 0 if row hasn't changed */
 
 static bool compare_record(TABLE *table, ulong query_id)
@@ -520,11 +522,12 @@ int multi_update::prepare(List<Item> &not_used_values)
 
 /*
-  Store first used table in main_table as this should be updated first
-  This is because we know that no row in this table will be read twice.
+  Initialize table for multi table
 
-  Create temporary tables to store changed values for all other tables
-  that are updated.
+  IMPLEMENTATION
+    - Update first table in join on the fly, if possible
+    - Create temporary tables to store changed values for all other tables
+      that are updated (and main_table if the above doesn't hold).
 */
 
 bool
@@ -538,52 +541,113 @@ multi_update::initialize_tables(JOIN *join)
   main_table=join->join_tab->table;
   trans_safe= transactional_tables= main_table->file->has_transactions();
   log_delayed= trans_safe || main_table->tmp_table != NO_TMP_TABLE;
-  table_to_update= (main_table->file->table_flags() & HA_NOT_MULTI_UPDATE) ?
-		   (TABLE *) 0 : main_table;
-  /* Create a temporary table for all tables after except main table */
+  table_to_update= 0;
+
+  /* Create a temporary table for keys to all tables, except main table */
   for (table_ref= update_tables; table_ref; table_ref=table_ref->next)
   {
     TABLE *table=table_ref->table;
-    if (table != table_to_update)
-    {
-      uint cnt= table_ref->shared;
-      ORDER group;
-      List<Item> temp_fields= *fields_for_table[cnt];
-      TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
-
-      /*
-	Create a temporary table to store all fields that are changed for this
-	table. The first field in the temporary table is a pointer to the
-	original row so that we can find and update it
-      */
-
-      /* ok to be on stack as this is not referenced outside of this func */
-      Field_string offset(table->file->ref_length, 0, "offset",
-			  table, 1);
-      if (temp_fields.push_front(new Item_field(((Field *) &offset))))
-	DBUG_RETURN(1);
+    uint cnt= table_ref->shared;
+    List<Item> temp_fields= *fields_for_table[cnt];
+    ORDER group;
 
-      /* Make an unique key over the first field to avoid duplicated updates */
-      bzero((char*) &group, sizeof(group));
-      group.asc= 1;
-      group.item= (Item**) temp_fields.head_ref();
-
-      tmp_param->quick_group=1;
-      tmp_param->field_count=temp_fields.elements;
-      tmp_param->group_parts=1;
-      tmp_param->group_length= table->file->ref_length;
-      if (!(tmp_tables[cnt]=create_tmp_table(thd,
-					     tmp_param,
-					     temp_fields,
-					     (ORDER*) &group, 0, 0, 0,
-					     TMP_TABLE_ALL_COLUMNS)))
-	DBUG_RETURN(1);
-      tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
+    if (table == main_table)			// First table in join
+    {
+      if (safe_update_on_fly(join->join_tab, &temp_fields))
+      {
+	table_to_update= main_table;		// Update table on the fly
+	continue;
+      }
     }
+
+    TMP_TABLE_PARAM *tmp_param= tmp_table_param+cnt;
+
+    /*
+      Create a temporary table to store all fields that are changed for this
+      table. The first field in the temporary table is a pointer to the
+      original row so that we can find and update it
+    */
+
+    /* ok to be on stack as this is not referenced outside of this func */
+    Field_string offset(table->file->ref_length, 0, "offset",
+			table, 1);
+    if (temp_fields.push_front(new Item_field(((Field *) &offset))))
+      DBUG_RETURN(1);
+
+    /* Make an unique key over the first field to avoid duplicated updates */
+    bzero((char*) &group, sizeof(group));
+    group.asc= 1;
+    group.item= (Item**) temp_fields.head_ref();
+
+    tmp_param->quick_group=1;
+    tmp_param->field_count=temp_fields.elements;
+    tmp_param->group_parts=1;
+    tmp_param->group_length= table->file->ref_length;
+    if (!(tmp_tables[cnt]=create_tmp_table(thd,
+					   tmp_param,
+					   temp_fields,
+					   (ORDER*) &group, 0, 0, 0,
+					   TMP_TABLE_ALL_COLUMNS)))
+      DBUG_RETURN(1);
+    tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
   }
   DBUG_RETURN(0);
 }
 
+/*
+  Check if table is safe to update on fly
+
+  SYNOPSIS
+    safe_update_on_fly
+    join_tab		How table is used in join
+    fields		Fields that are updated
+
+  NOTES
+    We can update the first table in join on the fly if we know that
+    a row in this table will never be read twice. This is true under
+    the following conditions:
+
+    - We are doing a table scan and the data is in a separate file (MyISAM) or
+      if we don't update a clustered key.
+
+    - We are doing a range scan and we don't update the scan key or
+      the primary key for a clustered table handler.
+
+  WARNING
+    This code is a bit dependent on how make_join_readinfo() works.
+
+  RETURN
+    0		Not safe to update
+    1		Safe to update
+*/
+
+static bool safe_update_on_fly(JOIN_TAB *join_tab, List<Item> *fields)
+{
+  TABLE *table= join_tab->table;
+  switch (join_tab->type) {
+  case JT_SYSTEM:
+  case JT_CONST:
+  case JT_EQ_REF:
+    return 1;					// At most one matching row
+  case JT_REF:
+    return !check_if_key_used(table, join_tab->ref.key, *fields);
+  case JT_ALL:
+    /* If range search on index */
+    if (join_tab->quick)
+      return !check_if_key_used(table, join_tab->quick->index,
+				*fields);
+    /* If scanning in clustered key */
+    if ((table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
+	table->primary_key < MAX_KEY)
+      return !check_if_key_used(table, table->primary_key, *fields);
+    return 1;
+  default:
+    break;					// Avoid compiler warning
+  }
+  return 0;
+}
+
+
 multi_update::~multi_update()
 {
diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy
index 2f339f30eb4..b986b72df32 100644
--- a/sql/sql_yacc.yy
+++ b/sql/sql_yacc.yy
@@ -256,6 +256,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b,int *yystacksize);
 %token	MAX_UPDATES_PER_HOUR
 %token	MEDIUM_SYM
 %token	MERGE_SYM
+%token	MEMORY_SYM
 %token	MIN_ROWS
 %token	MYISAM_SYM
 %token	NATIONAL_SYM
@@ -871,6 +872,7 @@ table_types:
 	| MYISAM_SYM	{ $$= DB_TYPE_MYISAM; }
 	| MERGE_SYM	{ $$= DB_TYPE_MRG_MYISAM; }
 	| HEAP_SYM	{ $$= DB_TYPE_HEAP; }
+	| MEMORY_SYM	{ $$= DB_TYPE_HEAP; }
 	| BERKELEY_DB_SYM { $$= DB_TYPE_BERKELEY_DB; }
 	| INNOBASE_SYM	{ $$= DB_TYPE_INNODB; };
 
@@ -3316,6 +3318,7 @@ keyword:
 	| MAX_UPDATES_PER_HOUR	{}
 	| MEDIUM_SYM		{}
 	| MERGE_SYM		{}
+	| MEMORY_SYM		{}
 	| MINUTE_SYM		{}
 	| MIN_ROWS		{}
 	| MODIFY_SYM		{}
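As a rough illustration of what the new safe_update_on_fly() decides (hypothetical tables; the actual join order and access method are up to the optimizer):

    CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT) TYPE=INNODB;
    CREATE TABLE t2 (a INT NOT NULL) TYPE=INNODB;
    # Updating b does not touch any key used to read t1, so when t1 is the
    # first table of the join it can be updated on the fly
    UPDATE t1,t2 SET t1.b=t2.a WHERE t1.a=t2.a;
    # Updating a changes the clustered primary key that an InnoDB table
    # scan moves through, so a row could be read again after the update;
    # changes are buffered in a temporary table and applied afterwards
    UPDATE t1,t2 SET t1.a=t1.a+10 WHERE t1.b=t2.a;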