summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNikita Malyavin <nikitamalyavin@gmail.com>2022-07-04 15:13:51 +0300
committerNikita Malyavin <nikitamalyavin@gmail.com>2022-08-23 16:36:27 +0300
commit462e736877827a2c4a2338727da02b0fcfcf0477 (patch)
tree38056da987f609723dcabe271b893c45a7f6506d
parente2383d6e0c68602504dbe22ba2fe3bdd6acf7985 (diff)
downloadmariadb-git-462e736877827a2c4a2338727da02b0fcfcf0477.tar.gz
MDEV-29021 ALTER TABLE fails when a stored virtual column is dropped+added
We shouldn't rely on `fill_extra_persistent_columns`, as it only updates fields whose index is >= cols->n_bits (the replication bitmap width). In fact, it should never be used at all, since its approach is error-prone. Instead, the normal update_virtual_fields + update_default_fields path should be taken.
-rw-r--r--mysql-test/main/alter_table_online_debug.result61
-rw-r--r--mysql-test/main/alter_table_online_debug.test74
-rw-r--r--mysql-test/suite/rpl/r/rpl_alter_extra_persistent.result134
-rw-r--r--mysql-test/suite/rpl/t/rpl_alter_extra_persistent.test10
-rw-r--r--sql/log_event_server.cc8
-rw-r--r--sql/rpl_record.cc48
6 files changed, 241 insertions, 94 deletions
diff --git a/mysql-test/main/alter_table_online_debug.result b/mysql-test/main/alter_table_online_debug.result
index 79daab9f689..86fcdefa0b5 100644
--- a/mysql-test/main/alter_table_online_debug.result
+++ b/mysql-test/main/alter_table_online_debug.result
@@ -643,6 +643,67 @@ insert t1 (b) values ('k');
insert t1 (b) values ('m');
set debug_sync= 'now signal goforit';
connection con2;
+connection default;
+drop table t1;
+set debug_sync= reset;
+#
+# MDEV-29021 ALTER TABLE fails when a stored virtual column is dropped and added
+#
+create table t1 (a char(9), b char(9) as (a) stored) engine=InnoDB;
+insert into t1(a) values ('foobar');
+set debug_sync= 'now wait_for downgraded';
+connection con2;
+set debug_sync= 'alter_table_online_downgraded signal downgraded wait_for goforit';
+alter ignore table t1 drop b, add b char(3) as (a) stored, algorithm=copy, lock=none;
+connection default;
+update t1 set a = 'foobarqux';
+set debug_sync= 'now signal goforit';
+connection con2;
+Warnings:
+Warning 1265 Data truncated for column 'b' at row 1
+Warning 1265 Data truncated for column 'b' at row 2
+connection default;
+drop table t1;
+set debug_sync= reset;
+# (duplicate) MDEV-29007 Assertion `marked_for_write_or_computed()'
+# failed upon online ADD COLUMN .. FIRST
+create table t (a int);
+insert into t values (1),(2);
+set debug_sync= 'now wait_for downgraded';
+connection con2;
+set debug_sync= 'alter_table_online_downgraded signal downgraded wait_for goforit';
+alter table t add c int first, algorithm=copy, lock=none;
+connection default;
+insert into t values (3);
+set debug_sync= 'now signal goforit';
+connection con2;
+connection default;
+drop table t;
+set debug_sync= reset;
+# UNIQUE blob duplicates are not ignored.
+create table t1 (b blob);
+insert into t1 values ('foo'),('bar');
+set debug_sync= 'now wait_for downgraded';
+connection con2;
+set debug_sync= 'alter_table_online_downgraded signal downgraded wait_for goforit';
+alter table t1 add unique(b), algorithm=copy, lock=none;
+connection default;
+insert into t1 values ('qux'),('foo');
+set debug_sync= 'now signal goforit';
+connection con2;
+ERROR 23000: Duplicate entry 'foo' for key 'b'
+select * from t1;
+b
+foo
+bar
+qux
+foo
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `b` blob DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+connection default;
drop table t1;
set debug_sync= reset;
#
diff --git a/mysql-test/main/alter_table_online_debug.test b/mysql-test/main/alter_table_online_debug.test
index 285de17600a..c86b4a92701 100644
--- a/mysql-test/main/alter_table_online_debug.test
+++ b/mysql-test/main/alter_table_online_debug.test
@@ -796,7 +796,81 @@ set debug_sync= 'now signal goforit';
--connection con2
--reap
+--connection default
+drop table t1;
+set debug_sync= reset;
+
+--echo #
+--echo # MDEV-29021 ALTER TABLE fails when a stored virtual column is dropped and added
+--echo #
+create table t1 (a char(9), b char(9) as (a) stored) engine=InnoDB;
+insert into t1(a) values ('foobar');
+
+--send set debug_sync= 'now wait_for downgraded'
+
+--connection con2
+set debug_sync= 'alter_table_online_downgraded signal downgraded wait_for goforit';
+--send alter ignore table t1 drop b, add b char(3) as (a) stored, algorithm=copy, lock=none
+
+--connection default
+--reap
+update t1 set a = 'foobarqux';
+set debug_sync= 'now signal goforit';
+
+--connection con2
+--reap
+--connection default
+drop table t1;
+set debug_sync= reset;
+
+--echo # (duplicate) MDEV-29007 Assertion `marked_for_write_or_computed()'
+--echo # failed upon online ADD COLUMN .. FIRST
+create table t (a int);
+insert into t values (1),(2);
+--send
+set debug_sync= 'now wait_for downgraded';
+
+--connection con2
+set debug_sync= 'alter_table_online_downgraded signal downgraded wait_for goforit';
+--send
+alter table t add c int first, algorithm=copy, lock=none;
+--connection default
+--reap
+insert into t values (3);
+set debug_sync= 'now signal goforit';
+
+--connection con2
+--reap
+--connection default
+drop table t;
+set debug_sync= reset;
+
+--echo # UNIQUE blob duplicates are not ignored.
+
+create table t1 (b blob);
+insert into t1 values ('foo'),('bar');
+--send
+set debug_sync= 'now wait_for downgraded';
+
+--connection con2
+set debug_sync= 'alter_table_online_downgraded signal downgraded wait_for goforit';
+--send
+alter table t1 add unique(b), algorithm=copy, lock=none;
+
+--connection default
+--reap
+insert into t1 values ('qux'),('foo');
+set debug_sync= 'now signal goforit';
+
+--connection con2
+--error ER_DUP_ENTRY
+--reap
+select * from t1;
+show create table t1;
+
+# Cleanup
+--connection default
drop table t1;
set debug_sync= reset;
diff --git a/mysql-test/suite/rpl/r/rpl_alter_extra_persistent.result b/mysql-test/suite/rpl/r/rpl_alter_extra_persistent.result
index 96df87d8ad4..acfcf02eacd 100644
--- a/mysql-test/suite/rpl/r/rpl_alter_extra_persistent.result
+++ b/mysql-test/suite/rpl/r/rpl_alter_extra_persistent.result
@@ -32,8 +32,24 @@ a z1 z2
4 5 6
5 6 7
6 7 8
-#UPDATE query
+alter table t1 add column z3 int default(a+2);
connection master;
+insert into t1 values(7);
+insert into t1 values(8);
+connection slave;
+select * from t1 order by a;
+a z1 z2 z3
+1 2 3 3
+2 3 4 4
+3 4 5 5
+4 5 6 6
+5 6 7 7
+6 7 8 8
+7 8 9 9
+8 9 10 10
+connection master;
+delete from t1 where a > 6;
+#UPDATE query
update t1 set a = a+10;
select * from t1 order by a;
a
@@ -45,13 +61,13 @@ a
16
connection slave;
select * from t1 order by a;
-a z1 z2
-11 12 13
-12 13 14
-13 14 15
-14 15 16
-15 16 17
-16 17 18
+a z1 z2 z3
+11 12 13 13
+12 13 14 14
+13 14 15 15
+14 15 16 16
+15 16 17 17
+16 17 18 18
connection master;
update t1 set a = a-10;
select * from t1 order by a;
@@ -64,13 +80,13 @@ a
6
connection slave;
select * from t1 order by a;
-a z1 z2
-1 2 3
-2 3 4
-3 4 5
-4 5 6
-5 6 7
-6 7 8
+a z1 z2 z3
+1 2 3 3
+2 3 4 4
+3 4 5 5
+4 5 6 6
+5 6 7 7
+6 7 8 8
#DELETE quert
connection master;
delete from t1 where a > 2 and a < 4;
@@ -83,12 +99,12 @@ a
6
connection slave;
select * from t1 order by a;
-a z1 z2
-1 2 3
-2 3 4
-4 5 6
-5 6 7
-6 7 8
+a z1 z2 z3
+1 2 3 3
+2 3 4 4
+4 5 6 6
+5 6 7 7
+6 7 8 8
#REPLACE query
connection master;
replace into t1 values(1);
@@ -96,13 +112,13 @@ replace into t1 values(3);
replace into t1 values(1);
connection slave;
select * from t1 order by a;
-a z1 z2
-1 2 3
-2 3 4
-3 4 5
-4 5 6
-5 6 7
-6 7 8
+a z1 z2 z3
+1 2 3 3
+2 3 4 4
+3 4 5 5
+4 5 6 6
+5 6 7 7
+6 7 8 8
#SELECT query
connection master;
select * from t1 where a > 2 and a < 4;
@@ -110,8 +126,8 @@ a
3
connection slave;
select * from t1 where a > 2 and a < 4;
-a z1 z2
-3 4 5
+a z1 z2 z3
+3 4 5 5
#UPDATE with SELECT query
connection master;
update t1 set a = a + 10 where a > 2 and a < 4;
@@ -125,13 +141,13 @@ a
13
connection slave;
select * from t1 order by a;
-a z1 z2
-1 2 3
-2 3 4
-4 5 6
-5 6 7
-6 7 8
-13 14 15
+a z1 z2 z3
+1 2 3 3
+2 3 4 4
+4 5 6 6
+5 6 7 7
+6 7 8 8
+13 14 15 15
connection master;
update t1 set a = a - 10 where a = 13;
select * from t1 order by a;
@@ -144,13 +160,13 @@ a
6
connection slave;
select * from t1 order by a;
-a z1 z2
-1 2 3
-2 3 4
-3 4 5
-4 5 6
-5 6 7
-6 7 8
+a z1 z2 z3
+1 2 3 3
+2 3 4 4
+3 4 5 5
+4 5 6 6
+5 6 7 7
+6 7 8 8
#Break Unique Constraint
alter table t1 add column z4 int as (a % 6) persistent unique;
connection master;
@@ -168,27 +184,27 @@ a
connection slave;
include/wait_for_slave_sql_error.inc [errno=1062]
select * from t1 order by a;
-a z1 z2 z4
-1 2 3 1
-2 3 4 2
-3 4 5 3
-4 5 6 4
-5 6 7 5
-6 7 8 0
+a z1 z2 z3 z4
+1 2 3 3 1
+2 3 4 4 2
+3 4 5 5 3
+4 5 6 6 4
+5 6 7 7 5
+6 7 8 8 0
alter table t1 drop column z4;
start slave;
include/wait_for_slave_sql_to_start.inc
connection master;
connection slave;
select * from t1 order by a;
-a z1 z2
-1 2 3
-2 3 4
-3 4 5
-4 5 6
-5 6 7
-6 7 8
-7 8 9
+a z1 z2 z3
+1 2 3 3
+2 3 4 4
+3 4 5 5
+4 5 6 6
+5 6 7 7
+6 7 8 8
+7 8 9 9
connection master;
select * from t1 order by a;
a
diff --git a/mysql-test/suite/rpl/t/rpl_alter_extra_persistent.test b/mysql-test/suite/rpl/t/rpl_alter_extra_persistent.test
index 4e604787c70..d0c6d6de953 100644
--- a/mysql-test/suite/rpl/t/rpl_alter_extra_persistent.test
+++ b/mysql-test/suite/rpl/t/rpl_alter_extra_persistent.test
@@ -20,11 +20,19 @@ insert into t1 values(6);
--sync_slave_with_master
select * from t1 order by a;
+alter table t1 add column z3 int default(a+2);
+--connection master
+insert into t1 values(7);
+insert into t1 values(8);
+--sync_slave_with_master
+select * from t1 order by a;
+
+--connection master
+delete from t1 where a > 6;
--echo #UPDATE query
---connection master
update t1 set a = a+10;
select * from t1 order by a;
diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc
index 12ea133648e..44a479c8db2 100644
--- a/sql/log_event_server.cc
+++ b/sql/log_event_server.cc
@@ -6044,6 +6044,14 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
&m_cols_ai : &m_cols);
bitmap_intersect(table->write_set, after_image);
+ /* Mark extra replica columns for write */
+ for (Field **field_ptr= table->field; *field_ptr; ++field_ptr)
+ {
+ Field *field= *field_ptr;
+ if (field->field_index >= m_cols.n_bits && field->stored_in_db())
+ bitmap_set_bit(table->write_set, field->field_index);
+ }
+
this->slave_exec_mode= slave_exec_mode_options; // fix the mode
// Do event specific preparations
diff --git a/sql/rpl_record.cc b/sql/rpl_record.cc
index 2b65a965489..1d526bb39c2 100644
--- a/sql/rpl_record.cc
+++ b/sql/rpl_record.cc
@@ -146,33 +146,6 @@ pack_row(TABLE *table, MY_BITMAP const* cols,
#if !defined(MYSQL_CLIENT) && defined(HAVE_REPLICATION)
/**
- Fills @c table->record[0] with computed values of extra persistent column
- which are present on slave but not on master.
-
- @param table Table whose record[0] buffer is prepared.
- @param master_cols No of columns on master
- @returns 0 on success
- */
-static int fill_extra_persistent_columns(TABLE *table, int master_cols)
-{
- int error= 0;
-
- if (!table->vfield)
- return 0;
- for (Field **vfield_ptr= table->vfield; *vfield_ptr; ++vfield_ptr)
- {
- Field *vfield= *vfield_ptr;
- if (vfield->field_index >= master_cols && vfield->stored_in_db())
- {
- bitmap_set_bit(table->write_set, vfield->field_index);
- error= vfield->vcol_info->expr->save_in_field(vfield,0);
- }
- }
- return error;
-}
-
-
-/**
Unpack a row into @c table->record[0].
The function will always unpack into the @c table->record[0]
@@ -409,12 +382,25 @@ int unpack_row(rpl_group_info *rgi, TABLE *table, uint const colcnt,
if (copy_fields)
{
- for (auto *copy=copy_fields; copy != copy_fields_end; copy++)
+ for (const auto *copy=copy_fields; copy != copy_fields_end; copy++)
{
copy->do_copy(copy);
}
}
+ if (table->default_field)
+ {
+ error= table->update_default_fields(table->in_use->lex->ignore);
+ if (unlikely(error))
+ DBUG_RETURN(error);
+ }
+ if (table->vfield)
+ {
+ error= table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE);
+ if (unlikely(error))
+ DBUG_RETURN(error);
+ }
+
/*
throw away master's extra fields
*/
@@ -441,12 +427,6 @@ int unpack_row(rpl_group_info *rgi, TABLE *table, uint const colcnt,
}
/*
- Add Extra slave persistent columns
- */
- if (unlikely(error= fill_extra_persistent_columns(table, cols->n_bits)))
- DBUG_RETURN(error);
-
- /*
We should now have read all the null bytes, otherwise something is
really wrong.
*/