path: root/sql/log_event_old.cc
Diffstat (limited to 'sql/log_event_old.cc')
-rw-r--r--   sql/log_event_old.cc   133
1 file changed, 0 insertions, 133 deletions
diff --git a/sql/log_event_old.cc b/sql/log_event_old.cc
index 0cb78686243..eaa882518f5 100644
--- a/sql/log_event_old.cc
+++ b/sql/log_event_old.cc
@@ -302,50 +302,7 @@ last_uniq_key(TABLE *table, uint keyno)
*/
static bool record_compare(TABLE *table)
{
- /*
- Need to set the X bit and the filler bits in both records since
- there are engines that do not set it correctly.
-
- In addition, since MyISAM checks that one hasn't tampered with the
- record, it is necessary to restore the old bytes into the record
- after doing the comparison.
-
- TODO[record format ndb]: Remove it once NDB returns correct
- records. Check that the other engines also return correct records.
- */
-
bool result= FALSE;
- uchar saved_x[2]= {0, 0}, saved_filler[2]= {0, 0};
-
- if (table->s->null_bytes > 0)
- {
- for (int i = 0 ; i < 2 ; ++i)
- {
- /*
- If we have an X bit then we need to take care of it.
- */
- if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD))
- {
- saved_x[i]= table->record[i][0];
- table->record[i][0]|= 1U;
- }
-
- /*
- If (last_null_bit_pos == 0 && null_bytes > 1), then:
-
- X bit (if any) + N nullable fields + M Field_bit fields = 8 bits
-
- Ie, the entire byte is used.
- */
- if (table->s->last_null_bit_pos > 0)
- {
- saved_filler[i]= table->record[i][table->s->null_bytes - 1];
- table->record[i][table->s->null_bytes - 1]|=
- 256U - (1U << table->s->last_null_bit_pos);
- }
- }
- }
-
if (table->s->blob_fields + table->s->varchar_fields == 0)
{
result= cmp_record(table,record[1]);
@@ -372,24 +329,6 @@ static bool record_compare(TABLE *table)
}
record_compare_exit:
- /*
- Restore the saved bytes.
-
- TODO[record format ndb]: Remove this code once NDB returns the
- correct record format.
- */
- if (table->s->null_bytes > 0)
- {
- for (int i = 0 ; i < 2 ; ++i)
- {
- if (!(table->s->db_options_in_use & HA_OPTION_PACK_RECORD))
- table->record[i][0]= saved_x[i];
-
- if (table->s->last_null_bit_pos > 0)
- table->record[i][table->s->null_bytes - 1]= saved_filler[i];
- }
- }
-
return result;
}
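
Side note on the arithmetic the dropped comments describe: the mask 256U - (1U << last_null_bit_pos) sets every bit of the last null byte from position last_null_bit_pos upward, i.e. the unused filler bits, while the low "X" bit of the first byte is forced on separately unless HA_OPTION_PACK_RECORD is set. A minimal standalone sketch of the mask arithmetic, assuming an example value of 5 for table->s->last_null_bit_pos (the value is illustrative, not taken from the patch):

#include <cstdio>

int main()
{
  // Assumed example value; in the server this comes from table->s->last_null_bit_pos.
  unsigned last_null_bit_pos= 5;

  // Mask applied by the removed workaround: sets bits last_null_bit_pos..7,
  // i.e. the filler bits of the last null byte.
  unsigned char mask= (unsigned char) (256U - (1U << last_null_bit_pos));

  printf("mask = 0x%02X\n", mask);  // prints 0xE0 when last_null_bit_pos == 5
  return 0;
}
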
@@ -780,21 +719,6 @@ static int find_and_fetch_row(TABLE *table, uchar *key)
{
int error;
- /*
- We need to set the null bytes to ensure that the filler bit
- are all set when returning. There are storage engines that
- just set the necessary bits on the bytes and don't set the
- filler bits correctly.
-
- TODO[record format ndb]: Remove this code once NDB returns the
- correct record format.
- */
- if (table->s->null_bytes > 0)
- {
- table->record[1][table->s->null_bytes - 1]|=
- 256U - (1U << table->s->last_null_bit_pos);
- }
-
while ((error= table->file->ha_index_next(table->record[1])))
{
/* We just skip records that has already been deleted */
@@ -889,34 +813,13 @@ int Write_rows_log_event_old::do_before_row_operations(TABLE *table)
/* Tell the storage engine that we are using REPLACE semantics. */
thd->lex->duplicates= DUP_REPLACE;
- /*
- Pretend we're executing a REPLACE command: this is needed for
- InnoDB and NDB Cluster since they are not (properly) checking the
- lex->duplicates flag.
- */
thd->lex->sql_command= SQLCOM_REPLACE;
/*
Do not raise the error flag in case of hitting to an unique attribute
*/
table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- /*
- NDB specific: update from ndb master wrapped as Write_rows
- */
- /*
- so that the event should be applied to replace slave's row
- */
table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- /*
- NDB specific: if update from ndb master wrapped as Write_rows
- does not find the row it's assumed idempotent binlog applying
- is taking place; don't raise the error.
- */
table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
- /*
- TODO: the cluster team (Tomas?) says that it's better if the engine knows
- how many rows are going to be inserted, then it can allocate needed memory
- from the start.
- */
table->file->ha_start_bulk_insert(0);
return error;
}
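
The retained calls above still request REPLACE semantics: DUP_REPLACE together with HA_EXTRA_WRITE_CAN_REPLACE asks the engine to overwrite a row on a duplicate key rather than report an error, and ha_start_bulk_insert(0) signals that the number of incoming rows is not known in advance (which is what the removed TODO was about). A standalone sketch of what "write can replace" means at the row level, using a plain std::map purely as an illustration (none of this is server code):

#include <cstdio>
#include <map>
#include <string>

// REPLACE-style write: a duplicate key silently overwrites the old row
// instead of raising a duplicate-key error.
static void replace_row(std::map<int, std::string> &t, int key, const std::string &row)
{
  t[key]= row;
}

int main()
{
  std::map<int, std::string> t;
  replace_row(t, 1, "row from master");
  replace_row(t, 1, "updated row");   // same key: old row replaced, no error
  printf("%s\n", t[1].c_str());       // prints "updated row"
  return 0;
}
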
@@ -2375,21 +2278,6 @@ int Old_rows_log_event::find_row(rpl_group_info *rgi)
while (record_compare(table))
{
- /*
- We need to set the null bytes to ensure that the filler bit
- are all set when returning. There are storage engines that
- just set the necessary bits on the bytes and don't set the
- filler bits correctly.
-
- TODO[record format ndb]: Remove this code once NDB returns the
- correct record format.
- */
- if (table->s->null_bytes > 0)
- {
- table->record[0][table->s->null_bytes - 1]|=
- 256U - (1U << table->s->last_null_bit_pos);
- }
-
while ((error= table->file->ha_index_next(table->record[0])))
{
/* We just skip records that has already been deleted */
@@ -2529,34 +2417,13 @@ Write_rows_log_event_old::do_before_row_operations(const Slave_reporting_capabil
/* Tell the storage engine that we are using REPLACE semantics. */
thd->lex->duplicates= DUP_REPLACE;
- /*
- Pretend we're executing a REPLACE command: this is needed for
- InnoDB and NDB Cluster since they are not (properly) checking the
- lex->duplicates flag.
- */
thd->lex->sql_command= SQLCOM_REPLACE;
/*
Do not raise the error flag in case of hitting to an unique attribute
*/
m_table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
- /*
- NDB specific: update from ndb master wrapped as Write_rows
- */
- /*
- so that the event should be applied to replace slave's row
- */
m_table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
- /*
- NDB specific: if update from ndb master wrapped as Write_rows
- does not find the row it's assumed idempotent binlog applying
- is taking place; don't raise the error.
- */
m_table->file->extra(HA_EXTRA_IGNORE_NO_KEY);
- /*
- TODO: the cluster team (Tomas?) says that it's better if the engine knows
- how many rows are going to be inserted, then it can allocate needed memory
- from the start.
- */
m_table->file->ha_start_bulk_insert(0);
return error;
}