path: root/sql/log_event.cc
Diffstat (limited to 'sql/log_event.cc')
-rw-r--r--  sql/log_event.cc | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/sql/log_event.cc b/sql/log_event.cc
index 81cf7d8c250..8932d0f261d 100644
--- a/sql/log_event.cc
+++ b/sql/log_event.cc
@@ -1131,7 +1131,7 @@ int append_query_string(CHARSET_INFO *csinfo, String *to,
*ptr++= '\'';
}
- to->length(orig_len + ptr - beg);
+ to->length((uint32)(orig_len + ptr - beg));
return 0;
}
#endif
@@ -10034,7 +10034,7 @@ Execute_load_query_log_event::do_apply_event(rpl_group_info *rgi)
p= strmake(p, STRING_WITH_LEN(" INTO "));
p= strmake(p, query+fn_pos_end, q_len-fn_pos_end);
- error= Query_log_event::do_apply_event(rgi, buf, p-buf);
+ error= Query_log_event::do_apply_event(rgi, buf, (uint32)(p-buf));
/* Forging file name for deletion in same buffer */
*fname_end= 0;
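
Both hunks above fix the same warning class: on 64-bit builds the difference of two pointers is a 64-bit ptrdiff_t, while the receiving parameter (String::length() in the first hunk, the query-length argument of Query_log_event::do_apply_event() in the second) is a 32-bit uint32, so the implicit narrowing draws a conversion warning. The explicit (uint32) cast records that the truncation is deliberate. A minimal standalone sketch of the pattern; set_length() is a hypothetical stand-in, not a MariaDB API:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a 32-bit length setter such as String::length().
static void set_length(uint32_t len) { std::printf("len=%u\n", (unsigned) len); }

int main()
{
  char buf[64];
  char *beg= buf, *ptr= buf + 10;

  // ptr - beg has type ptrdiff_t: 64 bits on LP64/LLP64 platforms.
  // set_length(ptr - beg);           // implicit narrowing: conversion warning
  set_length((uint32_t)(ptr - beg));  // explicit cast documents the intent
  return 0;
}
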
@@ -10486,7 +10486,7 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length)
if (static_cast<size_t>(m_rows_end - m_rows_cur) <= length)
{
size_t const block_size= 1024;
- ulong cur_size= m_rows_cur - m_rows_buf;
+ size_t cur_size= m_rows_cur - m_rows_buf;
DBUG_EXECUTE_IF("simulate_too_big_row_case1",
cur_size= UINT_MAX32 - (block_size * 10);
length= UINT_MAX32 - (block_size * 10););
@@ -10499,21 +10499,21 @@ int Rows_log_event::do_add_row_data(uchar *row_data, size_t length)
DBUG_EXECUTE_IF("simulate_too_big_row_case4",
cur_size= UINT_MAX32 - (block_size * 10);
length= (block_size * 10) - block_size + 1;);
- ulong remaining_space= UINT_MAX32 - cur_size;
+ size_t remaining_space= UINT_MAX32 - cur_size;
/* Check that the new data fits within remaining space and we can add
block_size without wrapping.
*/
- if (length > remaining_space ||
+ if (cur_size > UINT_MAX32 || length > remaining_space ||
((length + block_size) > remaining_space))
{
sql_print_error("The row data is greater than 4GB, which is too big to "
"write to the binary log.");
DBUG_RETURN(ER_BINLOG_ROW_LOGGING_FAILED);
}
- ulong const new_alloc=
+ size_t const new_alloc=
block_size * ((cur_size + length + block_size - 1) / block_size);
- uchar* const new_buf= (uchar*)my_realloc((uchar*)m_rows_buf, (uint) new_alloc,
+ uchar* const new_buf= (uchar*)my_realloc((uchar*)m_rows_buf, new_alloc,
MYF(MY_ALLOW_ZERO_PTR|MY_WME));
if (unlikely(!new_buf))
DBUG_RETURN(HA_ERR_OUT_OF_MEM);
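
This hunk is the substantive fix. cur_size was an ulong, which is only 32 bits on LLP64 platforms such as 64-bit Windows, so the pointer difference m_rows_cur - m_rows_buf could truncate before the 4GB limit was ever tested. Widening cur_size, remaining_space and new_alloc to size_t preserves the real size, and the new cur_size > UINT_MAX32 guard becomes necessary: without it, UINT_MAX32 - cur_size would wrap for an oversized buffer and the subsequent comparisons would wrongly pass. A standalone sketch of the guarded arithmetic, assuming a 64-bit size_t and using UINT32_MAX as a stand-in for UINT_MAX32:

#include <cstddef>
#include <cstdint>

// Returns the new allocation size rounded up to a whole number of blocks,
// or 0 when the row buffer would exceed the 4GB binlog row limit.
static size_t grow_row_buffer(size_t cur_size, size_t length)
{
  const size_t block_size= 1024;
  // Guard first: if cur_size already exceeds the cap, the subtraction
  // below would wrap and the later checks would wrongly succeed.
  if (cur_size > UINT32_MAX)
    return 0;
  size_t remaining_space= UINT32_MAX - cur_size;
  if (length > remaining_space || length + block_size > remaining_space)
    return 0;
  return block_size * ((cur_size + length + block_size - 1) / block_size);
}
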
@@ -11246,11 +11246,11 @@ bool Rows_log_event::write_compressed()
uchar *m_rows_cur_tmp = m_rows_cur;
bool ret = true;
uint32 comlen, alloc_size;
- comlen= alloc_size= binlog_get_compress_len(m_rows_cur_tmp - m_rows_buf_tmp);
+ comlen= alloc_size= binlog_get_compress_len((uint32)(m_rows_cur_tmp - m_rows_buf_tmp));
m_rows_buf = (uchar *)my_safe_alloca(alloc_size);
if(m_rows_buf &&
!binlog_buf_compress((const char *)m_rows_buf_tmp, (char *)m_rows_buf,
- m_rows_cur_tmp - m_rows_buf_tmp, &comlen))
+ (uint32)(m_rows_cur_tmp - m_rows_buf_tmp), &comlen))
{
m_rows_cur= comlen + m_rows_buf;
ret= Log_event::write();
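
The write_compressed() casts follow the same narrowing pattern: binlog_get_compress_len() and binlog_buf_compress() take 32-bit lengths, while m_rows_cur_tmp - m_rows_buf_tmp is a ptrdiff_t. Here the cast is safe rather than merely quiet, because the do_add_row_data() guard above already caps the buffer at UINT_MAX32. Where no such invariant holds, a checked narrowing keeps the truncation honest; a hypothetical helper, not part of MariaDB:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Hypothetical: narrow a pointer difference to 32 bits, asserting in debug
// builds that the value actually fits.
static inline uint32_t checked_uint32(std::ptrdiff_t v)
{
  assert(v >= 0 && (uint64_t) v <= UINT32_MAX);
  return (uint32_t) v;
}
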
@@ -12477,7 +12477,7 @@ Rows_log_event::write_row(rpl_group_info *rgi,
the size of the first row and use that value to initialize
storage engine for bulk insertion */
DBUG_ASSERT(!(m_curr_row > m_curr_row_end));
- ulong estimated_rows= 0;
+ ha_rows estimated_rows= 0;
if (m_curr_row < m_curr_row_end)
estimated_rows= (m_rows_end - m_curr_row) / (m_curr_row_end - m_curr_row);
else if (m_curr_row == m_curr_row_end)
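
The final hunk widens estimated_rows from ulong to ha_rows, MariaDB's 64-bit row-count type, so the bulk-insert estimate cannot truncate on LLP64 platforms either. The estimate divides the bytes left in the buffer by the size of the first decoded row. A sketch of that arithmetic with plain stand-in types (the degenerate branches of the original are elided here):

#include <cstdint>

typedef uint64_t ha_rows;  // stand-in for MariaDB's ha_rows (ulonglong)

// Estimate how many rows the remaining buffer holds, assuming every row
// is as wide as the first one. Returns 0 when no full row was decoded.
static ha_rows estimate_rows(const unsigned char *curr_row,
                             const unsigned char *curr_row_end,
                             const unsigned char *rows_end)
{
  if (curr_row < curr_row_end)
    return (ha_rows) ((rows_end - curr_row) / (curr_row_end - curr_row));
  return 0;
}
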