Diffstat (limited to 'sql')
-rw-r--r--  sql/field.cc       6
-rw-r--r--  sql/my_decimal.cc  11
-rw-r--r--  sql/my_decimal.h   3
3 files changed, 11 insertions, 9 deletions
diff --git a/sql/field.cc b/sql/field.cc
index c887a5f1c9b..cb23ae4fe9f 100644
--- a/sql/field.cc
+++ b/sql/field.cc
@@ -2583,7 +2583,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
   DBUG_ENTER("Field_new_decimal::store_value");
 #ifndef DBUG_OFF
   {
-    char dbug_buff[DECIMAL_MAX_STR_LENGTH+1];
+    char dbug_buff[DECIMAL_MAX_STR_LENGTH+2];
     DBUG_PRINT("enter", ("value: %s", dbug_decimal_as_string(dbug_buff, decimal_value)));
   }
 #endif
@@ -2598,7 +2598,7 @@ bool Field_new_decimal::store_value(const my_decimal *decimal_value)
   }
 #ifndef DBUG_OFF
   {
-    char dbug_buff[DECIMAL_MAX_STR_LENGTH+1];
+    char dbug_buff[DECIMAL_MAX_STR_LENGTH+2];
     DBUG_PRINT("info", ("saving with precision %d scale: %d value %s",
                         (int)precision, (int)dec,
                         dbug_decimal_as_string(dbug_buff, decimal_value)));
@@ -2673,7 +2673,7 @@ int Field_new_decimal::store(const char *from, uint length,
   }
 
 #ifndef DBUG_OFF
-  char dbug_buff[DECIMAL_MAX_STR_LENGTH+1];
+  char dbug_buff[DECIMAL_MAX_STR_LENGTH+2];
   DBUG_PRINT("enter", ("value: %s",
                        dbug_decimal_as_string(dbug_buff, &decimal_value)));
 #endif
diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc
index 3aa01880b83..a38dc341684 100644
--- a/sql/my_decimal.cc
+++ b/sql/my_decimal.cc
@@ -95,10 +95,11 @@ int my_decimal2string(uint mask, const my_decimal *d,
     UNSIGNED. Hence the buffer for a ZEROFILLed value is the length
     the user requested, plus one for a possible decimal point, plus
     one if the user only wanted decimal places, but we force a leading
-    zero on them. Because the type is implicitly UNSIGNED, we do not
-    need to reserve a character for the sign. For all other cases,
-    fixed_prec will be 0, and my_decimal_string_length() will be called
-    instead to calculate the required size of the buffer.
+    zero on them, plus one for the '\0' terminator. Because the type
+    is implicitly UNSIGNED, we do not need to reserve a character for
+    the sign. For all other cases, fixed_prec will be 0, and
+    my_decimal_string_length() will be called instead to calculate the
+    required size of the buffer.
   */
   int length= (fixed_prec
                ? (fixed_prec + ((fixed_prec == fixed_dec) ? 1 : 0) + 1)
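To make the buffer arithmetic in the revised comment concrete, here is a worked example (illustrative only, not part of the patch) for a hypothetical DECIMAL(4,4) UNSIGNED ZEROFILL column holding 0.1234. The rendered string is "0.1234", so the buffer needs:

    4 (requested digits) + 1 (forced leading zero, since fixed_prec == fixed_dec)
      + 1 (decimal point) + 1 ('\0' terminator) = 7 bytes,

with no byte reserved for a sign, because ZEROFILL implies UNSIGNED.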
@@ -275,7 +276,7 @@ print_decimal_buff(const my_decimal *dec, const uchar* ptr, int length)
 
 const char *dbug_decimal_as_string(char *buff, const my_decimal *val)
 {
-  int length= DECIMAL_MAX_STR_LENGTH;
+  int length= DECIMAL_MAX_STR_LENGTH + 1; /* minimum size for buff */
   if (!val)
     return "NULL";
   (void)decimal2string((decimal_t*) val, buff, &length, 0,0,0);
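A minimal usage sketch of the helper after this change (illustrative only; dec_val is a hypothetical, already-initialized my_decimal, and the field.cc callers above size their buffers one byte larger than this documented minimum):

    my_decimal dec_val;                      /* hypothetical value, assumed to be filled in */
    char buff[DECIMAL_MAX_STR_LENGTH + 1];   /* the minimum size documented above */
    DBUG_PRINT("info", ("value: %s", dbug_decimal_as_string(buff, &dec_val)));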
diff --git a/sql/my_decimal.h b/sql/my_decimal.h
index 21669e82c44..2c13142bb60 100644
--- a/sql/my_decimal.h
+++ b/sql/my_decimal.h
@@ -55,7 +55,7 @@ C_MODE_END
 
 /**
   maximum length of string representation (number of maximum decimal
-  digits + 1 position for sign + 1 position for decimal point)
+  digits + 1 position for sign + 1 position for decimal point, no terminator)
 */
 #define DECIMAL_MAX_STR_LENGTH (DECIMAL_MAX_POSSIBLE_PRECISION + 2)
 
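Reading the macro together with its revised comment gives the worst-case sizing arithmetic (illustrative, not part of the patch):

    DECIMAL_MAX_STR_LENGTH = DECIMAL_MAX_POSSIBLE_PRECISION (digits)
                             + 1 (sign) + 1 (decimal point), '\0' excluded,

so a buffer that also stores the terminator needs at least DECIMAL_MAX_STR_LENGTH + 1 bytes, which is exactly what dbug_decimal_as_string() above assumes.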
@@ -212,6 +212,7 @@ inline uint32 my_decimal_precision_to_length(uint precision, uint8 scale,
 inline
 int my_decimal_string_length(const my_decimal *d)
 {
+  /* length of string representation including terminating '\0' */
   return decimal_string_size(d);
 }
 
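Taken together, the two hunks in this header distinguish the two sizing conventions now documented: the compile-time worst case (DECIMAL_MAX_STR_LENGTH, terminator excluded) and the per-value exact size (my_decimal_string_length(), terminator included). A side-by-side sketch, illustrative only, with val standing in for some initialized my_decimal:

    char worst_case[DECIMAL_MAX_STR_LENGTH + 1];     /* caller adds room for the '\0' */
    int exact_size= my_decimal_string_length(&val);  /* '\0' already counted in the result */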