summaryrefslogtreecommitdiff
path: root/sql/field.h
diff options
context:
space:
mode:
authorVarun Gupta <varun.gupta@mariadb.com>2020-10-29 21:38:06 +0530
committerVarun Gupta <varun.gupta@mariadb.com>2020-10-29 22:19:51 +0530
commit7ed231d13e86db11982d3d78c9c9681a88107a5b (patch)
treed854f82fde5c34bec95f2fe848bba79e5c253671 /sql/field.h
parent2e5450af052040848042c6eae4f03efa23c5f8fc (diff)
downloadmariadb-git-10.2-mdev24033.tar.gz
MDEV-24033: SIGSEGV in __memcmp_avx2_movbe from queue_insert | SIGSEGV in __memcmp_avx2_movbe from native_compare10.2-mdev24033
The issue here was that the system variable max_sort_length was being applied to DECIMAL values, truncating them to the number of bytes set by max_sort_length. This led to a buffer overflow: the values were written to the buffer without truncation, but the offset was then advanced by only the number of bytes (set by max_sort_length) needed for comparison. The fix is to not apply max_sort_length to fixed-size types like INT and DECIMAL, and to apply it only to CHAR, VARCHAR, TEXT and BLOB columns.
Diffstat (limited to 'sql/field.h')
-rw-r--r--sql/field.h3
1 file changed, 3 insertions, 0 deletions
diff --git a/sql/field.h b/sql/field.h
index 18e44f1d9d4..7f4638567b4 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -1339,6 +1339,8 @@ public:
virtual uint max_packed_col_length(uint max_length)
{ return max_length;}
+ virtual bool is_packable() const { return false; }
+
uint offset(uchar *record) const
{
return (uint) (ptr - record);
@@ -1827,6 +1829,7 @@ public:
bool can_optimize_range(const Item_bool_func *cond,
const Item *item,
bool is_eq_func) const;
+ bool is_packable() const override { return true; }
};
/* base class for float and double and decimal (old one) */