path: root/sql/sql_class.h
author     Varun Gupta <varun.gupta@mariadb.com>  2020-10-29 21:38:06 +0530
committer  Varun Gupta <varun.gupta@mariadb.com>  2020-10-29 22:19:51 +0530
commit     7ed231d13e86db11982d3d78c9c9681a88107a5b (patch)
tree       d854f82fde5c34bec95f2fe848bba79e5c253671 /sql/sql_class.h
parent     2e5450af052040848042c6eae4f03efa23c5f8fc (diff)
download   mariadb-git-10.2-mdev24033.tar.gz
MDEV-24033: SIGSEGV in __memcmp_avx2_movbe from queue_insert | SIGSEGV in __memcmp_avx2_movbe from native_compare (branch 10.2-mdev24033)
The issue here was that the system variable max_sort_length was also being applied to decimals, truncating their sort values to the number of bytes set by max_sort_length. This led to a buffer overflow: the full value was written to the sort buffer without truncation, but the offset was then advanced only by the truncated number of bytes (set by max_sort_length) used for comparison. The fix is to not apply max_sort_length to fixed-size types such as INT and DECIMAL, and to apply it only to variable-size types such as CHAR, VARCHAR, TEXT and BLOB.
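As a rough illustration of the approach (a minimal stand-alone sketch, not the commit's actual filesort code; the sort_key_length helper and its signature are assumptions), the new type flag lets the key length be capped by max_sort_length only for variable-size fields:

    #include <algorithm>

    /* Simplified copy of the struct added in this commit, plus a hypothetical
       helper. Fixed-size values (INT, DECIMAL) keep their full length, so the
       bytes written and the comparison offset always agree; only variable-size
       values (CHAR, VARCHAR, TEXT, BLOB) may be truncated to max_sort_length. */
    struct SORT_FIELD_ATTR
    {
      unsigned int length;                          /* Length of sort field */
      unsigned int suffix_length;                   /* Length suffix (0-4) */
      enum Type { FIXED_SIZE, VARIABLE_SIZE } type;
      bool is_variable_sized() { return type == VARIABLE_SIZE; }
    };

    static unsigned int sort_key_length(SORT_FIELD_ATTR &attr,
                                        unsigned int max_sort_length)
    {
      return attr.is_variable_sized() ? std::min(attr.length, max_sort_length)
                                      : attr.length; /* never truncate fixed-size */
    }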
Diffstat (limited to 'sql/sql_class.h')
-rw-r--r--  sql/sql_class.h  2
1 file changed, 2 insertions, 0 deletions
diff --git a/sql/sql_class.h b/sql/sql_class.h
index d693cfa2727..2727b4c84d5 100644
--- a/sql/sql_class.h
+++ b/sql/sql_class.h
@@ -5449,6 +5449,8 @@ struct SORT_FIELD_ATTR
{
uint length; /* Length of sort field */
uint suffix_length; /* Length suffix (0-4) */
+ enum Type { FIXED_SIZE, VARIABLE_SIZE } type;
+ bool is_variable_sized() { return type == VARIABLE_SIZE; }
};
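For context, a hedged sketch of how the new flag might be assigned when sort fields are set up (the classify_sort_field helper and its is_string_like parameter are assumptions for illustration, not part of this commit):

    /* Hypothetical setup step: mark only string-like fields as VARIABLE_SIZE,
       so that later only those fields are ever truncated to max_sort_length. */
    void classify_sort_field(SORT_FIELD_ATTR *attr, bool is_string_like)
    {
      attr->type= is_string_like ? SORT_FIELD_ATTR::VARIABLE_SIZE
                                 : SORT_FIELD_ATTR::FIXED_SIZE;
    }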