Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig        |  18
-rw-r--r--  lib/Makefile       |   1
-rw-r--r--  lib/lz4.c          | 243
-rw-r--r--  lib/lz4_wrapper.c  | 137
4 files changed, 399 insertions, 0 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 067307276e..a8f8460d1d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -100,6 +100,24 @@ config SHA_PROG_HW_ACCEL
 	  is performed in hardware.
 
 endmenu
 
+menu "Compression Support"
+
+config LZ4
+	bool "Enable LZ4 decompression support"
+	help
+	  If this option is set, support for LZ4 compressed images
+	  is included. The LZ4 algorithm can run in-place as long as the
+	  compressed image is loaded to the end of the output buffer, and
+	  trades lower compression ratios for much faster decompression.
+
+	  NOTE: This implements the release version of the LZ4 frame
+	  format as generated by default by the 'lz4' command line tool.
+	  This is not the same as the outdated, less efficient legacy
+	  frame format currently (2015) implemented in the Linux kernel
+	  (generated by 'lz4 -l'). The two formats are incompatible.
+
+endmenu
+
 config ERRNO_STR
 	bool "Enable function for getting errno-related string message"
 	help
diff --git a/lib/Makefile b/lib/Makefile
index 96f832edd2..3eecefaa79 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_GZIP_COMPRESSED) += gzip.o
 obj-y += initcall.o
 obj-$(CONFIG_LMB) += lmb.o
 obj-y += ldiv.o
+obj-$(CONFIG_LZ4) += lz4_wrapper.o
 obj-$(CONFIG_MD5) += md5.o
 obj-y += net_utils.o
 obj-$(CONFIG_PHYSMEM) += physmem.o
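[Editor's note: the two frame formats mentioned in the Kconfig NOTE are easiest to tell apart by their magic numbers. The sketch below is illustrative only and not part of the patch; it assumes the usual little-endian magic values, 0x184D2204 for the modern frame format (defined as LZ4F_MAGIC in lz4_wrapper.c further down) and 0x184C2102 for the legacy format written by 'lz4 -l'.]

#include <stddef.h>
#include <stdint.h>

#define LZ4F_MAGIC       0x184D2204u  /* modern frame format (what this patch decodes) */
#define LZ4_LEGACY_MAGIC 0x184C2102u  /* legacy 'lz4 -l' format (not handled here) */

/* Hypothetical helper: classify a buffer by its leading magic number.
 * Returns 1 for a modern frame, 0 for a legacy frame, -1 for anything else. */
static int lz4_frame_kind(const uint8_t *buf, size_t len)
{
	uint32_t magic;

	if (len < 4)
		return -1;
	magic = buf[0] | (buf[1] << 8) | ((uint32_t)buf[2] << 16) |
		((uint32_t)buf[3] << 24);
	if (magic == LZ4F_MAGIC)
		return 1;
	if (magic == LZ4_LEGACY_MAGIC)
		return 0;
	return -1;
}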
diff --git a/lib/lz4.c b/lib/lz4.c
new file mode 100644
index 0000000000..f518341af5
--- /dev/null
+++ b/lib/lz4.c
@@ -0,0 +1,243 @@
+/*
+   LZ4 - Fast LZ compression algorithm
+   Copyright (C) 2011-2015, Yann Collet.
+
+   SPDX-License-Identifier: BSD-2-Clause
+
+   You can contact the author at :
+   - LZ4 source repository : https://github.com/Cyan4973/lz4
+   - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+
+/**************************************
+* Reading and writing into memory
+**************************************/
+
+/* customized version of memcpy, which may overwrite up to 7 bytes beyond dstEnd */
+static void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+    BYTE* d = (BYTE*)dstPtr;
+    const BYTE* s = (const BYTE*)srcPtr;
+    BYTE* e = (BYTE*)dstEnd;
+    do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
+}
+
+
+/**************************************
+* Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define COPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (COPYLENGTH+MINMATCH)
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+
+#define ML_BITS  4
+#define ML_MASK  ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/**************************************
+* Local Structures and types
+**************************************/
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
+
+
+/*******************************
+* Decompression functions
+*******************************/
+/*
+ * This generic decompression function cover all use cases.
+ * It shall be instantiated several times, using different sets of directives
+ * Note that it is essential this generic function is really inlined,
+ * in order to remove useless branches during compilation optimization.
+ */
+FORCE_INLINE int LZ4_decompress_generic(
+                 const char* const source,
+                 char* const dest,
+                 int inputSize,
+                 int outputSize,   /* If endOnInput==endOnInputSize, this value is the max size of Output Buffer. */
+
+                 int endOnInput,         /* endOnOutputSize, endOnInputSize */
+                 int partialDecoding,    /* full, partial */
+                 int targetOutputSize,   /* only used if partialDecoding==partial */
+                 int dict,               /* noDict, withPrefix64k, usingExtDict */
+                 const BYTE* const lowPrefix,  /* == dest if dict == noDict */
+                 const BYTE* const dictStart,  /* only if dict==usingExtDict */
+                 const size_t dictSize         /* note : = 0 if noDict */
+                 )
+{
+    /* Local Variables */
+    const BYTE* ip = (const BYTE*) source;
+    const BYTE* const iend = ip + inputSize;
+
+    BYTE* op = (BYTE*) dest;
+    BYTE* const oend = op + outputSize;
+    BYTE* cpy;
+    BYTE* oexit = op + targetOutputSize;
+    const BYTE* const lowLimit = lowPrefix - dictSize;
+
+    const BYTE* const dictEnd = (const BYTE*)dictStart + dictSize;
+    const size_t dec32table[] = {4, 1, 2, 1, 4, 4, 4, 4};
+    const size_t dec64table[] = {0, 0, 0, (size_t)-1, 0, 1, 2, 3};
+
+    const int safeDecode = (endOnInput==endOnInputSize);
+    const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
+
+
+    /* Special cases */
+    if ((partialDecoding) && (oexit> oend-MFLIMIT)) oexit = oend-MFLIMIT;                        /* targetOutputSize too high => decode everything */
+    if ((endOnInput) && (unlikely(outputSize==0))) return ((inputSize==1) && (*ip==0)) ? 0 : -1; /* Empty output buffer */
+    if ((!endOnInput) && (unlikely(outputSize==0))) return (*ip==0?1:-1);
+
+
+    /* Main Loop */
+    while (1)
+    {
+        unsigned token;
+        size_t length;
+        const BYTE* match;
+
+        /* get literal length */
+        token = *ip++;
+        if ((length=(token>>ML_BITS)) == RUN_MASK)
+        {
+            unsigned s;
+            do
+            {
+                s = *ip++;
+                length += s;
+            }
+            while (likely((endOnInput)?ip<iend-RUN_MASK:1) && (s==255));
+            if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)(op))) goto _output_error;  /* overflow detection */
+            if ((safeDecode) && unlikely((size_t)(ip+length)<(size_t)(ip))) goto _output_error;  /* overflow detection */
+        }
+
+        /* copy literals */
+        cpy = op+length;
+        if (((endOnInput) && ((cpy>(partialDecoding?oexit:oend-MFLIMIT)) || (ip+length>iend-(2+1+LASTLITERALS))) )
+            || ((!endOnInput) && (cpy>oend-COPYLENGTH)))
+        {
+            if (partialDecoding)
+            {
+                if (cpy > oend) goto _output_error;                          /* Error : write attempt beyond end of output buffer */
+                if ((endOnInput) && (ip+length > iend)) goto _output_error;  /* Error : read attempt beyond end of input buffer */
+            }
+            else
+            {
+                if ((!endOnInput) && (cpy != oend)) goto _output_error;      /* Error : block decoding must stop exactly there */
+                if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) goto _output_error;   /* Error : input must be consumed */
+            }
+            memcpy(op, ip, length);
+            ip += length;
+            op += length;
+            break;     /* Necessarily EOF, due to parsing restrictions */
+        }
+        LZ4_wildCopy(op, ip, cpy);
+        ip += length; op = cpy;
+
+        /* get offset */
+        match = cpy - LZ4_readLE16(ip); ip+=2;
+        if ((checkOffset) && (unlikely(match < lowLimit))) goto _output_error;   /* Error : offset outside destination buffer */
+
+        /* get matchlength */
+        length = token & ML_MASK;
+        if (length == ML_MASK)
+        {
+            unsigned s;
+            do
+            {
+                if ((endOnInput) && (ip > iend-LASTLITERALS)) goto _output_error;
+                s = *ip++;
+                length += s;
+            } while (s==255);
+            if ((safeDecode) && unlikely((size_t)(op+length)<(size_t)op)) goto _output_error;   /* overflow detection */
+        }
+        length += MINMATCH;
+
+        /* check external dictionary */
+        if ((dict==usingExtDict) && (match < lowPrefix))
+        {
+            if (unlikely(op+length > oend-LASTLITERALS)) goto _output_error;   /* doesn't respect parsing restriction */
+
+            if (length <= (size_t)(lowPrefix-match))
+            {
+                /* match can be copied as a single segment from external dictionary */
+                match = dictEnd - (lowPrefix-match);
+                memmove(op, match, length); op += length;
+            }
+            else
+            {
+                /* match encompass external dictionary and current segment */
+                size_t copySize = (size_t)(lowPrefix-match);
+                memcpy(op, dictEnd - copySize, copySize);
+                op += copySize;
+                copySize = length - copySize;
+                if (copySize > (size_t)(op-lowPrefix))   /* overlap within current segment */
+                {
+                    BYTE* const endOfMatch = op + copySize;
+                    const BYTE* copyFrom = lowPrefix;
+                    while (op < endOfMatch) *op++ = *copyFrom++;
+                }
+                else
+                {
+                    memcpy(op, lowPrefix, copySize);
+                    op += copySize;
+                }
+            }
+            continue;
+        }
+
+        /* copy repeated sequence */
+        cpy = op + length;
+        if (unlikely((op-match)<8))
+        {
+            const size_t dec64 = dec64table[op-match];
+            op[0] = match[0];
+            op[1] = match[1];
+            op[2] = match[2];
+            op[3] = match[3];
+            match += dec32table[op-match];
+            LZ4_copy4(op+4, match);
+            op += 8; match -= dec64;
+        } else { LZ4_copy8(op, match); op+=8; match+=8; }
+
+        if (unlikely(cpy>oend-12))
+        {
+            if (cpy > oend-LASTLITERALS) goto _output_error;   /* Error : last LASTLITERALS bytes must be literals */
+            if (op < oend-8)
+            {
+                LZ4_wildCopy(op, match, oend-8);
+                match += (oend-8) - op;
+                op = oend-8;
+            }
+            while (op<cpy) *op++ = *match++;
+        }
+        else
+            LZ4_wildCopy(op, match, cpy);
+        op=cpy;   /* correction */
+    }
+
+    /* end of decoding */
+    if (endOnInput)
+        return (int) (((char*)op)-dest);         /* Nb of output bytes decoded */
+    else
+        return (int) (((const char*)ip)-source); /* Nb of input bytes read */
+
+    /* Overflow error detected */
+_output_error:
+    return (int) (-(((const char*)ip)-source))-1;
+}
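[Editor's note: lib/lz4.c deliberately keeps only the generic decoder. Upstream LZ4 instantiates it through thin wrappers so that the directive arguments become compile-time constants and the unused branches fold away; lz4_wrapper.c below calls it directly with endOnInputSize/full/noDict for the same reason. The instantiation sketch below is illustrative only; the wrapper name is hypothetical and not part of the patch.]

/* Hypothetical instantiation: decode one raw LZ4 block with full bounds
 * checking on both input and output, and no external dictionary. */
static int lz4_decompress_block(const char *src, char *dst,
				int compressed_size, int max_decompressed_size)
{
	return LZ4_decompress_generic(src, dst, compressed_size,
				      max_decompressed_size,
				      endOnInputSize,   /* stop when the input is consumed */
				      full, 0,          /* no partial decoding */
				      noDict,           /* no dictionary, prefix == dst */
				      (const BYTE *)dst, NULL, 0);
}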
diff --git a/lib/lz4_wrapper.c b/lib/lz4_wrapper.c
new file mode 100644
index 0000000000..0739663fca
--- /dev/null
+++ b/lib/lz4_wrapper.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2015 Google Inc.
+ *
+ * SPDX-License-Identifier: GPL-2.0+ BSD-3-Clause
+ */
+
+#include <common.h>
+#include <compiler.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+static u16 LZ4_readLE16(const void *src) { return le16_to_cpu(*(u16 *)src); }
+static void LZ4_copy4(void *dst, const void *src) { *(u32 *)dst = *(u32 *)src; }
+static void LZ4_copy8(void *dst, const void *src) { *(u64 *)dst = *(u64 *)src; }
+
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+
+#define FORCE_INLINE static inline __attribute__((always_inline))
+
+/* Unaltered (except removing unrelated code) from github.com/Cyan4973/lz4. */
+#include "lz4.c"	/* #include for inlining, do not link! */
+
+#define LZ4F_MAGIC 0x184D2204
+
+struct lz4_frame_header {
+	u32 magic;
+	union {
+		u8 flags;
+		struct {
+			u8 reserved0:2;
+			u8 has_content_checksum:1;
+			u8 has_content_size:1;
+			u8 has_block_checksum:1;
+			u8 independent_blocks:1;
+			u8 version:2;
+		};
+	};
+	union {
+		u8 block_descriptor;
+		struct {
+			u8 reserved1:4;
+			u8 max_block_size:3;
+			u8 reserved2:1;
+		};
+	};
+	/* + u64 content_size iff has_content_size is set */
+	/* + u8 header_checksum */
+} __packed;
+
+struct lz4_block_header {
+	union {
+		u32 raw;
+		struct {
+			u32 size:31;
+			u32 not_compressed:1;
+		};
+	};
+	/* + size bytes of data */
+	/* + u32 block_checksum iff has_block_checksum is set */
+} __packed;
+
+int ulz4fn(const void *src, size_t srcn, void *dst, size_t *dstn)
+{
+	const void *end = dst + *dstn;
+	const void *in = src;
+	void *out = dst;
+	int has_block_checksum;
+	int ret;
+	*dstn = 0;
+
+	{ /* With in-place decompression the header may become invalid later. */
+		const struct lz4_frame_header *h = in;
+
+		if (srcn < sizeof(*h) + sizeof(u64) + sizeof(u8))
+			return -EINVAL;	/* input overrun */
+
+		/* We assume there's always only a single, standard frame. */
+		if (le32_to_cpu(h->magic) != LZ4F_MAGIC || h->version != 1)
+			return -EPROTONOSUPPORT;	/* unknown format */
+		if (h->reserved0 || h->reserved1 || h->reserved2)
+			return -EINVAL;	/* reserved must be zero */
+		if (!h->independent_blocks)
+			return -EPROTONOSUPPORT;	/* we can't support this yet */
+		has_block_checksum = h->has_block_checksum;
+
+		in += sizeof(*h);
+		if (h->has_content_size)
+			in += sizeof(u64);
+		in += sizeof(u8);
+	}
+
+	while (1) {
+		struct lz4_block_header b = { .raw = le32_to_cpu(*(u32 *)in) };
+		in += sizeof(struct lz4_block_header);
+
+		if (in - src + b.size > srcn) {
+			ret = -EINVAL;	/* input overrun */
+			break;
+		}
+
+		if (!b.size) {
+			ret = 0;	/* decompression successful */
+			break;
+		}
+
+		if (b.not_compressed) {
+			size_t size = min((ptrdiff_t)b.size, end - out);
+			memcpy(out, in, size);
+			out += size;
+			if (size < b.size) {
+				ret = -ENOBUFS;	/* output overrun */
+				break;
+			}
+		} else {
+			/* constant folding essential, do not touch params! */
+			ret = LZ4_decompress_generic(in, out, b.size,
+					end - out, endOnInputSize,
+					full, 0, noDict, out, NULL, 0);
+			if (ret < 0) {
+				ret = -EPROTO;	/* decompression error */
+				break;
+			}
+			out += ret;
+		}
+
+		in += b.size;
+		if (has_block_checksum)
+			in += sizeof(u32);
+	}
+
+	*dstn = out - dst;
+	return ret;
+}
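[Editor's note: for reference, a caller passes ulz4fn() the compressed frame, its size, the output buffer, and (by reference) the output buffer size, which is updated to the number of bytes actually produced. A minimal usage sketch follows; the function and buffer names are hypothetical. Per the Kconfig note, the compressed data may even sit at the tail end of the output buffer for in-place decompression.]

/* Hypothetical caller: decompress one LZ4 frame into a fixed-size buffer. */
static int decompress_image(const void *comp, size_t comp_size,
			    void *load_buf, size_t load_buf_size)
{
	size_t out_size = load_buf_size;	/* in: capacity, out: bytes written */
	int ret;

	ret = ulz4fn(comp, comp_size, load_buf, &out_size);
	if (ret)
		return ret;	/* -EINVAL/-EPROTONOSUPPORT/-EPROTO/-ENOBUFS as above */

	printf("uncompressed %zu bytes\n", out_size);
	return 0;
}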