author     Adam Ierymenko <adam.ierymenko@gmail.com>  2018-01-10 16:55:07 -0800
committer  Adam Ierymenko <adam.ierymenko@gmail.com>  2018-01-10 16:55:07 -0800
commit     5bc4399a589ff4dfe43086fb5a2a3842208c22fa (patch)
tree       0ee929c51f1d0d79aba8690cfdf33ac5a8fa226f /node
parent     f17cc1c6d80657fe8cd98891cf0bb1bbf3ac62bf (diff)
cleanup
Diffstat (limited to 'node')
-rw-r--r--  node/Packet.cpp  188
1 file changed, 8 insertions(+), 180 deletions(-)
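
The hunks below strip the upstream lz4 comment blocks and dead `#if 0` regions, and re-scope the surviving LZ4 entry points (LZ4_resetStream, LZ4_compressBound, the LZ4_read/write helpers) as static inline functions local to Packet.cpp. A minimal sketch of that recurring pattern, with declaration and definition merged here for brevity rather than copied verbatim from the hunks:

    /* Before: upstream lz4.h exports the symbol through LZ4LIB_API */
    LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);

    /* After this commit: kept file-local to Packet.cpp, no exported symbol */
    static inline void LZ4_resetStream (LZ4_stream_t* streamPtr)
    {
        /* MEM_INIT is #defined to memset elsewhere in this file */
        MEM_INIT(streamPtr, 0, sizeof(LZ4_stream_t));
    }
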
diff --git a/node/Packet.cpp b/node/Packet.cpp
index de059054..2eeceffa 100644
--- a/node/Packet.cpp
+++ b/node/Packet.cpp
@@ -155,53 +155,22 @@ namespace {
A library is provided to take care of it, see lz4frame.h.
*/
-#define LZ4LIB_API
-
-/*========== Version =========== */
#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
#define LZ4_VERSION_MINOR 7 /* for new (non-breaking) interface capabilities */
#define LZ4_VERSION_RELEASE 5 /* for tweaks, bug-fixes, or development */
-
#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
-
#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
#define LZ4_QUOTE(str) #str
#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)
-
-/*!
- * LZ4_MEMORY_USAGE :
- * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
- * Increasing memory usage improves compression ratio
- * Reduced memory usage can improve speed, due to cache effect
- * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
- */
#define LZ4_MEMORY_USAGE 14
-
#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
-/*-*********************************************
-* Streaming Compression Functions
-***********************************************/
typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
-/*! LZ4_resetStream() :
- * An LZ4_stream_t structure can be allocated once and re-used multiple times.
- * Use this function to init an allocated `LZ4_stream_t` structure and start a new compression.
- */
-LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
-
-/*^**********************************************
- * !!!!!! STATIC LINKING ONLY !!!!!!
- ***********************************************/
-/*-************************************
- * Private definitions
- **************************************
- * Do not use these definitions.
- * They are exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
- * Using these definitions will expose code to API and/or ABI break in future versions of the library.
- **************************************/
+static inline void LZ4_resetStream (LZ4_stream_t* streamPtr);
+
#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
@@ -222,14 +191,6 @@ typedef struct {
size_t prefixSize;
} LZ4_streamDecode_t_internal;
-/*!
- * LZ4_stream_t :
- * information structure to track an LZ4 stream.
- * init this structure before first use.
- * note : only use in association with static linking !
- * this definition is not API/ABI safe,
- * and may change in a future version !
- */
#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE-3)) + 4)
#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
union LZ4_stream_u {
@@ -237,14 +198,6 @@ union LZ4_stream_u {
LZ4_stream_t_internal internal_donotuse;
} ; /* previously typedef'd to LZ4_stream_t */
-/*!
- * LZ4_streamDecode_t :
- * information structure to track an LZ4 stream during decompression.
- * init this structure using LZ4_setStreamDecode (or memset()) before first use
- * note : only use in association with static linking !
- * this definition is not API/ABI safe,
- * and may change in a future version !
- */
#define LZ4_STREAMDECODESIZE_U64 4
#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
union LZ4_streamDecode_u {
@@ -253,33 +206,7 @@ union LZ4_streamDecode_u {
} ; /* previously typedef'd to LZ4_streamDecode_t */
#ifndef HEAPMODE
-# define HEAPMODE 0
-#endif
-
-//#define ACCELERATION_DEFAULT 1
-
-/* LZ4_FORCE_MEMORY_ACCESS
- * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
- * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
- * The below switch allow to select different access method for improved performance.
- * Method 0 (default) : use `memcpy()`. Safe and portable.
- * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
- * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
- * Method 2 : direct access. This method is portable but violate C standard.
- * It can generate buggy code on targets which generate assembly depending on alignment.
- * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
- * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
- * Prefer these methods in priority order (0 > 1 > 2)
- */
-#if 0
-#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally, on command line for example */
-# if defined(__GNUC__) && ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
-# define LZ4_FORCE_MEMORY_ACCESS 2
-# elif defined(__INTEL_COMPILER) || \
- (defined(__GNUC__) && ( defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7S__) ))
-# define LZ4_FORCE_MEMORY_ACCESS 1
-# endif
-#endif
+#define HEAPMODE 0
#endif
#ifdef ZT_NO_TYPE_PUNNING
@@ -288,61 +215,18 @@ union LZ4_streamDecode_u {
#define LZ4_FORCE_MEMORY_ACCESS 2
#endif
-/*
- * LZ4_FORCE_SW_BITCOUNT
- * Define this parameter if your target system or compiler does not support hardware bit count
- */
#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for Windows CE does not support Hardware bit count */
-# define LZ4_FORCE_SW_BITCOUNT
-#endif
-
-/*-************************************
-* Compiler Options
-**************************************/
-#if 0
-#ifdef _MSC_VER /* Visual Studio */
-# define FORCE_INLINE static __forceinline
-# include <intrin.h>
-# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
-# pragma warning(disable : 4293) /* disable: C4293: too large shift (32-bits) */
-#else
-# if defined(__GNUC__) || defined(__clang__)
-# define FORCE_INLINE static inline __attribute__((always_inline))
-# elif defined(__cplusplus) || (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
-# define FORCE_INLINE static inline
-# else
-# define FORCE_INLINE static
-# endif
-#endif /* _MSC_VER */
+#define LZ4_FORCE_SW_BITCOUNT
#endif
#ifndef FORCE_INLINE
#define FORCE_INLINE static inline
#endif
-#if 0
-#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
-# define expect(expr,value) (__builtin_expect ((expr),(value)) )
-#else
-# define expect(expr,value) (expr)
-#endif
-
-#define likely(expr) expect((expr) != 0, 1)
-#define unlikely(expr) expect((expr) != 0, 0)
-#endif
-
-/*-************************************
-* Memory routines
-**************************************/
-//#include <stdlib.h> /* malloc, calloc, free */
#define ALLOCATOR(n,s) calloc(n,s)
#define FREEMEM free
-//#include <string.h> /* memset, memcpy */
#define MEM_INIT memset
-/*-************************************
-* Basic Types
-**************************************/
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
@@ -351,65 +235,46 @@ typedef uint64_t U64;
typedef uintptr_t uptrval;
typedef uintptr_t reg_t;
-/*-************************************
-* Reading and writing into memory
-**************************************/
-static unsigned LZ4_isLittleEndian(void)
+static inline unsigned LZ4_isLittleEndian(void)
{
const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
return one.c[0];
}
#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
-/* lie to the compiler about data alignment; use with caution */
-
static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
-
static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
-
#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
-
-/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
-/* currently only defined for gcc and icc */
typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
-
static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
-
static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
-
#else /* safe and portable access through memcpy() */
-
static inline U16 LZ4_read16(const void* memPtr)
{
U16 val; ZT_FAST_MEMCPY(&val, memPtr, sizeof(val)); return val;
}
-
static inline U32 LZ4_read32(const void* memPtr)
{
U32 val; ZT_FAST_MEMCPY(&val, memPtr, sizeof(val)); return val;
}
-
static inline reg_t LZ4_read_ARCH(const void* memPtr)
{
reg_t val; ZT_FAST_MEMCPY(&val, memPtr, sizeof(val)); return val;
}
-
static inline void LZ4_write16(void* memPtr, U16 value)
{
ZT_FAST_MEMCPY(memPtr, &value, sizeof(value));
}
-
static inline void LZ4_write32(void* memPtr, U32 value)
{
ZT_FAST_MEMCPY(memPtr, &value, sizeof(value));
}
-
#endif /* LZ4_FORCE_MEMORY_ACCESS */
static inline U16 LZ4_readLE16(const void* memPtr)
@@ -438,19 +303,14 @@ static inline void LZ4_copy8(void* dst, const void* src)
ZT_FAST_MEMCPY(dst,src,8);
}
-/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
static inline void LZ4_wildCopy(void* dstPtr, const void* srcPtr, void* dstEnd)
{
BYTE* d = (BYTE*)dstPtr;
const BYTE* s = (const BYTE*)srcPtr;
BYTE* const e = (BYTE*)dstEnd;
-
do { LZ4_copy8(d,s); d+=8; s+=8; } while (d<e);
}
-/*-************************************
-* Common Constants
-**************************************/
#define MINMATCH 4
#define WILDCOPYLENGTH 8
@@ -470,14 +330,8 @@ static const int LZ4_minLength = (MFLIMIT+1);
#define RUN_BITS (8-ML_BITS)
#define RUN_MASK ((1U<<RUN_BITS)-1)
-/*-************************************
-* Common Utils
-**************************************/
#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
-/*-************************************
-* Common functions
-**************************************/
static inline unsigned LZ4_NbCommonBytes (register reg_t val)
{
if (LZ4_isLittleEndian()) {
@@ -554,15 +408,9 @@ static inline unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE
return (unsigned)(pIn - pStart);
}
-/*-************************************
-* Local Constants
-**************************************/
static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */
-/*-************************************
-* Local Structures and types
-**************************************/
typedef enum { notLimited = 0, limitedOutput = 1 } limitedOutput_directive;
typedef enum { byPtr, byU32, byU16 } tableType_t;
@@ -572,17 +420,8 @@ typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
typedef enum { full = 0, partial = 1 } earlyEnd_directive;
-/*-************************************
-* Local Utils
-**************************************/
-//int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
-//const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
-int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
-//int LZ4_sizeofState() { return LZ4_STREAMSIZE; }
-
-/*-******************************
-* Compression functions
-********************************/
+static inline int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+
static inline U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
{
if (tableType == byU16)
@@ -637,8 +476,6 @@ FORCE_INLINE const BYTE* LZ4_getPosition(const BYTE* p, void* tableBase, tableTy
return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
}
-/** LZ4_compress_generic() :
- inlined, to ensure branches are decided at compilation time */
FORCE_INLINE int LZ4_compress_generic(
LZ4_stream_t_internal* const cctx,
const char* const source,
@@ -874,20 +711,11 @@ static inline int LZ4_compress_fast(const char* source, char* dest, int inputSiz
return result;
}
-void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
+static inline void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
{
MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t));
}
-/*-*****************************
-* Decompression functions
-*******************************/
-/*! LZ4_decompress_generic() :
- * This generic decompression function cover all use cases.
- * It shall be instantiated several times, using different sets of directives
- * Note that it is important this generic function is really inlined,
- * in order to remove useless branches during compilation optimization.
- */
FORCE_INLINE int LZ4_decompress_generic(
const char* const source,
char* const dest,