diff --git a/2020/include/bits.h b/2020/include/bits.h
new file mode 100644
index 0000000..8eba8b5
--- /dev/null
+++ b/2020/include/bits.h
@@ -0,0 +1,521 @@
+/* bits.h - bit manipulation functions.
+ *
+ * Copyright (C) 2021-2022 Bruno Raoult ("br")
+ * Licensed under the GNU General Public License v3.0 or later.
+ * Some rights reserved. See COPYING.
+ *
+ * You should have received a copy of the GNU General Public License along with this
+ * program. If not, see <https://www.gnu.org/licenses/>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#ifndef BITS_H
+#define BITS_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* next include will define __WORDSIZE: 32 or 64
+ */
+#include <bits/wordsize.h>
+
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+/* no plan to support 32bits for now...
+ */
+#if __WORDSIZE != 64
+#error "Only 64 bits word size supported."
+#endif
+
+/* fixed-size types
+ */
+typedef int64_t s64;
+typedef int32_t s32;
+typedef int16_t s16;
+typedef int8_t s8;
+
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t u8;
+
+/* convenience types
+ */
+typedef unsigned long int ulong;
+typedef unsigned int uint;
+typedef unsigned short ushort;
+typedef unsigned char uchar;
+
+/* char is a special case, as it can be signed or unsigned
+ */
+typedef signed char schar;
+
+/* count trailing zeroes : 00101000 -> 3
+ *                              ^^^
+ */
+static __always_inline int ctz64(u64 n)
+{
+# if __has_builtin(__builtin_ctzl)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin ctzl.\n");
+# endif
+    return __builtin_ctzl(n);
+
+# elif __has_builtin(__builtin_clzl)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin clzl.\n");
+# endif
+    return __WORDSIZE - (__builtin_clzl(n & -n) + 1);
+
+# else
+# ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+# endif
+    return popcount64((n & -n) - 1);
+# endif
+}
+
+static __always_inline int ctz32(u32 n)
+{
+# if __has_builtin(__builtin_ctz)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin ctz.\n");
+# endif
+    return __builtin_ctz(n);
+
+# elif __has_builtin(__builtin_clz)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin clz.\n");
+# endif
+    return 32 - (__builtin_clz(n & -n) + 1);
+
+# else
+# ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+# endif
+    return popcount32((n & -n) - 1);
+# endif
+}
+
+/* clz - count leading zeroes : 00101000 -> 2
+ *                              ^^
+ */
+static __always_inline int clz64(u64 n)
+{
+# if __has_builtin(__builtin_clzl)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin.\n");
+# endif
+    return __builtin_clzl(n);
+
+# else
+# ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+# endif
+    u64 r, q;
+
+    r = (n > 0xFFFFFFFF) << 5; n >>= r;
+    q = (n > 0xFFFF) << 4; n >>= q; r |= q;
+    q = (n > 0xFF  ) << 3; n >>= q; r |= q;
+    q = (n > 0xF   ) << 2; n >>= q; r |= q;
+    q = (n > 0x3   ) << 1; n >>= q; r |= q;
+    r |= (n >> 1);
+    return 64 - r - 1;
+# endif
+}
+
+static __always_inline int clz32(u32 n)
+{
+# if __has_builtin(__builtin_clz)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin.\n");
+# endif
+    return __builtin_clz(n);
+
+# else
+# ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+# endif
+    u32 r, q;
+
+    r = (n > 0xFFFF) << 4; n >>= r;
+    q = (n > 0xFF  ) << 3; n >>= q; r |= q;
+    q = (n > 0xF   ) << 2; n >>= q; r |= q;
+    q = (n > 0x3   ) << 1; n >>= q; r |= q;
+    r |= (n >> 1);
+    return 32 - r - 1;
+# endif
+}
+
+/* fls - find last set : 00101000 -> 6
+ *                         ^
+ */
+static __always_inline int fls64(u64 n)
+{
+    if (!n)
+        return 0;
+    return 64 - clz64(n);
+}
+
+static __always_inline int fls32(u32 n)
+{
+    if (!n)
+        return 0;
+    return 32 - clz32(n);
+}
+
+/* find first set : 00101000 -> 4
+ *                       ^
+ */
+static __always_inline uint ffs64(u64 n)
+{
+# if __has_builtin(__builtin_ffsl)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin ffsl.\n");
+# endif
+    return __builtin_ffsl(n);
+
+# elif __has_builtin(__builtin_ctzl)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin ctzl.\n");
+# endif
+    if (n == 0)
+        return (0);
+    return __builtin_ctzl(n) + 1;
+
+# else
+# ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+# endif
+    return popcount64(n ^ ~-n);
+# endif
+}
+
+static __always_inline uint ffs32(u32 n)
+{
+# if __has_builtin(__builtin_ffs)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin ffs.\n");
+# endif
+    return __builtin_ffs(n);
+
+# elif __has_builtin(__builtin_ctz)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin ctz.\n");
+# endif
+    if (n == 0)
+        return (0);
+    return __builtin_ctz(n) + 1;
+
+# else
+# ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+# endif
+    return popcount32(n ^ ~-n);
+# endif
+}
+
+/* count set bits: 10101000 -> 3
+ *                 ^ ^ ^
+ */
+static __always_inline int popcount64(u64 n)
+{
+# if __has_builtin(__builtin_popcountl)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin.\n");
+# endif
+    return __builtin_popcountl(n);
+
+# else
+# ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+# endif
+    int count = 0;
+    while (n) {
+        count++;
+        n &= (n - 1);
+    }
+    return count;
+# endif
+}
+
+static __always_inline int popcount32(u32 n)
+{
+# if __has_builtin(__builtin_popcount)
+# ifdef DEBUG_BITS
+    log_f(1, "builtin.\n");
+# endif
+    return __builtin_popcount(n);
+
+# else
+# ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+# endif
+    int count = 0;
+    while (n) {
+        count++;
+        n &= (n - 1);
+    }
+    return count;
+# endif
+}
+
+/* rolXX/rorXX are taken from the Linux kernel and are:
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u64 rol64(u64 word, unsigned int shift)
+{
+    return (word << (shift & 63)) | (word >> ((-shift) & 63));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u64 ror64(u64 word, unsigned int shift)
+{
+    return (word >> (shift & 63)) | (word << ((-shift) & 63));
+}
+
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u32 rol32(u32 word, unsigned int shift)
+{
+    return (word << (shift & 31)) | (word >> ((-shift) & 31));
+}
+
+/**
+ * ror32 - rotate a 32-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u32 ror32(u32 word, unsigned int shift)
+{
+    return (word >> (shift & 31)) | (word << ((-shift) & 31));
+}
+
+/**
+ * rol16 - rotate a 16-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u16 rol16(u16 word, unsigned int shift)
+{
+    return (word << (shift & 15)) | (word >> ((-shift) & 15));
+}
+
+/**
+ * ror16 - rotate a 16-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u16 ror16(u16 word, unsigned int shift)
+{
+    return (word >> (shift & 15)) | (word << ((-shift) & 15));
+}
+
+/**
+ * rol8 - rotate an 8-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u8 rol8(u8 word, unsigned int shift)
+{
+    return (word << (shift & 7)) | (word >> ((-shift) & 7));
+}
+
+/**
+ * ror8 - rotate an 8-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u8 ror8(u8 word, unsigned int shift)
+{
+    return (word >> (shift & 7)) | (word << ((-shift) & 7));
+}
+
+/**
+ * __ilog2_u32() / __ilog2_u64() - non-constant log base 2 helpers for ilog2()
+ */
+static __always_inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+    return fls32(n) - 1;
+}
+
+static
__always_inline __attribute__((const)) +int __ilog2_u64(u64 n) +{ + return fls64(n) - 1; +} + +/** + * is_power_of_2() - check if a value is a power of two + * @n: the value to check + * + * Determine whether some value is a power of two, where zero is + * *not* considered a power of two. + * Return: true if @n is a power of 2, otherwise false. + */ +static inline __attribute__((const)) +bool is_power_of_2(unsigned long n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + +/** + * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value + * @n: parameter + * + * constant-capable log of base 2 calculation + * - this can be used to initialise global variables from constant data, hence + * the massive ternary operator construction + * + * selects the appropriately-sized optimised version depending on sizeof(n) + */ +#define ilog2(n) \ +( \ + __builtin_constant_p(n) ? \ + ((n) < 2 ? 0 : \ + 63 - __builtin_clzll(n)) : \ + (sizeof(n) <= 4) ? \ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ + ) + +/** + * roundup_pow_of_two - round the given value up to nearest power of two + * @n: parameter + * + * round the given value up to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define roundup_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 1) ? 1 : \ + (1UL << (ilog2((n) - 1) + 1)) \ + ) : \ + __roundup_pow_of_two(n) \ + ) + +/** + * rounddown_pow_of_two - round the given value down to nearest power of two + * @n: parameter + * + * round the given value down to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define rounddown_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (1UL << ilog2(n))) : \ + __rounddown_pow_of_two(n) \ + ) + +static inline __attribute_const__ +int __order_base_2(unsigned long n) +{ + return n > 1 ? ilog2(n - 1) + 1 : 0; +} + +/** + * order_base_2 - calculate the (rounded up) base 2 order of the argument + * @n: parameter + * + * The first few values calculated by this routine: + * ob2(0) = 0 + * ob2(1) = 0 + * ob2(2) = 1 + * ob2(3) = 2 + * ob2(4) = 2 + * ob2(5) = 3 + * ... and so on. + */ +#define order_base_2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) ? 0 : \ + ilog2((n) - 1) + 1) : \ + __order_base_2(n) \ +) + +static inline __attribute__((const)) +int __bits_per(unsigned long n) +{ + if (n < 2) + return 1; + if (is_power_of_2(n)) + return order_base_2(n) + 1; + return order_base_2(n); +} + +/** + * bits_per - calculate the number of bits required for the argument + * @n: parameter + * + * This is constant-capable and can be used for compile time + * initializations, e.g bitfields. + * + * The first few values calculated by this routine: + * bf(0) = 1 + * bf(1) = 1 + * bf(2) = 2 + * bf(3) = 2 + * bf(4) = 3 + * ... and so on. + */ +#define bits_per(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) \ + ? 1 : ilog2(n) + 1 \ + ) : \ + __bits_per(n) \ +) + +/** bit_for_each - iterate over an u64/u32 bits + * @pos: an int used as current bit + * @tmp: a temp u64/u32 used as temporary storage + * @ul: the u64/u32 to loop over + * + * Usage: + * u64 u=139, _t; // u=b10001011 + * int cur; + * bit_for_each64(cur, _t, u) { + * printf("%d\n", cur); + * } + * This will display the position of each bit set in ul: 1, 2, 4, 8 + * + * I should probably re-think the implementation... 
+ */
+#define bit_for_each64(pos, tmp, ul) \
+    for (tmp = ul, pos = ffs64(tmp); tmp; tmp &= (tmp - 1), pos = ffs64(tmp))
+
+#define bit_for_each32(pos, tmp, ul) \
+    for (tmp = ul, pos = ffs32(tmp); tmp; tmp &= (tmp - 1), pos = ffs32(tmp))
+
+/** or would it be more useful (counting bits from zero instead of 1) ?
+ */
+#define bit_for_each64_2(pos, tmp, ul) \
+    for (tmp = ul, pos = ctz64(tmp); tmp; tmp ^= 1UL << pos, pos = ctz64(tmp))
+
+#define bit_for_each32_2(pos, tmp, ul) \
+    for (tmp = ul, pos = ctz32(tmp); tmp; tmp ^= 1U << pos, pos = ctz32(tmp))
+
+#endif /* BITS_H */
diff --git a/2020/include/br.h b/2020/include/br.h
new file mode 100644
index 0000000..11d67cd
--- /dev/null
+++ b/2020/include/br.h
@@ -0,0 +1,205 @@
+/* br.h - generic macros and helpers.
+ *
+ * Copyright (C) 2021-2022 Bruno Raoult ("br")
+ * Licensed under the GNU General Public License v3.0 or later.
+ * Some rights reserved. See COPYING.
+ *
+ * You should have received a copy of the GNU General Public License along with this
+ * program. If not, see <https://www.gnu.org/licenses/>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ * Some parts are taken from the Linux kernel and others, and are:
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * This header contains generic stuff.
+ */
+
+#ifndef _BR_H
+#define _BR_H
+
+/* generate a (maybe) unique id.
+ */
+#define ___PASTE(x, y) x##y
+#define __PASTE(x, y) ___PASTE(x, y)
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+//__##prefix##__COUNTER__
+
+/* see https://lkml.org/lkml/2018/3/20/845 for explanation of this monster
+ */
+#define __is_constexpr(x) \
+    (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
+/*
+ * min()/max()/clamp() macros must accomplish three things:
+ *
+ * - avoid multiple evaluations of the arguments (so side-effects like
+ *   "x++" happen only once) when non-constant.
+ * - perform strict type-checking (to generate warnings instead of
+ *   nasty runtime surprises). See the "unnecessary" pointer comparison
+ *   in __typecheck().
+ * - retain result as a constant expression when called with only
+ *   constant expressions (to avoid tripping VLA warnings in stack
+ *   allocation usage).
+ */
+#define __typecheck(x, y) \
+    (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
+
+#define __no_side_effects(x, y) \
+    (__is_constexpr(x) && __is_constexpr(y))
+
+#define __safe_cmp(x, y) \
+    (__typecheck(x, y) && __no_side_effects(x, y))
+
+#define __cmp(x, y, op) ((x) op (y) ?
(x) : (y)) + +#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ + typeof(x) unique_x = (x); \ + typeof(y) unique_y = (y); \ + __cmp(unique_x, unique_y, op); }) + +#define __careful_cmp(x, y, op) \ + __builtin_choose_expr(__safe_cmp(x, y), \ + __cmp(x, y, op), \ + __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) + +#define __pure __attribute__((__pure__)) + +/** + * min - return minimum of two values of the same or compatible types + * @x: first value + * @y: second value + */ +#define min(x, y) __careful_cmp(x, y, <) + +/** + * max - return maximum of two values of the same or compatible types + * @x: first value + * @y: second value + */ +#define max(x, y) __careful_cmp(x, y, >) + +/** + * min3 - return minimum of three values + * @x: first value + * @y: second value + * @z: third value + */ +#define min3(x, y, z) min((typeof(x))min(x, y), z) + +/** + * max3 - return maximum of three values + * @x: first value + * @y: second value + * @z: third value + */ +#define max3(x, y, z) max((typeof(x))max(x, y), z) + +/** + * min_not_zero - return the minimum that is _not_ zero, unless both are zero + * @x: value1 + * @y: value2 + */ +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +/** + * clamp - return a value clamped to a given range with strict typechecking + * @val: current value + * @lo: lowest allowable value + * @hi: highest allowable value + * + * This macro does strict typechecking of @lo/@hi to make sure they are of the + * same type as @val. See the unnecessary pointer comparisons. + */ +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) + +/* + * ..and if you can't take the strict + * types, you can specify one yourself. + * + * Or not use min/max/clamp at all, of course. + */ + +/** + * min_t - return minimum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) + +/** + * max_t - return maximum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * @type to make all the comparisons. + */ +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument @val is. This is useful when @val is an unsigned + * type and @lo and @hi are literals that will otherwise be assigned a signed + * integer type. 
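+ * E.g. for an u8 @val, clamp_val(val, 32, 200) does every comparison in u8,
+ * where clamp(val, 32, 200) would trip the strict typecheck against int literals.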
+ */ +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) + +/** + * swap - swap values of @a and @b + * @a: first value + * @b: second value + */ +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) + +/** + * ARRAY_SIZE - get the number of elements in array @arr + * @arr: array to be sized + */ +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +/** + * abs - return absolute value of an argument + * @x: the value. If it is unsigned type, it is converted to signed type first. + * char is treated as if it was signed (regardless of whether it really is) + * but the macro's return type is preserved as char. + * + * Return: an absolute value of x. + */ +#define abs(x) __abs_choose_expr(x, long long, \ + __abs_choose_expr(x, long, \ + __abs_choose_expr(x, int, \ + __abs_choose_expr(x, short, \ + __abs_choose_expr(x, char, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), char), \ + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ + ((void)0))))))) + +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), signed type) || \ + __builtin_types_compatible_p(typeof(x), unsigned type), \ + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) + + +#endif /* _BR_H */ diff --git a/2020/include/bug.h b/2020/include/bug.h new file mode 100644 index 0000000..fdff9b8 --- /dev/null +++ b/2020/include/bug.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BR_BUG_H +#define _BR_BUG_H + +#include +#include +#include +#include "likely.h" +#include "debug.h" + +/* BUG functions inspired by Linux kernel's + */ + +#define panic() exit(0xff) + +/* + * Don't use BUG() or BUG_ON() unless there's really no way out; one + * example might be detecting data structure corruption in the middle + * of an operation that can't be backed out of. If the (sub)system + * can somehow continue operating, perhaps with reduced functionality, + * it's probably not BUG-worthy. + * + * If you're tempted to BUG(), think again: is completely giving up + * really the *only* solution? There are usually better options, where + * users don't need to reboot ASAP and can mostly shut down cleanly. + */ +#define BUG() do { \ + fprintf(stderr, "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + panic(); \ + } while (0) + +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) + +/* + * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report + * significant kernel issues that need prompt attention if they should ever + * appear at runtime. + * + * Do not use these macros when checking for invalid external inputs + * (e.g. invalid system call arguments, or invalid data coming from + * network/devices), and on transient conditions like ENOMEM or EAGAIN. + * These macros should be used for recoverable kernel issues only. + * For invalid external inputs, transient conditions, etc use + * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary. + * Do not include "BUG"/"WARNING" in format strings manually to make these + * conditions distinguishable from kernel issues. + * + * Use the versions with printk format strings to provide better diagnostics. + */ +#define __WARN() do { \ + fprintf(stderr, "WARNING: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + } while (0) +#define __WARN_printf(arg...) 
do { \
+        fprintf(stderr, arg); \
+    } while (0)
+
+#define WARN_ON(condition) ({ \
+    int __ret_warn_on = !!(condition); \
+    if (unlikely(__ret_warn_on)) \
+        __WARN(); \
+    unlikely(__ret_warn_on); \
+    })
+
+#define WARN(condition, format...) ({ \
+    int __ret_warn_on = !!(condition); \
+    if (unlikely(__ret_warn_on)) \
+        __WARN_printf(format); \
+    unlikely(__ret_warn_on); \
+    })
+
+#endif /* _BR_BUG_H */
diff --git a/2020/include/container-of.h b/2020/include/container-of.h
new file mode 100644
index 0000000..1c7f54f
--- /dev/null
+++ b/2020/include/container-of.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* adaptation of the Linux kernel's container_of() helpers.
+ */
+#ifndef _BR_CONTAINER_OF_H
+#define _BR_CONTAINER_OF_H
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+/**
+ * typeof_member - get the type of a struct member
+ */
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+    void *__mptr = (void *)(ptr); \
+    _Static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+                   __same_type(*(ptr), void), \
+                   "pointer type mismatch in container_of()"); \
+    ((type *)(__mptr - offsetof(type, member))); })
+
+#endif /* _BR_CONTAINER_OF_H */
diff --git a/2020/include/debug.h b/2020/include/debug.h
new file mode 100644
index 0000000..fcceefd
--- /dev/null
+++ b/2020/include/debug.h
@@ -0,0 +1,98 @@
+/* debug.h - debug/log management.
+ *
+ * Copyright (C) 2021-2022 Bruno Raoult ("br")
+ * Licensed under the GNU General Public License v3.0 or later.
+ * Some rights reserved. See COPYING.
+ *
+ * You should have received a copy of the GNU General Public License along with this
+ * program. If not, see <https://www.gnu.org/licenses/>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include "bits.h"
+
+#define _unused __attribute__((__unused__))
+#define _printf __attribute__ ((format (printf, 6, 7)))
+
+#ifdef DEBUG_DEBUG
+void debug_init(u32 level);
+void debug_level_set(u32 level);
+void _printf debug(u32 level, bool timestamp,
+                   u32 indent, const char *src,
+                   u32 line, const char *, ...);
+#else /* DEBUG_DEBUG */
+static inline void debug_init(_unused u32 level) {}
+static inline void debug_level_set(_unused u32 level) {}
+static inline void _printf debug(_unused u32 level, _unused bool timestamp,
+                                 _unused u32 indent, _unused const char *src,
+                                 _unused u32 line, const char *, ...) {}
+#endif /* DEBUG_DEBUG */
+#undef _unused
+#undef _printf
+
+/**
+ * log - simple log (no function name, no indent, no timestamp)
+ * @level: log level
+ * @fmt: printf format string
+ * @args: subsequent arguments to printf
+ */
+#define log(level, fmt, args...) \
+    debug((level), false, 0, NULL, 0, fmt, ##args)
+
+/**
+ * log_i - log with indent (no function name, no timestamp)
+ * @level: log level
+ * @fmt: printf format string
+ * @args: subsequent arguments to printf
+ *
+ * Output example:
+ * >>>>val=2
+ */
+#define log_i(level, fmt, args...)
\ + debug((level), false, (level), NULL, 0, fmt, ##args) + +/** + * log_f - log with function name (no indent name, no timestamp) + * @level: log level + * @fmt: printf format string + * @args: subsequent arguments to printf + * + * Output example: + * [function] val=2 + */ +#define log_f(level, fmt, args...) \ + debug((level), false, 0, __func__, 0, fmt, ##args) + +/** + * log_if - log with function name and line number (no indent name, no timestamp) + * @level: log level + * @fmt: printf format string + * @args: subsequent arguments to printf + * + * Output example: + * >>>> [function:15] val=2 + */ +#define log_if(level, fmt, args...) \ + debug((level), false, (level), __func__, __LINE__, fmt, ##args) + +/** + * log_it - log with function name, line number, indent, and timestamp + * @level: log level + * @fmt: printf format string + * @args: subsequent arguments to printf + * + * Output example: + * >>>> [function:15] val=2 + */ +#define log_it(level, fmt, args...) \ + debug((level), true, (level), __func__, __LINE__, fmt, ##args) + +#endif /* DEBUG_H */ diff --git a/2020/include/hash.h b/2020/include/hash.h new file mode 100644 index 0000000..f4f0475 --- /dev/null +++ b/2020/include/hash.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _BR_HASH_H +#define _BR_HASH_H +/* adaptation of Linux kernel's and + */ + +/* Fast hashing routine for ints, longs and pointers. + (C) 2002 Nadia Yvette Chambers, IBM */ + +//#include +#include +#include "bits.h" +#include "br.h" + +/* + * The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and + * fs/inode.c. It's not actually prime any more (the previous primes + * were actively bad for hashing), but the name remains. + */ +#if __BITS_PER_LONG == 32 +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32 +#define hash_long(val, bits) hash_32(val, bits) +#elif __BITS_PER_LONG == 64 +#define hash_long(val, bits) hash_64(val, bits) +#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64 +#else +#error Wordsize not 32 or 64 +#endif + +/* + * This hash multiplies the input by a large odd number and takes the + * high bits. Since multiplication propagates changes to the most + * significant end only, it is essential that the high bits of the + * product be used for the hash value. + * + * Chuck Lever verified the effectiveness of this technique: + * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf + * + * Although a random odd number will do, it turns out that the golden + * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice + * properties. (See Knuth vol 3, section 6.4, exercise 9.) + * + * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2, + * which is very slightly easier to multiply by and makes no + * difference to the hash distribution. + */ +#define GOLDEN_RATIO_32 0x61C88647 +#define GOLDEN_RATIO_64 0x61C8864680B583EBull + +/* + * The _generic versions exist only so lib/test_hash.c can compare + * the arch-optimized versions with the generic. + * + * Note that if you change these, any that aren't updated + * to match need to have their HAVE_ARCH_* define values updated so the + * self-test will not false-positive. + */ +#ifndef HAVE_ARCH__HASH_32 +#define __hash_32 __hash_32_generic +#endif +static inline u32 __hash_32_generic(u32 val) +{ + return val * GOLDEN_RATIO_32; +} + +static inline u32 hash_32(u32 val, unsigned int bits) +{ + /* High bits are more random, so use them. 
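+     * E.g. with bits == 10, only the top 10 bits of val * GOLDEN_RATIO_32
+     * are kept, giving a bucket index in [0, 1023].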
*/ + return __hash_32(val) >> (32 - bits); +} + +#ifndef HAVE_ARCH_HASH_64 +#define hash_64 hash_64_generic +#endif +static __always_inline u32 hash_64_generic(u64 val, unsigned int bits) +{ +#if __BITS_PER_LONG == 64 + /* 64x64-bit multiply is efficient on all 64-bit processors */ + return val * GOLDEN_RATIO_64 >> (64 - bits); +#else + /* Hash 64 bits using only 32x32-bit multiply. */ + return hash_32((u32)val ^ __hash_32(val >> 32), bits); +#endif +} + +static inline u32 hash_ptr(const void *ptr, unsigned int bits) +{ + return hash_long((unsigned long)ptr, bits); +} + +/* This really should be called fold32_ptr; it does no hashing to speak of. */ +static inline u32 hash32_ptr(const void *ptr) +{ + unsigned long val = (unsigned long)ptr; + +#if __BITS_PER_LONG == 64 + val ^= (val >> 32); +#endif + return (u32)val; +} + +/* + * Routines for hashing strings of bytes to a 32-bit hash value. + * + * These hash functions are NOT GUARANTEED STABLE between kernel + * versions, architectures, or even repeated boots of the same kernel. + * (E.g. they may depend on boot-time hardware detection or be + * deliberately randomized.) + * + * They are also not intended to be secure against collisions caused by + * malicious inputs; much slower hash functions are required for that. + * + * They are optimized for pathname components, meaning short strings. + * Even if a majority of files have longer names, the dynamic profile of + * pathname components skews short due to short directory names. + * (E.g. /usr/lib/libsesquipedalianism.so.3.141.) + */ + +/* + * Version 1: one byte at a time. Example of use: + * + * unsigned long hash = init_name_hash; + * while (*p) + * hash = partial_name_hash(tolower(*p++), hash); + * hash = end_name_hash(hash); + * + * Although this is designed for bytes, fs/hfsplus/unicode.c + * abuses it to hash 16-bit values. + */ + +/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ +#define init_name_hash(salt) (unsigned long)(salt) + +/* partial hash update function. Assume roughly 4 bits per character */ +static inline unsigned long +partial_name_hash(unsigned long c, unsigned long prevhash) +{ + return (prevhash + (c << 4) + (c >> 4)) * 11; +} + +/* + * Finally: cut down the number of bits to a int value (and try to avoid + * losing bits). This also has the property (wanted by the dcache) + * that the msbits make a good hash table index. + */ +static inline unsigned int end_name_hash(unsigned long hash) +{ + return hash_long(hash, 32); +} + +/* + * Version 2: One word (32 or 64 bits) at a time. + * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning + * exists, which describes major Linux platforms like x86 and ARM), then + * this computes a different hash function much faster. + * + * If not set, this falls back to a wrapper around the preceding. + */ +extern unsigned int __pure hash_string(const void *salt, const char *, unsigned int); + +/* + * A hash_len is a u64 with the hash of a string in the low + * half and the length in the high half. 
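+ * E.g. hashlen_create(h, 5) stores 5 in bits 63..32 and h in bits 31..0;
+ * hashlen_len() and hashlen_hash() recover them with a shift and a cast.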
+ */ +#define hashlen_hash(hashlen) ((u32)(hashlen)) +#define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) +#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash)) + +/* Return the "hash_len" (hash and length) of a null-terminated string */ +extern u64 __pure hashlen_string(const void *salt, const char *name); + +#endif /* _BR_HASH_H */ diff --git a/2020/include/hashtable.h b/2020/include/hashtable.h new file mode 100644 index 0000000..40fc5ec --- /dev/null +++ b/2020/include/hashtable.h @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* adaptation of Linux kernel's + */ + + +/* + * Statically sized hash table implementation + * (C) 2012 Sasha Levin + */ + +#ifndef _LINUX_HASHTABLE_H +#define _LINUX_HASHTABLE_H + +#include "list.h" +#include +#include "hash.h" +//#include + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +/** + * hash_init - initialize a hash table + * @hashtable: hashtable to be initialized + * + * Calculates the size of the hashtable from the given parameter, otherwise + * same as hash_init_size. + * + * This has to be a macro since HASH_BITS() will not work on pointers since + * it calculates the size during preprocessing. + */ +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +/** + * hash_add - add an object to a hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +/** + * hash_add_rcu - add an object to a rcu enabled hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define hash_add_rcu(hashtable, node, key) \ + hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +/** + * hash_hashed - check whether an object is in any hashtable + * @node: the &struct hlist_node of the object to be checked + */ +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +/** + * hash_empty - check whether a hashtable is empty + * @hashtable: hashtable to check + * + * This has to be a macro since HASH_BITS() will not work on pointers since + * it calculates the size during preprocessing. 
+ */ +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +/** + * hash_del - remove an object from a hashtable + * @node: &struct hlist_node of the object to remove + */ +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} + +/** + * hash_for_each - iterate over a hashtable + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +/** + * hash_for_each_rcu - iterate over a rcu enabled hashtable + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each_rcu(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_rcu(obj, &name[bkt], member) + +/** + * hash_for_each_safe - iterate over a hashtable safe against removal of + * hash entry + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @tmp: a &struct hlist_node used for temporary storage + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +/** + * hash_for_each_possible - iterate over all possible objects hashing to the + * same bucket + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +/** + * hash_for_each_possible_rcu - iterate over all possible objects hashing to the + * same bucket in an rcu enabled hashtable + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \ + hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ + member, ## cond) + +/** + * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing + * to the same bucket in an rcu enabled hashtable in a rcu enabled hashtable + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + * + * This is the same as hash_for_each_possible_rcu() except that it does + * not do any RCU debugging or tracing. 
+ */ +#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \ + hlist_for_each_entry_rcu_notrace(obj, \ + &name[hash_min(key, HASH_BITS(name))], member) + +/** + * hash_for_each_possible_safe - iterate over all possible objects hashing to the + * same bucket safe against removals + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @tmp: a &struct hlist_node used for temporary storage + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + + +#endif diff --git a/2020/include/likely.h b/2020/include/likely.h new file mode 100644 index 0000000..a5d151d --- /dev/null +++ b/2020/include/likely.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* taken from Kernel's + * + */ + +#ifndef __BR_LIST_H +#define __BR_LIST_H + +#include +#include +#include "rwonce.h" +#include "container-of.h" + +/************ originally in */ +struct list_head { + struct list_head *next, *prev; +}; +struct hlist_head { + struct hlist_node *first; +}; + +struct hlist_node { + struct hlist_node *next, **pprev; +}; + +/************ originally in */ +# define POISON_POINTER_DELTA 0 +/* These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) +#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) + +/* + * Circular doubly linked list implementation. + * + * Some of the internal functions ("__xxx") are useful when + * manipulating whole lists rather than single entries, as + * sometimes we already know the next/prev entries and we can + * generate better code by using them directly rather than + * using the generic single-entry routines. + */ + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +/** + * INIT_LIST_HEAD - Initialize a list_head structure + * @list: list_head structure to be initialized. + * + * Initializes the list_head to point to itself. If it is a list header, + * the result is an empty list. + */ +static inline void INIT_LIST_HEAD(struct list_head *list) +{ + WRITE_ONCE(list->next, list); + list->prev = list; +} + +/* + * Insert a new entry between two known consecutive entries. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + WRITE_ONCE(prev->next, new); +} + +/** + * list_add - add a new entry + * @new: new entry to be added + * @head: list head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + + +/** + * list_add_tail - add a new entry + * @new: new entry to be added + * @head: list head to add it before + * + * Insert a new entry before the specified head. + * This is useful for implementing queues. 
+ */ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + +/* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ +static inline void __list_del(struct list_head * prev, struct list_head * next) +{ + next->prev = prev; + WRITE_ONCE(prev->next, next); +} + +/* + * Delete a list entry and clear the 'prev' pointer. + * + * This is a special-purpose list clearing method used in the networking code + * for lists allocated as per-cpu, where we don't want to incur the extra + * WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this + * needs to check the node 'prev' pointer instead of calling list_empty(). + */ +static inline void __list_del_clearprev(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); + entry->prev = NULL; +} + +static inline void __list_del_entry(struct list_head *entry) +{ + __list_del(entry->prev, entry->next); +} + +/** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty() on entry does not return true after this, the entry is + * in an undefined state. + */ +static inline void list_del(struct list_head *entry) +{ + __list_del_entry(entry); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; +} + +/** + * list_replace - replace old entry by new one + * @old : the element to be replaced + * @new : the new element to insert + * + * If @old was empty, it will be overwritten. + */ +static inline void list_replace(struct list_head *old, + struct list_head *new) +{ + new->next = old->next; + new->next->prev = new; + new->prev = old->prev; + new->prev->next = new; +} + +/** + * list_replace_init - replace old entry by new one and initialize the old one + * @old : the element to be replaced + * @new : the new element to insert + * + * If @old was empty, it will be overwritten. + */ +static inline void list_replace_init(struct list_head *old, + struct list_head *new) +{ + list_replace(old, new); + INIT_LIST_HEAD(old); +} + +/** + * list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position + * @entry1: the location to place entry2 + * @entry2: the location to place entry1 + */ +static inline void list_swap(struct list_head *entry1, + struct list_head *entry2) +{ + struct list_head *pos = entry2->prev; + + list_del(entry2); + list_replace(entry1, entry2); + if (pos == entry1) + pos = entry2; + list_add(entry1, pos); +} + +/** + * list_del_init - deletes entry from list and reinitialize it. + * @entry: the element to delete from the list. 
+ */ +static inline void list_del_init(struct list_head *entry) +{ + __list_del_entry(entry); + INIT_LIST_HEAD(entry); +} + +/** + * list_move - delete from one list and add as another's head + * @list: the entry to move + * @head: the head that will precede our entry + */ +static inline void list_move(struct list_head *list, struct list_head *head) +{ + __list_del_entry(list); + list_add(list, head); +} + +/** + * list_move_tail - delete from one list and add as another's tail + * @list: the entry to move + * @head: the head that will follow our entry + */ +static inline void list_move_tail(struct list_head *list, + struct list_head *head) +{ + __list_del_entry(list); + list_add_tail(list, head); +} + +/** + * list_bulk_move_tail - move a subsection of a list to its tail + * @head: the head that will follow our entry + * @first: first entry to move + * @last: last entry to move, can be the same as first + * + * Move all entries between @first and including @last before @head. + * All three entries must belong to the same linked list. + */ +static inline void list_bulk_move_tail(struct list_head *head, + struct list_head *first, + struct list_head *last) +{ + first->prev->next = last->next; + last->next->prev = first->prev; + + head->prev->next = first; + first->prev = head->prev; + + last->next = head; + head->prev = last; +} + +/** + * list_is_first -- tests whether @list is the first entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_first(const struct list_head *list, + const struct list_head *head) +{ + return list->prev == head; +} + +/** + * list_is_last - tests whether @list is the last entry in list @head + * @list: the entry to test + * @head: the head of the list + */ +static inline int list_is_last(const struct list_head *list, + const struct list_head *head) +{ + return list->next == head; +} + +/** + * list_empty - tests whether a list is empty + * @head: the list to test. + */ +static inline int list_empty(const struct list_head *head) +{ + return READ_ONCE(head->next) == head; +} + +/** + * list_rotate_left - rotate the list to the left + * @head: the head of the list + */ +static inline void list_rotate_left(struct list_head *head) +{ + struct list_head *first; + + if (!list_empty(head)) { + first = head->next; + list_move_tail(first, head); + } +} + +/** + * list_rotate_to_front() - Rotate list to specific item. + * @list: The desired new front of the list. + * @head: The head of the list. + * + * Rotates list so that @list becomes the new front of the list. + */ +static inline void list_rotate_to_front(struct list_head *list, + struct list_head *head) +{ + /* + * Deletes the list head from the list denoted by @head and + * places it as the tail of @list, this effectively rotates the + * list so that @list is at the front. + */ + list_move_tail(head, list); +} + +/** + * list_is_singular - tests whether a list has just one entry. + * @head: the list to test. 
+ */ +static inline int list_is_singular(const struct list_head *head) +{ + return !list_empty(head) && (head->next == head->prev); +} + +static inline void __list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + struct list_head *new_first = entry->next; + list->next = head->next; + list->next->prev = list; + list->prev = entry; + entry->next = list; + head->next = new_first; + new_first->prev = head; +} + +/** + * list_cut_position - cut a list into two + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * and if so we won't cut the list + * + * This helper moves the initial part of @head, up to and + * including @entry, from @head to @list. You should + * pass on @entry an element you know is on @head. @list + * should be an empty list or a list you do not care about + * losing its data. + * + */ +static inline void list_cut_position(struct list_head *list, + struct list_head *head, struct list_head *entry) +{ + if (list_empty(head)) + return; + if (list_is_singular(head) && + (head->next != entry && head != entry)) + return; + if (entry == head) + INIT_LIST_HEAD(list); + else + __list_cut_position(list, head, entry); +} + +/** + * list_cut_before - cut a list into two, before given entry + * @list: a new list to add all removed entries + * @head: a list with entries + * @entry: an entry within head, could be the head itself + * + * This helper moves the initial part of @head, up to but + * excluding @entry, from @head to @list. You should pass + * in @entry an element you know is on @head. @list should + * be an empty list or a list you do not care about losing + * its data. + * If @entry == @head, all entries on @head are moved to + * @list. + */ +static inline void list_cut_before(struct list_head *list, + struct list_head *head, + struct list_head *entry) +{ + if (head->next == entry) { + INIT_LIST_HEAD(list); + return; + } + list->next = head->next; + list->next->prev = list; + list->prev = entry->prev; + list->prev->next = list; + head->next = entry; + entry->prev = head; +} + +static inline void __list_splice(const struct list_head *list, + struct list_head *prev, + struct list_head *next) +{ + struct list_head *first = list->next; + struct list_head *last = list->prev; + + first->prev = prev; + prev->next = first; + + last->next = next; + next->prev = last; +} + +/** + * list_splice - join two lists, this is designed for stacks + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice(const struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head, head->next); +} + +/** + * list_splice_tail - join two lists, each list being a queue + * @list: the new list to add. + * @head: the place to add it in the first list. + */ +static inline void list_splice_tail(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) + __list_splice(list, head->prev, head); +} + +/** + * list_splice_init - join two lists and reinitialise the emptied list. + * @list: the new list to add. + * @head: the place to add it in the first list. 
+ * + * The list at @list is reinitialised + */ +static inline void list_splice_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head, head->next); + INIT_LIST_HEAD(list); + } +} + +/** + * list_splice_tail_init - join two lists and reinitialise the emptied list + * @list: the new list to add. + * @head: the place to add it in the first list. + * + * Each of the lists is a queue. + * The list at @list is reinitialised + */ +static inline void list_splice_tail_init(struct list_head *list, + struct list_head *head) +{ + if (!list_empty(list)) { + __list_splice(list, head->prev, head); + INIT_LIST_HEAD(list); + } +} + +/** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +/** + * list_first_entry - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_first_entry(ptr, type, member) \ + list_entry((ptr)->next, type, member) + +/** + * list_last_entry - get the last element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note, that list is expected to be not empty. + */ +#define list_last_entry(ptr, type, member) \ + list_entry((ptr)->prev, type, member) + +/** + * list_first_entry_or_null - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_head within the struct. + * + * Note that if the list is empty, it returns NULL. + */ +#define list_first_entry_or_null(ptr, type, member) ({ \ + struct list_head *head__ = (ptr); \ + struct list_head *pos__ = READ_ONCE(head__->next); \ + pos__ != head__ ? list_entry(pos__, type, member) : NULL; \ + }) + +/** + * list_next_entry - get the next element in list + * @pos: the type * to cursor + * @member: the name of the list_head within the struct. + */ +#define list_next_entry(pos, member) \ + list_entry((pos)->member.next, __typeof__(*(pos)), member) + +/** + * list_prev_entry - get the prev element in list + * @pos: the type * to cursor + * @member: the name of the list_head within the struct. + */ +#define list_prev_entry(pos, member) \ + list_entry((pos)->member.prev, __typeof__(*(pos)), member) + +/** + * list_for_each - iterate over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + */ +#define list_for_each(pos, head) \ + for (pos = (head)->next; pos != (head); pos = pos->next) + +/** + * list_for_each_continue - continue iteration over a list + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. + * + * Continue to iterate over a list, continuing after the current position. + */ +#define list_for_each_continue(pos, head) \ + for (pos = pos->next; pos != (head); pos = pos->next) + +/** + * list_for_each_prev - iterate over a list backwards + * @pos: the &struct list_head to use as a loop cursor. + * @head: the head for your list. 
+ */ +#define list_for_each_prev(pos, head) \ + for (pos = (head)->prev; pos != (head); pos = pos->prev) + +/** + * list_for_each_safe - iterate over a list safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_safe(pos, n, head) \ + for (pos = (head)->next, n = pos->next; pos != (head); \ + pos = n, n = pos->next) + +/** + * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry + * @pos: the &struct list_head to use as a loop cursor. + * @n: another &struct list_head to use as temporary storage + * @head: the head for your list. + */ +#define list_for_each_prev_safe(pos, n, head) \ + for (pos = (head)->prev, n = pos->prev; \ + pos != (head); \ + pos = n, n = pos->prev) + +/** + * list_entry_is_head - test if the entry points to the head of the list + * @pos: the type * to cursor + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_entry_is_head(pos, head, member) \ + (&pos->member == (head)) + +/** + * list_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_for_each_entry(pos, head, member) \ + for (pos = list_first_entry(head, __typeof__(*pos), member); \ + !list_entry_is_head(pos, head, member); \ + pos = list_next_entry(pos, member)) + +/** + * list_for_each_entry_reverse - iterate backwards over list of given type. + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_for_each_entry_reverse(pos, head, member) \ + for (pos = list_last_entry(head, __typeof__(*pos), member); \ + !list_entry_is_head(pos, head, member); \ + pos = list_prev_entry(pos, member)) + +/** + * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() + * @pos: the type * to use as a start point + * @head: the head of the list + * @member: the name of the list_head within the struct. + * + * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). + */ +#define list_prepare_entry(pos, head, member) \ + ((pos) ? : list_entry(head, __typeof__(*pos), member)) + +/** + * list_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define list_for_each_entry_continue(pos, head, member) \ + for (pos = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = list_next_entry(pos, member)) + +/** + * list_for_each_entry_continue_reverse - iterate backwards from the given point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Start to iterate over list of given type backwards, continuing after + * the current position. 
+ */ +#define list_for_each_entry_continue_reverse(pos, head, member) \ + for (pos = list_prev_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = list_prev_entry(pos, member)) + +/** + * list_for_each_entry_from - iterate over list of given type from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate over list of given type, continuing from current position. + */ +#define list_for_each_entry_from(pos, head, member) \ + for (; !list_entry_is_head(pos, head, member); \ + pos = list_next_entry(pos, member)) + +/** + * list_for_each_entry_from_reverse - iterate backwards over list of given type + * from the current point + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate backwards over list of given type, continuing from current position. + */ +#define list_for_each_entry_from_reverse(pos, head, member) \ + for (; !list_entry_is_head(pos, head, member); \ + pos = list_prev_entry(pos, member)) + +/** + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_head within the struct. + */ +#define list_for_each_entry_safe(pos, n, head, member) \ + for (pos = list_first_entry(head, __typeof__(*pos), member), \ + n = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_next_entry(n, member)) + +/** + * list_for_each_entry_safe_continue - continue list iteration safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate over list of given type, continuing after current point, + * safe against removal of list entry. + */ +#define list_for_each_entry_safe_continue(pos, n, head, member) \ + for (pos = list_next_entry(pos, member), \ + n = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_next_entry(n, member)) + +/** + * list_for_each_entry_safe_from - iterate over list from current point safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate over list of given type from current point, safe against + * removal of list entry. + */ +#define list_for_each_entry_safe_from(pos, n, head, member) \ + for (n = list_next_entry(pos, member); \ + !list_entry_is_head(pos, head, member); \ + pos = n, n = list_next_entry(n, member)) + +/** + * list_for_each_entry_safe_reverse - iterate backwards over list safe against removal + * @pos: the type * to use as a loop cursor. + * @n: another type * to use as temporary storage + * @head: the head for your list. + * @member: the name of the list_head within the struct. + * + * Iterate backwards over list of given type, safe against removal + * of list entry. 
+ */
+#define list_for_each_entry_safe_reverse(pos, n, head, member)     \
+    for (pos = list_last_entry(head, __typeof__(*pos), member),    \
+             n = list_prev_entry(pos, member);                     \
+         !list_entry_is_head(pos, head, member);                   \
+         pos = n, n = list_prev_entry(n, member))
+
+/**
+ * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
+ * @pos: the loop cursor used in the list_for_each_entry_safe loop
+ * @n: temporary storage used in list_for_each_entry_safe
+ * @member: the name of the list_head within the struct.
+ *
+ * list_safe_reset_next is not safe to use in general if the list may be
+ * modified concurrently (e.g. the lock is dropped in the loop body). An
+ * exception to this is if the cursor element (pos) is pinned in the list,
+ * and list_safe_reset_next is called after re-taking the lock and before
+ * completing the current iteration of the loop body.
+ */
+#define list_safe_reset_next(pos, n, member)    \
+    n = list_next_entry(pos, member)
+
+/*
+ * Double linked lists with a single pointer list head.
+ * Mostly useful for hash tables where the two pointer list head is
+ * too wasteful.
+ * You lose the ability to access the tail in O(1).
+ */
+
+#define HLIST_HEAD_INIT { .first = NULL }
+#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
+#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
+static inline void INIT_HLIST_NODE(struct hlist_node *h)
+{
+    h->next = NULL;
+    h->pprev = NULL;
+}
+
+/**
+ * hlist_unhashed - Has node been removed from list and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed
+ * state. For example, hlist_nulls_del_init_rcu() does leave the
+ * node in unhashed state, but hlist_nulls_del() does not.
+ */
+static inline int hlist_unhashed(const struct hlist_node *h)
+{
+    return !h->pprev;
+}
+
+/**
+ * hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
+ * @h: Node to be checked
+ *
+ * This variant of hlist_unhashed() must be used in lockless contexts
+ * to avoid potential load-tearing. The READ_ONCE() is paired with the
+ * various WRITE_ONCE() in hlist helpers that are defined below.
+ */
+static inline int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+    return !READ_ONCE(h->pprev);
+}
+
+/**
+ * hlist_empty - Is the specified hlist_head structure an empty hlist?
+ * @h: Structure to check.
+ */
+static inline int hlist_empty(const struct hlist_head *h)
+{
+    return !READ_ONCE(h->first);
+}
+
+static inline void __hlist_del(struct hlist_node *n)
+{
+    struct hlist_node *next = n->next;
+    struct hlist_node **pprev = n->pprev;
+
+    WRITE_ONCE(*pprev, next);
+    if (next)
+        WRITE_ONCE(next->pprev, pprev);
+}
+
+/**
+ * hlist_del - Delete the specified hlist_node from its list
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in hashed state. Use
+ * hlist_del_init() or similar instead to unhash @n.
+ */
+static inline void hlist_del(struct hlist_node *n)
+{
+    __hlist_del(n);
+    n->next = LIST_POISON1;
+    n->pprev = LIST_POISON2;
+}
+
+/**
+ * hlist_del_init - Delete the specified hlist_node from its list and initialize
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in unhashed state.
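+ *
+ * Illustrative sketch (object and bucket names are hypothetical): after
+ * the call, hlist_unhashed() reports true and the node may be re-inserted:
+ *
+ *     hlist_del_init(&obj->hnode);
+ *     if (hlist_unhashed(&obj->hnode))    // always true at this point
+ *         hlist_add_head(&obj->hnode, &new_bucket);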
+ */ +static inline void hlist_del_init(struct hlist_node *n) +{ + if (!hlist_unhashed(n)) { + __hlist_del(n); + INIT_HLIST_NODE(n); + } +} + +/** + * hlist_add_head - add a new entry at the beginning of the hlist + * @n: new entry to be added + * @h: hlist head to add it after + * + * Insert a new entry after the specified head. + * This is good for implementing stacks. + */ +static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) +{ + struct hlist_node *first = h->first; + WRITE_ONCE(n->next, first); + if (first) + WRITE_ONCE(first->pprev, &n->next); + WRITE_ONCE(h->first, n); + WRITE_ONCE(n->pprev, &h->first); +} + +/** + * hlist_add_before - add a new entry before the one specified + * @n: new entry to be added + * @next: hlist node to add it before, which must be non-NULL + */ +static inline void hlist_add_before(struct hlist_node *n, + struct hlist_node *next) +{ + WRITE_ONCE(n->pprev, next->pprev); + WRITE_ONCE(n->next, next); + WRITE_ONCE(next->pprev, &n->next); + WRITE_ONCE(*(n->pprev), n); +} + +/** + * hlist_add_behind - add a new entry after the one specified + * @n: new entry to be added + * @prev: hlist node to add it after, which must be non-NULL + */ +static inline void hlist_add_behind(struct hlist_node *n, + struct hlist_node *prev) +{ + WRITE_ONCE(n->next, prev->next); + WRITE_ONCE(prev->next, n); + WRITE_ONCE(n->pprev, &prev->next); + + if (n->next) + WRITE_ONCE(n->next->pprev, &n->next); +} + +/** + * hlist_add_fake - create a fake hlist consisting of a single headless node + * @n: Node to make a fake list out of + * + * This makes @n appear to be its own predecessor on a headless hlist. + * The point of this is to allow things like hlist_del() to work correctly + * in cases where there is no list. + */ +static inline void hlist_add_fake(struct hlist_node *n) +{ + n->pprev = &n->next; +} + +/** + * hlist_fake: Is this node a fake hlist? + * @h: Node to check for being a self-referential fake hlist. + */ +static inline bool hlist_fake(struct hlist_node *h) +{ + return h->pprev == &h->next; +} + +/** + * hlist_is_singular_node - is node the only element of the specified hlist? + * @n: Node to check for singularity. + * @h: Header for potentially singular list. + * + * Check whether the node is the only node of the head without + * accessing head, thus avoiding unnecessary cache misses. + */ +static inline bool +hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h) +{ + return !n->next && n->pprev == &h->first; +} + +/** + * hlist_move_list - Move an hlist + * @old: hlist_head for old list. + * @new: hlist_head for new list. + * + * Move a list from one list head to another. Fixup the pprev + * reference of the first entry if it exists. + */ +static inline void hlist_move_list(struct hlist_head *old, + struct hlist_head *new) +{ + new->first = old->first; + if (new->first) + new->first->pprev = &new->first; + old->first = NULL; +} + +#define hlist_entry(ptr, type, member) container_of(ptr,type,member) + +#define hlist_for_each(pos, head) \ + for (pos = (head)->first; pos ; pos = pos->next) + +#define hlist_for_each_safe(pos, n, head) \ + for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \ + pos = n) + +#define hlist_entry_safe(ptr, type, member) \ + ({ __typeof__(ptr) ____ptr = (ptr); \ + ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ + }) + +/** + * hlist_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop cursor. + * @head: the head for your list. 
+ * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry(pos, head, member) \ + for (pos = hlist_entry_safe((head)->first, __typeof__(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, __typeof__(*(pos)), member)) + +/** + * hlist_for_each_entry_continue - iterate over a hlist continuing after current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_continue(pos, member) \ + for (pos = hlist_entry_safe((pos)->member.next, __typeof__(*(pos)), member); \ + pos; \ + pos = hlist_entry_safe((pos)->member.next, __typeof__(*(pos)), member)) + +/** + * hlist_for_each_entry_from - iterate over a hlist continuing from current point + * @pos: the type * to use as a loop cursor. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_from(pos, member) \ + for (; pos; \ + pos = hlist_entry_safe((pos)->member.next, __typeof__(*(pos)), member)) + +/** + * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry + * @pos: the type * to use as a loop cursor. + * @n: a &struct hlist_node to use as temporary storage + * @head: the head for your list. + * @member: the name of the hlist_node within the struct. + */ +#define hlist_for_each_entry_safe(pos, n, head, member) \ + for (pos = hlist_entry_safe((head)->first, __typeof__(*pos), member); \ + pos && ({ n = pos->member.next; 1; }); \ + pos = hlist_entry_safe(n, __typeof__(*pos), member)) + +#endif /* __BR_LIST_H */ diff --git a/2020/include/pool.h b/2020/include/pool.h new file mode 100644 index 0000000..7f070bd --- /dev/null +++ b/2020/include/pool.h @@ -0,0 +1,90 @@ +/* pool.h - A simple memory pool manager. + * + * Copyright (C) 2021 Bruno Raoult ("br") + * Licensed under the GNU General Public License v3.0 or later. + * Some rights reserved. See COPYING. + * + * You should have received a copy of the GNU General Public License along with this + * program. If not, see . + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +#ifndef POOL_H +#define POOL_H + +#include +#include +#include "list.h" +#include "bits.h" + +#define POOL_NAME_LENGTH (16) /* max name length including trailing \0 */ + +typedef struct { + struct list_head list_blocks; /* list of allocated blocks in pool */ + char data[]; /* objects block */ +} block_t; + +typedef struct { + char name[POOL_NAME_LENGTH]; /* pool name */ + size_t eltsize; /* object size */ + u32 available; /* current available elements */ + u32 allocated; /* total objects allocated */ + u32 growsize; /* number of objects per block allocated */ + u32 nblocks; /* number of blocks allocated */ + struct list_head list_available; /* available nodes */ + struct list_head list_blocks; /* allocated blocks */ +} pool_t; + +/** + * pool_stats - display some pool statistics + * @pool: the pool address. + */ +void pool_stats(pool_t *pool); + +/** + * pool_create - create a new memory pool + * @name: the name to give to the pool. + * @grow: the number of elements to add when no more available. + * @size: the size of an element in pool. + * + * The name will be truncated to 16 characters (including the final '\0'). + * + * Return: The address of the created pool, or NULL if error. + */ +pool_t *pool_create(const char *name, u32 grow, size_t size); + +/** + * pool_get() - Get an element from a pool. + * @pool: The pool address. + * + * Get an object from the pool. 
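+ * When no object is currently available, a new block of objects is
+ * allocated first (the block holds the 'grow' count of objects passed
+ * to pool_create()).
+ *
+ * Typical usage (illustrative sketch; 'struct node' stands for any
+ * caller-side type):
+ *
+ *     pool_t *p = pool_create("nodes", 1024, sizeof(struct node));
+ *     struct node *n = pool_get(p);
+ *     ...
+ *     pool_add(p, n);                     // hand the object back
+ *     pool_destroy(p);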
+ * + * Return: The address of the object, or NULL if error. + */ +void *pool_get(pool_t *pool); + +/** + * pool_add() - Add (free) an element to a pool. + * @pool: The pool address. + * @elt: The address of the object to add to the pool. + * + * The object will be available for further pool_get(). + * + * Return: The current number of available elements in pool (including + * @elt). + */ +u32 pool_add(pool_t *pool, void *elt); + +/** + * pool_destroy() - destroy a pool. + * @pool: The pool address. + * + * Attention: All memory is freed, but no check is done whether all pool + * elements have been released. Referencing any pool object after this call + * will likely imply some memory corruption. + */ +void pool_destroy(pool_t *pool); + +#endif diff --git a/2020/include/rwonce.h b/2020/include/rwonce.h new file mode 100644 index 0000000..90a28bc --- /dev/null +++ b/2020/include/rwonce.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* adaptation of kernel's + * See https://www.kernel.org/doc/Documentation/memory-barriers.txt + */ +/* + * Prevent the compiler from merging or refetching reads or writes. The + * compiler is also forbidden from reordering successive instances of + * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some + * particular ordering. One way to make the compiler aware of ordering is to + * put the two invocations of READ_ONCE or WRITE_ONCE in different C + * statements. + * + * These two macros will also work on aggregate data types like structs or + * unions. + * + * Their two major use cases are: (1) Mediating communication between + * process-level code and irq/NMI handlers, all running on the same CPU, + * and (2) Ensuring that the compiler does not fold, spindle, or otherwise + * mutilate accesses that either do not require ordering or that interact + * with an explicit memory barrier or atomic instruction that provides the + * required ordering. + */ +#ifndef __BR_RWONCE_H +#define __BR_RWONCE_H + +/************ originally in */ +#if __has_attribute(__error__) +# define __compiletime_error(msg) __attribute__((__error__(msg))) +#else +# define __compiletime_error(msg) +#endif + +/************ originally in */ +/* + * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving + * non-scalar types unchanged. + */ +/* + * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char' + * is not type-compatible with 'signed char', and we define a separate case. 
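+ *
+ * Illustrative example: for 'const volatile int x;', the expression
+ * __unqual_scalar_typeof(x) denotes plain 'int', so it can be used to
+ * declare an unqualified local copy of a volatile scalar:
+ *
+ *     const volatile int x = 0;
+ *     __unqual_scalar_typeof(x) y = x;    // y has plain type 'int'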
+ */ +#define __scalar_type_to_expr_cases(type) \ + unsigned type: (unsigned type)0, \ + signed type: (signed type)0 + +#define __unqual_scalar_typeof(x) \ + typeof(_Generic((x), \ + char: (char)0, \ + __scalar_type_to_expr_cases(char), \ + __scalar_type_to_expr_cases(short), \ + __scalar_type_to_expr_cases(int), \ + __scalar_type_to_expr_cases(long), \ + __scalar_type_to_expr_cases(long long), \ + default: (x))) + +/* Is this type a native word size -- useful for atomic operations */ +#define __native_word(t) \ + (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \ + sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) + +#ifdef __OPTIMIZE__ +# define __compiletime_assert(condition, msg, prefix, suffix) \ + do { \ + extern void prefix ## suffix(void) __compiletime_error(msg); \ + if (!(condition)) \ + prefix ## suffix(); \ + } while (0) +#else +# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) +#endif + +#define _compiletime_assert(condition, msg, prefix, suffix) \ + __compiletime_assert(condition, msg, prefix, suffix) + +/** + * compiletime_assert - break build and emit msg if condition is false + * @condition: a compile-time constant condition to check + * @msg: a message to emit if condition is false + * + * In tradition of POSIX assert, this macro will break the build if the + * supplied condition is *false*, emitting the supplied error message if the + * compiler has support to do so. + */ +#define compiletime_assert(condition, msg) \ + _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__) + +#define compiletime_assert_atomic_type(t) \ + compiletime_assert(__native_word(t), \ + "Need native word sized stores/loads for atomicity.") + +/************ originally in */ +/* + * Yes, this permits 64-bit accesses on 32-bit architectures. These will + * actually be atomic in some cases (namely Armv7 + LPAE), but for others we + * rely on the access being split into 2x32-bit accesses for a 32-bit quantity + * (e.g. a virtual address) and a strong prevailing wind. + */ +#define compiletime_assert_rwonce_type(t) \ + compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \ + "Unsupported access size for {READ,WRITE}_ONCE().") + +/* + * Use __READ_ONCE() instead of READ_ONCE() if you do not require any + * atomicity. Note that this may result in tears! + */ +#ifndef __READ_ONCE +#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x)) +#endif + +#define READ_ONCE(x) \ +({ \ + compiletime_assert_rwonce_type(x); \ + __READ_ONCE(x); \ +}) + +#define __WRITE_ONCE(x, val) \ +do { \ + *(volatile typeof(x) *)&(x) = (val); \ +} while (0) + +#define WRITE_ONCE(x, val) \ +do { \ + compiletime_assert_rwonce_type(x); \ + __WRITE_ONCE(x, val); \ +} while (0) + +#endif /* __BR_RWONCE_H */ diff --git a/2020/libsrc/debug.c b/2020/libsrc/debug.c new file mode 100644 index 0000000..c1e5b9c --- /dev/null +++ b/2020/libsrc/debug.c @@ -0,0 +1,111 @@ +/* debug.c - debug/log management + * + * Copyright (C) 2021 Bruno Raoult ("br") + * Licensed under the GNU General Public License v3.0 or later. + * Some rights reserved. See COPYING. + * + * You should have received a copy of the GNU General Public License along with this + * program. If not, see . 
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <time.h>
+
+#ifndef DEBUG_DEBUG
+#define DEBUG_DEBUG
+#endif
+
+#include "debug.h"
+
+#define NANOSEC  1000000000                /* nanoseconds per second */
+#define MILLISEC 1000000                   /* nanoseconds per millisecond */
+
+static s64 timer_start;                    /* in nanoseconds */
+static u32 debug_level = 0;
+
+void debug_level_set(u32 level)
+{
+    debug_level = level;
+
+    log(1, "debug level set to %u\n", level);
+}
+
+void debug_init(u32 level)
+{
+    struct timespec timer;
+
+    debug_level_set(level);
+    if (!clock_gettime(CLOCK_MONOTONIC, &timer)) {
+        timer_start = timer.tv_sec * NANOSEC + timer.tv_nsec;
+    } else {
+        timer_start = 0;
+    }
+    log(0, "timer started.\n");
+}
+
+static inline s64 timer_elapsed(void)
+{
+    struct timespec timer;
+
+    clock_gettime(CLOCK_MONOTONIC, &timer);
+    return (timer.tv_sec * NANOSEC + timer.tv_nsec) - timer_start;
+}
+
+/* debug - log function
+ * @level     : message level (printed only when <= current debug level)
+ * @timestamp : print elapsed time when true
+ * @indent    : indent level (2 spaces each)
+ * @src       : source file/func name (or NULL)
+ * @line      : line number (0 to omit)
+ * @fmt       : printf-like format string
+ */
+void debug(u32 level, bool timestamp, u32 indent, const char *src,
+           u32 line, const char *fmt, ...)
+{
+    if (level > debug_level)
+        return;
+
+    va_list ap;
+
+    if (indent)
+        printf("%*s", 2 * (indent - 1), "");
+
+    if (timestamp) {
+        s64 diff = timer_elapsed();
+        printf("%ld.%03ld ", diff / NANOSEC, (diff / MILLISEC) % 1000);
+        printf("%010ld ", diff);
+    }
+
+    if (src) {
+        if (line)
+            printf("[%s:%u] ", src, line);
+        else
+            printf("[%s] ", src);
+    }
+    va_start(ap, fmt);
+    vprintf(fmt, ap);
+    va_end(ap);
+}
+
+#ifdef BIN_debug
+#include
+
+int main()
+{
+    int foo = 1;
+    debug_init(5);
+
+    log(0, "log0=%d\n", foo++);
+    log(1, "log1=%d\n", foo++);
+    log(2, "log2=%d\n", foo++);
+    log_i(2, "log_i 2=%d\n", foo++);
+    log_i(5, "log_i 5=%d\n", foo++);
+    log_i(6, "log_i 6=%d\n", foo++);
+    log_it(4, "log_it 4=%d\n", foo++);
+    log_f(1, "log_f 1=%d\n", foo++);
+}
+#endif
diff --git a/2020/libsrc/pool.c b/2020/libsrc/pool.c
new file mode 100644
index 0000000..f2f6cda
--- /dev/null
+++ b/2020/libsrc/pool.c
@@ -0,0 +1,225 @@
+/* pool.c - A simple pool manager.
+ *
+ * Copyright (C) 2021 Bruno Raoult ("br")
+ * Licensed under the GNU General Public License v3.0 or later.
+ * Some rights reserved. See COPYING.
+ *
+ * You should have received a copy of the GNU General Public License along with this
+ * program. If not, see .
+ * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +#include +#include +#include +#include +#include + +#include "list.h" +#include "pool.h" +#include "debug.h" +#include "bits.h" + +void pool_stats(pool_t *pool) +{ + if (pool) { +# ifdef DEBUG_POOL + block_t *block; + + log_f(1, "[%s] pool [%p]: blocks:%u avail:%u alloc:%u grow:%u eltsize:%lu\n", + pool->name, (void *)pool, pool->nblocks, pool->available, pool->allocated, + pool->growsize, pool->eltsize); + log(5, "\tblocks: "); + list_for_each_entry(block, &pool->list_blocks, list_blocks) { + log(5, "%p ", block); + } + log(5, "\n"); +# endif + } +} + +pool_t *pool_create(const char *name, u32 growsize, size_t eltsize) +{ + pool_t *pool; + +# ifdef DEBUG_POOL + log_f(1, "name=[%s] growsize=%u eltsize=%lu\n", + name, growsize, eltsize); +# endif + /* we need at least sizeof(struct list_head) space in pool elements + */ + if (eltsize < sizeof (struct list_head)) { +# ifdef DEBUG_POOL + log_f(1, "[%s]: structure size too small (%lu < %lu), adjusting to %lu.\n", + name, eltsize, sizeof(struct list_head), sizeof(struct list_head)); +# endif + eltsize = sizeof(struct list_head); + } + if ((pool = malloc(sizeof (*pool)))) { + strncpy(pool->name, name, POOL_NAME_LENGTH - 1); + pool->name[POOL_NAME_LENGTH - 1] = 0; + pool->growsize = growsize; + pool->eltsize = eltsize; + pool->available = 0; + pool->allocated = 0; + pool->nblocks = 0; + INIT_LIST_HEAD(&pool->list_available); + INIT_LIST_HEAD(&pool->list_blocks); + } + return pool; +} + +static u32 _pool_add(pool_t *pool, struct list_head *elt) +{ +# ifdef DEBUG_POOL + log_f(6, "pool=%p &head=%p elt=%p off1=%lu off2=%lu\n", + (void *)pool, + (void *)&pool->list_available, + (void *)elt, + (void *)&pool->list_available-(void *)pool, + offsetof(pool_t, list_available)); +# endif + + list_add(elt, &pool->list_available); + return ++pool->available; +} + +u32 pool_add(pool_t *pool, void *elt) +{ + return _pool_add(pool, elt); +} + +static struct list_head *_pool_get(pool_t *pool) +{ + struct list_head *res = pool->list_available.next; + pool->available--; + list_del(res); + return res; +} + +void *pool_get(pool_t *pool) +{ + if (!pool) + return NULL; + if (!pool->available) { + block_t *block = malloc(sizeof(block_t) + pool->eltsize * pool->growsize); + void *cur; + u32 i; + + if (!block) { +# ifdef DEBUG_POOL + log_f(1, "[%s]: failed block allocation\n", pool->name); +# endif + errno = ENOMEM; + return NULL; + } + + /* maintain list of allocated blocks + */ + list_add(&block->list_blocks, &pool->list_blocks); + pool->nblocks++; + +# ifdef DEBUG_POOL + log_f(1, "[%s]: growing pool from %u to %u elements. 
block=%p nblocks=%u\n", + pool->name, + pool->allocated, + pool->allocated + pool->growsize, + block, + pool->nblocks); +# endif + + pool->allocated += pool->growsize; + for (i = 0; i < pool->growsize; ++i) { + cur = block->data + i * pool->eltsize; +# ifdef DEBUG_POOL + log_f(7, "alloc=%p cur=%p\n", block, cur); +# endif + _pool_add(pool, (struct list_head *)cur); + } + //pool_stats(pool); + } + /* this is the effective address if the object (and also the + * pool list_head address) + */ + return _pool_get(pool); +} + +void pool_destroy(pool_t *pool) +{ + block_t *block, *tmp; + if (!pool) + return; + /* release memory blocks */ +# ifdef DEBUG_POOL + log_f(1, "[%s]: releasing %d blocks and main structure\n", pool->name, pool->nblocks); + log(5, "blocks:"); +# endif + list_for_each_entry_safe(block, tmp, &pool->list_blocks, list_blocks) { + list_del(&block->list_blocks); + free(block); +# ifdef DEBUG_POOL + log(5, " %p", block); +# endif + } +# ifdef DEBUG_POOL + log(5, "\n"); +# endif + free(pool); +} + +#ifdef BIN_pool +struct d { + u16 data1; + char c; + struct list_head list; +}; + +static LIST_HEAD (head); + +int main(int ac, char**av) +{ + pool_t *pool; + int total; + int action=0; + u16 icur=0; + char ccur='z'; + struct d *elt; + + debug_init(3); + log_f(1, "%s: sizeof(d)=%lu sizeof(*d)=%lu off=%lu\n", *av, sizeof(elt), + sizeof(*elt), offsetof(struct d, list)); + + if ((pool = pool_create("dummy", 3, sizeof(*elt)))) { + pool_stats(pool); + for (int cur=1; curdata1 = icur++; + elt->c = ccur--; + list_add(&elt->list, &head); + } + pool_stats(pool); + action = 1; + } else { /* remove one elt from list */ + log_f(2, "deleting %d elements\n", total); + for (int i = 0; i < total; ++i) { + if (!list_empty(&head)) { + elt = list_last_entry(&head, struct d, list); + printf("elt=[%d, %c]\n", elt->data1, elt->c); + list_del(&elt->list); + pool_add(pool, elt); + } + } + pool_stats(pool); + action = 0; + } + } + } + pool_stats(pool); +} +#endif diff --git a/2020/libsrc/xxhash.c b/2020/libsrc/xxhash.c new file mode 100644 index 0000000..d5bb9ff --- /dev/null +++ b/2020/libsrc/xxhash.c @@ -0,0 +1,500 @@ +/* + * xxHash - Extremely Fast Hash algorithm + * Copyright (C) 2012-2016, Yann Collet. + * + * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following disclaimer + * in the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * This program is free software; you can redistribute it and/or modify it under + * the terms of the GNU General Public License version 2 as published by the + * Free Software Foundation. This program is dual-licensed; you may select + * either version 2 of the GNU General Public License ("GPL") or BSD license + * ("BSD"). + * + * You can contact the author at: + * - xxHash homepage: https://cyan4973.github.io/xxHash/ + * - xxHash source repository: https://github.com/Cyan4973/xxHash + */ + +#include +#include +#include +#include +#include +#include +#include + +/*-************************************* + * Macros + **************************************/ +#define xxh_rotl32(x, r) ((x << r) | (x >> (32 - r))) +#define xxh_rotl64(x, r) ((x << r) | (x >> (64 - r))) + +#ifdef __LITTLE_ENDIAN +# define XXH_CPU_LITTLE_ENDIAN 1 +#else +# define XXH_CPU_LITTLE_ENDIAN 0 +#endif + +/*-************************************* + * Constants + **************************************/ +static const uint32_t PRIME32_1 = 2654435761U; +static const uint32_t PRIME32_2 = 2246822519U; +static const uint32_t PRIME32_3 = 3266489917U; +static const uint32_t PRIME32_4 = 668265263U; +static const uint32_t PRIME32_5 = 374761393U; + +static const uint64_t PRIME64_1 = 11400714785074694791ULL; +static const uint64_t PRIME64_2 = 14029467366897019727ULL; +static const uint64_t PRIME64_3 = 1609587929392839161ULL; +static const uint64_t PRIME64_4 = 9650029242287828579ULL; +static const uint64_t PRIME64_5 = 2870177450012600261ULL; + +/*-************************** + * Utils + ***************************/ +void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src) +{ + memcpy(dst, src, sizeof(*dst)); +} +EXPORT_SYMBOL(xxh32_copy_state); + +void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src) +{ + memcpy(dst, src, sizeof(*dst)); +} +EXPORT_SYMBOL(xxh64_copy_state); + +/*-*************************** + * Simple Hash Functions + ****************************/ +static uint32_t xxh32_round(uint32_t seed, const uint32_t input) +{ + seed += input * PRIME32_2; + seed = xxh_rotl32(seed, 13); + seed *= PRIME32_1; + return seed; +} + +uint32_t xxh32(const void *input, const size_t len, const uint32_t seed) +{ + const uint8_t *p = (const uint8_t *)input; + const uint8_t *b_end = p + len; + uint32_t h32; + + if (len >= 16) { + const uint8_t *const limit = b_end - 16; + uint32_t v1 = seed + PRIME32_1 + PRIME32_2; + uint32_t v2 = seed + PRIME32_2; + uint32_t v3 = seed + 0; + uint32_t v4 = seed - PRIME32_1; + + do { + v1 = xxh32_round(v1, get_unaligned_le32(p)); + p += 4; + v2 = xxh32_round(v2, get_unaligned_le32(p)); + p += 4; + v3 = xxh32_round(v3, get_unaligned_le32(p)); + p += 4; + v4 = xxh32_round(v4, get_unaligned_le32(p)); + p += 4; + } while (p <= limit); + + h32 = xxh_rotl32(v1, 1) + xxh_rotl32(v2, 7) + + xxh_rotl32(v3, 12) + xxh_rotl32(v4, 18); + } else { + h32 = seed + PRIME32_5; + } + + h32 += (uint32_t)len; + + while (p + 4 <= b_end) { + h32 += 
get_unaligned_le32(p) * PRIME32_3; + h32 = xxh_rotl32(h32, 17) * PRIME32_4; + p += 4; + } + + while (p < b_end) { + h32 += (*p) * PRIME32_5; + h32 = xxh_rotl32(h32, 11) * PRIME32_1; + p++; + } + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} +EXPORT_SYMBOL(xxh32); + +static uint64_t xxh64_round(uint64_t acc, const uint64_t input) +{ + acc += input * PRIME64_2; + acc = xxh_rotl64(acc, 31); + acc *= PRIME64_1; + return acc; +} + +static uint64_t xxh64_merge_round(uint64_t acc, uint64_t val) +{ + val = xxh64_round(0, val); + acc ^= val; + acc = acc * PRIME64_1 + PRIME64_4; + return acc; +} + +uint64_t xxh64(const void *input, const size_t len, const uint64_t seed) +{ + const uint8_t *p = (const uint8_t *)input; + const uint8_t *const b_end = p + len; + uint64_t h64; + + if (len >= 32) { + const uint8_t *const limit = b_end - 32; + uint64_t v1 = seed + PRIME64_1 + PRIME64_2; + uint64_t v2 = seed + PRIME64_2; + uint64_t v3 = seed + 0; + uint64_t v4 = seed - PRIME64_1; + + do { + v1 = xxh64_round(v1, get_unaligned_le64(p)); + p += 8; + v2 = xxh64_round(v2, get_unaligned_le64(p)); + p += 8; + v3 = xxh64_round(v3, get_unaligned_le64(p)); + p += 8; + v4 = xxh64_round(v4, get_unaligned_le64(p)); + p += 8; + } while (p <= limit); + + h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) + + xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18); + h64 = xxh64_merge_round(h64, v1); + h64 = xxh64_merge_round(h64, v2); + h64 = xxh64_merge_round(h64, v3); + h64 = xxh64_merge_round(h64, v4); + + } else { + h64 = seed + PRIME64_5; + } + + h64 += (uint64_t)len; + + while (p + 8 <= b_end) { + const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p)); + + h64 ^= k1; + h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; + p += 8; + } + + if (p + 4 <= b_end) { + h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1; + h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p += 4; + } + + while (p < b_end) { + h64 ^= (*p) * PRIME64_5; + h64 = xxh_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} +EXPORT_SYMBOL(xxh64); + +/*-************************************************** + * Advanced Hash Functions + ***************************************************/ +void xxh32_reset(struct xxh32_state *statePtr, const uint32_t seed) +{ + /* use a local state for memcpy() to avoid strict-aliasing warnings */ + struct xxh32_state state; + + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME32_1 + PRIME32_2; + state.v2 = seed + PRIME32_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME32_1; + memcpy(statePtr, &state, sizeof(state)); +} +EXPORT_SYMBOL(xxh32_reset); + +void xxh64_reset(struct xxh64_state *statePtr, const uint64_t seed) +{ + /* use a local state for memcpy() to avoid strict-aliasing warnings */ + struct xxh64_state state; + + memset(&state, 0, sizeof(state)); + state.v1 = seed + PRIME64_1 + PRIME64_2; + state.v2 = seed + PRIME64_2; + state.v3 = seed + 0; + state.v4 = seed - PRIME64_1; + memcpy(statePtr, &state, sizeof(state)); +} +EXPORT_SYMBOL(xxh64_reset); + +int xxh32_update(struct xxh32_state *state, const void *input, const size_t len) +{ + const uint8_t *p = (const uint8_t *)input; + const uint8_t *const b_end = p + len; + + if (input == NULL) + return -EINVAL; + + state->total_len_32 += (uint32_t)len; + state->large_len |= (len >= 16) | (state->total_len_32 >= 16); + + if (state->memsize + len < 16) { /* fill in tmp buffer */ + memcpy((uint8_t 
*)(state->mem32) + state->memsize, input, len); + state->memsize += (uint32_t)len; + return 0; + } + + if (state->memsize) { /* some data left from previous update */ + const uint32_t *p32 = state->mem32; + + memcpy((uint8_t *)(state->mem32) + state->memsize, input, + 16 - state->memsize); + + state->v1 = xxh32_round(state->v1, get_unaligned_le32(p32)); + p32++; + state->v2 = xxh32_round(state->v2, get_unaligned_le32(p32)); + p32++; + state->v3 = xxh32_round(state->v3, get_unaligned_le32(p32)); + p32++; + state->v4 = xxh32_round(state->v4, get_unaligned_le32(p32)); + p32++; + + p += 16-state->memsize; + state->memsize = 0; + } + + if (p <= b_end - 16) { + const uint8_t *const limit = b_end - 16; + uint32_t v1 = state->v1; + uint32_t v2 = state->v2; + uint32_t v3 = state->v3; + uint32_t v4 = state->v4; + + do { + v1 = xxh32_round(v1, get_unaligned_le32(p)); + p += 4; + v2 = xxh32_round(v2, get_unaligned_le32(p)); + p += 4; + v3 = xxh32_round(v3, get_unaligned_le32(p)); + p += 4; + v4 = xxh32_round(v4, get_unaligned_le32(p)); + p += 4; + } while (p <= limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = v3; + state->v4 = v4; + } + + if (p < b_end) { + memcpy(state->mem32, p, (size_t)(b_end-p)); + state->memsize = (uint32_t)(b_end-p); + } + + return 0; +} +EXPORT_SYMBOL(xxh32_update); + +uint32_t xxh32_digest(const struct xxh32_state *state) +{ + const uint8_t *p = (const uint8_t *)state->mem32; + const uint8_t *const b_end = (const uint8_t *)(state->mem32) + + state->memsize; + uint32_t h32; + + if (state->large_len) { + h32 = xxh_rotl32(state->v1, 1) + xxh_rotl32(state->v2, 7) + + xxh_rotl32(state->v3, 12) + xxh_rotl32(state->v4, 18); + } else { + h32 = state->v3 /* == seed */ + PRIME32_5; + } + + h32 += state->total_len_32; + + while (p + 4 <= b_end) { + h32 += get_unaligned_le32(p) * PRIME32_3; + h32 = xxh_rotl32(h32, 17) * PRIME32_4; + p += 4; + } + + while (p < b_end) { + h32 += (*p) * PRIME32_5; + h32 = xxh_rotl32(h32, 11) * PRIME32_1; + p++; + } + + h32 ^= h32 >> 15; + h32 *= PRIME32_2; + h32 ^= h32 >> 13; + h32 *= PRIME32_3; + h32 ^= h32 >> 16; + + return h32; +} +EXPORT_SYMBOL(xxh32_digest); + +int xxh64_update(struct xxh64_state *state, const void *input, const size_t len) +{ + const uint8_t *p = (const uint8_t *)input; + const uint8_t *const b_end = p + len; + + if (input == NULL) + return -EINVAL; + + state->total_len += len; + + if (state->memsize + len < 32) { /* fill in tmp buffer */ + memcpy(((uint8_t *)state->mem64) + state->memsize, input, len); + state->memsize += (uint32_t)len; + return 0; + } + + if (state->memsize) { /* tmp buffer is full */ + uint64_t *p64 = state->mem64; + + memcpy(((uint8_t *)p64) + state->memsize, input, + 32 - state->memsize); + + state->v1 = xxh64_round(state->v1, get_unaligned_le64(p64)); + p64++; + state->v2 = xxh64_round(state->v2, get_unaligned_le64(p64)); + p64++; + state->v3 = xxh64_round(state->v3, get_unaligned_le64(p64)); + p64++; + state->v4 = xxh64_round(state->v4, get_unaligned_le64(p64)); + + p += 32 - state->memsize; + state->memsize = 0; + } + + if (p + 32 <= b_end) { + const uint8_t *const limit = b_end - 32; + uint64_t v1 = state->v1; + uint64_t v2 = state->v2; + uint64_t v3 = state->v3; + uint64_t v4 = state->v4; + + do { + v1 = xxh64_round(v1, get_unaligned_le64(p)); + p += 8; + v2 = xxh64_round(v2, get_unaligned_le64(p)); + p += 8; + v3 = xxh64_round(v3, get_unaligned_le64(p)); + p += 8; + v4 = xxh64_round(v4, get_unaligned_le64(p)); + p += 8; + } while (p <= limit); + + state->v1 = v1; + state->v2 = v2; + state->v3 = 
v3; + state->v4 = v4; + } + + if (p < b_end) { + memcpy(state->mem64, p, (size_t)(b_end-p)); + state->memsize = (uint32_t)(b_end - p); + } + + return 0; +} +EXPORT_SYMBOL(xxh64_update); + +uint64_t xxh64_digest(const struct xxh64_state *state) +{ + const uint8_t *p = (const uint8_t *)state->mem64; + const uint8_t *const b_end = (const uint8_t *)state->mem64 + + state->memsize; + uint64_t h64; + + if (state->total_len >= 32) { + const uint64_t v1 = state->v1; + const uint64_t v2 = state->v2; + const uint64_t v3 = state->v3; + const uint64_t v4 = state->v4; + + h64 = xxh_rotl64(v1, 1) + xxh_rotl64(v2, 7) + + xxh_rotl64(v3, 12) + xxh_rotl64(v4, 18); + h64 = xxh64_merge_round(h64, v1); + h64 = xxh64_merge_round(h64, v2); + h64 = xxh64_merge_round(h64, v3); + h64 = xxh64_merge_round(h64, v4); + } else { + h64 = state->v3 + PRIME64_5; + } + + h64 += (uint64_t)state->total_len; + + while (p + 8 <= b_end) { + const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p)); + + h64 ^= k1; + h64 = xxh_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; + p += 8; + } + + if (p + 4 <= b_end) { + h64 ^= (uint64_t)(get_unaligned_le32(p)) * PRIME64_1; + h64 = xxh_rotl64(h64, 23) * PRIME64_2 + PRIME64_3; + p += 4; + } + + while (p < b_end) { + h64 ^= (*p) * PRIME64_5; + h64 = xxh_rotl64(h64, 11) * PRIME64_1; + p++; + } + + h64 ^= h64 >> 33; + h64 *= PRIME64_2; + h64 ^= h64 >> 29; + h64 *= PRIME64_3; + h64 ^= h64 >> 32; + + return h64; +} +EXPORT_SYMBOL(xxh64_digest); + +MODULE_LICENSE("Dual BSD/GPL"); +MODULE_DESCRIPTION("xxHash");
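+
+/*
+ * Usage sketch (illustrative only; 'buf' and 'len' stand for any caller
+ * buffer). The one-shot and streaming interfaces produce the same digest:
+ *
+ *     uint64_t h1 = xxh64(buf, len, 0);        // one-shot, seed 0
+ *
+ *     struct xxh64_state st;                   // streaming equivalent
+ *     xxh64_reset(&st, 0);
+ *     xxh64_update(&st, buf, len);
+ *     uint64_t h2 = xxh64_digest(&st);         // h2 == h1
+ */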