diff --git a/include/bits.h b/include/bits.h
new file mode 100644
index 0000000..6a841af
--- /dev/null
+++ b/include/bits.h
@@ -0,0 +1,540 @@
+/* bits.h - bit manipulation functions.
+ *
+ * Copyright (C) 2021-2022 Bruno Raoult ("br")
+ * Licensed under the GNU General Public License v3.0 or later.
+ * Some rights reserved. See COPYING.
+ *
+ * You should have received a copy of the GNU General Public License along with this
+ * program. If not, see <https://www.gnu.org/licenses/>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+#ifndef _BITS_H
+#define _BITS_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/* next include will define __WORDSIZE: 32 or 64
+ */
+#include <bits/wordsize.h>
+
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+/* no plan to support 32 bits for now...
+ * #if __WORDSIZE != 64
+ * #error "Only 64 bits word size supported."
+ * #endif
+ */
+
+/* fixed-size types
+ */
+typedef int64_t s64;
+typedef int32_t s32;
+typedef int16_t s16;
+typedef int8_t  s8;
+
+typedef uint64_t u64;
+typedef uint32_t u32;
+typedef uint16_t u16;
+typedef uint8_t  u8;
+
+/* convenience types
+ */
+typedef long long int          llong;
+typedef unsigned long long int ullong;
+typedef unsigned long int      ulong;
+typedef unsigned int           uint;
+typedef unsigned short         ushort;
+typedef unsigned char          uchar;
+
+/* common type sizes
+ */
+#define BITS_PER_CHAR 8
+
+#ifndef BITS_PER_SHORT
+#define BITS_PER_SHORT (BITS_PER_CHAR * sizeof (short))
+#endif
+#ifndef BITS_PER_INT
+#define BITS_PER_INT   (BITS_PER_CHAR * sizeof (int))
+#endif
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG  (BITS_PER_CHAR * sizeof (long))
+#endif
+#ifndef BITS_PER_LLONG
+#define BITS_PER_LLONG (BITS_PER_CHAR * sizeof (long long))
+#endif
+
+/* count set bits: 10101000 -> 3
+ *                 ^ ^ ^
+ */
+static __always_inline int popcount64(u64 n)
+{
+# if __has_builtin(__builtin_popcountl)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin.\n");
+#  endif
+    return __builtin_popcountl(n);
+
+# else
+#  ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+#  endif
+    int count = 0;
+    while (n) {
+        count++;
+        n &= (n - 1);                             /* clear lowest set bit */
+    }
+    return count;
+# endif
+}
+
+static __always_inline int popcount32(u32 n)
+{
+# if __has_builtin(__builtin_popcount)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin.\n");
+#  endif
+    return __builtin_popcount(n);
+
+# else
+#  ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+#  endif
+    int count = 0;
+    while (n) {
+        count++;
+        n &= (n - 1);
+    }
+    return count;
+# endif
+}
+
+/* char is a special case, as it can be signed or unsigned
+ */
+typedef signed char schar;
+
+/* count trailing zeroes: 00101000 -> 3
+ *                             ^^^
+ */
+static __always_inline int ctz64(u64 n)
+{
+# if __has_builtin(__builtin_ctzl)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin ctzl.\n");
+#  endif
+    return __builtin_ctzl(n);
+
+# elif __has_builtin(__builtin_clzl)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin clzl.\n");
+#  endif
+    return __WORDSIZE - (__builtin_clzl(n & -n) + 1);
+
+# else
+#  ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+#  endif
+    return popcount64((n & -n) - 1);
+# endif
+}
+
+static __always_inline int ctz32(u32 n)
+{
+# if __has_builtin(__builtin_ctz)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin ctz.\n");
+#  endif
+    return __builtin_ctz(n);
+
+# elif __has_builtin(__builtin_clz)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin clz.\n");
+#  endif
+    return 32 - (__builtin_clz(n & -n) + 1);
+
+# else
+#  ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+#  endif
+    return popcount32((n & -n) - 1);
+# endif
+}
+
+/* clz - count leading zeroes: 00101000 -> 2
+ *                             ^^
+ */
+static __always_inline int clz64(u64 n)
+{
+# if __has_builtin(__builtin_clzl)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin.\n");
+#  endif
+    return __builtin_clzl(n);
+
+# else
+#  ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+#  endif
+    u64 r, q;
+
+    r = (n > 0xFFFFFFFF) << 5; n >>= r;
+    q = (n > 0xFFFF)     << 4; n >>= q; r |= q;
+    q = (n > 0xFF  )     << 3; n >>= q; r |= q;
+    q = (n > 0xF   )     << 2; n >>= q; r |= q;
+    q = (n > 0x3   )     << 1; n >>= q; r |= q;
+    r |= (n >> 1);
+    return 64 - r - 1;
+# endif
+}
+
+static __always_inline int clz32(u32 n)
+{
+# if __has_builtin(__builtin_clz)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin.\n");
+#  endif
+    return __builtin_clz(n);
+
+# else
+#  ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+#  endif
+    u32 r, q;
+
+    r = (n > 0xFFFF) << 4; n >>= r;
+    q = (n > 0xFF  ) << 3; n >>= q; r |= q;
+    q = (n > 0xF   ) << 2; n >>= q; r |= q;
+    q = (n > 0x3   ) << 1; n >>= q; r |= q;
+    r |= (n >> 1);
+    return 32 - r - 1;
+# endif
+}
+
+/* fls - find last set: 00101000 -> 6
+ *                        ^
+ */
+static __always_inline int fls64(u64 n)
+{
+    if (!n)
+        return 0;
+    return 64 - clz64(n);
+}
+
+static __always_inline int fls32(u32 n)
+{
+    if (!n)
+        return 0;
+    return 32 - clz32(n);
+}
+
+/* find first set: 00101000 -> 4
+ *                      ^
+ */
+static __always_inline uint ffs64(u64 n)
+{
+# if __has_builtin(__builtin_ffsl)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin ffsl.\n");
+#  endif
+    return __builtin_ffsl(n);
+
+# elif __has_builtin(__builtin_ctzl)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin ctzl.\n");
+#  endif
+    if (n == 0)
+        return 0;
+    return __builtin_ctzl(n) + 1;
+
+# else
+#  ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+#  endif
+    if (n == 0)
+        return 0;
+    return popcount64(n ^ ~-n);
+# endif
+}
+
+static __always_inline uint ffs32(u32 n)
+{
+# if __has_builtin(__builtin_ffs)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin ffs.\n");
+#  endif
+    return __builtin_ffs(n);
+
+# elif __has_builtin(__builtin_ctz)
+#  ifdef DEBUG_BITS
+    log_f(1, "builtin ctz.\n");
+#  endif
+    if (n == 0)
+        return 0;
+    return __builtin_ctz(n) + 1;
+
+# else
+#  ifdef DEBUG_BITS
+    log_f(1, "emulated.\n");
+#  endif
+    if (n == 0)
+        return 0;
+    return popcount32(n ^ ~-n);
+# endif
+}
+
+/* rolXX/rorXX are taken from the kernel's <linux/bitops.h> and are:
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+/**
+ * rol64 - rotate a 64-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u64 rol64(u64 word, unsigned int shift)
+{
+    return (word << (shift & 63)) | (word >> ((-shift) & 63));
+}
+
+/**
+ * ror64 - rotate a 64-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u64 ror64(u64 word, unsigned int shift)
+{
+    return (word >> (shift & 63)) | (word << ((-shift) & 63));
+}
+
+/**
+ * rol32 - rotate a 32-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u32 rol32(u32 word, unsigned int shift)
+{
+    return (word << (shift & 31)) | (word >> ((-shift) & 31));
+}
+
+/**
+ * ror32 - rotate a 32-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u32 ror32(u32 word, unsigned int shift)
+{
+    return (word >> (shift & 31)) | (word << ((-shift) & 31));
+}
+
+/**
+ * rol16 - rotate a 16-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u16 rol16(u16 word, unsigned int shift)
+{
+    return (word << (shift & 15)) | (word >> ((-shift) & 15));
+}
+
+/**
+ * ror16 - rotate a 16-bit value right
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u16 ror16(u16 word, unsigned int shift)
+{
+    return (word >> (shift & 15)) | (word << ((-shift) & 15));
+}
+
+/**
+ * rol8 - rotate an 8-bit value left
+ * @word: value to rotate
+ * @shift: bits to roll
+ */
+static inline u8 rol8(u8 word, unsigned int shift) +{ + return (word << (shift & 7)) | (word >> ((-shift) & 7)); +} + +/** + * ror8 - rotate an 8-bit value right + * @word: value to rotate + * @shift: bits to roll + */ +static inline u8 ror8(u8 word, unsigned int shift) +{ + return (word >> (shift & 7)) | (word << ((-shift) & 7)); +} + +/** + * ilog2 - + */ +static __always_inline __attribute__((const)) +int __ilog2_u32(u32 n) +{ + return fls32(n) - 1; +} + +static __always_inline __attribute__((const)) +int __ilog2_u64(u64 n) +{ + return fls64(n) - 1; +} + +/** + * is_power_of_2() - check if a value is a power of two + * @n: the value to check + * + * Determine whether some value is a power of two, where zero is + * *not* considered a power of two. + * Return: true if @n is a power of 2, otherwise false. + */ +static inline __attribute__((const)) +bool is_power_of_2(unsigned long n) +{ + return (n != 0 && ((n & (n - 1)) == 0)); +} + +/** + * ilog2 - log base 2 of 32-bit or a 64-bit unsigned value + * @n: parameter + * + * constant-capable log of base 2 calculation + * - this can be used to initialise global variables from constant data, hence + * the massive ternary operator construction + * + * selects the appropriately-sized optimised version depending on sizeof(n) + */ +#define ilog2(n) \ +( \ + __builtin_constant_p(n) ? \ + ((n) < 2 ? 0 : \ + 63 - __builtin_clzll(n)) : \ + (sizeof(n) <= 4) ? \ + __ilog2_u32(n) : \ + __ilog2_u64(n) \ + ) + +/** + * roundup_pow_of_two - round the given value up to nearest power of two + * @n: parameter + * + * round the given value up to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define roundup_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 1) ? 1 : \ + (1UL << (ilog2((n) - 1) + 1)) \ + ) : \ + __roundup_pow_of_two(n) \ + ) + +/** + * rounddown_pow_of_two - round the given value down to nearest power of two + * @n: parameter + * + * round the given value down to the nearest power of two + * - the result is undefined when n == 0 + * - this can be used to initialise global variables from constant data + */ +#define rounddown_pow_of_two(n) \ +( \ + __builtin_constant_p(n) ? ( \ + (1UL << ilog2(n))) : \ + __rounddown_pow_of_two(n) \ + ) + +static inline __attribute_const__ +int __order_base_2(unsigned long n) +{ + return n > 1 ? ilog2(n - 1) + 1 : 0; +} + +/** + * order_base_2 - calculate the (rounded up) base 2 order of the argument + * @n: parameter + * + * The first few values calculated by this routine: + * ob2(0) = 0 + * ob2(1) = 0 + * ob2(2) = 1 + * ob2(3) = 2 + * ob2(4) = 2 + * ob2(5) = 3 + * ... and so on. + */ +#define order_base_2(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) ? 0 : \ + ilog2((n) - 1) + 1) : \ + __order_base_2(n) \ +) + +static inline __attribute__((const)) +int __bits_per(unsigned long n) +{ + if (n < 2) + return 1; + if (is_power_of_2(n)) + return order_base_2(n) + 1; + return order_base_2(n); +} + +/** + * bits_per - calculate the number of bits required for the argument + * @n: parameter + * + * This is constant-capable and can be used for compile time + * initializations, e.g bitfields. + * + * The first few values calculated by this routine: + * bf(0) = 1 + * bf(1) = 1 + * bf(2) = 2 + * bf(3) = 2 + * bf(4) = 3 + * ... and so on. + */ +#define bits_per(n) \ +( \ + __builtin_constant_p(n) ? ( \ + ((n) == 0 || (n) == 1) \ + ? 
1 : ilog2(n) + 1 \ + ) : \ + __bits_per(n) \ +) + +/** bit_for_each - iterate over an u64/u32 bits + * @pos: an int used as current bit + * @tmp: a temp u64/u32 used as temporary storage + * @ul: the u64/u32 to loop over + * + * Usage: + * u64 u=139, _t; // u=b10001011 + * int cur; + * bit_for_each64(cur, _t, u) { + * printf("%d\n", cur); + * } + * This will display the position of each bit set in ul: 1, 2, 4, 8 + * + * I should probably re-think the implementation... + */ +#define bit_for_each64(pos, tmp, ul) \ + for (tmp = ul, pos = ffs64(tmp); tmp; tmp &= (tmp - 1), pos = ffs64(tmp)) + +#define bit_for_each32(pos, tmp, ul) \ + for (tmp = ul, pos = ffs32(tmp); tmp; tmp &= (tmp - 1), pos = ffs32(tmp)) + +/** or would it be more useful (counting bits from zero instead of 1) ? + */ +#define bit_for_each64_2(pos, tmp, ul) \ + for (tmp = ul, pos = ctz64(tmp); tmp; tmp ^= 1UL << pos, pos = ctz64(tmp)) + +#define bit_for_each32_2(pos, tmp, ul) \ + for (tmp = ul, pos = ctz32(tmp); tmp; tmp ^= 1U << pos, pos = ctz32(tmp)) + +#endif /* _BITS_H */ diff --git a/include/br.h b/include/br.h new file mode 100644 index 0000000..8475d55 --- /dev/null +++ b/include/br.h @@ -0,0 +1,224 @@ +/* br.h - misc macros. + * + * Copyright (C) 2021-2022 Bruno Raoult ("br") + * Licensed under the GNU General Public License v3.0 or later. + * Some rights reserved. See COPYING. + * + * You should have received a copy of the GNU General Public License along with this + * program. If not, see . + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + * Some parts are taken from Linux's kernel and others, and are : + * SPDX-License-Identifier: GPL-2.0 + * + * This header contains generic stuff. + */ + +#ifndef _BR_H +#define _BR_H + +#include "struct-group.h" + +/* Indirect stringification. Doing two levels allows the parameter to be a + * macro itself. For example, compile with -DFOO=bar, __stringify(FOO) + * converts to "bar". + */ +#define __stringify_1(x...) #x +#define __stringify(x...) __stringify_1(x) + +/* generate a (maybe) unique id. + */ +#define ___PASTE(x, y) x##y +#define __PASTE(x, y) ___PASTE(x, y) +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) + +/* unused/used parameters/functions + * https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute + * https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute + * https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute + * https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute + * https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute + * https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute + */ +#define __unused __attribute__((__unused__)) +#define __used __attribute__((__used__)) + +/* see https://lkml.org/lkml/2018/3/20/845 for explanation of this monster + */ +#define __is_constexpr(x) \ + (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8))) + +/* + * min()/max()/clamp() macros must accomplish three things: + * + * - avoid multiple evaluations of the arguments (so side-effects like + * "x++" happen only once) when non-constant. + * - perform strict type-checking (to generate warnings instead of + * nasty runtime surprises). See the "unnecessary" pointer comparison + * in __typecheck(). 
+ * - retain result as a constant expressions when called with only + * constant expressions (to avoid tripping VLA warnings in stack + * allocation usage). + */ +#define __typecheck(x, y) \ + (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1))) + +#define __no_side_effects(x, y) \ + (__is_constexpr(x) && __is_constexpr(y)) + +#define __safe_cmp(x, y) \ + (__typecheck(x, y) && __no_side_effects(x, y)) + +#define __cmp(x, y, op) ((x) op (y) ? (x) : (y)) + +#define __cmp_once(x, y, unique_x, unique_y, op) ({ \ + typeof(x) unique_x = (x); \ + typeof(y) unique_y = (y); \ + __cmp(unique_x, unique_y, op); }) + +#define __careful_cmp(x, y, op) \ + __builtin_choose_expr(__safe_cmp(x, y), \ + __cmp(x, y, op), \ + __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op)) + +#define __pure __attribute__((__pure__)) + +/** + * min - return minimum of two values of the same or compatible types + * @x: first value + * @y: second value + */ +#define min(x, y) __careful_cmp(x, y, <) + +/** + * max - return maximum of two values of the same or compatible types + * @x: first value + * @y: second value + */ +#define max(x, y) __careful_cmp(x, y, >) + +/** + * min3 - return minimum of three values + * @x: first value + * @y: second value + * @z: third value + */ +#define min3(x, y, z) min((typeof(x))min(x, y), z) + +/** + * max3 - return maximum of three values + * @x: first value + * @y: second value + * @z: third value + */ +#define max3(x, y, z) max((typeof(x))max(x, y), z) + +/** + * min_not_zero - return the minimum that is _not_ zero, unless both are zero + * @x: value1 + * @y: value2 + */ +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +/** + * clamp - return a value clamped to a given range with strict typechecking + * @val: current value + * @lo: lowest allowable value + * @hi: highest allowable value + * + * This macro does strict typechecking of @lo/@hi to make sure they are of the + * same type as @val. See the unnecessary pointer comparisons. + */ +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) + +/* + * ..and if you can't take the strict + * types, you can specify one yourself. + * + * Or not use min/max/clamp at all, of course. + */ + +/** + * min_t - return minimum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <) + +/** + * max_t - return maximum of two values, using the specified type + * @type: data type to use + * @x: first value + * @y: second value + */ +#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >) + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * @type to make all the comparisons. + */ +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument @val is. This is useful when @val is an unsigned + * type and @lo and @hi are literals that will otherwise be assigned a signed + * integer type. 
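+ *
+ * Usage sketch (the variable is illustrative, not from this header):
+ *   u8 level = 200;
+ *   level = clamp_val(level, 16, 64);  // temporaries are u8; result is 64
+ * With plain clamp(), the int literals would trip the strict type check.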
+ */ +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) + +/** + * swap - swap values of @a and @b + * @a: first value + * @b: second value + */ +#define swap(a, b) \ + do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) + +/** + * ARRAY_SIZE - get the number of elements in array @arr + * @arr: array to be sized + */ +#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +/** + * abs - return absolute value of an argument + * @x: the value. If it is unsigned type, it is converted to signed type first. + * char is treated as if it was signed (regardless of whether it really is) + * but the macro's return type is preserved as char. + * + * Return: an absolute value of x. + */ +#define abs(x) __abs_choose_expr(x, long long, \ + __abs_choose_expr(x, long, \ + __abs_choose_expr(x, int, \ + __abs_choose_expr(x, short, \ + __abs_choose_expr(x, char, \ + __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), char), \ + (char)({ signed char __x = (x); __x<0?-__x:__x; }), \ + ((void)0))))))) + +#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \ + __builtin_types_compatible_p(typeof(x), signed type) || \ + __builtin_types_compatible_p(typeof(x), unsigned type), \ + ({ signed type __x = (x); __x < 0 ? -__x : __x; }), other) + + +#endif /* _BR_H */ diff --git a/include/bug.h b/include/bug.h new file mode 100644 index 0000000..fdff9b8 --- /dev/null +++ b/include/bug.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _BR_BUG_H +#define _BR_BUG_H + +#include +#include +#include +#include "likely.h" +#include "debug.h" + +/* BUG functions inspired by Linux kernel's + */ + +#define panic() exit(0xff) + +/* + * Don't use BUG() or BUG_ON() unless there's really no way out; one + * example might be detecting data structure corruption in the middle + * of an operation that can't be backed out of. If the (sub)system + * can somehow continue operating, perhaps with reduced functionality, + * it's probably not BUG-worthy. + * + * If you're tempted to BUG(), think again: is completely giving up + * really the *only* solution? There are usually better options, where + * users don't need to reboot ASAP and can mostly shut down cleanly. + */ +#define BUG() do { \ + fprintf(stderr, "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + panic(); \ + } while (0) + +#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0) + +/* + * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report + * significant kernel issues that need prompt attention if they should ever + * appear at runtime. + * + * Do not use these macros when checking for invalid external inputs + * (e.g. invalid system call arguments, or invalid data coming from + * network/devices), and on transient conditions like ENOMEM or EAGAIN. + * These macros should be used for recoverable kernel issues only. + * For invalid external inputs, transient conditions, etc use + * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary. + * Do not include "BUG"/"WARNING" in format strings manually to make these + * conditions distinguishable from kernel issues. + * + * Use the versions with printk format strings to provide better diagnostics. + */ +#define __WARN() do { \ + fprintf(stderr, "WARNING: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \ + } while (0) +#define __WARN_printf(arg...) 
do { \
+        fprintf(stderr, arg); \
+    } while (0)
+
+#define WARN_ON(condition) ({ \
+            int __ret_warn_on = !!(condition); \
+            if (unlikely(__ret_warn_on)) \
+                __WARN(); \
+            unlikely(__ret_warn_on); \
+        })
+
+#define WARN(condition, format...) ({ \
+            int __ret_warn_on = !!(condition); \
+            if (unlikely(__ret_warn_on)) \
+                __WARN_printf(format); \
+            unlikely(__ret_warn_on); \
+        })
+
+#endif /* _BR_BUG_H */
diff --git a/include/circ_buf.h b/include/circ_buf.h
new file mode 100644
index 0000000..510baf0
--- /dev/null
+++ b/include/circ_buf.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * See Documentation/core-api/circular-buffers.rst for more information.
+ */
+
+#ifndef _LINUX_CIRC_BUF_H
+#define _LINUX_CIRC_BUF_H 1
+
+#define CIRC_BUF(name, type, bits) \
+    struct s##name { \
+        type buf[1 << (bits)]; \
+        int head; \
+        int tail; \
+    };
+
+struct circ_buf {
+    char *buf;
+    int head;
+    int tail;
+};
+
+/* Return count in buffer. */
+#define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))
+
+/* Return space available, 0..size-1. We always leave one free char
+   as a completely full buffer has head == tail, which is the same as
+   empty. */
+#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))
+
+/* Return count up to the end of the buffer. Carefully avoid
+   accessing head and tail more than once, so they can change
+   underneath us without returning inconsistent results. */
+#define CIRC_CNT_TO_END(head,tail,size) \
+    ({int end = (size) - (tail); \
+      int n = ((head) + end) & ((size)-1); \
+      n < end ? n : end;})
+
+/* Return space available up to the end of the buffer. */
+#define CIRC_SPACE_TO_END(head,tail,size) \
+    ({int end = (size) - 1 - (head); \
+      int n = (end + (tail)) & ((size)-1); \
+      n <= end ? n : end+1;})
+
+#endif /* _LINUX_CIRC_BUF_H */
diff --git a/include/container-of.h b/include/container-of.h
new file mode 100644
index 0000000..1c7f54f
--- /dev/null
+++ b/include/container-of.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* adaptation of Linux kernel's <linux/container_of.h>
+ */
+#ifndef _BR_CONTAINER_OF_H
+#define _BR_CONTAINER_OF_H
+
+#include <stddef.h>                               /* offsetof() */
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+/**
+ * typeof_member - get the type of a struct member
+ * @T: the struct type
+ * @m: the member name
+ */
+#define typeof_member(T, m) typeof(((T*)0)->m)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+        void *__mptr = (void *)(ptr); \
+        _Static_assert(__same_type(*(ptr), ((type *)0)->member) || \
+                       __same_type(*(ptr), void), \
+                       "pointer type mismatch in container_of()"); \
+        ((type *)(__mptr - offsetof(type, member))); })
+
+#endif /* _BR_CONTAINER_OF_H */
diff --git a/include/debug.h b/include/debug.h
new file mode 100644
index 0000000..1c12acf
--- /dev/null
+++ b/include/debug.h
@@ -0,0 +1,99 @@
+/* debug.h - debug/log management.
+ *
+ * Copyright (C) 2021-2022 Bruno Raoult ("br")
+ * Licensed under the GNU General Public License v3.0 or later.
+ * Some rights reserved. See COPYING.
+ *
+ * You should have received a copy of the GNU General Public License along with this
+ * program. If not, see <https://www.gnu.org/licenses/>.
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ */
+
+#ifndef DEBUG_H
+#define DEBUG_H
+
+#include <stdio.h>
+#include <stdbool.h>
+
+#include "bits.h"
+
+#define _unused __attribute__((__unused__))
+#define _printf __attribute__ ((format (printf, 6, 7)))
+
+#ifdef DEBUG_DEBUG
+void debug_init(u32 level);
+void debug_level_set(u32 level);
+u32 debug_level_get(void);
+void _printf debug(u32 level, bool timestamp,
+                   u32 indent, const char *src,
+                   u32 line, const char *fmt, ...);
+#else  /* DEBUG_DEBUG */
+static inline void debug_init(_unused u32 level) {}
+static inline void debug_level_set(_unused u32 level) {}
+static inline void _printf debug(_unused u32 level, _unused bool timestamp,
+                                 _unused u32 indent, _unused const char *src,
+                                 _unused u32 line, _unused const char *fmt, ...) {}
+#endif /* DEBUG_DEBUG */
+#undef _unused
+#undef _printf
+
+/**
+ * log - simple log (no function name, no indent, no timestamp)
+ * @level: log level
+ * @fmt: printf format string
+ * @args: subsequent arguments to printf
+ */
+#define log(level, fmt, args...) \
+    debug((level), false, 0, NULL, 0, fmt, ##args)
+
+/**
+ * log_i - log with indent (no function name, no timestamp)
+ * @level: log level
+ * @fmt: printf format string
+ * @args: subsequent arguments to printf
+ *
+ * Output example:
+ * >>>>val=2
+ */
+#define log_i(level, fmt, args...) \
+    debug((level), false, (level), NULL, 0, fmt, ##args)
+
+/**
+ * log_f - log with function name (no indent, no timestamp)
+ * @level: log level
+ * @fmt: printf format string
+ * @args: subsequent arguments to printf
+ *
+ * Output example:
+ * [function] val=2
+ */
+#define log_f(level, fmt, args...) \
+    debug((level), false, 0, __func__, 0, fmt, ##args)
+
+/**
+ * log_if - log with function name, line number, and indent (no timestamp)
+ * @level: log level
+ * @fmt: printf format string
+ * @args: subsequent arguments to printf
+ *
+ * Output example:
+ * >>>> [function:15] val=2
+ */
+#define log_if(level, fmt, args...) \
+    debug((level), false, (level), __func__, __LINE__, fmt, ##args)
+
+/**
+ * log_it - log with function name, line number, indent, and timestamp
+ * @level: log level
+ * @fmt: printf format string
+ * @args: subsequent arguments to printf
+ *
+ * Output example:
+ * >>>> [function:15] val=2
+ */
+#define log_it(level, fmt, args...) \
+    debug((level), true, (level), __func__, __LINE__, fmt, ##args)
+
+#endif /* DEBUG_H */
diff --git a/include/hash.h b/include/hash.h
new file mode 100644
index 0000000..3c9b99d
--- /dev/null
+++ b/include/hash.h
@@ -0,0 +1,172 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _BR_HASH_H
+#define _BR_HASH_H
+/* adaptation of Linux kernel's <linux/hash.h> and <linux/stringhash.h>
+ */
+
+/* Fast hashing routine for ints, longs and pointers.
+   (C) 2002 Nadia Yvette Chambers, IBM */
+
+#include <asm/bitsperlong.h>                      /* __BITS_PER_LONG */
+#include "bits.h"
+#include "br.h"
+
+/*
+ * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
+ * fs/inode.c. It's not actually prime any more (the previous primes
+ * were actively bad for hashing), but the name remains.
+ */
+#if __BITS_PER_LONG == 32
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
+#define hash_long(val, bits) hash_32(val, bits)
+#elif __BITS_PER_LONG == 64
+#define hash_long(val, bits) hash_64(val, bits)
+#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
+#else
+#error Wordsize not 32 or 64
+#endif
+
+/*
+ * This hash multiplies the input by a large odd number and takes the
+ * high bits.
Since multiplication propagates changes to the most + * significant end only, it is essential that the high bits of the + * product be used for the hash value. + * + * Chuck Lever verified the effectiveness of this technique: + * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf + * + * Although a random odd number will do, it turns out that the golden + * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice + * properties. (See Knuth vol 3, section 6.4, exercise 9.) + * + * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2, + * which is very slightly easier to multiply by and makes no + * difference to the hash distribution. + */ +#define GOLDEN_RATIO_32 0x61C88647 +#define GOLDEN_RATIO_64 0x61C8864680B583EBull + +/* + * The _generic versions exist only so lib/test_hash.c can compare + * the arch-optimized versions with the generic. + * + * Note that if you change these, any that aren't updated + * to match need to have their HAVE_ARCH_* define values updated so the + * self-test will not false-positive. + */ +#ifndef HAVE_ARCH__HASH_32 +#define __hash_32 __hash_32_generic +#endif +static inline u32 __hash_32_generic(u32 val) +{ + return val * GOLDEN_RATIO_32; +} + +static inline u32 hash_32(u32 val, unsigned int bits) +{ + /* High bits are more random, so use them. */ + return __hash_32(val) >> (32 - bits); +} + +#ifndef HAVE_ARCH_HASH_64 +#define hash_64 hash_64_generic +#endif +static __always_inline u32 hash_64_generic(u64 val, unsigned int bits) +{ +#if __BITS_PER_LONG == 64 + /* 64x64-bit multiply is efficient on all 64-bit processors */ + return val * GOLDEN_RATIO_64 >> (64 - bits); +#else + /* Hash 64 bits using only 32x32-bit multiply. */ + return hash_32((u32)val ^ __hash_32(val >> 32), bits); +#endif +} + +static inline u32 hash_ptr(const void *ptr, unsigned int bits) +{ + return hash_long((unsigned long)ptr, bits); +} + +/* This really should be called fold32_ptr; it does no hashing to speak of. */ +static inline u32 hash32_ptr(const void *ptr) +{ + unsigned long val = (unsigned long)ptr; + +#if __BITS_PER_LONG == 64 + val ^= (val >> 32); +#endif + return (u32)val; +} + +/* + * Routines for hashing strings of bytes to a 32-bit hash value. + * + * These hash functions are NOT GUARANTEED STABLE between kernel + * versions, architectures, or even repeated boots of the same kernel. + * (E.g. they may depend on boot-time hardware detection or be + * deliberately randomized.) + * + * They are also not intended to be secure against collisions caused by + * malicious inputs; much slower hash functions are required for that. + * + * They are optimized for pathname components, meaning short strings. + * Even if a majority of files have longer names, the dynamic profile of + * pathname components skews short due to short directory names. + * (E.g. /usr/lib/libsesquipedalianism.so.3.141.) + */ + +/* + * Version 1: one byte at a time. Example of use: + * + * unsigned long hash = init_name_hash; + * while (*p) + * hash = partial_name_hash(tolower(*p++), hash); + * hash = end_name_hash(hash); + * + * Although this is designed for bytes, fs/hfsplus/unicode.c + * abuses it to hash 16-bit values. + */ + +/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */ +#define init_name_hash(salt) (unsigned long)(salt) + +/* partial hash update function. 
Assume roughly 4 bits per character */ +static inline unsigned long +partial_name_hash(unsigned long c, unsigned long prevhash) +{ + return (prevhash + (c << 4) + (c >> 4)) * 11; +} + +/* + * Finally: cut down the number of bits to a int value (and try to avoid + * losing bits). This also has the property (wanted by the dcache) + * that the msbits make a good hash table index. + */ +static inline unsigned int end_name_hash(unsigned long hash) +{ + return hash_long(hash, 32); +} + +/* + * Version 2: One word (32 or 64 bits) at a time. + * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning + * exists, which describes major Linux platforms like x86 and ARM), then + * this computes a different hash function much faster. + * + * If not set, this falls back to a wrapper around the preceding. + */ +extern unsigned int __pure hash_string(const void *salt, const char *, unsigned int); + +/* + * A hash_len is a u64 with the hash of a string in the low + * half and the length in the high half. + */ +#define hashlen_hash(hashlen) ((u32)(hashlen)) +#define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) +#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash)) + +/* Return the "hash_len" (hash and length) of a null-terminated string */ +extern u64 __pure hashlen_string(const void *salt, const char *name); + +#endif /* _BR_HASH_H */ diff --git a/include/hashtable.h b/include/hashtable.h new file mode 100644 index 0000000..b8d844e --- /dev/null +++ b/include/hashtable.h @@ -0,0 +1,202 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* adaptation of Linux kernel's + */ + + +/* + * Statically sized hash table implementation + * (C) 2012 Sasha Levin + */ + +#ifndef _LINUX_HASHTABLE_H +#define _LINUX_HASHTABLE_H + +#include "list.h" +#include "hash.h" +//#include + +#define DEFINE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] __read_mostly = \ + { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } + +#define DECLARE_HASHTABLE(name, bits) \ + struct hlist_head name[1 << (bits)] + +#define HASH_SIZE(name) (ARRAY_SIZE(name)) +#define HASH_BITS(name) ilog2(HASH_SIZE(name)) + +/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ +#define hash_min(val, bits) \ + (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) + +static inline void __hash_init(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + INIT_HLIST_HEAD(&ht[i]); +} + +/** + * hash_init - initialize a hash table + * @hashtable: hashtable to be initialized + * + * Calculates the size of the hashtable from the given parameter, otherwise + * same as hash_init_size. + * + * This has to be a macro since HASH_BITS() will not work on pointers since + * it calculates the size during preprocessing. 
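+ *
+ * Usage sketch (the table name is hypothetical):
+ *   DECLARE_HASHTABLE(cache, 4);  // 16 buckets, not yet initialized
+ *   hash_init(cache);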
+ */ +#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) + +/** + * hash_add - add an object to a hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define hash_add(hashtable, node, key) \ + hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +/** + * hash_add_rcu - add an object to a rcu enabled hashtable + * @hashtable: hashtable to add to + * @node: the &struct hlist_node of the object to be added + * @key: the key of the object to be added + */ +#define hash_add_rcu(hashtable, node, key) \ + hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) + +/** + * hash_hashed - check whether an object is in any hashtable + * @node: the &struct hlist_node of the object to be checked + */ +static inline bool hash_hashed(struct hlist_node *node) +{ + return !hlist_unhashed(node); +} + +static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) +{ + unsigned int i; + + for (i = 0; i < sz; i++) + if (!hlist_empty(&ht[i])) + return false; + + return true; +} + +/** + * hash_empty - check whether a hashtable is empty + * @hashtable: hashtable to check + * + * This has to be a macro since HASH_BITS() will not work on pointers since + * it calculates the size during preprocessing. + */ +#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) + +/** + * hash_del - remove an object from a hashtable + * @node: &struct hlist_node of the object to remove + */ +static inline void hash_del(struct hlist_node *node) +{ + hlist_del_init(node); +} + +/** + * hash_for_each - iterate over a hashtable + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry(obj, &name[bkt], member) + +/** + * hash_for_each_rcu - iterate over a rcu enabled hashtable + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each_rcu(name, bkt, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_rcu(obj, &name[bkt], member) + +/** + * hash_for_each_safe - iterate over a hashtable safe against removal of + * hash entry + * @name: hashtable to iterate + * @bkt: integer to use as bucket loop cursor + * @tmp: a &struct hlist_node used for temporary storage + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + */ +#define hash_for_each_safe(name, bkt, tmp, obj, member) \ + for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ + (bkt)++)\ + hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) + +/** + * hash_for_each_possible - iterate over all possible objects hashing to the + * same bucket + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible(name, obj, member, key) \ + hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) + +/** + * 
hash_for_each_possible_rcu - iterate over all possible objects hashing to the + * same bucket in an rcu enabled hashtable + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \ + hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\ + member, ## cond) + +/** + * hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing + * to the same bucket in an rcu enabled hashtable in a rcu enabled hashtable + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + * + * This is the same as hash_for_each_possible_rcu() except that it does + * not do any RCU debugging or tracing. + */ +#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \ + hlist_for_each_entry_rcu_notrace(obj, \ + &name[hash_min(key, HASH_BITS(name))], member) + +/** + * hash_for_each_possible_safe - iterate over all possible objects hashing to the + * same bucket safe against removals + * @name: hashtable to iterate + * @obj: the type * to use as a loop cursor for each entry + * @tmp: a &struct hlist_node used for temporary storage + * @member: the name of the hlist_node within the struct + * @key: the key of the objects to iterate over + */ +#define hash_for_each_possible_safe(name, obj, tmp, member, key) \ + hlist_for_each_entry_safe(obj, tmp,\ + &name[hash_min(key, HASH_BITS(name))], member) + + +#endif diff --git a/include/likely.h b/include/likely.h new file mode 100644 index 0000000..a5d151d --- /dev/null +++ b/include/likely.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* taken from Kernel's - * Main change is that I don't use READ_ONCE and WRITE_ONCE - * See https://www.kernel.org/doc/Documentation/memory-barriers.txt + * */ + #ifndef __BR_LIST_H #define __BR_LIST_H #include #include +#include "rwonce.h" +#include "container-of.h" /************ originally in */ struct list_head { @@ -31,11 +33,6 @@ struct hlist_node { #define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA) #define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA) -/************ originally in */ -#define container_of(ptr, type, member) ({ \ - void *__mptr = (void *)(ptr); \ - ((type *)(__mptr - offsetof(type, member))); }) - /* * Circular doubly linked list implementation. 
* @@ -60,7 +57,7 @@ struct hlist_node { */ static inline void INIT_LIST_HEAD(struct list_head *list) { - list->next = list; + WRITE_ONCE(list->next, list); list->prev = list; } @@ -77,7 +74,7 @@ static inline void __list_add(struct list_head *new, next->prev = new; new->next = next; new->prev = prev; - prev->next = new; + WRITE_ONCE(prev->next, new); } /** @@ -117,7 +114,7 @@ static inline void list_add_tail(struct list_head *new, struct list_head *head) static inline void __list_del(struct list_head * prev, struct list_head * next) { next->prev = prev; - prev->next = next; + WRITE_ONCE(prev->next, next); } /* @@ -283,7 +280,7 @@ static inline int list_is_last(const struct list_head *list, */ static inline int list_empty(const struct list_head *head) { - return head->next == head; + return READ_ONCE(head->next) == head; } /** @@ -509,7 +506,7 @@ static inline void list_splice_tail_init(struct list_head *list, */ #define list_first_entry_or_null(ptr, type, member) ({ \ struct list_head *head__ = (ptr); \ - struct list_head *pos__ = head__->next; \ + struct list_head *pos__ = READ_ONCE(head__->next); \ pos__ != head__ ? list_entry(pos__, type, member) : NULL; \ }) @@ -785,7 +782,7 @@ static inline int hlist_unhashed(const struct hlist_node *h) */ static inline int hlist_unhashed_lockless(const struct hlist_node *h) { - return !h->pprev; + return !READ_ONCE(h->pprev); } /** @@ -794,7 +791,7 @@ static inline int hlist_unhashed_lockless(const struct hlist_node *h) */ static inline int hlist_empty(const struct hlist_head *h) { - return !h->first; + return !READ_ONCE(h->first); } static inline void __hlist_del(struct hlist_node *n) @@ -802,9 +799,9 @@ static inline void __hlist_del(struct hlist_node *n) struct hlist_node *next = n->next; struct hlist_node **pprev = n->pprev; - *pprev = next; + WRITE_ONCE(*pprev, next); if (next) - next->pprev = pprev; + WRITE_ONCE(next->pprev, pprev); } /** @@ -846,11 +843,11 @@ static inline void hlist_del_init(struct hlist_node *n) static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { struct hlist_node *first = h->first; - n->next = first; + WRITE_ONCE(n->next, first); if (first) - first->pprev = &n->next; - h->first = n; - n->pprev = &h->first; + WRITE_ONCE(first->pprev, &n->next); + WRITE_ONCE(h->first, n); + WRITE_ONCE(n->pprev, &h->first); } /** @@ -861,10 +858,10 @@ static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { - n->pprev = next->pprev; - n->next = next; - next->pprev = &n->next; - *(n->pprev) = n; + WRITE_ONCE(n->pprev, next->pprev); + WRITE_ONCE(n->next, next); + WRITE_ONCE(next->pprev, &n->next); + WRITE_ONCE(*(n->pprev), n); } /** @@ -875,12 +872,12 @@ static inline void hlist_add_before(struct hlist_node *n, static inline void hlist_add_behind(struct hlist_node *n, struct hlist_node *prev) { - n->next = prev->next; - prev->next = n; - n->pprev = &prev->next; + WRITE_ONCE(n->next, prev->next); + WRITE_ONCE(prev->next, n); + WRITE_ONCE(n->pprev, &prev->next); if (n->next) - n->next->pprev = &n->next; + WRITE_ONCE(n->next->pprev, &n->next); } /** @@ -992,4 +989,4 @@ static inline void hlist_move_list(struct hlist_head *old, pos && ({ n = pos->member.next; 1; }); \ pos = hlist_entry_safe(n, __typeof__(*pos), member)) -#endif +#endif /* __BR_LIST_H */ diff --git a/include/packed_struct.h b/include/packed_struct.h new file mode 100644 index 0000000..f4c8eaf --- /dev/null +++ b/include/packed_struct.h @@ -0,0 +1,46 @@ 
+#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H +#define _LINUX_UNALIGNED_PACKED_STRUCT_H + +#include + +struct __una_u16 { u16 x; } __packed; +struct __una_u32 { u32 x; } __packed; +struct __una_u64 { u64 x; } __packed; + +static inline u16 __get_unaligned_cpu16(const void *p) +{ + const struct __una_u16 *ptr = (const struct __una_u16 *)p; + return ptr->x; +} + +static inline u32 __get_unaligned_cpu32(const void *p) +{ + const struct __una_u32 *ptr = (const struct __una_u32 *)p; + return ptr->x; +} + +static inline u64 __get_unaligned_cpu64(const void *p) +{ + const struct __una_u64 *ptr = (const struct __una_u64 *)p; + return ptr->x; +} + +static inline void __put_unaligned_cpu16(u16 val, void *p) +{ + struct __una_u16 *ptr = (struct __una_u16 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu32(u32 val, void *p) +{ + struct __una_u32 *ptr = (struct __una_u32 *)p; + ptr->x = val; +} + +static inline void __put_unaligned_cpu64(u64 val, void *p) +{ + struct __una_u64 *ptr = (struct __una_u64 *)p; + ptr->x = val; +} + +#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */ diff --git a/include/pjwhash-inline.h b/include/pjwhash-inline.h new file mode 100644 index 0000000..ed5e8f5 --- /dev/null +++ b/include/pjwhash-inline.h @@ -0,0 +1,53 @@ +/* pjwhash-inline.h - PJW hash function, inline version. + * + * Copyright (C) 2021-2022 Bruno Raoult ("br") + * Licensed under the GNU General Public License v3.0 or later. + * Some rights reserved. See COPYING. + * + * You should have received a copy of the GNU General Public License along with this + * program. If not, see . + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +#ifndef _PJWHASH_INLINE_H +#define _PJWHASH_INLINE_H + +#include "bits.h" + +#define THREE_QUARTERS ((int) ((BITS_PER_INT * 3) / 4)) +#define ONE_EIGHTH ((int) (BITS_PER_INT / 8)) +#define HIGH_BITS ( ~((uint)(~0) >> ONE_EIGHTH )) + +#ifndef _pjw_inline +#define _pjw_inline static inline +#endif + +/** + * unsigned int pjwhash - PJW hash function + * @key: the key address. + * @length: the length of key. + * + * This hash was created by Peter Jay Weinberger (AT&T Bell Labs): + * https://en.wikipedia.org/wiki/PJW_hash_function + * + * Return: the PJW hash. + */ +_pjw_inline uint pjwhash(const void* key, uint length) +{ + uint hash = 0, high; + const u8 *k = key; + + for (uint i = 0; i < length; ++k, ++i) { + hash = (hash << ONE_EIGHTH) + *k; + high = hash & HIGH_BITS; + if (high != 0) { + hash ^= high >> THREE_QUARTERS; + hash &= ~high; + } + } + return hash; +} + +#endif /* _PJWHASH_INLINE_H */ diff --git a/include/pjwhash.h b/include/pjwhash.h new file mode 100644 index 0000000..19b0d76 --- /dev/null +++ b/include/pjwhash.h @@ -0,0 +1,30 @@ +/* pjwhash.h - PJW hash function, extern version. + * + * Copyright (C) 2021-2022 Bruno Raoult ("br") + * Licensed under the GNU General Public License v3.0 or later. + * Some rights reserved. See COPYING. + * + * You should have received a copy of the GNU General Public License along with this + * program. If not, see . + * + * SPDX-License-Identifier: GPL-3.0-or-later + */ + +#ifndef _PJWHASH_H +#define _PJWHASH_H + +#include "bits.h" + +/** + * unsigned int pjwhash - PJW hash function + * @key: the key address. + * @length: the length of key. + * + * This hash was created by Peter Jay Weinberger (AT&T Bell Labs): + * https://en.wikipedia.org/wiki/PJW_hash_function + * + * Return: the PJW hash. 
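+ *
+ * Usage sketch (the bucket count is illustrative):
+ *   uint h = pjwhash(name, strlen(name)) % n_buckets;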
+ */ +extern uint pjwhash (const void* key, uint length); + +#endif /* _PJWHASH_H */ diff --git a/include/plist.h b/include/plist.h new file mode 100644 index 0000000..fc0bb3d --- /dev/null +++ b/include/plist.h @@ -0,0 +1,345 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* adaptation of kernel's + * + */ + +/* + * Descending-priority-sorted double-linked list + * + * (C) 2002-2003 Intel Corp + * Inaky Perez-Gonzalez . + * + * 2001-2005 (c) MontaVista Software, Inc. + * Daniel Walker + * + * (C) 2005 Thomas Gleixner + * + * Simplifications of the original code by + * Oleg Nesterov + * + * Based on simple lists (include/linux/list.h). + * + * This is a priority-sorted list of nodes; each node has a + * priority from INT_MIN (highest) to INT_MAX (lowest). + * + * Addition is O(K), removal is O(1), change of priority of a node is + * O(K) and K is the number of RT priority levels used in the system. + * (1 <= K <= 99) + * + * This list is really a list of lists: + * + * - The tier 1 list is the prio_list, different priority nodes. + * + * - The tier 2 list is the node_list, serialized nodes. + * + * Simple ASCII art explanation: + * + * pl:prio_list (only for plist_node) + * nl:node_list + * HEAD| NODE(S) + * | + * ||------------------------------------| + * ||->|pl|<->|pl|<--------------->|pl|<-| + * | |10| |21| |21| |21| |40| (prio) + * | | | | | | | | | | | + * | | | | | | | | | | | + * |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-| + * |-------------------------------------------| + * + * The nodes on the prio_list list are sorted by priority to simplify + * the insertion of new nodes. There are no nodes with duplicate + * priorites on the list. + * + * The nodes on the node_list are ordered by priority and can contain + * entries which have the same priority. Those entries are ordered + * FIFO + * + * Addition means: look for the prio_list node in the prio_list + * for the priority of the node and insert it before the node_list + * entry of the next prio_list node. If it is the first node of + * that priority, add it to the prio_list in the right position and + * insert it into the serialized node_list list + * + * Removal means remove it from the node_list and remove it from + * the prio_list if the node_list list_head is non empty. In case + * of removal from the prio_list it must be checked whether other + * entries of the same priority are on the list or not. If there + * is another entry of the same priority then this entry has to + * replace the removed entry on the prio_list. If the entry which + * is removed is the only entry of this priority then a simple + * remove from both list is sufficient. + * + * INT_MIN is the highest priority, 0 is the medium highest, INT_MAX + * is lowest priority. + * + * No locking is done, up to the caller. 
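+ *
+ * Minimal usage sketch (struct task and its embedded node are
+ * illustrative, not part of this header):
+ *
+ *   struct task { struct plist_node node; };
+ *   PLIST_HEAD(head);
+ *   struct task t;
+ *   plist_node_init(&t.node, 10);
+ *   plist_add(&t.node, &head);
+ *   struct task *first = plist_first_entry(&head, struct task, node);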
+ */ +#ifndef _LINUX_PLIST_H_ +#define _LINUX_PLIST_H_ + +#include "container-of.h" +#include "list.h" +//#include + +// #include + +struct plist_head { + struct list_head node_list; +}; + +struct plist_node { + int prio; + struct list_head prio_list; + struct list_head node_list; +}; + +/** + * PLIST_HEAD_INIT - static struct plist_head initializer + * @head: struct plist_head variable name + */ +#define PLIST_HEAD_INIT(head) \ + { \ + .node_list = LIST_HEAD_INIT((head).node_list) \ + } + +/** + * PLIST_HEAD - declare and init plist_head + * @head: name for struct plist_head variable + */ +#define PLIST_HEAD(head) \ + struct plist_head head = PLIST_HEAD_INIT(head) + +/** + * PLIST_NODE_INIT - static struct plist_node initializer + * @node: struct plist_node variable name + * @__prio: initial node priority + */ +#define PLIST_NODE_INIT(node, __prio) \ + { \ + .prio = (__prio), \ + .prio_list = LIST_HEAD_INIT((node).prio_list), \ + .node_list = LIST_HEAD_INIT((node).node_list), \ + } + +/** + * plist_head_init - dynamic struct plist_head initializer + * @head: &struct plist_head pointer + */ +static inline void +plist_head_init(struct plist_head *head) +{ + INIT_LIST_HEAD(&head->node_list); +} + +/** + * plist_node_init - Dynamic struct plist_node initializer + * @node: &struct plist_node pointer + * @prio: initial node priority + */ +static inline void plist_node_init(struct plist_node *node, int prio) +{ + node->prio = prio; + INIT_LIST_HEAD(&node->prio_list); + INIT_LIST_HEAD(&node->node_list); +} + +extern void plist_add(struct plist_node *node, struct plist_head *head); +extern void plist_del(struct plist_node *node, struct plist_head *head); + +extern void plist_requeue(struct plist_node *node, struct plist_head *head); + +/** + * plist_for_each - iterate over the plist + * @pos: the type * to use as a loop counter + * @head: the head for your list + */ +#define plist_for_each(pos, head) \ + list_for_each_entry(pos, &(head)->node_list, node_list) + +/** + * plist_for_each_reverse - iterate backwards over the plist + * @pos: the type * to use as a loop counter + * @head: the head for your list + */ +#define plist_for_each_reverse(pos, head) \ + list_for_each_entry_reverse(pos, &(head)->node_list, node_list) + +/** + * plist_for_each_continue - continue iteration over the plist + * @pos: the type * to use as a loop cursor + * @head: the head for your list + * + * Continue to iterate over plist, continuing after the current position. + */ +#define plist_for_each_continue(pos, head) \ + list_for_each_entry_continue(pos, &(head)->node_list, node_list) + +/** + * plist_for_each_continue_reverse - continue iteration over the plist + * @pos: the type * to use as a loop cursor + * @head: the head for your list + * + * Continue to iterate backwards over plist, continuing after the current + * position. + */ +#define plist_for_each_continue_reverse(pos, head) \ + list_for_each_entry_continue_reverse(pos, &(head)->node_list, node_list) + +/** + * plist_for_each_safe - iterate safely over a plist of given type + * @pos: the type * to use as a loop counter + * @n: another type * to use as temporary storage + * @head: the head for your list + * + * Iterate over a plist of given type, safe against removal of list entry. 
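+ *
+ * Sketch, draining a plist while iterating (head is illustrative):
+ *   struct plist_node *pos, *n;
+ *   plist_for_each_safe(pos, n, &head)
+ *       plist_del(pos, &head);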
+ */ +#define plist_for_each_safe(pos, n, head) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, node_list) + +/** + * plist_for_each_safe_reverse - iterate backwards safely over a plist of given type + * @pos: the type * to use as a loop counter + * @n: another type * to use as temporary storage + * @head: the head for your list + * + * Iterate backwards over a plist of given type, safe against removal of list entry. + */ +#define plist_for_each_safe_reverse(pos, n, head) \ + list_for_each_entry_safe_reverse(pos, n, &(head)->node_list, node_list) + +/** + * plist_for_each_entry - iterate over list of given type + * @pos: the type * to use as a loop counter + * @head: the head for your list + * @mem: the name of the list_head within the struct + */ +#define plist_for_each_entry(pos, head, mem) \ + list_for_each_entry(pos, &(head)->node_list, mem.node_list) + +/** + * plist_for_each_entry_reverse - iterate backwards over list of given type + * @pos: the type * to use as a loop counter + * @head: the head for your list + * @mem: the name of the list_head within the struct + */ +#define plist_for_each_entry_reverse(pos, head, mem) \ + list_for_each_entry_reverse(pos, &(head)->node_list, mem.node_list) + +/** + * plist_for_each_entry_continue - continue iteration over list of given type + * @pos: the type * to use as a loop cursor + * @head: the head for your list + * @m: the name of the list_head within the struct + * + * Continue to iterate over list of given type, continuing after + * the current position. + */ +#define plist_for_each_entry_continue(pos, head, m) \ + list_for_each_entry_continue(pos, &(head)->node_list, m.node_list) + +/** + * plist_for_each_entry_safe - iterate safely over list of given type + * @pos: the type * to use as a loop counter + * @n: another type * to use as temporary storage + * @head: the head for your list + * @m: the name of the list_head within the struct + * + * Iterate over list of given type, safe against removal of list entry. + */ +#define plist_for_each_entry_safe(pos, n, head, m) \ + list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list) + +/** + * plist_head_empty - return !0 if a plist_head is empty + * @head: &struct plist_head pointer + */ +static inline int plist_head_empty(const struct plist_head *head) +{ + return list_empty(&head->node_list); +} + +/** + * plist_node_empty - return !0 if plist_node is not on a list + * @node: &struct plist_node pointer + */ +static inline int plist_node_empty(const struct plist_node *node) +{ + return list_empty(&node->node_list); +} + +/* All functions below assume the plist_head is not empty. 
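+ * (on an empty list, plist_first() and plist_last() would return a
+ * pointer computed from the head itself, not a real node)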
*/ + +/** + * plist_first_entry - get the struct for the first entry + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_head within the struct + */ +#ifdef CONFIG_DEBUG_PLIST +# define plist_first_entry(head, type, member) \ + ({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_first(head), type, member); \ + }) +#else +# define plist_first_entry(head, type, member) \ + container_of(plist_first(head), type, member) +#endif + +/** + * plist_last_entry - get the struct for the last entry + * @head: the &struct plist_head pointer + * @type: the type of the struct this is embedded in + * @member: the name of the list_head within the struct + */ +#ifdef CONFIG_DEBUG_PLIST +# define plist_last_entry(head, type, member) \ + ({ \ + WARN_ON(plist_head_empty(head)); \ + container_of(plist_last(head), type, member); \ + }) +#else +# define plist_last_entry(head, type, member) \ + container_of(plist_last(head), type, member) +#endif + +/** + * plist_next - get the next entry in list + * @pos: the type * to cursor + */ +#define plist_next(pos) \ + list_next_entry(pos, node_list) + +/** + * plist_prev - get the prev entry in list + * @pos: the type * to cursor + */ +#define plist_prev(pos) \ + list_prev_entry(pos, node_list) + +/** + * plist_first - return the first node (and thus, highest priority) + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. + */ +static inline struct plist_node *plist_first(const struct plist_head *head) +{ + return list_entry(head->node_list.next, + struct plist_node, node_list); +} + +/** + * plist_last - return the last node (and thus, lowest priority) + * @head: the &struct plist_head pointer + * + * Assumes the plist is _not_ empty. + */ +static inline struct plist_node *plist_last(const struct plist_head *head) +{ + return list_entry(head->node_list.prev, + struct plist_node, node_list); +} + +#endif diff --git a/include/pool.h b/include/pool.h new file mode 100644 index 0000000..a207012 --- /dev/null +++ b/include/pool.h @@ -0,0 +1,90 @@ +/* pool.h - A simple memory pool manager. + * + * Copyright (C) 2021-2022 Bruno Raoult ("br") + * Licensed under the GNU General Public License v3.0 or later. + * Some rights reserved. See COPYING. + * + * You should have received a copy of the GNU General Public License along with this + * program. If not, see . + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +#ifndef POOL_H +#define POOL_H + +#include +#include +#include "list.h" +#include "bits.h" + +#define POOL_NAME_LENGTH (16) /* max name length including trailing \0 */ + +typedef struct { + struct list_head list_blocks; /* list of allocated blocks in pool */ + char data[]; /* objects block */ +} block_t; + +typedef struct { + char name[POOL_NAME_LENGTH]; /* pool name */ + size_t eltsize; /* object size */ + u32 available; /* current available elements */ + u32 allocated; /* total objects allocated */ + u32 growsize; /* number of objects per block allocated */ + u32 nblocks; /* number of blocks allocated */ + struct list_head list_available; /* available nodes */ + struct list_head list_blocks; /* allocated blocks */ +} pool_t; + +/** + * pool_stats - display some pool statistics + * @pool: the pool address. + */ +void pool_stats(pool_t *pool); + +/** + * pool_create - create a new memory pool + * @name: the name to give to the pool. + * @grow: the number of elements to add when no more available. + * @size: the size of an element in pool. 
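+ *
+ * Usage sketch (the element type is illustrative):
+ *   pool_t *p = pool_create("nodes", 64, sizeof(struct node));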
+ *
+ * The name will be truncated to 16 characters (including the final '\0').
+ *
+ * Return: The address of the created pool, or NULL on error.
+ */
+pool_t *pool_create(const char *name, u32 grow, size_t size);
+
+/**
+ * pool_get() - Get an element from a pool.
+ * @pool: The pool address.
+ *
+ * Get an object from the pool.
+ *
+ * Return: The address of the object, or NULL on error.
+ */
+void *pool_get(pool_t *pool);
+
+/**
+ * pool_add() - Add (free) an element to a pool.
+ * @pool: The pool address.
+ * @elt: The address of the object to add to the pool.
+ *
+ * The object will be available for further pool_get().
+ *
+ * Return: The current number of available elements in pool (including
+ * @elt).
+ */
+u32 pool_add(pool_t *pool, void *elt);
+
+/**
+ * pool_destroy() - destroy a pool.
+ * @pool: The pool address.
+ *
+ * Attention: All memory is freed, but no check is done whether all pool
+ * elements have been released. Referencing any pool object after this call
+ * will likely result in memory corruption.
+ */
+void pool_destroy(pool_t *pool);
+
+#endif
diff --git a/include/rwonce.h b/include/rwonce.h
new file mode 100644
index 0000000..90a28bc
--- /dev/null
+++ b/include/rwonce.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* adaptation of kernel's <asm-generic/rwonce.h>
+ * See https://www.kernel.org/doc/Documentation/memory-barriers.txt
+ */
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
+ *
+ * These two macros will also work on aggregate data types like structs or
+ * unions.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+#ifndef __BR_RWONCE_H
+#define __BR_RWONCE_H
+
+/************ originally in <linux/compiler_attributes.h> */
+#if __has_attribute(__error__)
+# define __compiletime_error(msg) __attribute__((__error__(msg)))
+#else
+# define __compiletime_error(msg)
+#endif
+
+/************ originally in <linux/compiler_types.h> */
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ * non-scalar types unchanged.
+ */
+/*
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
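+ *
+ * The _Generic selection below also relies on its controlling expression
+ * being lvalue-converted, which drops qualifiers: for a "const volatile
+ * int x", __unqual_scalar_typeof(x) matches the plain "int" association,
+ * so the value loaded by __READ_ONCE() is not itself volatile-qualified.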
+ */
+#define __scalar_type_to_expr_cases(type) \
+        unsigned type: (unsigned type)0, \
+        signed type: (signed type)0
+
+#define __unqual_scalar_typeof(x) \
+        typeof(_Generic((x), \
+                char: (char)0, \
+                __scalar_type_to_expr_cases(char), \
+                __scalar_type_to_expr_cases(short), \
+                __scalar_type_to_expr_cases(int), \
+                __scalar_type_to_expr_cases(long), \
+                __scalar_type_to_expr_cases(long long), \
+                default: (x)))
+
+/* Is this type a native word size -- useful for atomic operations */
+#define __native_word(t) \
+        (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
+         sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+
+#ifdef __OPTIMIZE__
+# define __compiletime_assert(condition, msg, prefix, suffix) \
+        do { \
+                extern void prefix ## suffix(void) __compiletime_error(msg); \
+                if (!(condition)) \
+                        prefix ## suffix(); \
+        } while (0)
+#else
+# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
+#endif
+
+#define _compiletime_assert(condition, msg, prefix, suffix) \
+        __compiletime_assert(condition, msg, prefix, suffix)
+
+/**
+ * compiletime_assert - break build and emit msg if condition is false
+ * @condition: a compile-time constant condition to check
+ * @msg: a message to emit if condition is false
+ *
+ * In tradition of POSIX assert, this macro will break the build if the
+ * supplied condition is *false*, emitting the supplied error message if the
+ * compiler has support to do so.
+ */
+#define compiletime_assert(condition, msg) \
+        _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+
+#define compiletime_assert_atomic_type(t) \
+        compiletime_assert(__native_word(t), \
+                "Need native word sized stores/loads for atomicity.")
+
+/************ originally in <asm-generic/rwonce.h> */
+/*
+ * Yes, this permits 64-bit accesses on 32-bit architectures. These will
+ * actually be atomic in some cases (namely Armv7 + LPAE), but for others we
+ * rely on the access being split into 2x32-bit accesses for a 32-bit quantity
+ * (e.g. a virtual address) and a strong prevailing wind.
+ */
+#define compiletime_assert_rwonce_type(t) \
+        compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
+                "Unsupported access size for {READ,WRITE}_ONCE().")
+
+/*
+ * Use __READ_ONCE() instead of READ_ONCE() if you do not require any
+ * atomicity. Note that this may result in tears!
+ */
+#ifndef __READ_ONCE
+#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
+#endif
+
+#define READ_ONCE(x) \
+({ \
+        compiletime_assert_rwonce_type(x); \
+        __READ_ONCE(x); \
+})
+
+#define __WRITE_ONCE(x, val) \
+do { \
+        *(volatile typeof(x) *)&(x) = (val); \
+} while (0)
+
+#define WRITE_ONCE(x, val) \
+do { \
+        compiletime_assert_rwonce_type(x); \
+        __WRITE_ONCE(x, val); \
+} while (0)
+
+#endif /* __BR_RWONCE_H */
diff --git a/include/stringhash.h b/include/stringhash.h
new file mode 100644
index 0000000..c0c5c5b
--- /dev/null
+++ b/include/stringhash.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_STRINGHASH_H
+#define __LINUX_STRINGHASH_H
+
+#include /* For __pure */
+#include /* For u32, u64 */
+#include
+
+/*
+ * Routines for hashing strings of bytes to a 32-bit hash value.
+ *
+ * These hash functions are NOT GUARANTEED STABLE between kernel
+ * versions, architectures, or even repeated boots of the same kernel.
+ * (E.g. they may depend on boot-time hardware detection or be
+ * deliberately randomized.)
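+ * In particular, these values must never be stored to disk or sent over
+ * the network; recompute them instead. The salt argument is typically a
+ * pointer value, which already differs from one run to the next.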
+ *
+ * They are also not intended to be secure against collisions caused by
+ * malicious inputs; much slower hash functions are required for that.
+ *
+ * They are optimized for pathname components, meaning short strings.
+ * Even if a majority of files have longer names, the dynamic profile of
+ * pathname components skews short due to short directory names.
+ * (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
+ */
+
+/*
+ * Version 1: one byte at a time. Example of use:
+ *
+ * unsigned long hash = init_name_hash(salt);
+ * while (*p)
+ *      hash = partial_name_hash(tolower(*p++), hash);
+ * hash = end_name_hash(hash);
+ *
+ * Although this is designed for bytes, fs/hfsplus/unicode.c
+ * abuses it to hash 16-bit values.
+ */
+
+/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
+#define init_name_hash(salt) (unsigned long)(salt)
+
+/* partial hash update function. Assume roughly 4 bits per character */
+static inline unsigned long
+partial_name_hash(unsigned long c, unsigned long prevhash)
+{
+        return (prevhash + (c << 4) + (c >> 4)) * 11;
+}
+
+/*
+ * Finally: cut down the number of bits to an int value (and try to avoid
+ * losing bits). This also has the property (wanted by the dcache)
+ * that the msbits make a good hash table index.
+ */
+static inline unsigned int end_name_hash(unsigned long hash)
+{
+        return hash_long(hash, 32);
+}
+
+/*
+ * Version 2: One word (32 or 64 bits) at a time.
+ * If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
+ * exists, which describes major Linux platforms like x86 and ARM), then
+ * this computes a different hash function much faster.
+ *
+ * If not set, this falls back to a wrapper around the preceding.
+ */
+extern unsigned int __pure full_name_hash(const void *salt, const char *, unsigned int);
+
+/*
+ * A hash_len is a u64 with the hash of a string in the low
+ * half and the length in the high half.
+ */
+#define hashlen_hash(hashlen) ((u32)(hashlen))
+#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
+#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
+
+/* Return the "hash_len" (hash and length) of a null-terminated string */
+extern u64 __pure hashlen_string(const void *salt, const char *name);
+
+#endif /* __LINUX_STRINGHASH_H */
diff --git a/include/struct-group.h b/include/struct-group.h
new file mode 100644
index 0000000..4afeeb3
--- /dev/null
+++ b/include/struct-group.h
@@ -0,0 +1,105 @@
+/* struct-group.h - mirrored structure macros.
+ *
+ * Copyright (C) 2021-2022 Bruno Raoult ("br")
+ * Licensed under the GNU General Public License v3.0 or later.
+ * Some rights reserved. See COPYING.
+ *
+ * You should have received a copy of the GNU General Public License along with this
+ * program. If not, see .
+ *
+ * SPDX-License-Identifier: GPL-3.0-or-later
+ *
+ * Some parts are taken from the Linux kernel and others, and are:
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ */
+
+#ifndef _STRUCT_GROUP_H
+#define _STRUCT_GROUP_H
+
+/**
+ * __struct_group() - Create a mirrored named and anonymous struct
+ *
+ * @TAG: The tag name for the named sub-struct (usually empty)
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes (usually empty)
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical layout
+ * and size: one anonymous and one named. The former's members can be used
+ * normally without sub-struct naming, and the latter can be used to
+ * reason about the start, end, and size of the group of struct members.
+ * The named struct can also be explicitly tagged for later reuse, as well
+ * as both having struct attributes appended.
+ */
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+        union { \
+                struct { MEMBERS } ATTRS; \
+                struct TAG { MEMBERS } ATTRS NAME; \
+        }
+
+/**
+ * DECLARE_FLEX_ARRAY() - Declare a flexible array usable in a union
+ *
+ * @TYPE: The type of each flexible array element
+ * @NAME: The name of the flexible array member
+ *
+ * In order to have a flexible array member in a union or alone in a
+ * struct, it needs to be wrapped in an anonymous struct with at least 1
+ * named member, but that member can be empty.
+ */
+#define DECLARE_FLEX_ARRAY(TYPE, NAME) \
+        struct { \
+                struct { } __empty_ ## NAME; \
+                TYPE NAME[]; \
+        }
+
+/**
+ * struct_group() - Wrap a set of declarations in a mirrored struct
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members.
+ */
+#define struct_group(NAME, MEMBERS...) \
+        __struct_group(/* no tag */, NAME, /* no attrs */, MEMBERS)
+
+/**
+ * struct_group_attr() - Create a struct_group() with trailing attributes
+ *
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @ATTRS: Any struct attributes to apply
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes structure attributes argument.
+ */
+#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
+        __struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
+
+/**
+ * struct_group_tagged() - Create a struct_group with a reusable tag
+ *
+ * @TAG: The tag name for the named sub-struct
+ * @NAME: The identifier name of the mirrored sub-struct
+ * @MEMBERS: The member declarations for the mirrored structs
+ *
+ * Used to create an anonymous union of two structs with identical
+ * layout and size: one anonymous and one named. The former can be
+ * used normally without sub-struct naming, and the latter can be
+ * used to reason about the start, end, and size of the group of
+ * struct members. Includes struct tag argument for the named copy,
+ * so the specified layout can be reused later.
+ */
+#define struct_group_tagged(TAG, NAME, MEMBERS...) \
+        __struct_group(TAG, NAME, /* no attrs */, MEMBERS)
+
+#endif /* _STRUCT_GROUP_H */
diff --git a/include/xxhash.h b/include/xxhash.h
new file mode 100644
index 0000000..df42511
--- /dev/null
+++ b/include/xxhash.h
@@ -0,0 +1,259 @@
+/*
+ * xxHash - Extremely Fast Hash algorithm
+ * Copyright (C) 2012-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following disclaimer
+ *     in the documentation and/or other materials provided with the
+ *     distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation. This program is dual-licensed; you may select
+ * either version 2 of the GNU General Public License ("GPL") or BSD license
+ * ("BSD").
+ *
+ * You can contact the author at:
+ * - xxHash homepage: https://cyan4973.github.io/xxHash/
+ * - xxHash source repository: https://github.com/Cyan4973/xxHash
+ */
+
+/*
+ * Notice extracted from xxHash homepage:
+ *
+ * xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+ * It also successfully passes all tests from the SMHasher suite.
+ *
+ * Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2
+ * Duo @3GHz)
+ *
+ * Name            Speed      Q.Score   Author
+ * xxHash          5.4 GB/s     10
+ * CrapWow         3.2 GB/s      2      Andrew
+ * MurmurHash 3a   2.7 GB/s     10      Austin Appleby
+ * SpookyHash      2.0 GB/s     10      Bob Jenkins
+ * SBox            1.4 GB/s      9      Bret Mulvey
+ * Lookup3         1.2 GB/s      9      Bob Jenkins
+ * SuperFastHash   1.2 GB/s      1      Paul Hsieh
+ * CityHash64      1.05 GB/s    10      Pike & Alakuijala
+ * FNV             0.55 GB/s     5      Fowler, Noll, Vo
+ * CRC32           0.43 GB/s     9
+ * MD5-32          0.33 GB/s    10      Ronald L. Rivest
+ * SHA1-32         0.28 GB/s    10
+ *
+ * Q.Score is a measure of quality of the hash function.
+ * It depends on successfully passing SMHasher test set.
+ * 10 is a perfect score.
+ *
+ * A 64-bit version, named xxh64, offers much better speed,
+ * but for 64-bit applications only.
+ * Name    Speed on 64 bits    Speed on 32 bits
+ * xxh64       13.8 GB/s           1.9 GB/s
+ * xxh32        6.8 GB/s           6.0 GB/s
+ */
+
+#ifndef XXHASH_H
+#define XXHASH_H
+
+#include
+
+/*-****************************
+ * Simple Hash Functions
+ *****************************/
+
+/**
+ * xxh32() - calculate the 32-bit hash of the input with a given seed.
+ *
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ * @seed: The seed can be used to alter the result predictably.
+ *
+ * Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark): 5.4 GB/s
+ *
+ * Return: The 32-bit hash of the data.
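+ *
+ * One-shot use is a single call (sketch only; buf and len are
+ * placeholders, and 0 an arbitrary seed):
+ *
+ *      uint32_t h = xxh32(buf, len, 0);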
+ */
+uint32_t xxh32(const void *input, size_t length, uint32_t seed);
+
+/**
+ * xxh64() - calculate the 64-bit hash of the input with a given seed.
+ *
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ * @seed: The seed can be used to alter the result predictably.
+ *
+ * This function runs 2x faster than xxh32() on 64-bit systems, but slower
+ * than it on 32-bit systems (see the benchmark above).
+ *
+ * Return: The 64-bit hash of the data.
+ */
+uint64_t xxh64(const void *input, size_t length, uint64_t seed);
+
+/**
+ * xxhash() - calculate wordsize hash of the input with a given seed
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ * @seed: The seed can be used to alter the result predictably.
+ *
+ * If the hash does not need to be comparable between machines with
+ * different word sizes, this function will call whichever of xxh32()
+ * or xxh64() is faster.
+ *
+ * Return: wordsize hash of the data.
+ */
+
+static inline unsigned long xxhash(const void *input, size_t length,
+                                   uint64_t seed)
+{
+        /*
+         * Note: BITS_PER_LONG (bits.h) expands to a sizeof() expression,
+         * which the preprocessor cannot evaluate in an #if, so the word
+         * size is tested with sizeof here; the compiler folds the dead
+         * branch away.
+         */
+        if (sizeof(unsigned long) * 8 == 64)
+                return xxh64(input, length, seed);
+        return xxh32(input, length, seed);
+}
+
+/*-****************************
+ * Streaming Hash Functions
+ *****************************/
+
+/*
+ * These definitions are only meant to allow allocation of XXH state
+ * statically, on stack, or in a struct for example.
+ * Do not use members directly.
+ */
+
+/**
+ * struct xxh32_state - private xxh32 state, do not use members directly
+ */
+struct xxh32_state {
+        uint32_t total_len_32;
+        uint32_t large_len;
+        uint32_t v1;
+        uint32_t v2;
+        uint32_t v3;
+        uint32_t v4;
+        uint32_t mem32[4];
+        uint32_t memsize;
+};
+
+/**
+ * struct xxh64_state - private xxh64 state, do not use members directly
+ */
+struct xxh64_state {
+        uint64_t total_len;
+        uint64_t v1;
+        uint64_t v2;
+        uint64_t v3;
+        uint64_t v4;
+        uint64_t mem64[4];
+        uint32_t memsize;
+};
+
+/**
+ * xxh32_reset() - reset the xxh32 state to start a new hashing operation
+ *
+ * @state: The xxh32 state to reset.
+ * @seed: Initialize the hash state with this seed.
+ *
+ * Call this function on any xxh32_state to prepare for a new hashing operation.
+ */
+void xxh32_reset(struct xxh32_state *state, uint32_t seed);
+
+/**
+ * xxh32_update() - hash the data given and update the xxh32 state
+ *
+ * @state: The xxh32 state to update.
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
+ *
+ * After calling xxh32_reset() call xxh32_update() as many times as necessary.
+ *
+ * Return: Zero on success, otherwise an error code.
+ */
+int xxh32_update(struct xxh32_state *state, const void *input, size_t length);
+
+/**
+ * xxh32_digest() - produce the current xxh32 hash
+ *
+ * @state: Produce the current xxh32 hash of this state.
+ *
+ * A hash value can be produced at any time. It is still possible to continue
+ * inserting input into the hash state after a call to xxh32_digest(), and
+ * generate new hashes later on, by calling xxh32_digest() again.
+ *
+ * Return: The xxh32 hash stored in the state.
+ */
+uint32_t xxh32_digest(const struct xxh32_state *state);
+
+/**
+ * xxh64_reset() - reset the xxh64 state to start a new hashing operation
+ *
+ * @state: The xxh64 state to reset.
+ * @seed: Initialize the hash state with this seed.
+ */
+void xxh64_reset(struct xxh64_state *state, uint64_t seed);
+
+/**
+ * xxh64_update() - hash the data given and update the xxh64 state
+ * @state: The xxh64 state to update.
+ * @input: The data to hash.
+ * @length: The length of the data to hash.
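+ *
+ * Streaming use follows a reset/update/digest pattern (sketch only;
+ * the chunk pointers and lengths are placeholders):
+ *
+ *      struct xxh64_state st;
+ *
+ *      xxh64_reset(&st, 0);
+ *      xxh64_update(&st, chunk1, len1);
+ *      xxh64_update(&st, chunk2, len2);
+ *      ...xxh64_digest(&st) now equals xxh64() over the whole input...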
+ * + * After calling xxh64_reset() call xxh64_update() as many times as necessary. + * + * Return: Zero on success, otherwise an error code. + */ +int xxh64_update(struct xxh64_state *state, const void *input, size_t length); + +/** + * xxh64_digest() - produce the current xxh64 hash + * + * @state: Produce the current xxh64 hash of this state. + * + * A hash value can be produced at any time. It is still possible to continue + * inserting input into the hash state after a call to xxh64_digest(), and + * generate new hashes later on, by calling xxh64_digest() again. + * + * Return: The xxh64 hash stored in the state. + */ +uint64_t xxh64_digest(const struct xxh64_state *state); + +/*-************************** + * Utils + ***************************/ + +/** + * xxh32_copy_state() - copy the source state into the destination state + * + * @src: The source xxh32 state. + * @dst: The destination xxh32 state. + */ +void xxh32_copy_state(struct xxh32_state *dst, const struct xxh32_state *src); + +/** + * xxh64_copy_state() - copy the source state into the destination state + * + * @src: The source xxh64 state. + * @dst: The destination xxh64 state. + */ +void xxh64_copy_state(struct xxh64_state *dst, const struct xxh64_state *src); + +#endif /* XXHASH_H */ diff --git a/src/debug.c b/libsrc/debug.c similarity index 82% rename from src/debug.c rename to libsrc/debug.c index 2bfaea1..ac1d2a6 100644 --- a/src/debug.c +++ b/libsrc/debug.c @@ -1,11 +1,11 @@ /* debug.c - debug/log management * - * Copyright (C) 2021 Bruno Raoult ("br") + * Copyright (C) 2021-2022 Bruno Raoult ("br") * Licensed under the GNU General Public License v3.0 or later. * Some rights reserved. See COPYING. * * You should have received a copy of the GNU General Public License along with this - * program. If not, see . + * program. If not, see . 
* * SPDX-License-Identifier: GPL-3.0-or-later * @@ -14,19 +14,25 @@ #include #include #include + +#ifndef DEBUG_DEBUG +#define DEBUG_DEBUG +#endif + +#include "bits.h" #include "debug.h" #define NANOSEC 1000000000 /* nano sec in sec */ #define MILLISEC 1000000 /* milli sec in sec */ -static s64 timer_start; /* in nanosecond */ +static long long timer_start; /* in nanosecond */ static u32 debug_level=0; void debug_level_set(u32 level) { - debug_level = level;; + debug_level = level; - log(0, "debug level set to %u\n", level); + log(1, "debug level set to %u\n", level); } void debug_init(u32 level) @@ -43,7 +49,7 @@ void debug_init(u32 level) log(0, "timer started.\n"); } -inline static s64 timer_elapsed() +inline static long long timer_elapsed() { struct timespec timer; @@ -51,7 +57,6 @@ inline static s64 timer_elapsed() return (timer.tv_sec * NANOSEC + timer.tv_nsec) - timer_start; } - /* void debug - log function * @timestamp : boolean * @indent : indent level (2 spaces each) @@ -70,9 +75,9 @@ void debug(u32 level, bool timestamp, u32 indent, const char *src, printf("%*s", 2*(indent-1), ""); if (timestamp) { - s64 diff = timer_elapsed(); - printf("%ld.%03ld ", diff/NANOSEC, (diff/1000000)%1000); - printf("%010ld ", diff); + long long diff = timer_elapsed(); + printf("%lld.%03lld ", diff/NANOSEC, (diff/1000000)%1000); + printf("%010lld ", diff); } if (src) { diff --git a/libsrc/hash.c b/libsrc/hash.c new file mode 100644 index 0000000..c9c0093 --- /dev/null +++ b/libsrc/hash.c @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* inspired from kernel's + */ +#include "hash.h" + +/* Return the hash of a string of known length */ +unsigned int hash_string(const void *salt, const char *name, unsigned int len) +{ + unsigned long hash = init_name_hash(salt); + while (len--) + hash = partial_name_hash((unsigned char)*name++, hash); + return end_name_hash(hash); +} + +/* Return the "hash_len" (hash and length) of a null-terminated string */ +u64 hashlen_string(const void *salt, const char *name) +{ + unsigned long hash = init_name_hash(salt); + unsigned long len = 0, c; + + c = (unsigned char)*name; + while (c) { + len++; + hash = partial_name_hash(c, hash); + c = (unsigned char)name[len]; + } + return hashlen_create(end_name_hash(hash), len); +} diff --git a/libsrc/pjwhash.c b/libsrc/pjwhash.c new file mode 100644 index 0000000..554f9e3 --- /dev/null +++ b/libsrc/pjwhash.c @@ -0,0 +1,20 @@ +/* pjwhash.c - PJW hash function. + * + * Copyright (C) 2021-2022 Bruno Raoult ("br") + * Licensed under the GNU General Public License v3.0 or later. + * Some rights reserved. See COPYING. + * + * You should have received a copy of the GNU General Public License along with this + * program. If not, see . + * + * SPDX-License-Identifier: GPL-3.0-or-later + * + */ + +#define _pjw_inline extern + +//#include "bits.h" +//extern unsigned int pjwhash (const void* key, uint length); + +#include "pjwhash.h" +#include "pjwhash-inline.h" diff --git a/libsrc/plist.c b/libsrc/plist.c new file mode 100644 index 0000000..72e1fb1 --- /dev/null +++ b/libsrc/plist.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * adapted from Linux kernel lib/plist.c + * + * Descending-priority-sorted double-linked list + * + * (C) 2002-2003 Intel Corp + * Inaky Perez-Gonzalez . + * + * 2001-2005 (c) MontaVista Software, Inc. + * Daniel Walker + * + * (C) 2005 Thomas Gleixner + * + * Simplifications of the original code by + * Oleg Nesterov + * + * Based on simple lists (include/linux/list.h). 
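+ *
+ * Quick structural reminder: every node sits on head->node_list, kept
+ * sorted by priority (highest priority, i.e. lowest numeric prio, first),
+ * and the first node of each distinct priority is additionally linked
+ * into a prio_list ring, letting plist_add() skip whole runs of
+ * equal-priority nodes while searching for an insertion point.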
+ * + * This file contains the add / del functions which are considered to + * be too large to inline. See include/linux/plist.h for further + * information. + */ + +#include "plist.h" +#include "bug.h" + +#ifdef DEBUG_PLIST + +static struct plist_head test_head; + +static void plist_check_prev_next(struct list_head *t, struct list_head *p, + struct list_head *n) +{ + WARN(n->prev != p || p->next != n, + "top: %p, n: %p, p: %p\n" + "prev: %p, n: %p, p: %p\n" + "next: %p, n: %p, p: %p\n", + t, t->next, t->prev, + p, p->next, p->prev, + n, n->next, n->prev); +} + +static void plist_check_list(struct list_head *top) +{ + struct list_head *prev = top, *next = top->next; + + plist_check_prev_next(top, prev, next); + while (next != top) { + prev = next; + next = prev->next; + plist_check_prev_next(top, prev, next); + } +} + +static void plist_check_head(struct plist_head *head) +{ + if (!plist_head_empty(head)) + plist_check_list(&plist_first(head)->prio_list); + plist_check_list(&head->node_list); +} + +#else +# define plist_check_head(h) do { } while (0) +#endif + +/** + * plist_add - add @node to @head + * + * @node: &struct plist_node pointer + * @head: &struct plist_head pointer + */ +void plist_add(struct plist_node *node, struct plist_head *head) +{ + struct plist_node *first, *iter, *prev = NULL; + struct list_head *node_next = &head->node_list; + + plist_check_head(head); + WARN_ON(!plist_node_empty(node)); + WARN_ON(!list_empty(&node->prio_list)); + + if (plist_head_empty(head)) + goto ins_node; + + first = iter = plist_first(head); + + do { + if (node->prio < iter->prio) { + node_next = &iter->node_list; + break; + } + + prev = iter; + iter = list_entry(iter->prio_list.next, + struct plist_node, prio_list); + } while (iter != first); + + if (!prev || prev->prio != node->prio) + list_add_tail(&node->prio_list, &iter->prio_list); +ins_node: + list_add_tail(&node->node_list, node_next); + + plist_check_head(head); +} + +/** + * plist_del - Remove a @node from plist. + * + * @node: &struct plist_node pointer - entry to be removed + * @head: &struct plist_head pointer - list head + */ +void plist_del(struct plist_node *node, struct plist_head *head) +{ + plist_check_head(head); + + if (!list_empty(&node->prio_list)) { + if (node->node_list.next != &head->node_list) { + struct plist_node *next; + + next = list_entry(node->node_list.next, + struct plist_node, node_list); + + /* add the next plist_node into prio_list */ + if (list_empty(&next->prio_list)) + list_add(&next->prio_list, &node->prio_list); + } + list_del_init(&node->prio_list); + } + + list_del_init(&node->node_list); + + plist_check_head(head); +} + +/** + * plist_requeue - Requeue @node at end of same-prio entries. + * + * This is essentially an optimized plist_del() followed by + * plist_add(). It moves an entry already in the plist to + * after any other same-priority entries. 
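+ *
+ * Example: if the node_list holds A B C D and A, B and C share one
+ * priority, plist_requeue(A) reorders it to B C A D, i.e. same-priority
+ * entries rotate round-robin style.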
+ * + * @node: &struct plist_node pointer - entry to be moved + * @head: &struct plist_head pointer - list head + */ +void plist_requeue(struct plist_node *node, struct plist_head *head) +{ + struct plist_node *iter; + struct list_head *node_next = &head->node_list; + + plist_check_head(head); + BUG_ON(plist_head_empty(head)); + BUG_ON(plist_node_empty(node)); + + if (node == plist_last(head)) + return; + + iter = plist_next(node); + + if (node->prio != iter->prio) + return; + + plist_del(node, head); + + plist_for_each_continue(iter, head) { + if (node->prio != iter->prio) { + node_next = &iter->node_list; + break; + } + } + list_add_tail(&node->node_list, node_next); + + plist_check_head(head); +} diff --git a/src/pool.c b/libsrc/pool.c similarity index 50% rename from src/pool.c rename to libsrc/pool.c index 7ab0265..d1653f1 100644 --- a/src/pool.c +++ b/libsrc/pool.c @@ -1,25 +1,22 @@ /* pool.c - A simple pool manager. * - * Copyright (C) 2021 Bruno Raoult ("br") + * Copyright (C) 2021-2022 Bruno Raoult ("br") * Licensed under the GNU General Public License v3.0 or later. * Some rights reserved. See COPYING. * * You should have received a copy of the GNU General Public License along with this - * program. If not, see . + * program. If not, see . * * SPDX-License-Identifier: GPL-3.0-or-later * */ -/* -#include -#include -*/ - #include #include #include #include +#include + #include "list.h" #include "pool.h" #include "debug.h" @@ -28,32 +25,47 @@ void pool_stats(pool_t *pool) { if (pool) { -# ifdef DEBUG_POOL - log_f(1, "[%s] pool [%p]: avail:%u alloc:%u grow:%u eltsize:%lu\n", - pool->name, (void *)pool, pool->available, pool->allocated, - pool->growsize, pool->eltsize); -# endif + block_t *block; + + log_f(1, "[%s] pool [%p]: blocks:%u avail:%u alloc:%u grow:%u eltsize:%zu\n", + pool->name, (void *)pool, pool->nblocks, pool->available, + pool->allocated, pool->growsize, pool->eltsize); + log(5, "\tblocks: "); + list_for_each_entry(block, &pool->list_blocks, list_blocks) { + log(5, "%p ", block); + } + log(5, "\n"); } } -pool_t *pool_init(const char *name, u32 growsize, size_t eltsize) +pool_t *pool_create(const char *name, u32 growsize, size_t eltsize) { pool_t *pool; # ifdef DEBUG_POOL - log_f(1, "name=[%s] growsize=%u eltsize=%lu\n", - name, growsize, eltsize); + log_f(1, "name=[%s] growsize=%u eltsize=%zu\n", name, growsize, eltsize); # endif - /* we need at least this space in struct */ - if (eltsize < sizeof (struct list_head)) - return NULL; + /* we need at least sizeof(struct list_head) space in pool elements + */ + if (eltsize < sizeof (struct list_head)) { +# ifdef DEBUG_POOL + log_f(1, "[%s]: structure size too small (%zu < %zu), adjusting to %zu.\n", + name, eltsize, sizeof(struct list_head), sizeof(struct list_head)); +# endif + eltsize = sizeof(struct list_head); + } if ((pool = malloc(sizeof (*pool)))) { - pool->name = strdup(name); + strncpy(pool->name, name, POOL_NAME_LENGTH - 1); + pool->name[POOL_NAME_LENGTH - 1] = 0; pool->growsize = growsize; pool->eltsize = eltsize; pool->available = 0; pool->allocated = 0; - INIT_LIST_HEAD(&pool->head); + pool->nblocks = 0; + INIT_LIST_HEAD(&pool->list_available); + INIT_LIST_HEAD(&pool->list_blocks); + } else { + errno = ENOMEM; } return pool; } @@ -61,15 +73,13 @@ pool_t *pool_init(const char *name, u32 growsize, size_t eltsize) static u32 _pool_add(pool_t *pool, struct list_head *elt) { # ifdef DEBUG_POOL - log_f(10, "pool=%p &head=%p elt=%p off1=%lu off2=%lu\n", - (void *)pool, - (void *)&pool->head, - (void *)elt, - (void 
*)&pool->head-(void *)pool, - offsetof(pool_t, head)); + log_f(6, "pool=%p &head=%p elt=%p off1=%zu off2=%zu\n", + (void *)pool, (void *)&pool->list_available, (void *)elt, + (void *)&pool->list_available - (void *)pool, + offsetof(pool_t, list_available)); # endif - list_add(elt, &pool->head); + list_add(elt, &pool->list_available); return ++pool->available; } @@ -80,7 +90,7 @@ u32 pool_add(pool_t *pool, void *elt) static struct list_head *_pool_get(pool_t *pool) { - struct list_head *res = pool->head.next; + struct list_head *res = pool->list_available.next; pool->available--; list_del(res); return res; @@ -91,39 +101,67 @@ void *pool_get(pool_t *pool) if (!pool) return NULL; if (!pool->available) { - void *alloc = malloc(pool->eltsize * pool->growsize); - void *cur; - u32 i; -# ifdef DEBUG_POOL - log_f(1, "[%s]: growing pool from %u to %u elements.\n", - pool->name, - pool->allocated, - pool->allocated + pool->growsize); -# endif - if (!alloc) - return NULL; -# ifdef DEBUG_POOL - log_f(5, " (old=%u)\n", pool->allocated); -# endif - pool->allocated += pool->growsize; -# ifdef DEBUG_POOL - log_f(5, " (new=%u)\n", pool->allocated); -# endif - for (i = 0; i < pool->growsize; ++i) { - cur = alloc + i * pool->eltsize; + block_t *block = malloc(sizeof(block_t) + pool->eltsize * pool->growsize); + if (!block) { # ifdef DEBUG_POOL - log_f(5, "alloc=%p cur=%p\n", alloc, cur); + log_f(1, "[%s]: failed block allocation\n", pool->name); +# endif + errno = ENOMEM; + return NULL; + } + + /* maintain list of allocated blocks + */ + list_add(&block->list_blocks, &pool->list_blocks); + pool->nblocks++; + +# ifdef DEBUG_POOL + log_f(1, "[%s]: growing pool from %u to %u elements. block=%p nblocks=%u\n", + pool->name, + pool->allocated, + pool->allocated + pool->growsize, + block, + pool->nblocks); +# endif + + pool->allocated += pool->growsize; + for (u32 i = 0; i < pool->growsize; ++i) { + void *cur = block->data + i * pool->eltsize; +# ifdef DEBUG_POOL + log_f(7, "alloc=%p cur=%p\n", block, cur); # endif _pool_add(pool, (struct list_head *)cur); } - pool_stats(pool); } - /* this is the effective address if the object (and also the + /* this is the effective address of the object (and also the * pool list_head address) */ return _pool_get(pool); } +void pool_destroy(pool_t *pool) +{ + block_t *block, *tmp; + if (!pool) + return; + /* release memory blocks */ +# ifdef DEBUG_POOL + log_f(1, "[%s]: releasing %d blocks and main structure\n", pool->name, pool->nblocks); + log(5, "blocks:"); +# endif + list_for_each_entry_safe(block, tmp, &pool->list_blocks, list_blocks) { +# ifdef DEBUG_POOL + log(5, " %p", block); +# endif + list_del(&block->list_blocks); + free(block); + } +# ifdef DEBUG_POOL + log(5, "\n"); +# endif + free(pool); +} + #ifdef BIN_pool struct d { u16 data1; @@ -144,9 +182,9 @@ int main(int ac, char**av) debug_init(3); log_f(1, "%s: sizeof(d)=%lu sizeof(*d)=%lu off=%lu\n", *av, sizeof(elt), - sizeof(*elt), offsetof(struct d, list)); + sizeof(*elt), offsetof(struct d, list)); - if ((pool = pool_init("dummy", 3, sizeof(*elt)))) { + if ((pool = pool_create("dummy", 3, sizeof(*elt)))) { pool_stats(pool); for (int cur=1; cur. 
- * - * SPDX-License-Identifier: GPL-3.0-or-later - * - */ - -#include "bits.h" - -#ifdef BIN_bits -#include -#include - -static inline int _popcount64(u64 n) -{ - int count = 0; - while (n) { - count++; - n &= (n - 1); - } - return count; -} - -static inline int _ctz64(u64 n) -{ - return _popcount64((n & -n) - 1); -} - -static inline int _clz64(u64 n) -{ - u64 r, q; - - r = (n > 0xFFFFFFFF) << 5; n >>= r; - q = (n > 0xFFFF) << 4; n >>= q; r |= q; - q = (n > 0xFF ) << 3; n >>= q; r |= q; - q = (n > 0xF ) << 2; n >>= q; r |= q; - q = (n > 0x3 ) << 1; n >>= q; r |= q; - r |= (n >> 1); - return __WORDSIZE - r - 1; -} - -static inline int _ffs64(u64 n) -{ - if (n == 0) - return (0); - - return _popcount64(n ^ ~-n); -} - - -int main(int ac, char **av) -{ - u64 u = 123, _tmp; - int curbit; - int base = 10; - debug_init(0); - if (ac > 2) - base = atoi(*(av+2)); - if (ac > 1) { - u = strtoul(*(av+1), NULL, base); - printf("base=%d input=%#lx\n", base, u); - printf("popcount64(%lu) = %d/%d\n", u, popcount64(u), _popcount64(u)); - printf("ctz64(%lu) = %d/%d\n", u, ctz64(u), _ctz64(u)); - printf("clz64(%lu) = %d/%d\n", u, clz64(u), _clz64(u)); - printf("ffs64(%lu) = %d/%d\n", u, ffs64(u), _ffs64(u)); - printf("\n"); - - bit_for_each64(curbit, _tmp, u) { - printf("loop: curbit=%d tmp=%ld\n", curbit, _tmp); - } - printf("\n"); - bit_for_each64_2(curbit, _tmp, u) { - printf("loop2: curbit=%d tmp=%ld\n", curbit, _tmp); - } - - } - return 0; -} -#endif /* BIN_bits */ diff --git a/src/bits.h b/src/bits.h deleted file mode 100644 index 68f0af2..0000000 --- a/src/bits.h +++ /dev/null @@ -1,168 +0,0 @@ -/* bits.h - bits functions. - * - * Copyright (C) 2021 Bruno Raoult ("br") - * Licensed under the GNU General Public License v3.0 or later. - * Some rights reserved. See COPYING. - * - * You should have received a copy of the GNU General Public License along with this - * program. If not, see . - * - * SPDX-License-Identifier: GPL-3.0-or-later - * - */ -#ifndef BITS_H -#define BITS_H - -#include - -/* next include will define __WORDSIZE: 32 or 64 - */ -#include -#include "debug.h" - -#ifndef __has_builtin -#define __has_builtin(x) 0 -#endif - -/* no plan to support 32bits for now... 
- */ -#if __WORDSIZE != 64 -ERROR_64_BYTES_WORDSIZE_ONLY -#endif - -typedef int64_t s64; -typedef int32_t s32; -typedef int16_t s16; -typedef int8_t s8; - -typedef uint64_t u64; -typedef uint32_t u32; -typedef uint16_t u16; -typedef uint8_t u8; -typedef unsigned int uint; -typedef unsigned char uchar; - -/* count trailing zeroes : 00101000 -> 3 - * ^^^ - */ -static inline int ctz64(u64 n) -{ -# if __has_builtin(__builtin_ctzl) -# ifdef DEBUG_BITS - log_f(1, "builtin ctzl.\n"); -# endif - return __builtin_ctzl(n); - -# elif __has_builtin(__builtin_clzl) -# ifdef DEBUG_BITS - log_f(1, "builtin clzl.\n"); -# endif - return __WORDSIZE - (__builtin_clzl(n & -n) + 1); - -# else -# ifdef DEBUG_BITS - log_f(1, "emulated.\n"); -# endif - return popcount64((n & −n) − 1); -# endif -} - -/* count leading zeroes : 00101000 -> 2 - * ^^ - */ -static inline int clz64(u64 n) -{ -# if __has_builtin(__builtin_clzl) -# ifdef DEBUG_BITS - log_f(1, "builtin.\n"); -# endif - return __builtin_clzl(n); - -# else -# ifdef DEBUG_BITS - log_f(1, "emulated.\n"); -# endif - u64 r, q; - - r = (n > 0xFFFFFFFF) << 5; n >>= r; - q = (n > 0xFFFF) << 4; n >>= q; r |= q; - q = (n > 0xFF ) << 3; n >>= q; r |= q; - q = (n > 0xF ) << 2; n >>= q; r |= q; - q = (n > 0x3 ) << 1; n >>= q; r |= q; - r |= (n >> 1); - return __WORDSIZE - r - 1; -# endif -} - -/* find first set : 00101000 -> 4 - * ^ - */ -static inline uint ffs64(u64 n) -{ -# if __has_builtin(__builtin_ffsl) -# ifdef DEBUG_BITS - log_f(1, "builtin ffsl.\n"); -# endif - return __builtin_ffsll(n); - -# elif __has_builtin(__builtin_ctzl) -# ifdef DEBUG_BITS - log_f(1, "builtin ctzl.\n"); -# endif - if (n == 0) - return (0); - return __builtin_ctzl(n) + 1; - -# else -# ifdef DEBUG_BITS - log_f(1, "emulated.\n"); -# endif - return popcount64(n ^ ~-n); -# endif -} - -static inline int popcount64(u64 n) -{ -# if __has_builtin(__builtin_popcountl) -# ifdef DEBUG_BITS - log_f(1, "builtin.\n"); -# endif - return __builtin_popcountl(n); - -# else -# ifdef DEBUG_BITS - log_f(1, "emulated.\n"); -# endif - int count = 0; - while (n) { - count++; - n &= (n - 1); - } - return count; -# endif -} - -/** bit_for_each64 - iterate over an u64 bits - * @pos: an int used as current bit - * @tmp: a temp u64 used as temporary storage - * @ul: the u64 to loop over - * - * Usage: - * u64 u=139, _t; // u=b10001011 - * int cur; - * bit_for_each64(cur, _t, u) { - * printf("%d\n", cur); - * } - * This will display the position of each bit in u: 1, 2, 4, 8 - * - * I should probably re-think the implementation... - */ -#define bit_for_each64(pos, tmp, ul) \ - for (tmp = ul, pos = ffs64(tmp); tmp; tmp &= (tmp - 1), pos = ffs64(tmp)) - -/** or would it be more useful (counting bits from zero instead of 1) ? - */ -#define bit_for_each64_2(pos, tmp, ul) \ - for (tmp = ul, pos = ctz64(tmp); tmp; tmp ^= 1<. - * - * SPDX-License-Identifier: GPL-3.0-or-later - * - */ - -#ifndef DEBUG_H -#define DEBUG_H - -#include -#include - -#include "bits.h" - -void debug_init(u32 level); -void debug_level_set(u32 level); -void debug_devel_set(u32 level); -void debug(u32 level, bool timestamp, u32 indent, - const char *src, u32 line, const char *, ...); - -#ifdef DEBUG - -/* format: only printf - */ -#define log(level, fmt, args...) \ - debug((level), false, 0, NULL, 0, fmt, ##args) - -/* format: func name, no line number, no indent, no timestamp - * foo:15 val=2 - */ -#define log_f(level, fmt, args...) 
\ - debug((level), false, 0, __func__, 0, fmt, ##args) - -/* format : func name, indent, no timestamp - * foo:15 val=2 - */ -#define log_i(level, fmt, args...) \ - debug((level), false, (level), __func__, __LINE__, fmt, ##args) - -/* format : func name, indent, timestamp - * []foo:15 val=2 - */ -#define log_it(level, fmt, args...) \ - debug((level), true, (level), __func__, __LINE__, fmt, ##args) - -/* format: file name, no indent, no timestamp - * foo:15 val=2 - * - * #define log_f(level, fmt, args...) \ - * debug((level), false, 0, __FILE__, __LINE__, fmt, args) - */ - -#else -#define log(level, fmt, args...) -#define log_i(...) -#define log_it(...) -#define log_f(...) - -#endif /* DEBUG */ - -#endif /* DEBUG_H */ diff --git a/src/pool.h b/src/pool.h deleted file mode 100644 index 37fd667..0000000 --- a/src/pool.h +++ /dev/null @@ -1,36 +0,0 @@ -/* pool.h - A simple memory pool manager. - * - * Copyright (C) 2021 Bruno Raoult ("br") - * Licensed under the GNU General Public License v3.0 or later. - * Some rights reserved. See COPYING. - * - * You should have received a copy of the GNU General Public License along with this - * program. If not, see . - * - * SPDX-License-Identifier: GPL-3.0-or-later - * - */ - -#ifndef POOL_H -#define POOL_H - -#include -#include -#include "list.h" -#include "bits.h" - -typedef struct { - char *name; - u32 available; - u32 allocated; - u32 growsize; - size_t eltsize; - struct list_head head; -} pool_t; - -void pool_stats(pool_t *pool); -pool_t *pool_init(const char *name, u32 grow, size_t size); -void *pool_get(pool_t *pool); -u32 pool_add(pool_t *pool, void *elt); - -#endif