init AoC 2022, cleanup useless files

2022-12-01 07:50:35 +01:00
parent 58ba8b4ab8
commit 0fb4219c92
40 changed files with 3614 additions and 327 deletions


@@ -1,36 +0,0 @@
ex1.bash : 33:1701 180:319 sum=2020 mul=542619
time: 0:00.06 real, 0.06 user, 0.00 sys
context-switch: 20+4, page-faults: 0+305
ex1-cob : res= 542619
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+172
ex1-c : 33:1701 180:319 sum=2020 mul=542619
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+73
ex2.bash : 80:1450 94:43 185:527 sum=2020 mul=32858450
time: 0:10.00 real, 10.00 user, 0.00 sys
context-switch: 1005+3, page-faults: 0+302
ex2-sort.bash : 0:43 2:527 81:1450 sum=2020 mul=32858450
time: 0:00.01 real, 0.01 user, 0.00 sys
context-switch: 2+7, page-faults: 0+479
ex2-pure-sort.bash : 0:43 2:527 81:1450 sum=2020 mul=32858450
time: 0:00.50 real, 0.50 user, 0.00 sys
context-switch: 62+4, page-faults: 0+311
ex2-sort-cob : res= 32858450
time: 0:00.01 real, 0.01 user, 0.00 sys
context-switch: 1+1, page-faults: 0+170
ex2-pure-sort-cob : res= 32858450
time: 0:00.01 real, 0.01 user, 0.00 sys
context-switch: 1+1, page-faults: 0+173
ex2-c : 80:1450 94:43 185:527 sum=2020 mul=32858450
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+75


@@ -1,12 +0,0 @@
ex1.bash : lines: 1000 matched:607
time: 0:00.03 real, 0.03 user, 0.00 sys
ex1-c : lines: 1000 matched:607
time: 0:00.00 real, 0.00 user, 0.00 sys
ex2.bash : lines: 1000 matched:321
time: 0:00.03 real, 0.03 user, 0.00 sys
ex2-c : lines: 1000 matched:321
time: 0:00.00 real, 0.00 user, 0.00 sys


@@ -1,12 +0,0 @@
ex1.bash : lines:322 pos=966 found:169
time: 0:00.01 real, 0.01 user, 0.00 sys
ex1-c : lines:322 pos:966 found:169
time: 0:00.00 real, 0.00 user, 0.00 sys
ex2.bash : lines=323 res=6847128288
time: 0:00.04 real, 0.03 user, 0.00 sys
ex2-c : lines=323 res=6847128288
time: 0:00.00 real, 0.00 user, 0.00 sys


@@ -1,12 +0,0 @@
ex1.bash : valid=182/251
time: 0:00.06 real, 0.06 user, 0.00 sys
ex1-c : valid=182/251
time: 0:00.00 real, 0.00 user, 0.00 sys
ex2.bash : valid=109/251
time: 0:00.16 real, 0.16 user, 0.00 sys
ex2-c : valid=109/251
time: 0:00.00 real, 0.00 user, 0.00 sys


@@ -1,12 +0,0 @@
ex1.bash : lines=743 max=838
time: 0:00.94 real, 0.62 user, 0.41 sys
ex1-c : lines=743 max=838
time: 0:00.00 real, 0.00 user, 0.00 sys
ex2.bash : lines=743 seat=714
time: 0:00.99 real, 0.65 user, 0.43 sys
ex2-c : lines=743 seat=714
time: 0:00.00 real, 0.00 user, 0.00 sys


@@ -1,24 +0,0 @@
ex1.bash : groups=484 count=6585
time: 0:01.42 real, 1.26 user, 0.20 sys
context-switch: 135+1393, page-faults: 0+59578
ex1-bis.bash : groups=484 count=6585
time: 0:01.22 real, 1.09 user, 0.16 sys
context-switch: 145+1311, page-faults: 0+60076
ex1-c : groups=484 count=6585
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+71
ex2.bash : groups=484 count=3276
time: 0:01.35 real, 1.18 user, 0.20 sys
context-switch: 139+1360, page-faults: 0+59397
ex2-bis.bash : groups=484 count=3276
time: 0:01.30 real, 1.11 user, 0.22 sys
context-switch: 112+1356, page-faults: 0+57986
ex2-c : groups=484 count=3276
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+72


@@ -1,20 +0,0 @@
ex1.bash : target=shinygold nkeys=594 res=287
time: 0:06.99 real, 6.99 user, 0.00 sys
context-switch: 720+1, page-faults: 0+403
ex1-bis.bash : target=shinygold res=287
time: 0:00.12 real, 0.12 user, 0.00 sys
context-switch: 14+1, page-faults: 0+209
ex1-c : target=shinygold nkeys=594 res=287
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+117
ex2.bash : target=shinygold res=48160
time: 0:00.34 real, 0.28 user, 0.06 sys
context-switch: 37+253, page-faults: 0+12039
ex2-c : target=shinygold nkeys=594 res=48160
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+118


@@ -1,16 +0,0 @@
ex1.bash : res=1594
time: 0:00.01 real, 0.01 user, 0.00 sys
context-switch: 0+4, page-faults: 0+303
ex1-c : res=1594
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 1+0, page-faults: 0+79
ex2.bash : res:758
time: 0:02.26 real, 2.12 user, 0.16 sys
context-switch: 212+1084, page-faults: 0+43039
ex2-c : res=758
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 1+1, page-faults: 0+78


@@ -1,16 +0,0 @@
ex1.bash : res=167829540
time: 0:00.42 real, 0.42 user, 0.00 sys
context-switch: 57+1, page-faults: 0+172
ex1-c : res=167829540
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+76
ex2.bash : res=167829540 sum=28045630
time: 0:03.22 real, 3.01 user, 0.21 sys
context-switch: 359+921, page-faults: 0+49208
ex2-c : res=167829540 sum=28045630
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+75


@@ -1,16 +0,0 @@
ex1.bash : diff1=71 diff2=27 res=1917
time: 0:00.01 real, 0.01 user, 0.00 sys
context-switch: 7+1, page-faults: 0+234
ex1-c : diff1=71 diff2=27 res=1917
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+73
ex2.bash : size=99 res=113387824750592
time: 0:00.02 real, 0.02 user, 0.00 sys
context-switch: 10+1, page-faults: 0+236
ex2-c : size=99 res=113387824750592
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+74


@@ -1,16 +0,0 @@
ex1.bash : res=2386
time: 0:33.55 real, 33.12 user, 0.42 sys
context-switch: 179+18, page-faults: 0+1948
ex1-c : res=2386
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+90
ex2.bash : res=2091
time: 1:08.60 real, 68.56 user, 0.02 sys
context-switch: 530+10, page-faults: 0+1480
ex2-c : res=2091
time: 0:00.01 real, 0.01 user, 0.00 sys
context-switch: 6+1, page-faults: 0+90


@@ -1,16 +0,0 @@
ex1.bash : res=1457
time: 0:00.02 real, 0.02 user, 0.00 sys
context-switch: 5+1, page-faults: 0+152
ex1-c : res=1457
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+72
ex2.bash : res=106860
time: 0:00.09 real, 0.08 user, 0.00 sys
context-switch: 10+1, page-faults: 0+153
ex2-c : res=106860
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 1+1, page-faults: 0+75


@@ -1,16 +0,0 @@
ex1.bash : res=410
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 2+1, page-faults: 0+154
ex1-c : res=410
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+76
ex2.bash : res=600691418730595
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 1+1, page-faults: 0+156
ex2-c : res=600691418730595
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 1+1, page-faults: 0+76


@@ -1,16 +0,0 @@
ex1.bash : res=10035335144067
time: 0:00.06 real, 0.04 user, 0.01 sys
context-switch: 5+1, page-faults: 0+200
ex1-c : res=10035335144067
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+80
ex2.bash : res=3817372618036
time: 0:06.69 real, 6.68 user, 0.00 sys
context-switch: 689+1, page-faults: 0+5949
ex2-c : res=3817372618036
time: 0:00.06 real, 0.05 user, 0.00 sys
context-switch: 6+1, page-faults: 0+662


@@ -1,15 +0,0 @@
ex1.bash : res[2020]=1618
time: 0:00.03 real, 0.02 user, 0.00 sys
context-switch: 6+1, page-faults: 0+158
ex1-c : res[2020]=1618
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+77
ex1.bash : res[30000000]=548531
time: 12:22:02 real, 44488.91 user, 2.70 sys
context-switch: 4552388+2, page-faults: 0+98912
ex1-c : res[30000000]=548531
time: 0:00.56 real, 0.49 user, 0.06 sys
context-switch: 55+1, page-faults: 0+29369


@@ -1,8 +0,0 @@
ex1.bash : res=1618
time: 0:00.07 real, 0.07 user, 0.00 sys
context-switch: 7+1, page-faults: 0+172
ex3.bash : res=548531
time: 12:22:02 real, 44488.91 user, 2.70 sys
context-switch: 4552388+2, page-faults: 0+98912


@@ -1,18 +0,0 @@
cc -w -O3 ex1-c.c -o ex1-c
ex1.bash : res=21996
time: 0:00.14 real, 0.11 user, 0.02 sys
context-switch: 12+120, page-faults: 0+7974
ex1-c : res=21996
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+76
cc -w -O3 ex2-c.c -o ex2-c
ex2.bash : res=650080463519
time: 0:06.58 real, 6.54 user, 0.04 sys
context-switch: 201+114, page-faults: 0+8893
ex2-c : res=650080463519
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+74


@@ -1,20 +0,0 @@
ex1-v1.bash : res=263
time: 1:47.86 real, 74.63 user, 39.28 sys
context-switch: 6224+166221, page-faults: 0+7568225
ex1.bash : res=263
time: 0:00.37 real, 0.36 user, 0.00 sys
context-switch: 1+1, page-faults: 0+322
ex1-c : res=263
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+150
ex2.bash : res=1680
time: 0:06.47 real, 6.47 user, 0.00 sys
context-switch: 23+1, page-faults: 0+2852
ex2-c : res=1680
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+1634


@@ -24,7 +24,7 @@ ex2: ex12-c
@$(TIME) ex12-c 2 < $(INPUT) 2>&1
clean:
@rm -f ex1-c ex2-c core
@rm -f ex12-c core
deploy:
@$(MAKE) -C .. deploy


@@ -1,16 +0,0 @@
ex1.bash : res=650217205854
time: 0:01.80 real, 1.78 user, 0.01 sys
context-switch: 28+1, page-faults: 0+190
ex12-c : res=650217205854
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+75
ex2.bash : res=20394514442037
time: 0:01.56 real, 1.55 user, 0.01 sys
context-switch: 16+1, page-faults: 0+196
ex12-c : res=20394514442037
time: 0:00.00 real, 0.00 user, 0.00 sys
context-switch: 0+1, page-faults: 0+74


@@ -1,8 +0,0 @@
ex1.bash : res=5966506063747
time: 0:00.56 real, 0.56 user, 0.00 sys
context-switch: 4+1, page-faults: 0+224
ex2.bash res=1714
time: 0:02.14 real, 2.05 user, 0.02 sys
context-switch: 1146+1, page-faults: 0+605


@@ -50,7 +50,7 @@ VALGRINDFLAGS := --leak-check=full --show-leak-kinds=all --track-origins=yes \
TIME := \time -f "\ttime: %E real, %U user, %S sys\n\tcontext-switch:\t%c+%w, page-faults: %F+%R\n"
export PATH := .:$(PATH)
.PHONY: clean all compile assembly memcheck memcheck1 memcheck2 ex1 ex2 ccls
.PHONY: clean all compile assembly memcheck memcheck1 memcheck2 ex1 ex2 ccls bear
all: README.org ccls ex1 ex2

2022/Makefile (new file, +87 lines)

@@ -0,0 +1,87 @@
# AOC Makefile - GNU make only.
#
# Copyright (C) 2021-2022 Bruno Raoult ("br")
# Licensed under the GNU General Public License v3.0 or later.
# Some rights reserved. See COPYING.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <https://www.gnu.org/licenses/gpl-3.0-standalone.html>.
#
# SPDX-License-Identifier: GPL-3.0-or-later <https://spdx.org/licenses/GPL-3.0-or-later.html>
#
SUBDIRS := $(shell echo day??)
CC = gcc
#LIBS = -lreadline -lncurses
CFLAGS += -std=gnu11
CFLAGS += -O2
CFLAGS += -g
#CFLAGS += -pg
CFLAGS += -Wall
CFLAGS += -Wextra
CFLAGS += -march=native
CFLAGS += -DDEBUG_DEBUG # activate general debug (debug.c)
CFLAGS += -DDEBUG_POOL # memory pools management
INCDIR := ./include
LIBSRCDIR := ./libsrc
LIBDIR := ./lib
LIB := libaoc_$(shell uname -m)
SLIB := $(LIBDIR)/$(LIB).a
DLIB := $(LIBDIR)/$(LIB).so
LIBSRC := $(wildcard $(LIBSRCDIR)/*.c)
LIBOBJ := $(patsubst %.c,%.o,$(LIBSRC))
LDFLAGS := -L$(LIBDIR)
LDLIB := -l$(LIB)
.PHONY: clean cleanlib cleanall all redo output lib $(SUBDIRS)
all: lib $(SUBDIRS)
clean:
@for dir in $(SUBDIRS) ; do \
$(MAKE) --no-print-directory -C $$dir clean ; \
done
cleanlib: clean
@$(RM) -f $(SLIB) $(DLIB) $(LIBOBJ)
cleanall: clean cleanlib
redo: cleanall all
$(SUBDIRS):
@echo "========================================="
@echo "================= $@ ================="
@echo "========================================="
@echo
@echo "+++++++++++++++++ part 1"
+@$(MAKE) --no-print-directory -C $@ ex1 2>&1
@echo "+++++++++++++++++ part 2"
+@$(MAKE) --no-print-directory -C $@ ex2 2>&1
output:
@$(MAKE) --no-print-directory all >OUTPUT 2>&1
lib: $(DLIB) $(SLIB)
$(SLIB): $(LIBOBJ)
@echo building $@ static library.
@mkdir -p $(LIBDIR)
@$(AR) $(ARFLAGS) -o $@ $^
$(DLIB): CFLAGS += -fPIC
$(DLIB): LDFLAGS += -shared
$(DLIB): $(LIBOBJ)
@echo building $@ shared library.
@mkdir -p $(LIBDIR)
@$(CC) $(LDFLAGS) $^ -o $@
.c.o:
@echo compiling $<.
@$(CC) -c $(CFLAGS) $(LDFLAGS) -I $(INCDIR) -o $@ $<

2022/day01/Makefile (new file, +109 lines)

@@ -0,0 +1,109 @@
# AOC daily Makefile - GNU make only.
#
# Copyright (C) 2021-2022 Bruno Raoult ("br")
# Licensed under the GNU General Public License v3.0 or later.
# Some rights reserved. See COPYING.
#
# You should have received a copy of the GNU General Public License along with this
# program. If not, see <https://www.gnu.org/licenses/gpl-3.0-standalone.html>.
#
# SPDX-License-Identifier: GPL-3.0-or-later <https://spdx.org/licenses/GPL-3.0-or-later.html>
#
INPUT := INPUT.txt
SHELL := /bin/bash
CC := gcc
BEAR := bear
CCLSFILE:= compile_commands.json
LIB := aoc_$(shell uname -m)
INCDIR := ../include
LIBDIR := ../lib
LDFLAGS := -L$(LIBDIR)
#LDLIB := -l$(LIB) -lm
LDLIB := -l$(LIB)
export LD_LIBRARY_PATH = $(LIBDIR)
CFLAGS += -std=gnu11
CFLAGS += -O2
CFLAGS += -g
# for gprof
# CFLAGS += -pg
CFLAGS += -Wall
CFLAGS += -Wextra
CFLAGS += -march=native
# Next one may be useful for valgrind (some invalid instructions)
# CFLAGS += -mno-tbm
CFLAGS += -Wmissing-declarations
CFLAGS += -Wno-unused-result
CFLAGS += -DDEBUG_DEBUG # activate general debug (debug.c)
CFLAGS += -DDEBUG_POOL # memory pools management
VALGRIND := valgrind
VALGRINDFLAGS := --leak-check=full --show-leak-kinds=all --track-origins=yes \
--sigill-diagnostics=yes --quiet --show-error-list=yes
TIME := \time -f "\ttime: %E real, %U user, %S sys\n\tcontext-switch:\t%c+%w, page-faults: %F+%R\n"
export PATH := .:$(PATH)
.PHONY: clean all compile assembly memcheck memcheck1 memcheck2 ex1 ex2 ccls bear org
all: README.org ccls ex1 ex2
memcheck: memcheck1 memcheck2
memcheck1: aoc-c
@$(VALGRIND) $(VALGRINDFLAGS) aoc-c -p 1 < $(INPUT)
memcheck2: aoc-c
@$(VALGRIND) $(VALGRINDFLAGS) aoc-c -p 2 < $(INPUT)
@#@valgrind -s --track-origins=yes aoc-c -p 2 < $(INPUT)
compile: aoc-c
cpp: aoc-c.i
assembly: aoc-c.s
ex1: aoc-c
@$(TIME) aoc-c -p 1 < $(INPUT)
ex2: aoc-c
@$(TIME) aoc-c -p 2 < $(INPUT)
ccls: $(CCLSFILE)
clean:
@rm -f aoc-c core* vgcore* gmon.out aoc-c.s aoc-c.i README.html compile_commands.json
.c:
@echo compiling $<
@$(CC) $(CFLAGS) $(LDFLAGS) -I $(INCDIR) $< $(LDLIB) -o $@
# generate pre-processed file (.i) and assembler (.s)
%.i: %.c
@echo generating $@
@$(CC) -E $(CFLAGS) -I $(INCDIR) $< -o $@
%.s: %.c
@echo generating $@
@$(CC) -S -fverbose-asm $(CFLAGS) -I $(INCDIR) $< -o $@
# generate README.org from README.html (must cleanup !)
org: README.org
%.org: %.html
@echo generating $@. Cleanup before commit !
@pandoc $< -o $@
# generate compile_commands.json
$(CCLSFILE): aoc-c.c Makefile
$(BEAR) -- make clean compile
bear: clean
@touch .ccls-root
@$(BEAR) -- make compile

2022/day01/README.org (new file, +73 lines)

@@ -0,0 +1,73 @@
** --- Day 1: Calorie Counting ---
Santa's reindeer typically eat regular reindeer food, but they need a
lot of [[/2018/day/25][magical energy]] to deliver presents on
Christmas. For that, their favorite snack is a special type of /star/
fruit that only grows deep in the jungle. The Elves have brought you on
their annual expedition to the grove where the fruit grows.
To supply enough magical energy, the expedition needs to retrieve a
minimum of /fifty stars/ by December 25th. Although the Elves assure you
that the grove has plenty of fruit, you decide to grab any fruit you see
along the way, just in case.
Collect stars by solving puzzles. Two puzzles will be made available on
each day in the Advent calendar; the second puzzle is unlocked when you
complete the first. Each puzzle grants /one star/. Good luck!
The jungle must be too overgrown and difficult to navigate in vehicles
or access from the air; the Elves' expedition traditionally goes on
foot. As your boats approach land, the Elves begin taking inventory of
their supplies. One important consideration is food - in particular, the
number of /Calories/ each Elf is carrying (your puzzle input).
The Elves take turns writing down the number of Calories contained by
the various meals, snacks, rations, etc. that they've brought with them,
one item per line. Each Elf separates their own inventory from the
previous Elf's inventory (if any) by a blank line.
For example, suppose the Elves finish writing their items' Calories and
end up with the following list:
#+begin_example
1000
2000
3000

4000

5000
6000

7000
8000
9000

10000
#+end_example
This list represents the Calories of the food carried by five Elves:
- The first Elf is carrying food with =1000=, =2000=, and =3000=
Calories, a total of =6000= Calories.
- The second Elf is carrying one food item with =4000= Calories.
- The third Elf is carrying food with =5000= and =6000= Calories, a
total of =11000= Calories.
- The fourth Elf is carrying food with =7000=, =8000=, and =9000=
Calories, a total of =24000= Calories.
- The fifth Elf is carrying one food item with =10000= Calories.
In case the Elves get hungry and need extra snacks, they need to know
which Elf to ask: they'd like to know how many Calories are being
carried by the Elf carrying the /most/ Calories. In the example above,
this is /=24000=/ (carried by the fourth Elf).
Find the Elf carrying the most Calories. /How many total Calories is
that Elf carrying?/
To begin, [[file:1/input][get your puzzle input]].
Answer:
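The repository's actual solution is driven by the Makefile above (=aoc-c -p 1 < INPUT.txt=); its C source is not part of this excerpt. As a rough illustration only, a minimal standalone sketch of part 1, assuming the puzzle input arrives on stdin, could look like this:
#+begin_src c
/* Illustrative sketch only (not the repo's aoc-c.c): read one Calorie value
 * per line from stdin, treat blank lines as Elf separators, and print the
 * largest per-Elf total. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    char line[64];
    long cur = 0, best = 0;

    while (fgets(line, sizeof(line), stdin)) {
        if (line[0] == '\n') {                 /* blank line: next Elf */
            if (cur > best)
                best = cur;
            cur = 0;
        } else {
            cur += strtol(line, NULL, 10);     /* add this item's Calories */
        }
    }
    if (cur > best)                            /* flush the last Elf */
        best = cur;
    printf("%ld\n", best);
    return 0;
}
#+end_src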

2022/include/bits.h (new file, +521 lines)

@@ -0,0 +1,521 @@
/* bits.h - bits functions.
*
* Copyright (C) 2021-2022 Bruno Raoult ("br")
* Licensed under the GNU General Public License v3.0 or later.
* Some rights reserved. See COPYING.
*
* You should have received a copy of the GNU General Public License along with this
* program. If not, see <https://www.gnu.org/licenses/gpl-3.0-standalone.html>.
*
* SPDX-License-Identifier: GPL-3.0-or-later <https://spdx.org/licenses/GPL-3.0-or-later.html>
*
*/
#ifndef BITS_H
#define BITS_H
#include <stdint.h>
#include <stdbool.h>
/* next include will define __WORDSIZE: 32 or 64
*/
#include <bits/wordsize.h>
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif
/* no plan to support 32bits for now...
*/
#if __WORDSIZE != 64
#error "Only 64 bits word size supported."
#endif
/* fixed-size types
*/
typedef int64_t s64;
typedef int32_t s32;
typedef int16_t s16;
typedef int8_t s8;
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
/* convenience types
*/
typedef unsigned long int ulong;
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned char uchar;
/* char is a special case, as it can be signed or unsigned
*/
typedef signed char schar;
/* count trailing zeroes : 00101000 -> 3
* ^^^
*/
static __always_inline int ctz64(u64 n)
{
# if __has_builtin(__builtin_ctzl)
# ifdef DEBUG_BITS
log_f(1, "builtin ctzl.\n");
# endif
return __builtin_ctzl(n);
# elif __has_builtin(__builtin_clzl)
# ifdef DEBUG_BITS
log_f(1, "builtin clzl.\n");
# endif
return __WORDSIZE - (__builtin_clzl(n & -n) + 1);
# else
# ifdef DEBUG_BITS
log_f(1, "emulated.\n");
# endif
return popcount64((n & -n) - 1);
# endif
}
static __always_inline int ctz32(u32 n)
{
# if __has_builtin(__builtin_ctz)
# ifdef DEBUG_BITS
log_f(1, "builtin ctz.\n");
# endif
return __builtin_ctz(n);
# elif __has_builtin(__builtin_clz)
# ifdef DEBUG_BITS
log_f(1, "builtin clz.\n");
# endif
return 32 - (__builtin_clz(n & -n) + 1);
# else
# ifdef DEBUG_BITS
log_f(1, "emulated.\n");
# endif
return popcount32((n & -n) - 1);
# endif
}
/* clz - count leading zeroes : 00101000 -> 2
* ^^
*/
static __always_inline int clz64(u64 n)
{
# if __has_builtin(__builtin_clzl)
# ifdef DEBUG_BITS
log_f(1, "builtin.\n");
# endif
return __builtin_clzl(n);
# else
# ifdef DEBUG_BITS
log_f(1, "emulated.\n");
# endif
u64 r, q;
r = (n > 0xFFFFFFFF) << 5; n >>= r;
q = (n > 0xFFFF) << 4; n >>= q; r |= q;
q = (n > 0xFF ) << 3; n >>= q; r |= q;
q = (n > 0xF ) << 2; n >>= q; r |= q;
q = (n > 0x3 ) << 1; n >>= q; r |= q;
r |= (n >> 1);
return 64 - r - 1;
# endif
}
static __always_inline int clz32(u32 n)
{
# if __has_builtin(__builtin_clz)
# ifdef DEBUG_BITS
log_f(1, "builtin.\n");
# endif
return __builtin_clz(n);
# else
# ifdef DEBUG_BITS
log_f(1, "emulated.\n");
# endif
u32 r, q;
r = (n > 0xFFFF) << 4; n >>= r;
q = (n > 0xFF ) << 3; n >>= q; r |= q;
q = (n > 0xF ) << 2; n >>= q; r |= q;
q = (n > 0x3 ) << 1; n >>= q; r |= q;
r |= (n >> 1);
return 32 - r - 1;
# endif
}
/* fls - find last set : 00101000 -> 6
* ^
*/
static __always_inline int fls64(u64 n)
{
if (!n)
return 0;
return 64 - clz64(n);
}
static __always_inline int fls32(u32 n)
{
if (!n)
return 0;
return 32 - clz32(n);
}
/* find first set : 00101000 -> 4
* ^
*/
static __always_inline uint ffs64(u64 n)
{
# if __has_builtin(__builtin_ffsl)
# ifdef DEBUG_BITS
log_f(1, "builtin ffsl.\n");
# endif
return __builtin_ffsl(n);
# elif __has_builtin(__builtin_ctzl)
# ifdef DEBUG_BITS
log_f(1, "builtin ctzl.\n");
# endif
if (n == 0)
return (0);
return __builtin_ctzl(n) + 1;
# else
# ifdef DEBUG_BITS
log_f(1, "emulated.\n");
# endif
return popcount64(n ^ ~-n);
# endif
}
static __always_inline uint ffs32(u32 n)
{
# if __has_builtin(__builtin_ffs)
# ifdef DEBUG_BITS
log_f(1, "builtin ffs.\n");
# endif
return __builtin_ffs(n);
# elif __has_builtin(__builtin_ctz)
# ifdef DEBUG_BITS
log_f(1, "builtin ctz.\n");
# endif
if (n == 0)
return (0);
return __builtin_ctz(n) + 1;
# else
# ifdef DEBUG_BITS
log_f(1, "emulated.\n");
# endif
return popcount32(n ^ ~-n);
# endif
}
/* count set bits: 10101000 -> 3
* ^ ^ ^
*/
static __always_inline int popcount64(u64 n)
{
# if __has_builtin(__builtin_popcountl)
# ifdef DEBUG_BITS
log_f(1, "builtin.\n");
# endif
return __builtin_popcountl(n);
# else
# ifdef DEBUG_BITS
log_f(1, "emulated.\n");
# endif
int count = 0;
while (n) {
count++;
n &= (n - 1);
}
return count;
# endif
}
static __always_inline int popcount32(u32 n)
{
# if __has_builtin(__builtin_popcount)
# ifdef DEBUG_BITS
log_f(1, "builtin.\n");
# endif
return __builtin_popcount(n);
# else
# ifdef DEBUG_BITS
log_f(1, "emulated.\n");
# endif
int count = 0;
while (n) {
count++;
n &= (n - 1);
}
return count;
# endif
}
/* rolXX are taken from kernel's <linux/bitops.h> and are:
* SPDX-License-Identifier: GPL-2.0
*/
/**
* rol64 - rotate a 64-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline u64 rol64(u64 word, unsigned int shift)
{
return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
/**
* ror64 - rotate a 64-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline u64 ror64(u64 word, unsigned int shift)
{
return (word >> (shift & 63)) | (word << ((-shift) & 63));
}
/**
* rol32 - rotate a 32-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline u32 rol32(u32 word, unsigned int shift)
{
return (word << (shift & 31)) | (word >> ((-shift) & 31));
}
/**
* ror32 - rotate a 32-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline u32 ror32(u32 word, unsigned int shift)
{
return (word >> (shift & 31)) | (word << ((-shift) & 31));
}
/**
* rol16 - rotate a 16-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline u16 rol16(u16 word, unsigned int shift)
{
return (word << (shift & 15)) | (word >> ((-shift) & 15));
}
/**
* ror16 - rotate a 16-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline u16 ror16(u16 word, unsigned int shift)
{
return (word >> (shift & 15)) | (word << ((-shift) & 15));
}
/**
* rol8 - rotate an 8-bit value left
* @word: value to rotate
* @shift: bits to roll
*/
static inline u8 rol8(u8 word, unsigned int shift)
{
return (word << (shift & 7)) | (word >> ((-shift) & 7));
}
/**
* ror8 - rotate an 8-bit value right
* @word: value to rotate
* @shift: bits to roll
*/
static inline u8 ror8(u8 word, unsigned int shift)
{
return (word >> (shift & 7)) | (word << ((-shift) & 7));
}
/**
* ilog2 - integer log base 2 helpers (see the ilog2() macro below)
*/
static __always_inline __attribute__((const))
int __ilog2_u32(u32 n)
{
return fls32(n) - 1;
}
static __always_inline __attribute__((const))
int __ilog2_u64(u64 n)
{
return fls64(n) - 1;
}
/**
* is_power_of_2() - check if a value is a power of two
* @n: the value to check
*
* Determine whether some value is a power of two, where zero is
* *not* considered a power of two.
* Return: true if @n is a power of 2, otherwise false.
*/
static inline __attribute__((const))
bool is_power_of_2(unsigned long n)
{
return (n != 0 && ((n & (n - 1)) == 0));
}
/**
* ilog2 - log base 2 of 32-bit or a 64-bit unsigned value
* @n: parameter
*
* constant-capable log of base 2 calculation
* - this can be used to initialise global variables from constant data, hence
* the massive ternary operator construction
*
* selects the appropriately-sized optimised version depending on sizeof(n)
*/
#define ilog2(n) \
( \
__builtin_constant_p(n) ? \
((n) < 2 ? 0 : \
63 - __builtin_clzll(n)) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
)
/**
* roundup_pow_of_two - round the given value up to nearest power of two
* @n: parameter
*
* round the given value up to the nearest power of two
* - the result is undefined when n == 0
* - this can be used to initialise global variables from constant data
*/
#define roundup_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
((n) == 1) ? 1 : \
(1UL << (ilog2((n) - 1) + 1)) \
) : \
__roundup_pow_of_two(n) \
)
/**
* rounddown_pow_of_two - round the given value down to nearest power of two
* @n: parameter
*
* round the given value down to the nearest power of two
* - the result is undefined when n == 0
* - this can be used to initialise global variables from constant data
*/
#define rounddown_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
(1UL << ilog2(n))) : \
__rounddown_pow_of_two(n) \
)
static inline __attribute_const__
int __order_base_2(unsigned long n)
{
return n > 1 ? ilog2(n - 1) + 1 : 0;
}
/**
* order_base_2 - calculate the (rounded up) base 2 order of the argument
* @n: parameter
*
* The first few values calculated by this routine:
* ob2(0) = 0
* ob2(1) = 0
* ob2(2) = 1
* ob2(3) = 2
* ob2(4) = 2
* ob2(5) = 3
* ... and so on.
*/
#define order_base_2(n) \
( \
__builtin_constant_p(n) ? ( \
((n) == 0 || (n) == 1) ? 0 : \
ilog2((n) - 1) + 1) : \
__order_base_2(n) \
)
static inline __attribute__((const))
int __bits_per(unsigned long n)
{
if (n < 2)
return 1;
if (is_power_of_2(n))
return order_base_2(n) + 1;
return order_base_2(n);
}
/**
* bits_per - calculate the number of bits required for the argument
* @n: parameter
*
* This is constant-capable and can be used for compile time
* initializations, e.g bitfields.
*
* The first few values calculated by this routine:
* bf(0) = 1
* bf(1) = 1
* bf(2) = 2
* bf(3) = 2
* bf(4) = 3
* ... and so on.
*/
#define bits_per(n) \
( \
__builtin_constant_p(n) ? ( \
((n) == 0 || (n) == 1) \
? 1 : ilog2(n) + 1 \
) : \
__bits_per(n) \
)
/** bit_for_each - iterate over the bits set in a u64/u32
* @pos: an int used as current bit
* @tmp: a temp u64/u32 used as temporary storage
* @ul: the u64/u32 to loop over
*
* Usage:
* u64 u=139, _t; // u=b10001011
* int cur;
* bit_for_each64(cur, _t, u) {
* printf("%d\n", cur);
* }
* This will display the position of each bit set in ul: 1, 2, 4, 8
*
* I should probably re-think the implementation...
*/
#define bit_for_each64(pos, tmp, ul) \
for (tmp = ul, pos = ffs64(tmp); tmp; tmp &= (tmp - 1), pos = ffs64(tmp))
#define bit_for_each32(pos, tmp, ul) \
for (tmp = ul, pos = ffs32(tmp); tmp; tmp &= (tmp - 1), pos = ffs32(tmp))
/** or would it be more useful (counting bits from zero instead of 1) ?
*/
#define bit_for_each64_2(pos, tmp, ul) \
for (tmp = ul, pos = ctz64(tmp); tmp; tmp ^= 1UL << pos, pos = ctz64(tmp))
#define bit_for_each32_2(pos, tmp, ul) \
for (tmp = ul, pos = ctz32(tmp); tmp; tmp ^= 1U << pos, pos = ctz32(tmp))
#endif /* BITS_H */

2022/include/br.h (new file, +211 lines)

@@ -0,0 +1,211 @@
/* br.h - generic macros and definitions.
*
* Copyright (C) 2021-2022 Bruno Raoult ("br")
* Licensed under the GNU General Public License v3.0 or later.
* Some rights reserved. See COPYING.
*
* You should have received a copy of the GNU General Public License along with this
* program. If not, see <https://www.gnu.org/licenses/gpl-3.0-standalone.html>.
*
* SPDX-License-Identifier: GPL-3.0-or-later <https://spdx.org/licenses/GPL-3.0-or-later.html>
*
* Some parts are taken from Linux's kernel <linux/kernel.h> and others, and are :
* SPDX-License-Identifier: GPL-2.0
*
* This header contains generic stuff.
*/
#ifndef _BR_H
#define _BR_H
/* Indirect stringification. Doing two levels allows the parameter to be a
* macro itself. For example, compile with -DFOO=bar, __stringify(FOO)
* converts to "bar".
*/
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
/* generate a (maybe) unique id.
*/
#define ___PASTE(x, y) x##y
#define __PASTE(x, y) ___PASTE(x, y)
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
/* see https://lkml.org/lkml/2018/3/20/845 for explanation of this monster
*/
#define __is_constexpr(x) \
(sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
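/* How the trick works: when x is an integer constant expression,
* (void *)((long)(x) * 0l) is a null pointer constant, so the conditional
* expression has type int * and sizeof(*...) == sizeof(int) (true).
* Otherwise the void * operand dominates, the result has type void *, and
* sizeof(void) (1 as a GCC extension) differs from sizeof(int) (false).
*/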
/*
* min()/max()/clamp() macros must accomplish three things:
*
* - avoid multiple evaluations of the arguments (so side-effects like
* "x++" happen only once) when non-constant.
* - perform strict type-checking (to generate warnings instead of
* nasty runtime surprises). See the "unnecessary" pointer comparison
* in __typecheck().
* - retain result as a constant expressions when called with only
* constant expressions (to avoid tripping VLA warnings in stack
* allocation usage).
*/
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
#define __no_side_effects(x, y) \
(__is_constexpr(x) && __is_constexpr(y))
#define __safe_cmp(x, y) \
(__typecheck(x, y) && __no_side_effects(x, y))
#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
typeof(x) unique_x = (x); \
typeof(y) unique_y = (y); \
__cmp(unique_x, unique_y, op); })
#define __careful_cmp(x, y, op) \
__builtin_choose_expr(__safe_cmp(x, y), \
__cmp(x, y, op), \
__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
#define __pure __attribute__((__pure__))
/**
* min - return minimum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
#define min(x, y) __careful_cmp(x, y, <)
/**
* max - return maximum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
#define max(x, y) __careful_cmp(x, y, >)
/**
* min3 - return minimum of three values
* @x: first value
* @y: second value
* @z: third value
*/
#define min3(x, y, z) min((typeof(x))min(x, y), z)
/**
* max3 - return maximum of three values
* @x: first value
* @y: second value
* @z: third value
*/
#define max3(x, y, z) max((typeof(x))max(x, y), z)
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
* @x: value1
* @y: value2
*/
#define min_not_zero(x, y) ({ \
typeof(x) __x = (x); \
typeof(y) __y = (y); \
__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
/**
* clamp - return a value clamped to a given range with strict typechecking
* @val: current value
* @lo: lowest allowable value
* @hi: highest allowable value
*
* This macro does strict typechecking of @lo/@hi to make sure they are of the
* same type as @val. See the unnecessary pointer comparisons.
*/
#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi)
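/* Example: clamp(v, 0, 255) saturates v into [0, 255]:
* clamp(-3, 0, 255) == 0, clamp(77, 0, 255) == 77, clamp(300, 0, 255) == 255.
*/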
/*
* ..and if you can't take the strict
* types, you can specify one yourself.
*
* Or not use min/max/clamp at all, of course.
*/
/**
* min_t - return minimum of two values, using the specified type
* @type: data type to use
* @x: first value
* @y: second value
*/
#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
/**
* max_t - return maximum of two values, using the specified type
* @type: data type to use
* @x: first value
* @y: second value
*/
#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
/**
* clamp_t - return a value clamped to a given range using a given type
* @type: the type of variable to use
* @val: current value
* @lo: minimum allowable value
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of type
* @type to make all the comparisons.
*/
#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi)
/**
* clamp_val - return a value clamped to a given range using val's type
* @val: current value
* @lo: minimum allowable value
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of whatever
* type the input argument @val is. This is useful when @val is an unsigned
* type and @lo and @hi are literals that will otherwise be assigned a signed
* integer type.
*/
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
/**
* swap - swap values of @a and @b
* @a: first value
* @b: second value
*/
#define swap(a, b) \
do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
/**
* ARRAY_SIZE - get the number of elements in array @arr
* @arr: array to be sized
*/
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
/**
* abs - return absolute value of an argument
* @x: the value. If it is unsigned type, it is converted to signed type first.
* char is treated as if it was signed (regardless of whether it really is)
* but the macro's return type is preserved as char.
*
* Return: an absolute value of x.
*/
#define abs(x) __abs_choose_expr(x, long long, \
__abs_choose_expr(x, long, \
__abs_choose_expr(x, int, \
__abs_choose_expr(x, short, \
__abs_choose_expr(x, char, \
__builtin_choose_expr( \
__builtin_types_compatible_p(typeof(x), char), \
(char)({ signed char __x = (x); __x<0?-__x:__x; }), \
((void)0)))))))
#define __abs_choose_expr(x, type, other) __builtin_choose_expr( \
__builtin_types_compatible_p(typeof(x), signed type) || \
__builtin_types_compatible_p(typeof(x), unsigned type), \
({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
#endif /* _BR_H */

2022/include/bug.h (new file, +71 lines)

@@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BR_BUG_H
#define _BR_BUG_H
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include "likely.h"
#include "debug.h"
/* BUG functions inspired by Linux kernel's <asm/bug.h>
*/
#define panic() exit(0xff)
/*
* Don't use BUG() or BUG_ON() unless there's really no way out; one
* example might be detecting data structure corruption in the middle
* of an operation that can't be backed out of. If the (sub)system
* can somehow continue operating, perhaps with reduced functionality,
* it's probably not BUG-worthy.
*
* If you're tempted to BUG(), think again: is completely giving up
* really the *only* solution? There are usually better options, where
* users don't need to reboot ASAP and can mostly shut down cleanly.
*/
#define BUG() do { \
fprintf(stderr, "BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
panic(); \
} while (0)
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant kernel issues that need prompt attention if they should ever
* appear at runtime.
*
* Do not use these macros when checking for invalid external inputs
* (e.g. invalid system call arguments, or invalid data coming from
* network/devices), and on transient conditions like ENOMEM or EAGAIN.
* These macros should be used for recoverable kernel issues only.
* For invalid external inputs, transient conditions, etc use
* pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary.
* Do not include "BUG"/"WARNING" in format strings manually to make these
* conditions distinguishable from kernel issues.
*
* Use the versions with printk format strings to provide better diagnostics.
*/
#define __WARN() do { \
fprintf(stderr, "WARNING: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
} while (0)
#define __WARN_printf(arg...) do { \
fprintf(stderr, arg); \
} while (0)
#define WARN_ON(condition) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN(); \
unlikely(__ret_warn_on); \
})
#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN_printf(format); \
unlikely(__ret_warn_on); \
})
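/* Usage sketch (illustrative variables only):
*
*   BUG_ON(!queue);                                  // unrecoverable invariant
*   WARN(len > max, "len=%d > max=%d\n", len, max);  // recoverable issue, just log
*/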
#endif /* _BR_BUG_H */

2022/include/container-of.h (new file, +30 lines)

@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* adaptation of Linux kernel's <linux/container_of.h>
*/
#ifndef _BR_CONTAINER_OF_H
#define _BR_CONTAINER_OF_H
#include <stddef.h>     /* offsetof() */
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
/**
* typeof_member - get the declared type of member @m within struct type @T
*/
#define typeof_member(T, m) typeof(((T*)0)->m)
/**
* container_of - cast a member of a structure out to the containing structure
* @ptr: the pointer to the member.
* @type: the type of the container struct this is embedded in.
* @member: the name of the member within the struct.
*
*/
#define container_of(ptr, type, member) ({ \
void *__mptr = (void *)(ptr); \
_Static_assert(__same_type(*(ptr), ((type *)0)->member) || \
__same_type(*(ptr), void), \
"pointer type mismatch in container_of()"); \
((type *)(__mptr - offsetof(type, member))); })
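/* Usage sketch (hypothetical struct, for illustration only):
*
*   struct item { int val; struct list_head node; };
*
*   struct list_head *pos = ...;
*   struct item *it = container_of(pos, struct item, node);
*/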
#endif /* _BR_CONTAINER_OF_H */

2022/include/debug.h (new file, +98 lines)

@@ -0,0 +1,98 @@
/* debug.h - debug/log management.
*
* Copyright (C) 2021-2022 Bruno Raoult ("br")
* Licensed under the GNU General Public License v3.0 or later.
* Some rights reserved. See COPYING.
*
* You should have received a copy of the GNU General Public License along with this
* program. If not, see <https://www.gnu.org/licenses/gpl-3.0-standalone.html>.
*
* SPDX-License-Identifier: GPL-3.0-or-later <https://spdx.org/licenses/GPL-3.0-or-later.html>
*
*/
#ifndef DEBUG_H
#define DEBUG_H
#include <stdbool.h>
#include <stdint.h>
#include "bits.h"
#define _unused __attribute__((__unused__))
#define _printf __attribute__ ((format (printf, 6, 7)))
#ifdef DEBUG_DEBUG
void debug_init(u32 level);
void debug_level_set(u32 level);
void _printf debug(u32 level, bool timestamp,
u32 indent, const char *src,
u32 line, const char *, ...);
#else /* DEBUG_DEBUG */
static inline void debug_init(_unused u32 level) {}
static inline void debug_level_set(_unused u32 level) {}
static inline void _printf debug(_unused u32 level, _unused bool timestamp,
_unused u32 indent, _unused const char *src,
_unused u32 line, _unused const char *fmt, ...) {}
#endif /* DEBUG_DEBUG */
#undef _unused
#undef _printf
/**
* log - simple log (no function name, no indent, no timestamp)
* @level: log level
* @fmt: printf format string
* @args: subsequent arguments to printf
*/
#define log(level, fmt, args...) \
debug((level), false, 0, NULL, 0, fmt, ##args)
/**
* log_i - log with indent (no function name, no timestamp)
* @level: log level
* @fmt: printf format string
* @args: subsequent arguments to printf
*
* Output example:
* >>>>val=2
*/
#define log_i(level, fmt, args...) \
debug((level), false, (level), NULL, 0, fmt, ##args)
/**
* log_f - log with function name (no indent name, no timestamp)
* @level: log level
* @fmt: printf format string
* @args: subsequent arguments to printf
*
* Output example:
* [function] val=2
*/
#define log_f(level, fmt, args...) \
debug((level), false, 0, __func__, 0, fmt, ##args)
/**
* log_if - log with function name and line number (no indent name, no timestamp)
* @level: log level
* @fmt: printf format string
* @args: subsequent arguments to printf
*
* Output example:
* >>>> [function:15] val=2
*/
#define log_if(level, fmt, args...) \
debug((level), false, (level), __func__, __LINE__, fmt, ##args)
/**
* log_it - log with function name, line number, indent, and timestamp
* @level: log level
* @fmt: printf format string
* @args: subsequent arguments to printf
*
* Output example:
* >>>> [function:15] val=2
*/
#define log_it(level, fmt, args...) \
debug((level), true, (level), __func__, __LINE__, fmt, ##args)
#endif /* DEBUG_H */

2022/include/hash.h (new file, +173 lines)

@@ -0,0 +1,173 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BR_HASH_H
#define _BR_HASH_H
/* adaptation of Linux kernel's <linux/hash.h> and <linux/stringhash.h>
*/
/* Fast hashing routine for ints, longs and pointers.
(C) 2002 Nadia Yvette Chambers, IBM */
#include <asm/types.h>
#include <asm/bitsperlong.h>
#include "bits.h"
#include "br.h"
/*
* The "GOLDEN_RATIO_PRIME" is used in ifs/btrfs/brtfs_inode.h and
* fs/inode.c. It's not actually prime any more (the previous primes
* were actively bad for hashing), but the name remains.
*/
#if __BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
#define hash_long(val, bits) hash_32(val, bits)
#elif __BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
#else
#error Wordsize not 32 or 64
#endif
/*
* This hash multiplies the input by a large odd number and takes the
* high bits. Since multiplication propagates changes to the most
* significant end only, it is essential that the high bits of the
* product be used for the hash value.
*
* Chuck Lever verified the effectiveness of this technique:
* http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
*
* Although a random odd number will do, it turns out that the golden
* ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
* properties. (See Knuth vol 3, section 6.4, exercise 9.)
*
* These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
* which is very slightly easier to multiply by and makes no
* difference to the hash distribution.
*/
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull
/*
* The _generic versions exist only so lib/test_hash.c can compare
* the arch-optimized versions with the generic.
*
* Note that if you change these, any <asm/hash.h> that aren't updated
* to match need to have their HAVE_ARCH_* define values updated so the
* self-test will not false-positive.
*/
#ifndef HAVE_ARCH__HASH_32
#define __hash_32 __hash_32_generic
#endif
static inline u32 __hash_32_generic(u32 val)
{
return val * GOLDEN_RATIO_32;
}
static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
}
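/* Example (illustrative key variable): index into a 1024-bucket (2^10) table:
*
*   u32 bucket = hash_32(key, 10);   // 0 <= bucket < 1024
*/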
#ifndef HAVE_ARCH_HASH_64
#define hash_64 hash_64_generic
#endif
static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
{
#if __BITS_PER_LONG == 64
/* 64x64-bit multiply is efficient on all 64-bit processors */
return val * GOLDEN_RATIO_64 >> (64 - bits);
#else
/* Hash 64 bits using only 32x32-bit multiply. */
return hash_32((u32)val ^ __hash_32(val >> 32), bits);
#endif
}
static inline u32 hash_ptr(const void *ptr, unsigned int bits)
{
return hash_long((unsigned long)ptr, bits);
}
/* This really should be called fold32_ptr; it does no hashing to speak of. */
static inline u32 hash32_ptr(const void *ptr)
{
unsigned long val = (unsigned long)ptr;
#if __BITS_PER_LONG == 64
val ^= (val >> 32);
#endif
return (u32)val;
}
/*
* Routines for hashing strings of bytes to a 32-bit hash value.
*
* These hash functions are NOT GUARANTEED STABLE between kernel
* versions, architectures, or even repeated boots of the same kernel.
* (E.g. they may depend on boot-time hardware detection or be
* deliberately randomized.)
*
* They are also not intended to be secure against collisions caused by
* malicious inputs; much slower hash functions are required for that.
*
* They are optimized for pathname components, meaning short strings.
* Even if a majority of files have longer names, the dynamic profile of
* pathname components skews short due to short directory names.
* (E.g. /usr/lib/libsesquipedalianism.so.3.141.)
*/
/*
* Version 1: one byte at a time. Example of use:
*
* unsigned long hash = init_name_hash;
* while (*p)
* hash = partial_name_hash(tolower(*p++), hash);
* hash = end_name_hash(hash);
*
* Although this is designed for bytes, fs/hfsplus/unicode.c
* abuses it to hash 16-bit values.
*/
/* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
#define init_name_hash(salt) (unsigned long)(salt)
/* partial hash update function. Assume roughly 4 bits per character */
static inline unsigned long
partial_name_hash(unsigned long c, unsigned long prevhash)
{
return (prevhash + (c << 4) + (c >> 4)) * 11;
}
/*
* Finally: cut down the number of bits to a int value (and try to avoid
* losing bits). This also has the property (wanted by the dcache)
* that the msbits make a good hash table index.
*/
static inline unsigned int end_name_hash(unsigned long hash)
{
return hash_long(hash, 32);
}
/*
* Version 2: One word (32 or 64 bits) at a time.
* If CONFIG_DCACHE_WORD_ACCESS is defined (meaning <asm/word-at-a-time.h>
* exists, which describes major Linux platforms like x86 and ARM), then
* this computes a different hash function much faster.
*
* If not set, this falls back to a wrapper around the preceding.
*/
extern unsigned int __pure hash_string(const void *salt, const char *, unsigned int);
/*
* A hash_len is a u64 with the hash of a string in the low
* half and the length in the high half.
*/
#define hashlen_hash(hashlen) ((u32)(hashlen))
#define hashlen_len(hashlen) ((u32)((hashlen) >> 32))
#define hashlen_create(hash, len) ((u64)(len)<<32 | (u32)(hash))
/* Return the "hash_len" (hash and length) of a null-terminated string */
extern u64 __pure hashlen_string(const void *salt, const char *name);
#endif /* _BR_HASH_H */

2022/include/hashtable.h (new file, +204 lines)

@@ -0,0 +1,204 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* adaptation of Linux kernel's <linux/hashtable.h>
*/
/*
* Statically sized hash table implementation
* (C) 2012 Sasha Levin <levinsasha928@gmail.com>
*/
#ifndef _LINUX_HASHTABLE_H
#define _LINUX_HASHTABLE_H
#include "list.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include "hash.h"
//#include <linux/rculist.h>
#define DEFINE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)] __read_mostly = \
{ [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT }
#define DECLARE_HASHTABLE(name, bits) \
struct hlist_head name[1 << (bits)]
#define HASH_SIZE(name) (ARRAY_SIZE(name))
#define HASH_BITS(name) ilog2(HASH_SIZE(name))
/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */
#define hash_min(val, bits) \
(sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits))
static inline void __hash_init(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
INIT_HLIST_HEAD(&ht[i]);
}
/**
* hash_init - initialize a hash table
* @hashtable: hashtable to be initialized
*
* Calculates the size of the hashtable from the given parameter, otherwise
* same as hash_init_size.
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable))
/**
* hash_add - add an object to a hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add(hashtable, node, key) \
hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_add_rcu - add an object to a rcu enabled hashtable
* @hashtable: hashtable to add to
* @node: the &struct hlist_node of the object to be added
* @key: the key of the object to be added
*/
#define hash_add_rcu(hashtable, node, key) \
hlist_add_head_rcu(node, &hashtable[hash_min(key, HASH_BITS(hashtable))])
/**
* hash_hashed - check whether an object is in any hashtable
* @node: the &struct hlist_node of the object to be checked
*/
static inline bool hash_hashed(struct hlist_node *node)
{
return !hlist_unhashed(node);
}
static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz)
{
unsigned int i;
for (i = 0; i < sz; i++)
if (!hlist_empty(&ht[i]))
return false;
return true;
}
/**
* hash_empty - check whether a hashtable is empty
* @hashtable: hashtable to check
*
* This has to be a macro since HASH_BITS() will not work on pointers since
* it calculates the size during preprocessing.
*/
#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable))
/**
* hash_del - remove an object from a hashtable
* @node: &struct hlist_node of the object to remove
*/
static inline void hash_del(struct hlist_node *node)
{
hlist_del_init(node);
}
/**
* hash_for_each - iterate over a hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry(obj, &name[bkt], member)
/**
* hash_for_each_rcu - iterate over a rcu enabled hashtable
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_rcu(name, bkt, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_rcu(obj, &name[bkt], member)
/**
* hash_for_each_safe - iterate over a hashtable safe against removal of
* hash entry
* @name: hashtable to iterate
* @bkt: integer to use as bucket loop cursor
* @tmp: a &struct hlist_node used for temporary storage
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
*/
#define hash_for_each_safe(name, bkt, tmp, obj, member) \
for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\
(bkt)++)\
hlist_for_each_entry_safe(obj, tmp, &name[bkt], member)
/**
* hash_for_each_possible - iterate over all possible objects hashing to the
* same bucket
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible(name, obj, member, key) \
hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_rcu - iterate over all possible objects hashing to the
* same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_rcu(name, obj, member, key, cond...) \
hlist_for_each_entry_rcu(obj, &name[hash_min(key, HASH_BITS(name))],\
member, ## cond)
/**
* hash_for_each_possible_rcu_notrace - iterate over all possible objects hashing
* to the same bucket in an rcu enabled hashtable
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*
* This is the same as hash_for_each_possible_rcu() except that it does
* not do any RCU debugging or tracing.
*/
#define hash_for_each_possible_rcu_notrace(name, obj, member, key) \
hlist_for_each_entry_rcu_notrace(obj, \
&name[hash_min(key, HASH_BITS(name))], member)
/**
* hash_for_each_possible_safe - iterate over all possible objects hashing to the
* same bucket safe against removals
* @name: hashtable to iterate
* @obj: the type * to use as a loop cursor for each entry
* @tmp: a &struct hlist_node used for temporary storage
* @member: the name of the hlist_node within the struct
* @key: the key of the objects to iterate over
*/
#define hash_for_each_possible_safe(name, obj, tmp, member, key) \
hlist_for_each_entry_safe(obj, tmp,\
&name[hash_min(key, HASH_BITS(name))], member)
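/* Usage sketch (hypothetical struct and key, for illustration only):
*
*   struct entry { int key, val; struct hlist_node node; };
*   DEFINE_HASHTABLE(table, 8);                    // 256 buckets
*
*   hash_add(table, &e->node, e->key);             // insert entry e
*
*   struct entry *cur;
*   hash_for_each_possible(table, cur, node, key)  // scan the matching bucket
*       if (cur->key == key)
*           break;                                 // found
*/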
#endif

2022/include/likely.h (new file, +18 lines)

@@ -0,0 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* taken from kernel's <linux/compiler.h>
*/
#ifndef __LIKELY_H
#define __LIKELY_H
/* See https://kernelnewbies.org/FAQ/LikelyUnlikely
*
* In 2 words:
* "You should use it [likely() and unlikely()] only in cases when the likeliest
* branch is very very very likely, or when the unlikeliest branch is very very
* very unlikely."
*/
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
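/* Example: annotate a rarely-taken error path:
*
*   if (unlikely(!buf))
*       return -1;
*/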
#endif /* __LIKELY_H */

2022/include/list.h (new file, +992 lines)

@@ -0,0 +1,992 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* adaptation of kernel's <linux/list.h>
*
*/
#ifndef __BR_LIST_H
#define __BR_LIST_H
#include <stddef.h>
#include <stdbool.h>
#include "rwonce.h"
#include "container-of.h"
/************ originally in <include/linux/types.h> */
struct list_head {
struct list_head *next, *prev;
};
struct hlist_head {
struct hlist_node *first;
};
struct hlist_node {
struct hlist_node *next, **pprev;
};
/************ originally in <include/linux/poison.h> */
# define POISON_POINTER_DELTA 0
/* These are non-NULL pointers that will result in page faults
* under normal circumstances, used to verify that nobody uses
* non-initialized list entries.
*/
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
/*
* Circular doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
* manipulating whole lists rather than single entries, as
* sometimes we already know the next/prev entries and we can
* generate better code by using them directly rather than
* using the generic single-entry routines.
*/
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
/**
* INIT_LIST_HEAD - Initialize a list_head structure
* @list: list_head structure to be initialized.
*
* Initializes the list_head to point to itself. If it is a list header,
* the result is an empty list.
*/
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
list->prev = list;
}
/*
* Insert a new entry between two known consecutive entries.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_add(struct list_head *new,
struct list_head *prev,
struct list_head *next)
{
next->prev = new;
new->next = next;
new->prev = prev;
WRITE_ONCE(prev->next, new);
}
/**
* list_add - add a new entry
* @new: new entry to be added
* @head: list head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void list_add(struct list_head *new, struct list_head *head)
{
__list_add(new, head, head->next);
}
/**
* list_add_tail - add a new entry
* @new: new entry to be added
* @head: list head to add it before
*
* Insert a new entry before the specified head.
* This is useful for implementing queues.
*/
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
__list_add(new, head->prev, head);
}
/*
* Delete a list entry by making the prev/next entries
* point to each other.
*
* This is only for internal list manipulation where we know
* the prev/next entries already!
*/
static inline void __list_del(struct list_head * prev, struct list_head * next)
{
next->prev = prev;
WRITE_ONCE(prev->next, next);
}
/*
* Delete a list entry and clear the 'prev' pointer.
*
* This is a special-purpose list clearing method used in the networking code
* for lists allocated as per-cpu, where we don't want to incur the extra
* WRITE_ONCE() overhead of a regular list_del_init(). The code that uses this
* needs to check the node 'prev' pointer instead of calling list_empty().
*/
static inline void __list_del_clearprev(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
entry->prev = NULL;
}
static inline void __list_del_entry(struct list_head *entry)
{
__list_del(entry->prev, entry->next);
}
/**
* list_del - deletes entry from list.
* @entry: the element to delete from the list.
* Note: list_empty() on entry does not return true after this, the entry is
* in an undefined state.
*/
static inline void list_del(struct list_head *entry)
{
__list_del_entry(entry);
entry->next = LIST_POISON1;
entry->prev = LIST_POISON2;
}
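/* Usage sketch (hypothetical element type, for illustration only):
*
*   struct item { int val; struct list_head node; };
*   LIST_HEAD(items);                   // empty list
*
*   list_add_tail(&it->node, &items);   // enqueue item 'it' at the tail
*   ...
*   list_del(&it->node);                // unlink it again
*/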
/**
* list_replace - replace old entry by new one
* @old : the element to be replaced
* @new : the new element to insert
*
* If @old was empty, it will be overwritten.
*/
static inline void list_replace(struct list_head *old,
struct list_head *new)
{
new->next = old->next;
new->next->prev = new;
new->prev = old->prev;
new->prev->next = new;
}
/**
* list_replace_init - replace old entry by new one and initialize the old one
* @old : the element to be replaced
* @new : the new element to insert
*
* If @old was empty, it will be overwritten.
*/
static inline void list_replace_init(struct list_head *old,
struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
}
/**
* list_swap - replace entry1 with entry2 and re-add entry1 at entry2's position
* @entry1: the location to place entry2
* @entry2: the location to place entry1
*/
static inline void list_swap(struct list_head *entry1,
struct list_head *entry2)
{
struct list_head *pos = entry2->prev;
list_del(entry2);
list_replace(entry1, entry2);
if (pos == entry1)
pos = entry2;
list_add(entry1, pos);
}
/**
* list_del_init - deletes entry from list and reinitialize it.
* @entry: the element to delete from the list.
*/
static inline void list_del_init(struct list_head *entry)
{
__list_del_entry(entry);
INIT_LIST_HEAD(entry);
}
/**
* list_move - delete from one list and add as another's head
* @list: the entry to move
* @head: the head that will precede our entry
*/
static inline void list_move(struct list_head *list, struct list_head *head)
{
__list_del_entry(list);
list_add(list, head);
}
/**
* list_move_tail - delete from one list and add as another's tail
* @list: the entry to move
* @head: the head that will follow our entry
*/
static inline void list_move_tail(struct list_head *list,
struct list_head *head)
{
__list_del_entry(list);
list_add_tail(list, head);
}
/**
* list_bulk_move_tail - move a subsection of a list to its tail
* @head: the head that will follow our entry
* @first: first entry to move
* @last: last entry to move, can be the same as first
*
* Move all entries between @first and including @last before @head.
* All three entries must belong to the same linked list.
*/
static inline void list_bulk_move_tail(struct list_head *head,
struct list_head *first,
struct list_head *last)
{
first->prev->next = last->next;
last->next->prev = first->prev;
head->prev->next = first;
first->prev = head->prev;
last->next = head;
head->prev = last;
}
/**
* list_is_first -- tests whether @list is the first entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
static inline int list_is_first(const struct list_head *list,
const struct list_head *head)
{
return list->prev == head;
}
/**
* list_is_last - tests whether @list is the last entry in list @head
* @list: the entry to test
* @head: the head of the list
*/
static inline int list_is_last(const struct list_head *list,
const struct list_head *head)
{
return list->next == head;
}
/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
static inline int list_empty(const struct list_head *head)
{
return READ_ONCE(head->next) == head;
}
/**
* list_rotate_left - rotate the list to the left
* @head: the head of the list
*/
static inline void list_rotate_left(struct list_head *head)
{
struct list_head *first;
if (!list_empty(head)) {
first = head->next;
list_move_tail(first, head);
}
}
/**
* list_rotate_to_front() - Rotate list to specific item.
* @list: The desired new front of the list.
* @head: The head of the list.
*
* Rotates list so that @list becomes the new front of the list.
*/
static inline void list_rotate_to_front(struct list_head *list,
struct list_head *head)
{
/*
* Deletes the list head from the list denoted by @head and
* places it as the tail of @list, this effectively rotates the
* list so that @list is at the front.
*/
list_move_tail(head, list);
}
/**
* list_is_singular - tests whether a list has just one entry.
* @head: the list to test.
*/
static inline int list_is_singular(const struct list_head *head)
{
return !list_empty(head) && (head->next == head->prev);
}
static inline void __list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
struct list_head *new_first = entry->next;
list->next = head->next;
list->next->prev = list;
list->prev = entry;
entry->next = list;
head->next = new_first;
new_first->prev = head;
}
/**
* list_cut_position - cut a list into two
* @list: a new list to add all removed entries
* @head: a list with entries
* @entry: an entry within head, could be the head itself
* and if so we won't cut the list
*
* This helper moves the initial part of @head, up to and
* including @entry, from @head to @list. You should
* pass on @entry an element you know is on @head. @list
* should be an empty list or a list you do not care about
* losing its data.
*
*/
static inline void list_cut_position(struct list_head *list,
struct list_head *head, struct list_head *entry)
{
if (list_empty(head))
return;
if (list_is_singular(head) &&
(head->next != entry && head != entry))
return;
if (entry == head)
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
}
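/*
 * Illustrative sketch (hypothetical names): splitting a populated list
 * 'queue' at an entry 'mark' known to be on it. Afterwards 'done' holds
 * everything up to and including 'mark'; 'queue' keeps the rest.
 *
 *	LIST_HEAD(done);
 *	struct list_head *mark;		// some entry already on 'queue'
 *
 *	list_cut_position(&done, &queue, mark);
 */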
/**
* list_cut_before - cut a list into two, before given entry
* @list: a new list to add all removed entries
* @head: a list with entries
* @entry: an entry within head, could be the head itself
*
* This helper moves the initial part of @head, up to but
* excluding @entry, from @head to @list. You should pass
* in @entry an element you know is on @head. @list should
* be an empty list or a list you do not care about losing
* its data.
* If @entry == @head, all entries on @head are moved to
* @list.
*/
static inline void list_cut_before(struct list_head *list,
struct list_head *head,
struct list_head *entry)
{
if (head->next == entry) {
INIT_LIST_HEAD(list);
return;
}
list->next = head->next;
list->next->prev = list;
list->prev = entry->prev;
list->prev->next = list;
head->next = entry;
entry->prev = head;
}
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
{
struct list_head *first = list->next;
struct list_head *last = list->prev;
first->prev = prev;
prev->next = first;
last->next = next;
next->prev = last;
}
/**
* list_splice - join two lists, this is designed for stacks
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice(const struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head, head->next);
}
/**
* list_splice_tail - join two lists, each list being a queue
* @list: the new list to add.
* @head: the place to add it in the first list.
*/
static inline void list_splice_tail(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list))
__list_splice(list, head->prev, head);
}
/**
* list_splice_init - join two lists and reinitialise the emptied list.
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* The list at @list is reinitialised
*/
static inline void list_splice_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head, head->next);
INIT_LIST_HEAD(list);
}
}
/**
* list_splice_tail_init - join two lists and reinitialise the emptied list
* @list: the new list to add.
* @head: the place to add it in the first list.
*
* Each of the lists is a queue.
* The list at @list is reinitialised
*/
static inline void list_splice_tail_init(struct list_head *list,
struct list_head *head)
{
if (!list_empty(list)) {
__list_splice(list, head->prev, head);
INIT_LIST_HEAD(list);
}
}
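/*
 * Illustrative sketch (hypothetical names): draining a producer list
 * into a consumer list in O(1), leaving the source list empty and
 * immediately reusable.
 *
 *	LIST_HEAD(pending);
 *	LIST_HEAD(work);
 *
 *	// ... entries queued on 'pending' with list_add_tail() ...
 *
 *	list_splice_tail_init(&pending, &work);
 *	// 'work' now ends with the former 'pending' entries; 'pending' is empty
 */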
/**
* list_entry - get the struct for this entry
* @ptr: the &struct list_head pointer.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*/
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
/**
* list_first_entry - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that the list is expected to be non-empty.
*/
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
/**
* list_last_entry - get the last element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that the list is expected to be non-empty.
*/
#define list_last_entry(ptr, type, member) \
list_entry((ptr)->prev, type, member)
/**
* list_first_entry_or_null - get the first element from a list
* @ptr: the list head to take the element from.
* @type: the type of the struct this is embedded in.
* @member: the name of the list_head within the struct.
*
* Note that if the list is empty, it returns NULL.
*/
#define list_first_entry_or_null(ptr, type, member) ({ \
struct list_head *head__ = (ptr); \
struct list_head *pos__ = READ_ONCE(head__->next); \
pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
})
/**
* list_next_entry - get the next element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
*/
#define list_next_entry(pos, member) \
list_entry((pos)->member.next, __typeof__(*(pos)), member)
/**
* list_prev_entry - get the prev element in list
* @pos: the type * to cursor
* @member: the name of the list_head within the struct.
*/
#define list_prev_entry(pos, member) \
list_entry((pos)->member.prev, __typeof__(*(pos)), member)
/**
* list_for_each - iterate over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
/**
* list_for_each_continue - continue iteration over a list
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*
* Continue to iterate over a list, continuing after the current position.
*/
#define list_for_each_continue(pos, head) \
for (pos = pos->next; pos != (head); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
for (pos = (head)->prev; pos != (head); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
* @pos: the &struct list_head to use as a loop cursor.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
* @pos: the &struct list_head to use as a loop cursor.
* @n: another &struct list_head to use as temporary storage
* @head: the head for your list.
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
pos != (head); \
pos = n, n = pos->prev)
/**
* list_entry_is_head - test if the entry points to the head of the list
* @pos: the type * to cursor
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_entry_is_head(pos, head, member) \
(&pos->member == (head))
/**
* list_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_for_each_entry(pos, head, member) \
for (pos = list_first_entry(head, __typeof__(*pos), member); \
!list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
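/*
 * Illustrative sketch (hypothetical 'struct item' with a 'node' member):
 * the typed iterator resolves each list_head back to its containing
 * structure via container_of(), so the loop body works on
 * 'struct item *' directly.
 *
 *	struct item *it;
 *
 *	list_for_each_entry(it, &queue, node)
 *		printf("%d\n", it->value);
 */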
/**
* list_for_each_entry_reverse - iterate backwards over list of given type.
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_last_entry(head, __typeof__(*pos), member); \
!list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
* @pos: the type * to use as a start point
* @head: the head of the list
* @member: the name of the list_head within the struct.
*
* Prepares a pos entry for use as a start point in list_for_each_entry_continue().
*/
#define list_prepare_entry(pos, head, member) \
((pos) ? : list_entry(head, __typeof__(*pos), member))
/**
* list_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
* the current position.
*/
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_next_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
* list_for_each_entry_continue_reverse - iterate backwards from the given point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Start to iterate over list of given type backwards, continuing after
* the current position.
*/
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_prev_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
* list_for_each_entry_from - iterate over list of given type from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing from current position.
*/
#define list_for_each_entry_from(pos, head, member) \
for (; !list_entry_is_head(pos, head, member); \
pos = list_next_entry(pos, member))
/**
* list_for_each_entry_from_reverse - iterate backwards over list of given type
* from the current point
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate backwards over list of given type, continuing from current position.
*/
#define list_for_each_entry_from_reverse(pos, head, member) \
for (; !list_entry_is_head(pos, head, member); \
pos = list_prev_entry(pos, member))
/**
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*/
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_first_entry(head, __typeof__(*pos), member), \
n = list_next_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
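/*
 * Illustrative sketch (hypothetical names): the _safe variant caches the
 * next element first, so the current entry may be unlinked (and freed,
 * if it was heap-allocated) inside the loop body.
 *
 *	struct item *it, *tmp;
 *
 *	list_for_each_entry_safe(it, tmp, &queue, node) {
 *		list_del(&it->node);
 *		free(it);	// assumes the entries were malloc()ed
 *	}
 */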
/**
* list_for_each_entry_safe_continue - continue list iteration safe against removal
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate over list of given type, continuing after current point,
* safe against removal of list entry.
*/
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_next_entry(pos, member), \
n = list_next_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_from - iterate over list from current point safe against removal
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate over list of given type from current point, safe against
* removal of list entry.
*/
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_next_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = n, n = list_next_entry(n, member))
/**
* list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
* @pos: the type * to use as a loop cursor.
* @n: another type * to use as temporary storage
* @head: the head for your list.
* @member: the name of the list_head within the struct.
*
* Iterate backwards over list of given type, safe against removal
* of list entry.
*/
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_last_entry(head, __typeof__(*pos), member), \
n = list_prev_entry(pos, member); \
!list_entry_is_head(pos, head, member); \
pos = n, n = list_prev_entry(n, member))
/**
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop
* @pos: the loop cursor used in the list_for_each_entry_safe loop
* @n: temporary storage used in list_for_each_entry_safe
* @member: the name of the list_head within the struct.
*
* list_safe_reset_next is not safe to use in general if the list may be
* modified concurrently (e.g. the lock is dropped in the loop body). An
* exception to this is if the cursor element (pos) is pinned in the list,
* and list_safe_reset_next is called after re-taking the lock and before
* completing the current iteration of the loop body.
*/
#define list_safe_reset_next(pos, n, member) \
n = list_next_entry(pos, member)
/*
* Doubly linked lists with a single pointer list head.
* Mostly useful for hash tables where the two pointer list head is
* too wasteful.
* You lose the ability to access the tail in O(1).
*/
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = NULL;
h->pprev = NULL;
}
/**
* hlist_unhashed - Has node been removed from list and reinitialized?
* @h: Node to be checked
*
* Note that not all removal functions will leave a node in unhashed
* state. For example, hlist_nulls_del_init_rcu() does leave the
* node in unhashed state, but hlist_nulls_del() does not.
*/
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
/**
* hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
* @h: Node to be checked
*
* This variant of hlist_unhashed() must be used in lockless contexts
* to avoid potential load-tearing. The READ_ONCE() is paired with the
* various WRITE_ONCE() in hlist helpers that are defined below.
*/
static inline int hlist_unhashed_lockless(const struct hlist_node *h)
{
return !READ_ONCE(h->pprev);
}
/**
* hlist_empty - Is the specified hlist_head structure an empty hlist?
* @h: Structure to check.
*/
static inline int hlist_empty(const struct hlist_head *h)
{
return !READ_ONCE(h->first);
}
static inline void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
WRITE_ONCE(*pprev, next);
if (next)
WRITE_ONCE(next->pprev, pprev);
}
/**
* hlist_del - Delete the specified hlist_node from its list
* @n: Node to delete.
*
* Note that this function leaves the node in hashed state. Use
* hlist_del_init() or similar instead to unhash @n.
*/
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
n->next = LIST_POISON1;
n->pprev = LIST_POISON2;
}
/**
* hlist_del_init - Delete the specified hlist_node from its list and initialize
* @n: Node to delete.
*
* Note that this function leaves the node in unhashed state.
*/
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
/**
* hlist_add_head - add a new entry at the beginning of the hlist
* @n: new entry to be added
* @h: hlist head to add it after
*
* Insert a new entry after the specified head.
* This is good for implementing stacks.
*/
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
WRITE_ONCE(n->next, first);
if (first)
WRITE_ONCE(first->pprev, &n->next);
WRITE_ONCE(h->first, n);
WRITE_ONCE(n->pprev, &h->first);
}
/**
* hlist_add_before - add a new entry before the one specified
* @n: new entry to be added
* @next: hlist node to add it before, which must be non-NULL
*/
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
WRITE_ONCE(n->pprev, next->pprev);
WRITE_ONCE(n->next, next);
WRITE_ONCE(next->pprev, &n->next);
WRITE_ONCE(*(n->pprev), n);
}
/**
* hlist_add_behind - add a new entry after the one specified
* @n: new entry to be added
* @prev: hlist node to add it after, which must be non-NULL
*/
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
WRITE_ONCE(n->next, prev->next);
WRITE_ONCE(prev->next, n);
WRITE_ONCE(n->pprev, &prev->next);
if (n->next)
WRITE_ONCE(n->next->pprev, &n->next);
}
/**
* hlist_add_fake - create a fake hlist consisting of a single headless node
* @n: Node to make a fake list out of
*
* This makes @n appear to be its own predecessor on a headless hlist.
* The point of this is to allow things like hlist_del() to work correctly
* in cases where there is no list.
*/
static inline void hlist_add_fake(struct hlist_node *n)
{
n->pprev = &n->next;
}
/**
* hlist_fake: Is this node a fake hlist?
* @h: Node to check for being a self-referential fake hlist.
*/
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
/**
* hlist_is_singular_node - is node the only element of the specified hlist?
* @n: Node to check for singularity.
* @h: Header for potentially singular list.
*
* Check whether the node is the only node of the head without
* accessing head, thus avoiding unnecessary cache misses.
*/
static inline bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
{
return !n->next && n->pprev == &h->first;
}
/**
* hlist_move_list - Move an hlist
* @old: hlist_head for old list.
* @new: hlist_head for new list.
*
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
static inline void hlist_move_list(struct hlist_head *old,
struct hlist_head *new)
{
new->first = old->first;
if (new->first)
new->first->pprev = &new->first;
old->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos ; pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
#define hlist_entry_safe(ptr, type, member) \
({ __typeof__(ptr) ____ptr = (ptr); \
____ptr ? hlist_entry(____ptr, type, member) : NULL; \
})
/**
* hlist_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry(pos, head, member) \
for (pos = hlist_entry_safe((head)->first, __typeof__(*(pos)), member); \
pos; \
pos = hlist_entry_safe((pos)->member.next, __typeof__(*(pos)), member))
/**
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point
* @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_continue(pos, member) \
for (pos = hlist_entry_safe((pos)->member.next, __typeof__(*(pos)), member); \
pos; \
pos = hlist_entry_safe((pos)->member.next, __typeof__(*(pos)), member))
/**
* hlist_for_each_entry_from - iterate over a hlist continuing from current point
* @pos: the type * to use as a loop cursor.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_from(pos, member) \
for (; pos; \
pos = hlist_entry_safe((pos)->member.next, __typeof__(*(pos)), member))
/**
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
* @pos: the type * to use as a loop cursor.
* @n: a &struct hlist_node to use as temporary storage
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
*/
#define hlist_for_each_entry_safe(pos, n, head, member) \
for (pos = hlist_entry_safe((head)->first, __typeof__(*pos), member); \
pos && ({ n = pos->member.next; 1; }); \
pos = hlist_entry_safe(n, __typeof__(*pos), member))
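/*
 * Illustrative sketch of the typical hlist use case: an open-hashing
 * table whose buckets are single-pointer heads (all names below are
 * hypothetical).
 *
 *	#define NBUCKETS 64
 *
 *	struct entry {
 *		unsigned int key;
 *		struct hlist_node hash;
 *	};
 *
 *	static struct hlist_head table[NBUCKETS];
 *
 *	void insert(struct entry *e)
 *	{
 *		hlist_add_head(&e->hash, &table[e->key % NBUCKETS]);
 *	}
 *
 *	struct entry *lookup(unsigned int key)
 *	{
 *		struct entry *e;
 *
 *		hlist_for_each_entry(e, &table[key % NBUCKETS], hash)
 *			if (e->key == key)
 *				return e;
 *		return NULL;
 *	}
 */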
#endif /* __BR_LIST_H */

301
2022/include/plist.h Normal file
View File

@@ -0,0 +1,301 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
* Descending-priority-sorted double-linked list
*
* (C) 2002-2003 Intel Corp
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
*
* 2001-2005 (c) MontaVista Software, Inc.
* Daniel Walker <dwalker@mvista.com>
*
* (C) 2005 Thomas Gleixner <tglx@linutronix.de>
*
* Simplifications of the original code by
* Oleg Nesterov <oleg@tv-sign.ru>
*
* Based on simple lists (include/linux/list.h).
*
* This is a priority-sorted list of nodes; each node has a
* priority from INT_MIN (highest) to INT_MAX (lowest).
*
* Addition is O(K), removal is O(1), change of priority of a node is
* O(K) and K is the number of RT priority levels used in the system.
* (1 <= K <= 99)
*
* This list is really a list of lists:
*
* - The tier 1 list is the prio_list, different priority nodes.
*
* - The tier 2 list is the node_list, serialized nodes.
*
* Simple ASCII art explanation:
*
* pl:prio_list (only for plist_node)
* nl:node_list
*   HEAD|                             NODE(S)
*       |
*       ||------------------------------------|
*       ||->|pl|<->|pl|<--------------->|pl|<-|
*       |   |10|   |21|   |21|   |21|   |40|   (prio)
*       |   |  |   |  |   |  |   |  |   |  |
*       |   |  |   |  |   |  |   |  |   |  |
* |->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
* |-------------------------------------------|
*
* The nodes on the prio_list list are sorted by priority to simplify
* the insertion of new nodes. There are no nodes with duplicate
* priorities on the list.
*
* The nodes on the node_list are ordered by priority and can contain
* entries which have the same priority. Those entries are ordered
* FIFO.
*
* Addition means: look for the prio_list node in the prio_list
* for the priority of the node and insert it before the node_list
* entry of the next prio_list node. If it is the first node of
* that priority, add it to the prio_list in the right position and
* insert it into the serialized node_list list
*
* Removal means remove it from the node_list and remove it from
* the prio_list if the node_list list_head is non-empty. In case
* of removal from the prio_list it must be checked whether other
* entries of the same priority are on the list or not. If there
* is another entry of the same priority then this entry has to
* replace the removed entry on the prio_list. If the entry which
* is removed is the only entry of this priority then a simple
* removal from both lists is sufficient.
*
* INT_MIN is the highest priority, 0 is the medium highest, INT_MAX
* is lowest priority.
*
* No locking is done, up to the caller.
*/
#ifndef _LINUX_PLIST_H_
#define _LINUX_PLIST_H_
#include "container-of.h"
#include "list.h"
//#include <types.h>
// #include <asm/bug.h>
struct plist_head {
struct list_head node_list;
};
struct plist_node {
int prio;
struct list_head prio_list;
struct list_head node_list;
};
/**
* PLIST_HEAD_INIT - static struct plist_head initializer
* @head: struct plist_head variable name
*/
#define PLIST_HEAD_INIT(head) \
{ \
.node_list = LIST_HEAD_INIT((head).node_list) \
}
/**
* PLIST_HEAD - declare and init plist_head
* @head: name for struct plist_head variable
*/
#define PLIST_HEAD(head) \
struct plist_head head = PLIST_HEAD_INIT(head)
/**
* PLIST_NODE_INIT - static struct plist_node initializer
* @node: struct plist_node variable name
* @__prio: initial node priority
*/
#define PLIST_NODE_INIT(node, __prio) \
{ \
.prio = (__prio), \
.prio_list = LIST_HEAD_INIT((node).prio_list), \
.node_list = LIST_HEAD_INIT((node).node_list), \
}
/**
* plist_head_init - dynamic struct plist_head initializer
* @head: &struct plist_head pointer
*/
static inline void
plist_head_init(struct plist_head *head)
{
INIT_LIST_HEAD(&head->node_list);
}
/**
* plist_node_init - Dynamic struct plist_node initializer
* @node: &struct plist_node pointer
* @prio: initial node priority
*/
static inline void plist_node_init(struct plist_node *node, int prio)
{
node->prio = prio;
INIT_LIST_HEAD(&node->prio_list);
INIT_LIST_HEAD(&node->node_list);
}
extern void plist_add(struct plist_node *node, struct plist_head *head);
extern void plist_del(struct plist_node *node, struct plist_head *head);
extern void plist_requeue(struct plist_node *node, struct plist_head *head);
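/*
 * Illustrative sketch ('struct task' and the priorities are hypothetical):
 * nodes are kept sorted by ascending prio value, so plist_first() always
 * yields the highest-priority entry.
 *
 *	struct task {
 *		const char *name;
 *		struct plist_node node;
 *	};
 *
 *	PLIST_HEAD(runqueue);
 *	struct task urgent = { .name = "urgent" }, idle = { .name = "idle" };
 *
 *	plist_node_init(&urgent.node, 0);
 *	plist_node_init(&idle.node, 10);
 *	plist_add(&idle.node, &runqueue);
 *	plist_add(&urgent.node, &runqueue);
 *	// plist_first_entry(&runqueue, struct task, node) == &urgent
 */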
/**
* plist_for_each - iterate over the plist
* @pos: the type * to use as a loop counter
* @head: the head for your list
*/
#define plist_for_each(pos, head) \
list_for_each_entry(pos, &(head)->node_list, node_list)
/**
* plist_for_each_continue - continue iteration over the plist
* @pos: the type * to use as a loop cursor
* @head: the head for your list
*
* Continue to iterate over plist, continuing after the current position.
*/
#define plist_for_each_continue(pos, head) \
list_for_each_entry_continue(pos, &(head)->node_list, node_list)
/**
* plist_for_each_safe - iterate safely over a plist of given type
* @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
* @head: the head for your list
*
* Iterate over a plist of given type, safe against removal of list entry.
*/
#define plist_for_each_safe(pos, n, head) \
list_for_each_entry_safe(pos, n, &(head)->node_list, node_list)
/**
* plist_for_each_entry - iterate over list of given type
* @pos: the type * to use as a loop counter
* @head: the head for your list
* @mem: the name of the list_head within the struct
*/
#define plist_for_each_entry(pos, head, mem) \
list_for_each_entry(pos, &(head)->node_list, mem.node_list)
/**
* plist_for_each_entry_continue - continue iteration over list of given type
* @pos: the type * to use as a loop cursor
* @head: the head for your list
* @m: the name of the list_head within the struct
*
* Continue to iterate over list of given type, continuing after
* the current position.
*/
#define plist_for_each_entry_continue(pos, head, m) \
list_for_each_entry_continue(pos, &(head)->node_list, m.node_list)
/**
* plist_for_each_entry_safe - iterate safely over list of given type
* @pos: the type * to use as a loop counter
* @n: another type * to use as temporary storage
* @head: the head for your list
* @m: the name of the list_head within the struct
*
* Iterate over list of given type, safe against removal of list entry.
*/
#define plist_for_each_entry_safe(pos, n, head, m) \
list_for_each_entry_safe(pos, n, &(head)->node_list, m.node_list)
/**
* plist_head_empty - return !0 if a plist_head is empty
* @head: &struct plist_head pointer
*/
static inline int plist_head_empty(const struct plist_head *head)
{
return list_empty(&head->node_list);
}
/**
* plist_node_empty - return !0 if plist_node is not on a list
* @node: &struct plist_node pointer
*/
static inline int plist_node_empty(const struct plist_node *node)
{
return list_empty(&node->node_list);
}
/* All functions below assume the plist_head is not empty. */
/**
* plist_first_entry - get the struct for the first entry
* @head: the &struct plist_head pointer
* @type: the type of the struct this is embedded in
* @member: the name of the list_head within the struct
*/
#ifdef CONFIG_DEBUG_PLIST
# define plist_first_entry(head, type, member) \
({ \
WARN_ON(plist_head_empty(head)); \
container_of(plist_first(head), type, member); \
})
#else
# define plist_first_entry(head, type, member) \
container_of(plist_first(head), type, member)
#endif
/**
* plist_last_entry - get the struct for the last entry
* @head: the &struct plist_head pointer
* @type: the type of the struct this is embedded in
* @member: the name of the list_head within the struct
*/
#ifdef CONFIG_DEBUG_PLIST
# define plist_last_entry(head, type, member) \
({ \
WARN_ON(plist_head_empty(head)); \
container_of(plist_last(head), type, member); \
})
#else
# define plist_last_entry(head, type, member) \
container_of(plist_last(head), type, member)
#endif
/**
* plist_next - get the next entry in list
* @pos: the type * to cursor
*/
#define plist_next(pos) \
list_next_entry(pos, node_list)
/**
* plist_prev - get the prev entry in list
* @pos: the type * to cursor
*/
#define plist_prev(pos) \
list_prev_entry(pos, node_list)
/**
* plist_first - return the first node (and thus, highest priority)
* @head: the &struct plist_head pointer
*
* Assumes the plist is _not_ empty.
*/
static inline struct plist_node *plist_first(const struct plist_head *head)
{
return list_entry(head->node_list.next,
struct plist_node, node_list);
}
/**
* plist_last - return the last node (and thus, lowest priority)
* @head: the &struct plist_head pointer
*
* Assumes the plist is _not_ empty.
*/
static inline struct plist_node *plist_last(const struct plist_head *head)
{
return list_entry(head->node_list.prev,
struct plist_node, node_list);
}
#endif

90
2022/include/pool.h Normal file
View File

@@ -0,0 +1,90 @@
/* pool.h - A simple memory pool manager.
*
* Copyright (C) 2021-2022 Bruno Raoult ("br")
* Licensed under the GNU General Public License v3.0 or later.
* Some rights reserved. See COPYING.
*
* You should have received a copy of the GNU General Public License along with this
* program. If not, see <https://www.gnu.org/licenses/gpl-3.0-standalone.html>.
*
* SPDX-License-Identifier: GPL-3.0-or-later <https://spdx.org/licenses/GPL-3.0-or-later.html>
*
*/
#ifndef POOL_H
#define POOL_H
#include <stdint.h>
#include <stddef.h>
#include "list.h"
#include "bits.h"
#define POOL_NAME_LENGTH (16) /* max name length including trailing \0 */
typedef struct {
struct list_head list_blocks; /* list of allocated blocks in pool */
char data[]; /* objects block */
} block_t;
typedef struct {
char name[POOL_NAME_LENGTH]; /* pool name */
size_t eltsize; /* object size */
u32 available; /* current available elements */
u32 allocated; /* total objects allocated */
u32 growsize; /* number of objects per block allocated */
u32 nblocks; /* number of blocks allocated */
struct list_head list_available; /* available nodes */
struct list_head list_blocks; /* allocated blocks */
} pool_t;
/**
* pool_stats - display some pool statistics
* @pool: the pool address.
*/
void pool_stats(pool_t *pool);
/**
* pool_create - create a new memory pool
* @name: the name to give to the pool.
* @grow: the number of elements to add when no more available.
* @size: the size of an element in pool.
*
* The name will be truncated to 16 characters (including the final '\0').
*
* Return: The address of the created pool, or NULL if error.
*/
pool_t *pool_create(const char *name, u32 grow, size_t size);
/**
* pool_get() - Get an element from a pool.
* @pool: The pool address.
*
* Get an object from the pool.
*
* Return: The address of the object, or NULL if error.
*/
void *pool_get(pool_t *pool);
/**
* pool_add() - Add (free) an element to a pool.
* @pool: The pool address.
* @elt: The address of the object to add to the pool.
*
* The object will be available for further pool_get().
*
* Return: The current number of available elements in pool (including
* @elt).
*/
u32 pool_add(pool_t *pool, void *elt);
/**
* pool_destroy() - destroy a pool.
* @pool: The pool address.
*
* Attention: All memory is freed, but no check is done whether all pool
* elements have been released. Referencing any pool object after this call
* will likely result in memory corruption.
*/
void pool_destroy(pool_t *pool);
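/*
 * Illustrative sketch of the intended call sequence ('struct obj' and the
 * grow size are hypothetical). Objects smaller than a struct list_head
 * are transparently padded up by pool_create().
 *
 *	struct obj { int key; int value; };
 *
 *	pool_t *pool = pool_create("objs", 128, sizeof(struct obj));
 *	struct obj *o = pool_get(pool);	// first call allocates a block of 128
 *
 *	o->key = 1;
 *	o->value = 42;
 *	pool_add(pool, o);	// hand the object back to the pool
 *	pool_destroy(pool);	// releases every block at once
 */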
#endif

128
2022/include/rwonce.h Normal file
View File

@@ -0,0 +1,128 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* adaptation of kernel's <asm-generic/rwonce.h>
* See https://www.kernel.org/doc/Documentation/memory-barriers.txt
*/
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
* particular ordering. One way to make the compiler aware of ordering is to
* put the two invocations of READ_ONCE or WRITE_ONCE in different C
* statements.
*
* These two macros will also work on aggregate data types like structs or
* unions.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*/
#ifndef __BR_RWONCE_H
#define __BR_RWONCE_H
/************ originally in <include/linux/compiler_attributes.h> */
#if __has_attribute(__error__)
# define __compiletime_error(msg) __attribute__((__error__(msg)))
#else
# define __compiletime_error(msg)
#endif
/************ originally in <include/linux/compiler_types.h> */
/*
* __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
* non-scalar types unchanged.
*/
/*
* Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
* is not type-compatible with 'signed char', and we define a separate case.
*/
#define __scalar_type_to_expr_cases(type) \
unsigned type: (unsigned type)0, \
signed type: (signed type)0
#define __unqual_scalar_typeof(x) \
typeof(_Generic((x), \
char: (char)0, \
__scalar_type_to_expr_cases(char), \
__scalar_type_to_expr_cases(short), \
__scalar_type_to_expr_cases(int), \
__scalar_type_to_expr_cases(long), \
__scalar_type_to_expr_cases(long long), \
default: (x)))
/* Is this type a native word size -- useful for atomic operations */
#define __native_word(t) \
(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
extern void prefix ## suffix(void) __compiletime_error(msg); \
if (!(condition)) \
prefix ## suffix(); \
} while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert(condition, msg, prefix, suffix)
/**
* compiletime_assert - break build and emit msg if condition is false
* @condition: a compile-time constant condition to check
* @msg: a message to emit if condition is false
*
* In tradition of POSIX assert, this macro will break the build if the
* supplied condition is *false*, emitting the supplied error message if the
* compiler has support to do so.
*/
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
#define compiletime_assert_atomic_type(t) \
compiletime_assert(__native_word(t), \
"Need native word sized stores/loads for atomicity.")
/************ originally in <asm-generic/rwonce.h> */
/*
* Yes, this permits 64-bit accesses on 32-bit architectures. These will
* actually be atomic in some cases (namely Armv7 + LPAE), but for others we
* rely on the access being split into 2x32-bit accesses for a 32-bit quantity
* (e.g. a virtual address) and a strong prevailing wind.
*/
#define compiletime_assert_rwonce_type(t) \
compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
"Unsupported access size for {READ,WRITE}_ONCE().")
/*
* Use __READ_ONCE() instead of READ_ONCE() if you do not require any
* atomicity. Note that this may result in tears!
*/
#ifndef __READ_ONCE
#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
#endif
#define READ_ONCE(x) \
({ \
compiletime_assert_rwonce_type(x); \
__READ_ONCE(x); \
})
#define __WRITE_ONCE(x, val) \
do { \
*(volatile typeof(x) *)&(x) = (val); \
} while (0)
#define WRITE_ONCE(x, val) \
do { \
compiletime_assert_rwonce_type(x); \
__WRITE_ONCE(x, val); \
} while (0)
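/*
 * Illustrative sketch (hypothetical shared flag): one thread publishes a
 * flag, another polls it. The ONCE macros keep the compiler from caching,
 * tearing or merging the accesses; they do not add memory barriers or
 * ordering on their own.
 *
 *	static int ready;
 *
 *	// writer
 *	WRITE_ONCE(ready, 1);
 *
 *	// reader
 *	while (!READ_ONCE(ready))
 *		;
 */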
#endif /* __BR_RWONCE_H */

111
2022/libsrc/debug.c Normal file
View File

@@ -0,0 +1,111 @@
/* debug.c - debug/log management
*
* Copyright (C) 2021-2022 Bruno Raoult ("br")
* Licensed under the GNU General Public License v3.0 or later.
* Some rights reserved. See COPYING.
*
* You should have received a copy of the GNU General Public License along with this
* program. If not, see <https://www.gnu.org/licenses/gpl-3.0-standalone.html>.
*
* SPDX-License-Identifier: GPL-3.0-or-later <https://spdx.org/licenses/GPL-3.0-or-later.html>
*
*/
#include <stdio.h>
#include <stdarg.h>
#include <time.h>
#ifndef DEBUG_DEBUG
#define DEBUG_DEBUG
#endif
#include "debug.h"
#define NANOSEC 1000000000 /* nanoseconds per second */
#define MILLISEC 1000000 /* nanoseconds per millisecond */
static s64 timer_start; /* in nanoseconds */
static u32 debug_level=0;
void debug_level_set(u32 level)
{
debug_level = level;
log(1, "debug level set to %u\n", level);
}
void debug_init(u32 level)
{
struct timespec timer;
debug_level_set(level);
if (!clock_gettime(CLOCK_MONOTONIC, &timer)) {
timer_start = timer.tv_sec * NANOSEC + timer.tv_nsec;
}
else {
timer_start = 0;
}
log(0, "timer started.\n");
}
static inline s64 timer_elapsed(void)
{
struct timespec timer;
clock_gettime(CLOCK_MONOTONIC, &timer);
return (timer.tv_sec * NANOSEC + timer.tv_nsec) - timer_start;
}
/* debug - log function
 * @level     : message level (printed only if <= current debug level)
 * @timestamp : if true, prefix the message with the elapsed time
 * @indent    : indent level (2 spaces each)
 * @src       : source file/func name (or NULL)
 * @line      : line number (printed only if @src is non-NULL)
 * @fmt       : printf-like format string, followed by its arguments
 */
void debug(u32 level, bool timestamp, u32 indent, const char *src,
u32 line, const char *fmt, ...)
{
if (level > debug_level)
return;
va_list ap;
if (indent)
printf("%*s", 2*(indent-1), "");
if (timestamp) {
s64 diff = timer_elapsed();
printf("%ld.%03ld ", diff/NANOSEC, (diff/1000000)%1000);
printf("%010ld ", diff);
}
if (src) {
if (line)
printf("[%s:%u] ", src, line);
else
printf("[%s] ", src);
}
va_start(ap, fmt);
vprintf(fmt, ap);
va_end(ap);
}
#ifdef BIN_debug
#include <unistd.h>
int main()
{
int foo=1;
debug_init(5);
log(0, "log0=%d\n", foo++);
log(1, "log1=%d\n", foo++);
log(2, "log2=%d\n", foo++);
log_i(2, "log_i 2=%d\n", foo++);
log_i(5, "log_i 5=%d\n", foo++);
log_i(6, "log_i 6=%d\n", foo++);
log_it(4, "log_it 4=%d\n", foo++);
log_f(1, "log_f 5=%d\n", foo++);
}
#endif

173
2022/libsrc/plist.c Normal file
View File

@@ -0,0 +1,173 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* adapted from Linux kernel lib/plist.c
*
* Descending-priority-sorted double-linked list
*
* (C) 2002-2003 Intel Corp
* Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
*
* 2001-2005 (c) MontaVista Software, Inc.
* Daniel Walker <dwalker@mvista.com>
*
* (C) 2005 Thomas Gleixner <tglx@linutronix.de>
*
* Simplifications of the original code by
* Oleg Nesterov <oleg@tv-sign.ru>
*
* Based on simple lists (include/linux/list.h).
*
* This file contains the add / del functions which are considered to
* be too large to inline. See include/linux/plist.h for further
* information.
*/
#include "plist.h"
#include "bug.h"
#ifdef DEBUG_PLIST
static struct plist_head test_head;
static void plist_check_prev_next(struct list_head *t, struct list_head *p,
struct list_head *n)
{
WARN(n->prev != p || p->next != n,
"top: %p, n: %p, p: %p\n"
"prev: %p, n: %p, p: %p\n"
"next: %p, n: %p, p: %p\n",
t, t->next, t->prev,
p, p->next, p->prev,
n, n->next, n->prev);
}
static void plist_check_list(struct list_head *top)
{
struct list_head *prev = top, *next = top->next;
plist_check_prev_next(top, prev, next);
while (next != top) {
prev = next;
next = prev->next;
plist_check_prev_next(top, prev, next);
}
}
static void plist_check_head(struct plist_head *head)
{
if (!plist_head_empty(head))
plist_check_list(&plist_first(head)->prio_list);
plist_check_list(&head->node_list);
}
#else
# define plist_check_head(h) do { } while (0)
#endif
/**
* plist_add - add @node to @head
*
* @node: &struct plist_node pointer
* @head: &struct plist_head pointer
*/
void plist_add(struct plist_node *node, struct plist_head *head)
{
struct plist_node *first, *iter, *prev = NULL;
struct list_head *node_next = &head->node_list;
plist_check_head(head);
WARN_ON(!plist_node_empty(node));
WARN_ON(!list_empty(&node->prio_list));
if (plist_head_empty(head))
goto ins_node;
first = iter = plist_first(head);
do {
if (node->prio < iter->prio) {
node_next = &iter->node_list;
break;
}
prev = iter;
iter = list_entry(iter->prio_list.next,
struct plist_node, prio_list);
} while (iter != first);
if (!prev || prev->prio != node->prio)
list_add_tail(&node->prio_list, &iter->prio_list);
ins_node:
list_add_tail(&node->node_list, node_next);
plist_check_head(head);
}
/**
* plist_del - Remove a @node from plist.
*
* @node: &struct plist_node pointer - entry to be removed
* @head: &struct plist_head pointer - list head
*/
void plist_del(struct plist_node *node, struct plist_head *head)
{
plist_check_head(head);
if (!list_empty(&node->prio_list)) {
if (node->node_list.next != &head->node_list) {
struct plist_node *next;
next = list_entry(node->node_list.next,
struct plist_node, node_list);
/* add the next plist_node into prio_list */
if (list_empty(&next->prio_list))
list_add(&next->prio_list, &node->prio_list);
}
list_del_init(&node->prio_list);
}
list_del_init(&node->node_list);
plist_check_head(head);
}
/**
* plist_requeue - Requeue @node at end of same-prio entries.
*
* This is essentially an optimized plist_del() followed by
* plist_add(). It moves an entry already in the plist to
* after any other same-priority entries.
*
* @node: &struct plist_node pointer - entry to be moved
* @head: &struct plist_head pointer - list head
*/
void plist_requeue(struct plist_node *node, struct plist_head *head)
{
struct plist_node *iter;
struct list_head *node_next = &head->node_list;
plist_check_head(head);
BUG_ON(plist_head_empty(head));
BUG_ON(plist_node_empty(node));
if (node == plist_last(head))
return;
iter = plist_next(node);
if (node->prio != iter->prio)
return;
plist_del(node, head);
plist_for_each_continue(iter, head) {
if (node->prio != iter->prio) {
node_next = &iter->node_list;
break;
}
}
list_add_tail(&node->node_list, node_next);
plist_check_head(head);
}

222
2022/libsrc/pool.c Normal file
View File

@@ -0,0 +1,222 @@
/* pool.c - A simple pool manager.
*
* Copyright (C) 2021-2022 Bruno Raoult ("br")
* Licensed under the GNU General Public License v3.0 or later.
* Some rights reserved. See COPYING.
*
* You should have received a copy of the GNU General Public License along with this
* program. If not, see <https://www.gnu.org/licenses/gpl-3.0-standalone.html>.
*
* SPDX-License-Identifier: GPL-3.0-or-later <https://spdx.org/licenses/GPL-3.0-or-later.html>
*
*/
#include <stddef.h>
#include <malloc.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include "list.h"
#include "pool.h"
#include "debug.h"
#include "bits.h"
void pool_stats(pool_t *pool)
{
if (pool) {
block_t *block;
log_f(1, "[%s] pool [%p]: blocks:%u avail:%u alloc:%u grow:%u eltsize:%lu\n",
pool->name, (void *)pool, pool->nblocks, pool->available, pool->allocated,
pool->growsize, pool->eltsize);
log(5, "\tblocks: ");
list_for_each_entry(block, &pool->list_blocks, list_blocks) {
log(5, "%p ", block);
}
log(5, "\n");
}
}
pool_t *pool_create(const char *name, u32 growsize, size_t eltsize)
{
pool_t *pool;
# ifdef DEBUG_POOL
log_f(1, "name=[%s] growsize=%u eltsize=%lu\n",
name, growsize, eltsize);
# endif
/* we need at least sizeof(struct list_head) space in pool elements
*/
if (eltsize < sizeof (struct list_head)) {
# ifdef DEBUG_POOL
log_f(1, "[%s]: structure size too small (%lu < %lu), adjusting to %lu.\n",
name, eltsize, sizeof(struct list_head), sizeof(struct list_head));
# endif
eltsize = sizeof(struct list_head);
}
if ((pool = malloc(sizeof (*pool)))) {
strncpy(pool->name, name, POOL_NAME_LENGTH - 1);
pool->name[POOL_NAME_LENGTH - 1] = 0;
pool->growsize = growsize;
pool->eltsize = eltsize;
pool->available = 0;
pool->allocated = 0;
pool->nblocks = 0;
INIT_LIST_HEAD(&pool->list_available);
INIT_LIST_HEAD(&pool->list_blocks);
} else {
errno = ENOMEM;
}
return pool;
}
static u32 _pool_add(pool_t *pool, struct list_head *elt)
{
# ifdef DEBUG_POOL
log_f(6, "pool=%p &head=%p elt=%p off1=%lu off2=%lu\n",
(void *)pool,
(void *)&pool->list_available,
(void *)elt,
(void *)&pool->list_available-(void *)pool,
offsetof(pool_t, list_available));
# endif
list_add(elt, &pool->list_available);
return ++pool->available;
}
u32 pool_add(pool_t *pool, void *elt)
{
return _pool_add(pool, elt);
}
static struct list_head *_pool_get(pool_t *pool)
{
struct list_head *res = pool->list_available.next;
pool->available--;
list_del(res);
return res;
}
void *pool_get(pool_t *pool)
{
if (!pool)
return NULL;
if (!pool->available) {
block_t *block = malloc(sizeof(block_t) + pool->eltsize * pool->growsize);
if (!block) {
# ifdef DEBUG_POOL
log_f(1, "[%s]: failed block allocation\n", pool->name);
# endif
errno = ENOMEM;
return NULL;
}
/* maintain list of allocated blocks
*/
list_add(&block->list_blocks, &pool->list_blocks);
pool->nblocks++;
# ifdef DEBUG_POOL
log_f(1, "[%s]: growing pool from %u to %u elements. block=%p nblocks=%u\n",
pool->name,
pool->allocated,
pool->allocated + pool->growsize,
block,
pool->nblocks);
# endif
pool->allocated += pool->growsize;
for (u32 i = 0; i < pool->growsize; ++i) {
void *cur = block->data + i * pool->eltsize;
# ifdef DEBUG_POOL
log_f(7, "alloc=%p cur=%p\n", block, cur);
# endif
_pool_add(pool, (struct list_head *)cur);
}
}
/* this is the effective address of the object (and also the
* pool list_head address)
*/
return _pool_get(pool);
}
void pool_destroy(pool_t *pool)
{
block_t *block, *tmp;
if (!pool)
return;
/* release memory blocks */
# ifdef DEBUG_POOL
log_f(1, "[%s]: releasing %d blocks and main structure\n", pool->name, pool->nblocks);
log(5, "blocks:");
# endif
list_for_each_entry_safe(block, tmp, &pool->list_blocks, list_blocks) {
list_del(&block->list_blocks);
# ifdef DEBUG_POOL
log(5, " %p", block); /* log the pointer before freeing it */
# endif
free(block);
}
# ifdef DEBUG_POOL
log(5, "\n");
# endif
free(pool);
}
#ifdef BIN_pool
struct d {
u16 data1;
char c;
struct list_head list;
};
static LIST_HEAD (head);
int main(int ac, char**av)
{
pool_t *pool;
int total;
int action=0;
u16 icur=0;
char ccur='z';
struct d *elt;
debug_init(3);
log_f(1, "%s: sizeof(d)=%lu sizeof(*d)=%lu off=%lu\n", *av, sizeof(elt),
sizeof(*elt), offsetof(struct d, list));
if ((pool = pool_create("dummy", 3, sizeof(*elt)))) {
pool_stats(pool);
for (int cur=1; cur<ac; ++cur) {
total = atoi(av[cur]);
if (action == 0) { /* add elt to list */
log_f(2, "adding %d elements\n", total);
for (int i = 0; i < total; ++i) {
elt = pool_get(pool);
elt->data1 = icur++;
elt->c = ccur--;
list_add(&elt->list, &head);
}
pool_stats(pool);
action = 1;
} else { /* remove one elt from list */
log_f(2, "deleting %d elements\n", total);
for (int i = 0; i < total; ++i) {
if (!list_empty(&head)) {
elt = list_last_entry(&head, struct d, list);
printf("elt=[%d, %c]\n", elt->data1, elt->c);
list_del(&elt->list);
pool_add(pool, elt);
}
}
pool_stats(pool);
action = 0;
}
}
}
pool_stats(pool);
pool_destroy(pool);
}
#endif