/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

/// D translation of kerncompat.h from btrfs-progs (v5.9)
module btrfs.c.kerncompat;

import btrfs.c.dcompat;

import core.stdc.config;
import core.stdc.stdint;
import core.stdc.stdlib;

auto ptr_to_u64(T)(T x) { return cast(u64)cast(uintptr_t)x; }
auto u64_to_ptr(T)(T x) { return cast(void *)cast(uintptr_t)x; }

enum READ = 0;
enum WRITE = 1;
enum READA = 2;

alias gfp_t = int;
auto get_cpu_var(T)(auto ref T p) { return p; }
auto __get_cpu_var(T)(auto ref T p) { return p; }
enum BITS_PER_BYTE = 8;
enum BITS_PER_LONG = (__SIZEOF_LONG__ * BITS_PER_BYTE);
enum __GFP_BITS_SHIFT = 20;
enum __GFP_BITS_MASK = (cast(int)((1 << __GFP_BITS_SHIFT) - 1));
enum GFP_KERNEL = 0;
enum GFP_NOFS = 0;
auto ARRAY_SIZE(T, size_t n)(ref T[n] x) { return n; }

public import core.stdc.limits : ULONG_MAX;

alias s8 = byte;
alias u8 = ubyte;
alias s16 = short;
alias u16 = ushort;
alias s32 = int;
alias u32 = uint;
alias s64 = long;
alias u64 = ulong;

alias __s8 = s8;
alias __u8 = u8;
alias __s16 = s16;
alias __u16 = u16;
alias __s32 = s32;
alias __u32 = u32;
alias __s64 = s64;
alias __u64 = u64;

struct vma_shared { int prio_tree_node; }
struct vm_area_struct {
	c_ulong vm_pgoff;
	c_ulong vm_start;
	c_ulong vm_end;
	vma_shared shared_;
}

struct page {
	c_ulong index;
}

struct mutex {
	c_ulong lock;
}

void mutex_init()(mutex *m) { m.lock = 1; }

void mutex_lock()(mutex *m)
{
	m.lock--;
}

void mutex_unlock()(mutex *m)
{
	m.lock++;
}

int mutex_is_locked()(mutex *m)
{
	return m.lock != 1;
}
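
/* A minimal usage sketch (not part of the original header): the stub mutex
 * above is just a counter initialised to 1, so "locked" means lock != 1. */
unittest
{
	mutex m;
	mutex_init(&m);
	assert(!mutex_is_locked(&m));
	mutex_lock(&m);
	assert(mutex_is_locked(&m));
	mutex_unlock(&m);
	assert(!mutex_is_locked(&m));
}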

void cond_resched()() {}
void preempt_enable()() {}
void preempt_disable()() {}

auto BITOP_MASK(T)(T nr) { return c_ulong(1) << (nr % BITS_PER_LONG); }
auto BITOP_WORD(T)(T nr) { return nr / BITS_PER_LONG; }

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
void __set_bit()(int nr, shared c_ulong *addr)
{
	c_ulong mask = BITOP_MASK(nr);
	c_ulong *p = (cast(c_ulong *)addr) + BITOP_WORD(nr);

	*p |= mask;
}

void __clear_bit()(int nr, shared c_ulong *addr)
{
	c_ulong mask = BITOP_MASK(nr);
	c_ulong *p = (cast(c_ulong *)addr) + BITOP_WORD(nr);

	*p &= ~mask;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
int test_bit()(int nr, const shared c_ulong *addr)
{
	return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
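
/* A small usage sketch of the non-atomic bit helpers above; the explicit
 * cast to shared is only there to match the parameter types of these stubs. */
unittest
{
	c_ulong[2] bitmap;
	auto addr = cast(shared c_ulong *)bitmap.ptr;

	__set_bit(0, addr);
	__set_bit(BITS_PER_LONG + 1, addr); // lands in the second word
	assert(test_bit(0, addr));
	assert(test_bit(BITS_PER_LONG + 1, addr));
	assert(!test_bit(1, addr));

	__clear_bit(0, addr);
	assert(!test_bit(0, addr));
}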

/*
 * error pointer
 */
enum MAX_ERRNO	= 4095;
auto IS_ERR_VALUE(T)(T x) { return (x >= cast(c_ulong)-MAX_ERRNO); }

void *ERR_PTR()(c_long error)
{
	return cast(void *) error;
}

c_long PTR_ERR(const void *ptr)
{
	return cast(c_long) ptr;
}

c_int IS_ERR()(const void *ptr)
{
	return IS_ERR_VALUE(cast(c_ulong)ptr);
}

c_int IS_ERR_OR_NULL()(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}
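
/* A short sketch of the error-pointer round trip, mirroring the kernel idiom
 * of encoding a negative errno in a pointer value. */
unittest
{
	void *p = ERR_PTR(-12); // e.g. -ENOMEM
	assert(IS_ERR(p));
	assert(IS_ERR_OR_NULL(p));
	assert(PTR_ERR(p) == -12);

	int dummy;
	assert(!IS_ERR(&dummy));
	assert(!IS_ERR_OR_NULL(&dummy));
	assert(IS_ERR_OR_NULL(null));
}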

auto div_u64(X, Y)(X x, Y y) { return x / y; }

/**
 * __swap - swap values of @a and @b
 * @a: first value
 * @b: second value
 */
void __swap(A, B)(ref A a, ref B b)
{ typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; }
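
/* A tiny usage sketch of __swap. */
unittest
{
	int a = 1, b = 2;
	__swap(a, b);
	assert(a == 2 && b == 1);
}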

/*
 * This looks more complex than it should be. But we need to
 * get the type for the ~ right in round_down (it needs to be
 * as wide as the result!), and we want to evaluate the macro
 * arguments just once each.
 */
X __round_mask(X, Y)(X x, Y y) { return cast(X)(y-1); }
auto __round_up(X, Y)(X x, Y y) { return ((x-1) | __round_mask(x, y))+1; }
auto __round_down(X, Y)(X x, Y y) { return x & ~__round_mask(x, y); }
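
/* A few worked examples of the rounding helpers; the second argument must be
 * a power of two, as in the kernel macros these mirror. */
unittest
{
	assert(__round_up(7, 4) == 8);
	assert(__round_up(8, 4) == 8);
	assert(__round_down(7, 4) == 4);
	assert(__round_down(0x12345u, 0x1000u) == 0x12000u);
}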

/*
 * printk
 */
void printk(Args...)(const char *fmt, auto ref Args args) { import core.stdc.stdio; fprintf(stderr, fmt, args); }
enum	KERN_CRIT	= "";
enum	KERN_ERR	= "";

/*
 * kmalloc/kfree
 */
auto kmalloc(X, Y)(X x, Y y) { return malloc(x); }
auto kzalloc(X, Y)(X x, Y y) { return calloc(1, x); }
auto kstrdup(X, Y)(X x, Y y) { return strdup(x); }
alias kfree = free;
alias vmalloc = malloc;
alias vfree = free;
alias kvzalloc = kzalloc;
alias kvfree = free;
auto memalloc_nofs_save()() { return 0; }
void memalloc_nofs_restore(X)(X x) {}

// #define container_of(ptr, type, member) ({                      \
//         const typeof( ((type *)0)->member ) *__mptr = (ptr);    \
// 	        (type *)( (char *)__mptr - offsetof(type,member) );})

/* Alignment check */
auto IS_ALIGNED(X, A)(X x, A a) { return (x & (cast(X)a - 1)) == 0; }

c_int is_power_of_2()(c_ulong n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}
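
/* Brief checks of the alignment helpers above (the alignment argument is
 * assumed to be a power of two, as with the C originals). */
unittest
{
	assert(is_power_of_2(4096));
	assert(!is_power_of_2(0));
	assert(!is_power_of_2(12));

	assert(IS_ALIGNED(0x1000, 512));
	assert(!IS_ALIGNED(0x1001, 512));
}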

static if (!typesafe) {
alias __le16 = u16;
alias __be16 = u16;
alias __le32 = u32;
alias __be32 = u32;
alias __le64 = u64;
alias __be64 = u64;
} else {
import ae.utils.bitmanip;
alias __le16 = LittleEndian!ushort;
alias __be16 = BigEndian!ushort;
alias __le32 = LittleEndian!uint;
alias __be32 = BigEndian!uint;
alias __le64 = LittleEndian!ulong;
alias __be64 = BigEndian!ulong;
}

/* Helpers to generate set/get funcs for the struct fields;
 * assume there is a lefoo_to_cpu for every type, so let's make a simple
 * one for u8:
 */
auto le8_to_cpu(V)(V v) { return v; }
auto cpu_to_le8(V)(V v) { return v; }
alias __le8 = u8;

static if (!typesafe) {
// version (BigEndian) {
// #define cpu_to_le64(x) ((__force __le64)(u64)(bswap_64(x)))
// #define le64_to_cpu(x) ((__force u64)(__le64)(bswap_64(x)))
// #define cpu_to_le32(x) ((__force __le32)(u32)(bswap_32(x)))
// #define le32_to_cpu(x) ((__force u32)(__le32)(bswap_32(x)))
// #define cpu_to_le16(x) ((__force __le16)(u16)(bswap_16(x)))
// #define le16_to_cpu(x) ((__force u16)(__le16)(bswap_16(x)))
// } else {
// #define cpu_to_le64(x) ((__force __le64)(u64)(x))
// #define le64_to_cpu(x) ((__force u64)(__le64)(x))
// #define cpu_to_le32(x) ((__force __le32)(u32)(x))
// #define le32_to_cpu(x) ((__force u32)(__le32)(x))
// #define cpu_to_le16(x) ((__force __le16)(u16)(x))
// #define le16_to_cpu(x) ((__force u16)(__le16)(x))
// }
} else {
__le64 cpu_to_le64(u64 x) { return __le64(x); }
u64 le64_to_cpu(const __le64 x) { return x; }
__le32 cpu_to_le32(u32 x) { return __le32(x); }
u32 le32_to_cpu(const __le32 x) { return x; }
__le16 cpu_to_le16(u16 x) { return __le16(x); }
u16 le16_to_cpu(const __le16 x) { return x; }
}
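
/* A round-trip sketch for the typed endian wrappers; only compiled in the
 * typesafe configuration, where the functions above exist. */
static if (typesafe)
unittest
{
	assert(le16_to_cpu(cpu_to_le16(0x1234)) == 0x1234);
	assert(le32_to_cpu(cpu_to_le32(0xDEADBEEF)) == 0xDEADBEEF);
	assert(le64_to_cpu(cpu_to_le64(0x0123456789ABCDEF)) == 0x0123456789ABCDEF);
}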

static if (!typesafe) {
align(1) struct __una_u16 { __le16 x; }
align(1) struct __una_u32 { __le32 x; }
align(1) struct __una_u64 { __le64 x; }

auto get_unaligned_le8(P)(P p) { return (*(cast(u8*)(p))); }
auto get_unaligned_8(P)(P p) { return (*(cast(u8*)(p))); }
auto put_unaligned_le8(Val, P)(Val val, P p) { return ((*(cast(u8*)(p))) = (val)); }
auto put_unaligned_8(Val, P)(Val val, P p) { return ((*(cast(u8*)(p))) = (val)); }
auto get_unaligned_le16(P)(P p) { return le16_to_cpu((cast(const __una_u16 *)(p)).x); }
auto get_unaligned_16(P)(P p) { return ((cast(const __una_u16 *)(p)).x); }
auto put_unaligned_le16(Val, P)(Val val, P p) { return ((cast(__una_u16 *)(p)).x = cpu_to_le16(val)); }
auto put_unaligned_16(Val, P)(Val val, P p) { return ((cast(__una_u16 *)(p)).x = (val)); }
auto get_unaligned_le32(P)(P p) { return le32_to_cpu((cast(const __una_u32 *)(p)).x); }
auto get_unaligned_32(P)(P p) { return ((cast(const __una_u32 *)(p)).x); }
auto put_unaligned_le32(Val, P)(Val val, P p) { return ((cast(__una_u32 *)(p)).x = cpu_to_le32(val)); }
auto put_unaligned_32(Val, P)(Val val, P p) { return ((cast(__una_u32 *)(p)).x = (val)); }
auto get_unaligned_le64(P)(P p) { return le64_to_cpu((cast(const __una_u64 *)(p)).x); }
auto get_unaligned_64(P)(P p) { return ((cast(const __una_u64 *)(p)).x); }
auto put_unaligned_le64(Val, P)(Val val, P p) { return ((cast(__una_u64 *)(p)).x = cpu_to_le64(val)); }
auto put_unaligned_64(Val, P)(Val val, P p) { return ((cast(__una_u64 *)(p)).x = (val)); }
} else {
align(1) struct __una_le16 { __le16 x; }
align(1) struct __una_le32 { __le32 x; }
align(1) struct __una_le64 { __le64 x; }
align(1) struct __una_u16 { __u16 x; }
align(1) struct __una_u32 { __u32 x; }
align(1) struct __una_u64 { __u64 x; }

auto get_unaligned_le8(P)(P p) { return (*(cast(u8*)(p))); }
auto get_unaligned_8(P)(P p) { return (*(cast(u8*)(p))); }
auto put_unaligned_le8(Val, P)(Val val, P p) { return ((*(cast(u8*)(p))) = (val)); }
auto put_unaligned_8(Val, P)(Val val, P p) { return ((*(cast(u8*)(p))) = (val)); }
auto get_unaligned_le16(P)(P p) { return le16_to_cpu((cast(const __una_le16 *)(p)).x); }
auto get_unaligned_16(P)(P p) { return ((cast(const __una_u16 *)(p)).x); }
auto put_unaligned_le16(Val, P)(Val val, P p) { return ((cast(__una_le16 *)(p)).x = cpu_to_le16(val)); }
auto put_unaligned_16(Val, P)(Val val, P p) { return ((cast(__una_u16 *)(p)).x = (val)); }
auto get_unaligned_le32(P)(P p) { return le32_to_cpu((cast(const __una_le32 *)(p)).x); }
auto get_unaligned_32(P)(P p) { return ((cast(const __una_u32 *)(p)).x); }
auto put_unaligned_le32(Val, P)(Val val, P p) { return ((cast(__una_le32 *)(p)).x = cpu_to_le32(val)); }
auto put_unaligned_32(Val, P)(Val val, P p) { return ((cast(__una_u32 *)(p)).x = (val)); }
auto get_unaligned_le64(P)(P p) { return le64_to_cpu((cast(const __una_le64 *)(p)).x); }
auto get_unaligned_64(P)(P p) { return ((cast(const __una_u64 *)(p)).x); }
auto put_unaligned_le64(Val, P)(Val val, P p) { return ((cast(__una_le64 *)(p)).x = cpu_to_le64(val)); }
auto put_unaligned_64(Val, P)(Val val, P p) { return ((cast(__una_u64 *)(p)).x = (val)); }
}
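
/* A minimal sketch of the unaligned accessors against a plain byte buffer;
 * the native-order _16 variants are used so this compiles in either branch
 * of the static if above. */
unittest
{
	ubyte[8] buf;

	// Store at an odd (unaligned) offset, then read it back.
	put_unaligned_16(cast(u16)0x1234, buf.ptr + 1);
	assert(get_unaligned_16(buf.ptr + 1) == 0x1234);

	put_unaligned_8(cast(u8)0xAB, buf.ptr);
	assert(get_unaligned_8(buf.ptr) == 0xAB);
}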