/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FORMAT_H
#define _BCACHEFS_FORMAT_H
/*
* bcachefs on disk data structures
*
* OVERVIEW:
*
* There are three main types of on disk data structures in bcachefs (this is
* reduced from 5 in bcache)
*
* - superblock
* - journal
* - btree
*
* The btree is the primary structure; most metadata exists as keys in the
 * various btrees. There are only a small number of btrees, and they're not
 * sharded - we have one btree for extents, another for inodes, et cetera.
*
* SUPERBLOCK:
*
* The superblock contains the location of the journal, the list of devices in
* the filesystem, and in general any metadata we need in order to decide
 * whether we can start a filesystem, or that we need prior to reading the
 * journal/btree roots.
*
* The superblock is extensible, and most of the contents of the superblock are
* in variable length, type tagged fields; see struct bch_sb_field.
*
* Backup superblocks do not reside in a fixed location; also, superblocks do
* not have a fixed size. To locate backup superblocks we have struct
* bch_sb_layout; we store a copy of this inside every superblock, and also
* before the first superblock.
*
* JOURNAL:
*
* The journal primarily records btree updates in the order they occurred;
* journal replay consists of just iterating over all the keys in the open
* journal entries and re-inserting them into the btrees.
*
* The journal also contains entry types for the btree roots, and blacklisted
* journal sequence numbers (see journal_seq_blacklist.c).
*
* BTREE:
*
* bcachefs btrees are copy on write b+ trees, where nodes are big (typically
* 128k-256k) and log structured. We use struct btree_node for writing the first
* entry in a given node (offset 0), and struct btree_node_entry for all
* subsequent writes.
*
* After the header, btree node entries contain a list of keys in sorted order.
* Values are stored inline with the keys; since values are variable length (and
* keys effectively are variable length too, due to packing) we can't do random
* access without building up additional in memory tables in the btree node read
* path.
*
* BTREE KEYS (struct bkey):
*
* The various btrees share a common format for the key - so as to avoid
* switching in fastpath lookup/comparison code - but define their own
* structures for the key values.
*
* The size of a key/value pair is stored as a u8 in units of u64s, so the max
* size is just under 2k. The common part also contains a type tag for the
* value, and a format field indicating whether the key is packed or not (and
* also meant to allow adding new key fields in the future, if desired).
*
* bkeys, when stored within a btree node, may also be packed. In that case, the
* bkey_format in that node is used to unpack it. Packed bkeys mean that we can
* be generous with field sizes in the common part of the key format (64 bit
* inode number, 64 bit offset, 96 bit version field, etc.) for negligible cost.
*/
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/kernel.h>
#include <linux/uuid.h>
#include "vstructs.h"
#ifdef __KERNEL__
typedef uuid_t __uuid_t;
#endif
#define BITMASK(name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
\
static inline __u64 name(const type *k) \
{ \
return (k->field >> offset) & ~(~0ULL << (end - offset)); \
} \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
k->field &= ~(~(~0ULL << (end - offset)) << offset); \
k->field |= (v & ~(~0ULL << (end - offset))) << offset; \
}
#define LE_BITMASK(_bits, name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
static const __u##_bits name##_MAX = (1ULL << (end - offset)) - 1; \
\
static inline __u64 name(const type *k) \
{ \
return (__le##_bits##_to_cpu(k->field) >> offset) & \
~(~0ULL << (end - offset)); \
} \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
__u##_bits new = __le##_bits##_to_cpu(k->field); \
\
new &= ~(~(~0ULL << (end - offset)) << offset); \
new |= (v & ~(~0ULL << (end - offset))) << offset; \
k->field = __cpu_to_le##_bits(new); \
}
#define LE16_BITMASK(n, t, f, o, e) LE_BITMASK(16, n, t, f, o, e)
#define LE32_BITMASK(n, t, f, o, e) LE_BITMASK(32, n, t, f, o, e)
#define LE64_BITMASK(n, t, f, o, e) LE_BITMASK(64, n, t, f, o, e)
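/*
 * Illustrative expansion (not part of the format): the invocation
 * LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2) used later in
 * this file generates a getter/setter pair for bits [1, 2) of a
 * little-endian field, roughly:
 *
 *	static inline __u64 BCH_SB_CLEAN(const struct bch_sb *k)
 *	{
 *		return (__le64_to_cpu(k->flags[0]) >> 1) & 1;
 *	}
 *	static inline void SET_BCH_SB_CLEAN(struct bch_sb *k, __u64 v);
 *
 * with the endian conversion handled internally.
 */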
struct bkey_format {
__u8 key_u64s;
__u8 nr_fields;
/* One unused slot for now: */
__u8 bits_per_field[6];
__le64 field_offset[6];
};
/* Btree keys - all units are in sectors */
struct bpos {
/*
* Word order matches machine byte order - btree code treats a bpos as a
* single large integer, for search/comparison purposes
*
* Note that wherever a bpos is embedded in another on disk data
* structure, it has to be byte swabbed when reading in metadata that
* wasn't written in native endian order:
*/
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u32 snapshot;
__u64 offset;
__u64 inode;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
__u64 inode;
__u64 offset; /* Points to end of extent - sectors */
__u32 snapshot;
#else
#error edit for your odd byteorder.
#endif
} __packed __aligned(4);
#define KEY_INODE_MAX ((__u64)~0ULL)
#define KEY_OFFSET_MAX ((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX ((__u32)~0U)
#define KEY_SIZE_MAX ((__u32)~0U)
static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
{
return (struct bpos) {
.inode = inode,
.offset = offset,
.snapshot = snapshot,
};
}
#define POS_MIN SPOS(0, 0, 0)
#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
#define SPOS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
#define POS(_inode, _offset) SPOS(_inode, _offset, 0)
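/*
 * Example (illustrative): an extent in inode 42 covering sectors [8, 16)
 * has p = SPOS(42, 16, snapshot) and size = 8 - per the note on
 * bpos.offset above, the position points to the *end* of the extent.
 */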
/* Empty placeholder struct, for container_of() */
struct bch_val {
__u64 __nothing[0];
};
struct bversion {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u64 lo;
__u32 hi;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
__u32 hi;
__u64 lo;
#endif
} __packed __aligned(4);
struct bkey {
/* Size of combined key and value, in u64s */
__u8 u64s;
/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 format:7,
needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u8 needs_whiteout:1,
format:7;
#else
#error edit for your odd byteorder.
#endif
/* Type of the value */
__u8 type;
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
__u8 pad[1];
struct bversion version;
__u32 size; /* extent size, in sectors */
struct bpos p;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
struct bpos p;
__u32 size; /* extent size, in sectors */
struct bversion version;
__u8 pad[1];
#endif
} __packed __aligned(8);
struct bkey_packed {
__u64 _data[0];
/* Size of combined key and value, in u64s */
__u8 u64s;
/* Format of key (0 for format local to btree node) */
/*
* XXX: next incompat on disk format change, switch format and
* needs_whiteout - bkey_packed() will be cheaper if format is the high
* bits of the bitfield
*/
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 format:7,
needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u8 needs_whiteout:1,
format:7;
#endif
/* Type of the value */
__u8 type;
__u8 key_start[0];
/*
* We copy bkeys with struct assignment in various places, and while
* that shouldn't be done with packed bkeys we can't disallow it in C,
* and it's legal to cast a bkey to a bkey_packed - so padding it out
* to the same size as struct bkey should hopefully be safest.
*/
__u8 pad[sizeof(struct bkey) - 3];
} __packed __aligned(8);
typedef struct {
__le64 lo;
__le64 hi;
} bch_le128;
#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
#define BKEY_U64s_MAX U8_MAX
#define BKEY_VAL_U64s_MAX (BKEY_U64s_MAX - BKEY_U64s)
#define KEY_PACKED_BITS_START 24
#define KEY_FORMAT_LOCAL_BTREE 0
#define KEY_FORMAT_CURRENT 1
enum bch_bkey_fields {
BKEY_FIELD_INODE,
BKEY_FIELD_OFFSET,
BKEY_FIELD_SNAPSHOT,
BKEY_FIELD_SIZE,
BKEY_FIELD_VERSION_HI,
BKEY_FIELD_VERSION_LO,
BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field) \
[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
#define BKEY_FORMAT_CURRENT \
((struct bkey_format) { \
.key_u64s = BKEY_U64s, \
.nr_fields = BKEY_NR_FIELDS, \
.bits_per_field = { \
bkey_format_field(INODE, p.inode), \
bkey_format_field(OFFSET, p.offset), \
bkey_format_field(SNAPSHOT, p.snapshot), \
bkey_format_field(SIZE, size), \
bkey_format_field(VERSION_HI, version.hi), \
bkey_format_field(VERSION_LO, version.lo), \
}, \
})
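/*
 * BKEY_FORMAT_CURRENT is the identity format: every field gets the full
 * width it has in struct bkey, so packing with it is a no-op. Btree nodes
 * typically define a narrower local format (e.g. zero bits for a field
 * that's constant within the node) to shrink keys.
 */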
/* bkey with inline value */
struct bkey_i {
__u64 _data[0];
struct bkey k;
struct bch_val v;
};
#define KEY(_inode, _offset, _size) \
((struct bkey) { \
.u64s = BKEY_U64s, \
.format = KEY_FORMAT_CURRENT, \
.p = POS(_inode, _offset), \
.size = _size, \
})
static inline void bkey_init(struct bkey *k)
{
*k = KEY(0, 0, 0);
}
#define bkey_bytes(_k) ((_k)->u64s * sizeof(__u64))
#define __BKEY_PADDED(key, pad) \
struct bkey_i key; __u64 key ## _pad[pad]
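/*
 * Example usage (illustrative): stack-allocating a bkey_i with room for a
 * maximum-size extent value, using the MAX constant defined below:
 *
 *	struct { __BKEY_PADDED(k, BKEY_EXTENT_VAL_U64s_MAX); } tmp;
 *
 *	bkey_init(&tmp.k.k);
 */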
/*
* - DELETED keys are used internally to mark keys that should be ignored but
* override keys in composition order. Their version number is ignored.
*
* - DISCARDED keys indicate that the data is all 0s because it has been
* discarded. DISCARDs may have a version; if the version is nonzero the key
* will be persistent, otherwise the key will be dropped whenever the btree
* node is rewritten (like DELETED keys).
*
* - ERROR: any read of the data returns a read error, as the data was lost due
* to a failing device. Like DISCARDED keys, they can be removed (overridden)
* by new writes or cluster-wide GC. Node repair can also overwrite them with
* the same or a more recent version number, but not with an older version
* number.
*
* - WHITEOUT: for hash table btrees
*/
#define BCH_BKEY_TYPES() \
x(deleted, 0) \
x(whiteout, 1) \
x(error, 2) \
x(cookie, 3) \
x(hash_whiteout, 4) \
x(btree_ptr, 5) \
x(extent, 6) \
x(reservation, 7) \
x(inode, 8) \
x(inode_generation, 9) \
x(dirent, 10) \
x(xattr, 11) \
x(alloc, 12) \
x(quota, 13) \
x(stripe, 14) \
x(reflink_p, 15) \
x(reflink_v, 16) \
x(inline_data, 17) \
x(btree_ptr_v2, 18) \
x(indirect_inline_data, 19) \
x(alloc_v2, 20) \
x(subvolume, 21) \
x(snapshot, 22) \
x(inode_v2, 23) \
x(alloc_v3, 24) \
x(set, 25) \
x(lru, 26) \
x(alloc_v4, 27) \
x(backpointer, 28) \
x(inode_v3, 29) \
x(bucket_gens, 30) \
x(snapshot_tree, 31) \
x(logged_op_truncate, 32) \
x(logged_op_finsert, 33)
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
BCH_BKEY_TYPES()
#undef x
KEY_TYPE_MAX,
};
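/*
 * Illustrative expansion: with the x() definition above, the enum comes out
 * as KEY_TYPE_deleted = 0, KEY_TYPE_whiteout = 1, KEY_TYPE_error = 2, ... -
 * the same x-macro pattern is used for most enums in this file.
 */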
struct bch_deleted {
struct bch_val v;
};
struct bch_whiteout {
struct bch_val v;
};
struct bch_error {
struct bch_val v;
};
struct bch_cookie {
struct bch_val v;
__le64 cookie;
};
struct bch_hash_whiteout {
struct bch_val v;
};
struct bch_set {
struct bch_val v;
};
/* Extents */
/*
* In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
* preceded by checksum/compression information (bch_extent_crc32 or
* bch_extent_crc64).
*
* One major determining factor in the format of extents is how we handle and
* represent extents that have been partially overwritten and thus trimmed:
*
* If an extent is not checksummed or compressed, when the extent is trimmed we
* don't have to remember the extent we originally allocated and wrote: we can
* merely adjust ptr->offset to point to the start of the data that is currently
* live. The size field in struct bkey records the current (live) size of the
* extent, and is also used to mean "size of region on disk that we point to" in
* this case.
*
* Thus an extent that is not checksummed or compressed will consist only of a
* list of bch_extent_ptrs, with none of the fields in
* bch_extent_crc32/bch_extent_crc64.
*
* When an extent is checksummed or compressed, it's not possible to read only
* the data that is currently live: we have to read the entire extent that was
* originally written, and then return only the part of the extent that is
* currently live.
*
* Thus, in addition to the current size of the extent in struct bkey, we need
* to store the size of the originally allocated space - this is the
* compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
* when the extent is trimmed, instead of modifying the offset field of the
* pointer, we keep a second smaller offset field - "offset into the original
* extent of the currently live region".
*
* The other major determining factor is replication and data migration:
*
* Each pointer may have its own bch_extent_crc32/64. When doing a replicated
* write, we will initially write all the replicas in the same format, with the
* same checksum type and compression format - however, when copygc runs later (or
* tiering/cache promotion, anything that moves data), it is not in general
* going to rewrite all the pointers at once - one of the replicas may be in a
* bucket on one device that has very little fragmentation while another lives
* in a bucket that has become heavily fragmented, and thus is being rewritten
* sooner than the rest.
*
* Thus it will only move a subset of the pointers (or in the case of
* tiering/cache promotion perhaps add a single pointer without dropping any
* current pointers), and if the extent has been partially overwritten it must
* write only the currently live portion (or copygc would not be able to reduce
* fragmentation!) - which necessitates a different bch_extent_crc format for
* the new pointer.
*
* But in the interests of space efficiency, we don't want to store one
* bch_extent_crc for each pointer if we don't have to.
*
* Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s, and
* bch_extent_ptrs appended arbitrarily one after the other. We determine the
* type of a given entry with a scheme similar to utf8 (except we're encoding a
* type, not a size), encoding the type in the position of the first set bit:
*
 * bch_extent_ptr	- 0b1
 * bch_extent_crc32	- 0b10
 * bch_extent_crc64	- 0b100
 *
 * (matching the BCH_EXTENT_ENTRY_TYPES() numbering below). We do it this way
 * because the entries that are _very_ constrained on bits (bch_extent_ptr,
 * bch_extent_crc32) get the shortest tags, and bch_extent_crc64 is the least
 * constrained.
*
* Then, each bch_extent_crc32/64 applies to the pointers that follow after it,
* until the next bch_extent_crc32/64.
*
* If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
* is neither checksummed nor compressed.
*/
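/*
 * A minimal sketch of decoding (illustrative; the in-tree helpers live
 * elsewhere): since the type tag is one-hot, the entry type is the position
 * of the lowest set bit:
 *
 *	static inline enum bch_extent_entry_type
 *	extent_entry_type(const union bch_extent_entry *e)
 *	{
 *		return __ffs(e->type);	// 0b1 -> ptr, 0b10 -> crc32, ...
 *	}
 */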
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
__le64 lo;
__le64 hi;
} __packed __aligned(8);
#define BCH_EXTENT_ENTRY_TYPES() \
x(ptr, 0) \
x(crc32, 1) \
x(crc64, 2) \
x(crc128, 3) \
x(stripe_ptr, 4) \
x(rebalance, 5)
#define BCH_EXTENT_ENTRY_MAX 6
enum bch_extent_entry_type {
#define x(f, n) BCH_EXTENT_ENTRY_##f = n,
BCH_EXTENT_ENTRY_TYPES()
#undef x
};
/*
 * Compressed/uncompressed size are stored biased by 1: a stored value of 0
 * means 1 sector, so a 7 bit field covers 1..128 sectors (CRC32_SIZE_MAX).
 */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u32 type:2,
_compressed_size:7,
_uncompressed_size:7,
offset:7,
_unused:1,
csum_type:4,
compression_type:4;
__u32 csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u32 csum;
__u32 compression_type:4,
csum_type:4,
_unused:1,
offset:7,
_uncompressed_size:7,
_compressed_size:7,
type:2;
#endif
} __packed __aligned(8);
#define CRC32_SIZE_MAX (1U << 7)
#define CRC32_NONCE_MAX 0
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:3,
_compressed_size:9,
_uncompressed_size:9,
offset:9,
nonce:10,
csum_type:4,
compression_type:4,
csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 csum_hi:16,
compression_type:4,
csum_type:4,
nonce:10,
offset:9,
_uncompressed_size:9,
_compressed_size:9,
type:3;
#endif
__u64 csum_lo;
} __packed __aligned(8);
#define CRC64_SIZE_MAX (1U << 9)
#define CRC64_NONCE_MAX ((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:4,
_compressed_size:13,
_uncompressed_size:13,
offset:13,
nonce:13,
csum_type:4,
compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 compression_type:4,
csum_type:4,
nonce:13,
offset:13,
_uncompressed_size:13,
_compressed_size:13,
type:4;
#endif
struct bch_csum csum;
} __packed __aligned(8);
#define CRC128_SIZE_MAX (1U << 13)
#define CRC128_NONCE_MAX ((1U << 13) - 1)
/*
 * @unwritten - pointer hasn't been written to, just reserved
*/
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:1,
cached:1,
unused:1,
unwritten:1,
offset:44, /* 8 petabytes */
dev:8,
gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 gen:8,
dev:8,
offset:44,
unwritten:1,
unused:1,
cached:1,
type:1;
#endif
} __packed __aligned(8);
struct bch_extent_stripe_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:5,
block:8,
redundancy:4,
idx:47;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 idx:47,
redundancy:4,
block:8,
type:5;
#endif
};
struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:6,
unused:22,
replicas:4,
generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 generation:32,
replicas:4,
unused:22,
type:6;
#endif
};
struct bch_extent_rebalance {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:7,
unused:33,
compression:8,
target:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 target:16,
compression:8,
unused:33,
type:7;
#endif
};
union bch_extent_entry {
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ || __BITS_PER_LONG == 64
unsigned long type;
#elif __BITS_PER_LONG == 32
struct {
unsigned long pad;
unsigned long type;
};
#else
#error edit for your odd byteorder.
#endif
#define x(f, n) struct bch_extent_##f f;
BCH_EXTENT_ENTRY_TYPES()
#undef x
};
struct bch_btree_ptr {
struct bch_val v;
__u64 _data[0];
struct bch_extent_ptr start[];
} __packed __aligned(8);
struct bch_btree_ptr_v2 {
struct bch_val v;
__u64 mem_ptr;
__le64 seq;
__le16 sectors_written;
__le16 flags;
struct bpos min_key;
__u64 _data[0];
struct bch_extent_ptr start[];
} __packed __aligned(8);
LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
struct bch_extent {
struct bch_val v;
__u64 _data[0];
union bch_extent_entry start[];
} __packed __aligned(8);
struct bch_reservation {
struct bch_val v;
__le32 generation;
__u8 nr_replicas;
__u8 pad[3];
} __packed __aligned(8);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX\
((sizeof(struct bch_extent_crc128) + \
sizeof(struct bch_extent_ptr)) / sizeof(__u64))
/* Maximum possible size of an entire extent value: */
#define BKEY_EXTENT_VAL_U64s_MAX \
(1 + BKEY_EXTENT_PTR_U64s_MAX * (BCH_REPLICAS_MAX + 1))
/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX \
((sizeof(struct bch_btree_ptr_v2) + \
sizeof(struct bch_extent_ptr) * BCH_REPLICAS_MAX) / sizeof(__u64))
#define BKEY_BTREE_PTR_U64s_MAX \
(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
/* Inodes */
#define BLOCKDEV_INODE_MAX 4096
#define BCACHEFS_ROOT_INO 4096
struct bch_inode {
struct bch_val v;
__le64 bi_hash_seed;
__le32 bi_flags;
__le16 bi_mode;
__u8 fields[];
} __packed __aligned(8);
struct bch_inode_v2 {
struct bch_val v;
__le64 bi_journal_seq;
__le64 bi_hash_seed;
__le64 bi_flags;
__le16 bi_mode;
__u8 fields[];
} __packed __aligned(8);
struct bch_inode_v3 {
struct bch_val v;
__le64 bi_journal_seq;
__le64 bi_hash_seed;
__le64 bi_flags;
__le64 bi_sectors;
__le64 bi_size;
__le64 bi_version;
__u8 fields[];
} __packed __aligned(8);
#define INODEv3_FIELDS_START_INITIAL 6
#define INODEv3_FIELDS_START_CUR (offsetof(struct bch_inode_v3, fields) / sizeof(__u64))
struct bch_inode_generation {
struct bch_val v;
__le32 bi_generation;
__le32 pad;
} __packed __aligned(8);
/*
* bi_subvol and bi_parent_subvol are only set for subvolume roots:
*/
#define BCH_INODE_FIELDS_v2() \
x(bi_atime, 96) \
x(bi_ctime, 96) \
x(bi_mtime, 96) \
x(bi_otime, 96) \
x(bi_size, 64) \
x(bi_sectors, 64) \
x(bi_uid, 32) \
x(bi_gid, 32) \
x(bi_nlink, 32) \
x(bi_generation, 32) \
x(bi_dev, 32) \
x(bi_data_checksum, 8) \
x(bi_compression, 8) \
x(bi_project, 32) \
x(bi_background_compression, 8) \
x(bi_data_replicas, 8) \
x(bi_promote_target, 16) \
x(bi_foreground_target, 16) \
x(bi_background_target, 16) \
x(bi_erasure_code, 16) \
x(bi_fields_set, 16) \
x(bi_dir, 64) \
x(bi_dir_offset, 64) \
x(bi_subvol, 32) \
x(bi_parent_subvol, 32)
#define BCH_INODE_FIELDS_v3() \
x(bi_atime, 96) \
x(bi_ctime, 96) \
x(bi_mtime, 96) \
x(bi_otime, 96) \
x(bi_uid, 32) \
x(bi_gid, 32) \
x(bi_nlink, 32) \
x(bi_generation, 32) \
x(bi_dev, 32) \
x(bi_data_checksum, 8) \
x(bi_compression, 8) \
x(bi_project, 32) \
x(bi_background_compression, 8) \
x(bi_data_replicas, 8) \
x(bi_promote_target, 16) \
x(bi_foreground_target, 16) \
x(bi_background_target, 16) \
x(bi_erasure_code, 16) \
x(bi_fields_set, 16) \
x(bi_dir, 64) \
x(bi_dir_offset, 64) \
x(bi_subvol, 32) \
x(bi_parent_subvol, 32) \
x(bi_nocow, 8)
/* subset of BCH_INODE_FIELDS */
#define BCH_INODE_OPTS() \
x(data_checksum, 8) \
x(compression, 8) \
x(project, 32) \
x(background_compression, 8) \
x(data_replicas, 8) \
x(promote_target, 16) \
x(foreground_target, 16) \
x(background_target, 16) \
x(erasure_code, 16) \
x(nocow, 8)
enum inode_opt_id {
#define x(name, ...) \
Inode_opt_##name,
BCH_INODE_OPTS()
#undef x
Inode_opt_nr,
};
enum {
/*
* User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
* flags)
*/
__BCH_INODE_SYNC = 0,
__BCH_INODE_IMMUTABLE = 1,
__BCH_INODE_APPEND = 2,
__BCH_INODE_NODUMP = 3,
__BCH_INODE_NOATIME = 4,
__BCH_INODE_I_SIZE_DIRTY = 5, /* obsolete */
__BCH_INODE_I_SECTORS_DIRTY = 6, /* obsolete */
__BCH_INODE_UNLINKED = 7,
__BCH_INODE_BACKPTR_UNTRUSTED = 8,
/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC (1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE (1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND (1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP (1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME (1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_UNLINKED (1 << __BCH_INODE_UNLINKED)
#define BCH_INODE_BACKPTR_UNTRUSTED (1 << __BCH_INODE_BACKPTR_UNTRUSTED)
LE32_BITMASK(INODE_STR_HASH, struct bch_inode, bi_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, bi_flags, 24, 31);
LE32_BITMASK(INODE_NEW_VARINT, struct bch_inode, bi_flags, 31, 32);
LE64_BITMASK(INODEv2_STR_HASH, struct bch_inode_v2, bi_flags, 20, 24);
LE64_BITMASK(INODEv2_NR_FIELDS, struct bch_inode_v2, bi_flags, 24, 31);
LE64_BITMASK(INODEv3_STR_HASH, struct bch_inode_v3, bi_flags, 20, 24);
LE64_BITMASK(INODEv3_NR_FIELDS, struct bch_inode_v3, bi_flags, 24, 31);
LE64_BITMASK(INODEv3_FIELDS_START,
struct bch_inode_v3, bi_flags, 31, 36);
LE64_BITMASK(INODEv3_MODE, struct bch_inode_v3, bi_flags, 36, 52);
/* Dirents */
/*
* Dirents (and xattrs) have to implement string lookups; since our b-tree
* doesn't support arbitrary length strings for the key, we instead index by a
* 64 bit hash (currently truncated sha1) of the string, stored in the offset
* field of the key - using linear probing to resolve hash collisions. This also
 * provides us with the readdir cookie POSIX requires.
*
* Linear probing requires us to use whiteouts for deletions, in the event of a
* collision:
*/
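/*
 * Worked example (illustrative): if "a" and "b" hash to the same offset h,
 * "b" is stored at h + 1; deleting "a" must leave a whiteout at h, or a
 * lookup for "b" would stop probing early and miss it.
 */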
struct bch_dirent {
struct bch_val v;
/* Target inode number: */
union {
__le64 d_inum;
struct { /* DT_SUBVOL */
__le32 d_child_subvol;
__le32 d_parent_subvol;
};
};
/*
* Copy of mode bits 12-15 from the target inode - so userspace can get
* the filetype without having to do a stat()
*/
__u8 d_type;
__u8 d_name[];
} __packed __aligned(8);
#define DT_SUBVOL 16
#define BCH_DT_MAX 17
#define BCH_NAME_MAX 512
/* Xattrs */
#define KEY_TYPE_XATTR_INDEX_USER 0
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS 1
#define KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT 2
#define KEY_TYPE_XATTR_INDEX_TRUSTED 3
#define KEY_TYPE_XATTR_INDEX_SECURITY 4
struct bch_xattr {
struct bch_val v;
__u8 x_type;
__u8 x_name_len;
__le16 x_val_len;
__u8 x_name[];
} __packed __aligned(8);
/* Bucket/allocation information: */
struct bch_alloc {
struct bch_val v;
__u8 fields;
__u8 gen;
__u8 data[];
} __packed __aligned(8);
#define BCH_ALLOC_FIELDS_V1() \
x(read_time, 16) \
x(write_time, 16) \
x(data_type, 8) \
x(dirty_sectors, 16) \
x(cached_sectors, 16) \
x(oldest_gen, 8) \
x(stripe, 32) \
x(stripe_redundancy, 8)
enum {
#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
BCH_ALLOC_FIELDS_V1()
#undef x
};
struct bch_alloc_v2 {
struct bch_val v;
__u8 nr_fields;
__u8 gen;
__u8 oldest_gen;
__u8 data_type;
__u8 data[];
} __packed __aligned(8);
#define BCH_ALLOC_FIELDS_V2() \
x(read_time, 64) \
x(write_time, 64) \
x(dirty_sectors, 32) \
x(cached_sectors, 32) \
x(stripe, 32) \
x(stripe_redundancy, 8)
struct bch_alloc_v3 {
struct bch_val v;
__le64 journal_seq;
__le32 flags;
__u8 nr_fields;
__u8 gen;
__u8 oldest_gen;
__u8 data_type;
__u8 data[];
} __packed __aligned(8);
LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
struct bch_alloc_v4 {
struct bch_val v;
__u64 journal_seq;
__u32 flags;
__u8 gen;
__u8 oldest_gen;
__u8 data_type;
__u8 stripe_redundancy;
__u32 dirty_sectors;
__u32 cached_sectors;
__u64 io_time[2];
__u32 stripe;
__u32 nr_external_backpointers;
__u64 fragmentation_lru;
} __packed __aligned(8);
#define BCH_ALLOC_V4_U64s_V0 6
#define BCH_ALLOC_V4_U64s (sizeof(struct bch_alloc_v4) / sizeof(__u64))
BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
#define BCH_ALLOC_V4_NR_BACKPOINTERS_MAX 40
struct bch_backpointer {
struct bch_val v;
__u8 btree_id;
__u8 level;
__u8 data_type;
__u64 bucket_offset:40;
__u32 bucket_len;
struct bpos pos;
} __packed __aligned(8);
#define KEY_TYPE_BUCKET_GENS_BITS 8
#define KEY_TYPE_BUCKET_GENS_NR (1U << KEY_TYPE_BUCKET_GENS_BITS)
#define KEY_TYPE_BUCKET_GENS_MASK (KEY_TYPE_BUCKET_GENS_NR - 1)
struct bch_bucket_gens {
struct bch_val v;
u8 gens[KEY_TYPE_BUCKET_GENS_NR];
} __packed __aligned(8);
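/*
 * A sketch of the indexing (assuming the natural packing): bucket b's
 * generation lives in the key at offset b >> KEY_TYPE_BUCKET_GENS_BITS,
 * at gens[b & KEY_TYPE_BUCKET_GENS_MASK] - i.e. 256 buckets per key.
 */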
/* Quotas: */
enum quota_types {
QTYP_USR = 0,
QTYP_GRP = 1,
QTYP_PRJ = 2,
QTYP_NR = 3,
};
enum quota_counters {
Q_SPC = 0,
Q_INO = 1,
Q_COUNTERS = 2,
};
struct bch_quota_counter {
__le64 hardlimit;
__le64 softlimit;
};
struct bch_quota {
struct bch_val v;
struct bch_quota_counter c[Q_COUNTERS];
} __packed __aligned(8);
/* Erasure coding */
struct bch_stripe {
struct bch_val v;
__le16 sectors;
__u8 algorithm;
__u8 nr_blocks;
__u8 nr_redundant;
__u8 csum_granularity_bits;
__u8 csum_type;
__u8 pad;
struct bch_extent_ptr ptrs[];
} __packed __aligned(8);
/* Reflink: */
struct bch_reflink_p {
struct bch_val v;
__le64 idx;
/*
* A reflink pointer might point to an indirect extent which is then
* later split (by copygc or rebalance). If we only pointed to part of
* the original indirect extent, and then one of the fragments is
* outside the range we point to, we'd leak a refcount: so when creating
* reflink pointers, we need to store pad values to remember the full
* range we were taking a reference on.
*/
__le32 front_pad;
__le32 back_pad;
} __packed __aligned(8);
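/*
 * In other words (a sketch of the intent, not the exact arithmetic): the
 * refcount was taken on the widened range described by front_pad/back_pad,
 * so trimming the live pointer can't orphan references at either end.
 */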
struct bch_reflink_v {
struct bch_val v;
__le64 refcount;
union bch_extent_entry start[0];
__u64 _data[];
} __packed __aligned(8);
struct bch_indirect_inline_data {
struct bch_val v;
__le64 refcount;
u8 data[];
};
/* Inline data */
struct bch_inline_data {
struct bch_val v;
u8 data[];
};
/* Subvolumes: */
#define SUBVOL_POS_MIN POS(0, 1)
#define SUBVOL_POS_MAX POS(0, S32_MAX)
#define BCACHEFS_ROOT_SUBVOL 1
struct bch_subvolume {
struct bch_val v;
__le32 flags;
__le32 snapshot;
__le64 inode;
/*
* Snapshot subvolumes form a tree, separate from the snapshot nodes
* tree - if this subvolume is a snapshot, this is the ID of the
* subvolume it was created from:
*/
__le32 parent;
__le32 pad;
bch_le128 otime;
};
LE32_BITMASK(BCH_SUBVOLUME_RO, struct bch_subvolume, flags, 0, 1)
/*
* We need to know whether a subvolume is a snapshot so we can know whether we
* can delete it (or whether it should just be rm -rf'd)
*/
LE32_BITMASK(BCH_SUBVOLUME_SNAP, struct bch_subvolume, flags, 1, 2)
LE32_BITMASK(BCH_SUBVOLUME_UNLINKED, struct bch_subvolume, flags, 2, 3)
/* Snapshots */
struct bch_snapshot {
struct bch_val v;
__le32 flags;
__le32 parent;
__le32 children[2];
__le32 subvol;
/* corresponds to a bch_snapshot_tree in BTREE_ID_snapshot_trees */
__le32 tree;
__le32 depth;
__le32 skip[3];
};
LE32_BITMASK(BCH_SNAPSHOT_DELETED, struct bch_snapshot, flags, 0, 1)
/* True if a subvolume points to this snapshot node: */
LE32_BITMASK(BCH_SNAPSHOT_SUBVOL, struct bch_snapshot, flags, 1, 2)
/*
* Snapshot trees:
*
 * The snapshot_trees btree gives us a persistent identifier for each tree of
 * bch_snapshot nodes, and allows us to record and easily find the root/master
 * subvolume that other snapshots were created from:
*/
struct bch_snapshot_tree {
struct bch_val v;
__le32 master_subvol;
__le32 root_snapshot;
};
/* LRU btree: */
struct bch_lru {
struct bch_val v;
__le64 idx;
} __packed __aligned(8);
#define LRU_ID_STRIPES (1U << 16)
/* Logged operations btree: */
struct bch_logged_op_truncate {
struct bch_val v;
__le32 subvol;
__le32 pad;
__le64 inum;
__le64 new_i_size;
};
enum logged_op_finsert_state {
LOGGED_OP_FINSERT_start,
LOGGED_OP_FINSERT_shift_extents,
LOGGED_OP_FINSERT_finish,
};
struct bch_logged_op_finsert {
struct bch_val v;
__u8 state;
__u8 pad[3];
__le32 subvol;
__le64 inum;
__le64 dst_offset;
__le64 src_offset;
__le64 pos;
};
/* Optional/variable size superblock sections: */
struct bch_sb_field {
__u64 _data[0];
__le32 u64s;
__le32 type;
};
#define BCH_SB_FIELDS() \
x(journal, 0) \
x(members, 1) \
x(crypt, 2) \
x(replicas_v0, 3) \
x(quota, 4) \
x(disk_groups, 5) \
x(clean, 6) \
x(replicas, 7) \
x(journal_seq_blacklist, 8) \
x(journal_v2, 9) \
x(counters, 10)
enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
BCH_SB_FIELDS()
#undef x
BCH_SB_FIELD_NR
};
/*
 * Most superblock fields are replicated in all devices' superblocks - a few are
* not:
*/
#define BCH_SINGLE_DEVICE_SB_FIELDS \
((1U << BCH_SB_FIELD_journal)| \
(1U << BCH_SB_FIELD_journal_v2))
/* BCH_SB_FIELD_journal: */
struct bch_sb_field_journal {
struct bch_sb_field field;
__le64 buckets[];
};
struct bch_sb_field_journal_v2 {
struct bch_sb_field field;
struct bch_sb_field_journal_v2_entry {
__le64 start;
__le64 nr;
} d[];
};
/* BCH_SB_FIELD_members: */
#define BCH_MIN_NR_NBUCKETS (1 << 6)
struct bch_member {
__uuid_t uuid;
__le64 nbuckets; /* device size */
__le16 first_bucket; /* index of first bucket used */
__le16 bucket_size; /* sectors */
__le32 pad;
__le64 last_mount; /* time_t */
__le64 flags[2];
};
LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags[0], 0, 4)
/* 4-14 unused, was TIER, HAS_(META)DATA, REPLACEMENT */
LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags[0], 14, 15)
LE64_BITMASK(BCH_MEMBER_DATA_ALLOWED, struct bch_member, flags[0], 15, 20)
LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags[0], 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags[0], 28, 30)
LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
struct bch_member, flags[0], 30, 31)
#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif
#define BCH_MEMBER_STATES() \
x(rw, 0) \
x(ro, 1) \
x(failed, 2) \
x(spare, 3)
enum bch_member_state {
#define x(t, n) BCH_MEMBER_STATE_##t = n,
BCH_MEMBER_STATES()
#undef x
BCH_MEMBER_STATE_NR
};
struct bch_sb_field_members {
struct bch_sb_field field;
struct bch_member members[];
};
/* BCH_SB_FIELD_crypt: */
struct nonce {
__le32 d[4];
};
struct bch_key {
__le64 key[4];
};
#define BCH_KEY_MAGIC \
(((__u64) 'b' << 0)|((__u64) 'c' << 8)| \
((__u64) 'h' << 16)|((__u64) '*' << 24)| \
((__u64) '*' << 32)|((__u64) 'k' << 40)| \
((__u64) 'e' << 48)|((__u64) 'y' << 56))
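/* i.e. the bytes "bch**key" when the field is read as little-endian */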
struct bch_encrypted_key {
__le64 magic;
struct bch_key key;
};
/*
 * If this field is present in the superblock, it stores an encryption key that
 * is used to encrypt all other data/metadata. The key will normally be encrypted
* with the key userspace provides, but if encryption has been turned off we'll
* just store the master key unencrypted in the superblock so we can access the
* previously encrypted data.
*/
struct bch_sb_field_crypt {
struct bch_sb_field field;
__le64 flags;
__le64 kdf_flags;
struct bch_encrypted_key key;
};
LE64_BITMASK(BCH_CRYPT_KDF_TYPE, struct bch_sb_field_crypt, flags, 0, 4);
enum bch_kdf_types {
BCH_KDF_SCRYPT = 0,
BCH_KDF_NR = 1,
};
/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
/* BCH_SB_FIELD_replicas: */
#define BCH_DATA_TYPES() \
x(free, 0) \
x(sb, 1) \
x(journal, 2) \
x(btree, 3) \
x(user, 4) \
x(cached, 5) \
x(parity, 6) \
x(stripe, 7) \
x(need_gc_gens, 8) \
x(need_discard, 9)
enum bch_data_type {
#define x(t, n) BCH_DATA_##t,
BCH_DATA_TYPES()
#undef x
BCH_DATA_NR
};
static inline bool data_type_is_empty(enum bch_data_type type)
{
switch (type) {
case BCH_DATA_free:
case BCH_DATA_need_gc_gens:
case BCH_DATA_need_discard:
return true;
default:
return false;
}
}
static inline bool data_type_is_hidden(enum bch_data_type type)
{
switch (type) {
case BCH_DATA_sb:
case BCH_DATA_journal:
return true;
default:
return false;
}
}
struct bch_replicas_entry_v0 {
__u8 data_type;
__u8 nr_devs;
__u8 devs[];
} __packed;
struct bch_sb_field_replicas_v0 {
struct bch_sb_field field;
struct bch_replicas_entry_v0 entries[];
} __packed __aligned(8);
struct bch_replicas_entry {
__u8 data_type;
__u8 nr_devs;
__u8 nr_required;
__u8 devs[];
} __packed;
#define replicas_entry_bytes(_i) \
(offsetof(typeof(*(_i)), devs) + (_i)->nr_devs)
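/*
 * Example (illustrative): an entry with nr_devs == 2 occupies
 * 3 + 2 = 5 bytes; entries are variable length and packed end to end.
 */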
struct bch_sb_field_replicas {
struct bch_sb_field field;
struct bch_replicas_entry entries[];
} __packed __aligned(8);
/* BCH_SB_FIELD_quota: */
struct bch_sb_quota_counter {
__le32 timelimit;
__le32 warnlimit;
};
struct bch_sb_quota_type {
__le64 flags;
struct bch_sb_quota_counter c[Q_COUNTERS];
};
struct bch_sb_field_quota {
struct bch_sb_field field;
struct bch_sb_quota_type q[QTYP_NR];
} __packed __aligned(8);
/* BCH_SB_FIELD_disk_groups: */
#define BCH_SB_LABEL_SIZE 32
struct bch_disk_group {
__u8 label[BCH_SB_LABEL_SIZE];
__le64 flags[2];
} __packed __aligned(8);
LE64_BITMASK(BCH_GROUP_DELETED, struct bch_disk_group, flags[0], 0, 1)
LE64_BITMASK(BCH_GROUP_DATA_ALLOWED, struct bch_disk_group, flags[0], 1, 6)
LE64_BITMASK(BCH_GROUP_PARENT, struct bch_disk_group, flags[0], 6, 24)
struct bch_sb_field_disk_groups {
struct bch_sb_field field;
struct bch_disk_group entries[];
} __packed __aligned(8);
/* BCH_SB_FIELD_counters */
#define BCH_PERSISTENT_COUNTERS() \
x(io_read, 0) \
x(io_write, 1) \
x(io_move, 2) \
x(bucket_invalidate, 3) \
x(bucket_discard, 4) \
x(bucket_alloc, 5) \
x(bucket_alloc_fail, 6) \
x(btree_cache_scan, 7) \
x(btree_cache_reap, 8) \
x(btree_cache_cannibalize, 9) \
x(btree_cache_cannibalize_lock, 10) \
x(btree_cache_cannibalize_lock_fail, 11) \
x(btree_cache_cannibalize_unlock, 12) \
x(btree_node_write, 13) \
x(btree_node_read, 14) \
x(btree_node_compact, 15) \
x(btree_node_merge, 16) \
x(btree_node_split, 17) \
x(btree_node_rewrite, 18) \
x(btree_node_alloc, 19) \
x(btree_node_free, 20) \
x(btree_node_set_root, 21) \
x(btree_path_relock_fail, 22) \
x(btree_path_upgrade_fail, 23) \
x(btree_reserve_get_fail, 24) \
x(journal_entry_full, 25) \
x(journal_full, 26) \
x(journal_reclaim_finish, 27) \
x(journal_reclaim_start, 28) \
x(journal_write, 29) \
x(read_promote, 30) \
x(read_bounce, 31) \
x(read_split, 33) \
x(read_retry, 32) \
x(read_reuse_race, 34) \
x(move_extent_read, 35) \
x(move_extent_write, 36) \
x(move_extent_finish, 37) \
x(move_extent_fail, 38) \
x(move_extent_alloc_mem_fail, 39) \
x(copygc, 40) \
x(copygc_wait, 41) \
x(gc_gens_end, 42) \
x(gc_gens_start, 43) \
x(trans_blocked_journal_reclaim, 44) \
x(trans_restart_btree_node_reused, 45) \
x(trans_restart_btree_node_split, 46) \
x(trans_restart_fault_inject, 47) \
x(trans_restart_iter_upgrade, 48) \
x(trans_restart_journal_preres_get, 49) \
x(trans_restart_journal_reclaim, 50) \
x(trans_restart_journal_res_get, 51) \
x(trans_restart_key_cache_key_realloced, 52) \
x(trans_restart_key_cache_raced, 53) \
x(trans_restart_mark_replicas, 54) \
x(trans_restart_mem_realloced, 55) \
x(trans_restart_memory_allocation_failure, 56) \
x(trans_restart_relock, 57) \
x(trans_restart_relock_after_fill, 58) \
x(trans_restart_relock_key_cache_fill, 59) \
x(trans_restart_relock_next_node, 60) \
x(trans_restart_relock_parent_for_fill, 61) \
x(trans_restart_relock_path, 62) \
x(trans_restart_relock_path_intent, 63) \
x(trans_restart_too_many_iters, 64) \
x(trans_restart_traverse, 65) \
x(trans_restart_upgrade, 66) \
x(trans_restart_would_deadlock, 67) \
x(trans_restart_would_deadlock_write, 68) \
x(trans_restart_injected, 69) \
x(trans_restart_key_cache_upgrade, 70) \
x(trans_traverse_all, 71) \
x(transaction_commit, 72) \
x(write_super, 73) \
x(trans_restart_would_deadlock_recursion_limit, 74) \
x(trans_restart_write_buffer_flush, 75) \
x(trans_restart_split_race, 76)
enum bch_persistent_counters {
#define x(t, n, ...) BCH_COUNTER_##t,
BCH_PERSISTENT_COUNTERS()
#undef x
BCH_COUNTER_NR
};
struct bch_sb_field_counters {
struct bch_sb_field field;
__le64 d[];
};
/*
* On clean shutdown, store btree roots and current journal sequence number in
* the superblock:
*/
struct jset_entry {
__le16 u64s;
__u8 btree_id;
__u8 level;
__u8 type; /* designates what this jset holds */
__u8 pad[3];
struct bkey_i start[0];
__u64 _data[];
};
struct bch_sb_field_clean {
struct bch_sb_field field;
__le32 flags;
__le16 _read_clock; /* no longer used */
__le16 _write_clock;
__le64 journal_seq;
struct jset_entry start[0];
__u64 _data[];
};
struct journal_seq_blacklist_entry {
__le64 start;
__le64 end;
};
struct bch_sb_field_journal_seq_blacklist {
struct bch_sb_field field;
struct journal_seq_blacklist_entry start[0];
__u64 _data[];
};
/* Superblock: */
/*
* New versioning scheme:
* One common version number for all on disk data structures - superblock, btree
* nodes, journal entries
*/
#define BCH_VERSION_MAJOR(_v) ((__u16) ((_v) >> 10))
#define BCH_VERSION_MINOR(_v) ((__u16) ((_v) & ~(~0U << 10)))
#define BCH_VERSION(_major, _minor) (((_major) << 10)|(_minor) << 0)
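/*
 * Example (illustrative): BCH_VERSION(1, 2) == (1 << 10)|2 == 1026;
 * BCH_VERSION_MAJOR() and BCH_VERSION_MINOR() recover 1 and 2.
 */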
#define RECOVERY_PASS_ALL_FSCK (1ULL << 63)
#define BCH_METADATA_VERSIONS() \
x(bkey_renumber, BCH_VERSION(0, 10), \
RECOVERY_PASS_ALL_FSCK) \
x(inode_btree_change, BCH_VERSION(0, 11), \
RECOVERY_PASS_ALL_FSCK) \
x(snapshot, BCH_VERSION(0, 12), \
RECOVERY_PASS_ALL_FSCK) \
x(inode_backpointers, BCH_VERSION(0, 13), \
RECOVERY_PASS_ALL_FSCK) \
x(btree_ptr_sectors_written, BCH_VERSION(0, 14), \
RECOVERY_PASS_ALL_FSCK) \
x(snapshot_2, BCH_VERSION(0, 15), \
BIT_ULL(BCH_RECOVERY_PASS_fs_upgrade_for_subvolumes)| \
BIT_ULL(BCH_RECOVERY_PASS_initialize_subvolumes)| \
RECOVERY_PASS_ALL_FSCK) \
x(reflink_p_fix, BCH_VERSION(0, 16), \
BIT_ULL(BCH_RECOVERY_PASS_fix_reflink_p)) \
x(subvol_dirent, BCH_VERSION(0, 17), \
RECOVERY_PASS_ALL_FSCK) \
x(inode_v2, BCH_VERSION(0, 18), \
RECOVERY_PASS_ALL_FSCK) \
x(freespace, BCH_VERSION(0, 19), \
RECOVERY_PASS_ALL_FSCK) \
x(alloc_v4, BCH_VERSION(0, 20), \
RECOVERY_PASS_ALL_FSCK) \
x(new_data_types, BCH_VERSION(0, 21), \
RECOVERY_PASS_ALL_FSCK) \
x(backpointers, BCH_VERSION(0, 22), \
RECOVERY_PASS_ALL_FSCK) \
x(inode_v3, BCH_VERSION(0, 23), \
RECOVERY_PASS_ALL_FSCK) \
x(unwritten_extents, BCH_VERSION(0, 24), \
RECOVERY_PASS_ALL_FSCK) \
x(bucket_gens, BCH_VERSION(0, 25), \
BIT_ULL(BCH_RECOVERY_PASS_bucket_gens_init)| \
RECOVERY_PASS_ALL_FSCK) \
x(lru_v2, BCH_VERSION(0, 26), \
RECOVERY_PASS_ALL_FSCK) \
x(fragmentation_lru, BCH_VERSION(0, 27), \
RECOVERY_PASS_ALL_FSCK) \
x(no_bps_in_alloc_keys, BCH_VERSION(0, 28), \
RECOVERY_PASS_ALL_FSCK) \
x(snapshot_trees, BCH_VERSION(0, 29), \
RECOVERY_PASS_ALL_FSCK) \
x(major_minor, BCH_VERSION(1, 0), \
0) \
x(snapshot_skiplists, BCH_VERSION(1, 1), \
BIT_ULL(BCH_RECOVERY_PASS_check_snapshots)) \
x(deleted_inodes, BCH_VERSION(1, 2), \
BIT_ULL(BCH_RECOVERY_PASS_check_inodes))
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
#define x(t, n, upgrade_passes) bcachefs_metadata_version_##t = n,
BCH_METADATA_VERSIONS()
#undef x
bcachefs_metadata_version_max
};
static const unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metadata_version_major_minor;
#define bcachefs_metadata_version_current (bcachefs_metadata_version_max - 1)
#define BCH_SB_SECTOR 8
#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
struct bch_sb_layout {
__uuid_t magic; /* bcachefs superblock UUID */
__u8 layout_type;
	__u8			sb_max_size_bits; /* log2 of max superblock size, in 512 byte sectors */
__u8 nr_superblocks;
__u8 pad[5];
__le64 sb_offset[61];
} __packed __aligned(8);
#define BCH_SB_LAYOUT_SECTOR 7
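/*
 * Example (illustrative, assuming sb_max_size_bits works as described):
 * sb_max_size_bits == 11 allows each superblock to grow to
 * 1 << 11 == 2048 512-byte sectors, i.e. 1MiB.
 */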
/*
* @offset - sector where this sb was written
* @version - on disk format version
* @version_min - Oldest metadata version this filesystem contains; so we can
* safely drop compatibility code and refuse to mount filesystems
* we'd need it for
* @magic - identifies as a bcachefs superblock (BCHFS_MAGIC)
* @uuid - used for generating various magic numbers and identifying
* member devices, never changes
* @user_uuid - user visible UUID, may be changed
* @label - filesystem label
* @seq - identifies most recent superblock, incremented each time
* superblock is written
* @features - enabled incompatible features
*/
struct bch_sb {
struct bch_csum csum;
__le16 version;
__le16 version_min;
__le16 pad[2];
__uuid_t magic;
__uuid_t uuid;
__uuid_t user_uuid;
__u8 label[BCH_SB_LABEL_SIZE];
__le64 offset;
__le64 seq;
__le16 block_size;
__u8 dev_idx;
__u8 nr_devices;
__le32 u64s;
__le64 time_base_lo;
__le32 time_base_hi;
__le32 time_precision;
__le64 flags[8];
__le64 features[2];
__le64 compat[2];
struct bch_sb_layout layout;
struct bch_sb_field start[0];
__le64 _data[];
} __packed __aligned(8);
/*
* Flags:
 * BCH_SB_INITIALIZED	- set on first mount
* BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect
* behaviour of mount/recovery path:
* BCH_SB_INODE_32BIT - limit inode numbers to 32 bits
 * BCH_SB_128_BIT_MACS	- 128 bit MACs instead of 80
* BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
* DATA/META_CSUM_TYPE. Also indicates encryption
* algorithm in use, if/when we get more than one
*/
LE16_BITMASK(BCH_SB_BLOCK_SIZE, struct bch_sb, block_size, 0, 16);
LE64_BITMASK(BCH_SB_INITIALIZED, struct bch_sb, flags[0], 0, 1);
LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
LE64_BITMASK(BCH_SB_CSUM_TYPE, struct bch_sb, flags[0], 2, 8);
LE64_BITMASK(BCH_SB_ERROR_ACTION, struct bch_sb, flags[0], 8, 12);
LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE, struct bch_sb, flags[0], 12, 28);
LE64_BITMASK(BCH_SB_GC_RESERVE, struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE, struct bch_sb, flags[0], 33, 40);
LE64_BITMASK(BCH_SB_META_CSUM_TYPE, struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE, struct bch_sb, flags[0], 44, 48);
LE64_BITMASK(BCH_SB_META_REPLICAS_WANT, struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT, struct bch_sb, flags[0], 52, 56);
LE64_BITMASK(BCH_SB_POSIX_ACL, struct bch_sb, flags[0], 56, 57);
LE64_BITMASK(BCH_SB_USRQUOTA, struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA, struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60);
LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);
LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63);
LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1], 4, 8);
LE64_BITMASK(BCH_SB_INODE_32BIT, struct bch_sb, flags[1], 8, 9);
LE64_BITMASK(BCH_SB_128_BIT_MACS, struct bch_sb, flags[1], 9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE, struct bch_sb, flags[1], 10, 14);
/*
* Max size of an extent that may require bouncing to read or write
* (checksummed, compressed): 64k
*/
LE64_BITMASK(BCH_SB_ENCODED_EXTENT_MAX_BITS,
struct bch_sb, flags[1], 14, 20);
LE64_BITMASK(BCH_SB_META_REPLICAS_REQ, struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ, struct bch_sb, flags[1], 24, 28);
LE64_BITMASK(BCH_SB_PROMOTE_TARGET, struct bch_sb, flags[1], 28, 40);
LE64_BITMASK(BCH_SB_FOREGROUND_TARGET, struct bch_sb, flags[1], 40, 52);
LE64_BITMASK(BCH_SB_BACKGROUND_TARGET, struct bch_sb, flags[1], 52, 64);
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO,
struct bch_sb, flags[2], 0, 4);
LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES, struct bch_sb, flags[2], 4, 64);
LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16);
LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28);
LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29);
LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DELAY,struct bch_sb, flags[3], 30, 62);
LE64_BITMASK(BCH_SB_JOURNAL_FLUSH_DISABLED,struct bch_sb, flags[3], 62, 63);
LE64_BITMASK(BCH_SB_JOURNAL_RECLAIM_DELAY,struct bch_sb, flags[4], 0, 32);
LE64_BITMASK(BCH_SB_JOURNAL_TRANSACTION_NAMES,struct bch_sb, flags[4], 32, 33);
LE64_BITMASK(BCH_SB_NOCOW, struct bch_sb, flags[4], 33, 34);
LE64_BITMASK(BCH_SB_WRITE_BUFFER_SIZE, struct bch_sb, flags[4], 34, 54);
LE64_BITMASK(BCH_SB_VERSION_UPGRADE, struct bch_sb, flags[4], 54, 56);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_HI,struct bch_sb, flags[4], 56, 60);
LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
struct bch_sb, flags[4], 60, 64);
LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
struct bch_sb, flags[5], 0, 16);
static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
{
return BCH_SB_COMPRESSION_TYPE_LO(sb) | (BCH_SB_COMPRESSION_TYPE_HI(sb) << 4);
}
static inline void SET_BCH_SB_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
SET_BCH_SB_COMPRESSION_TYPE_LO(sb, v);
SET_BCH_SB_COMPRESSION_TYPE_HI(sb, v >> 4);
}
static inline __u64 BCH_SB_BACKGROUND_COMPRESSION_TYPE(const struct bch_sb *sb)
{
return BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb) |
(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb) << 4);
}
static inline void SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE(struct bch_sb *sb, __u64 v)
{
SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_LO(sb, v);
SET_BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI(sb, v >> 4);
}
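
/*
 * Illustrative sketch, not part of the format (helper name is hypothetical):
 * the compression type is split across flags[1] and flags[4], presumably so
 * that superblocks written when only the 4-bit LO half existed stay readable.
 * Values above 15 spill into the HI bits and the accessors above reassemble
 * them:
 */
static inline _Bool bch_sb_compression_roundtrip_example(struct bch_sb *sb)
{
	SET_BCH_SB_COMPRESSION_TYPE(sb, 18);	/* stores LO = 2, HI = 1 */
	return BCH_SB_COMPRESSION_TYPE(sb) == 18;
}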
/*
* Features:
*
* journal_seq_blacklist_v3: gates BCH_SB_FIELD_journal_seq_blacklist
* reflink: gates KEY_TYPE_reflink
* inline_data: gates KEY_TYPE_inline_data
* new_siphash: gates BCH_STR_HASH_siphash
* new_extent_overwrite: gates BTREE_NODE_NEW_EXTENT_OVERWRITE
*/
#define BCH_SB_FEATURES() \
x(lz4, 0) \
x(gzip, 1) \
x(zstd, 2) \
x(atomic_nlink, 3) \
x(ec, 4) \
x(journal_seq_blacklist_v3, 5) \
x(reflink, 6) \
x(new_siphash, 7) \
x(inline_data, 8) \
x(new_extent_overwrite, 9) \
x(incompressible, 10) \
x(btree_ptr_v2, 11) \
x(extents_above_btree_updates, 12) \
x(btree_updates_journalled, 13) \
x(reflink_inline_data, 14) \
x(new_varint, 15) \
x(journal_no_flush, 16) \
x(alloc_v2, 17) \
x(extents_across_btree_nodes, 18)
#define BCH_SB_FEATURES_ALWAYS \
((1ULL << BCH_FEATURE_new_extent_overwrite)| \
(1ULL << BCH_FEATURE_extents_above_btree_updates)|\
(1ULL << BCH_FEATURE_btree_updates_journalled)|\
(1ULL << BCH_FEATURE_alloc_v2)|\
(1ULL << BCH_FEATURE_extents_across_btree_nodes))
#define BCH_SB_FEATURES_ALL \
(BCH_SB_FEATURES_ALWAYS| \
(1ULL << BCH_FEATURE_new_siphash)| \
(1ULL << BCH_FEATURE_btree_ptr_v2)| \
(1ULL << BCH_FEATURE_new_varint)| \
(1ULL << BCH_FEATURE_journal_no_flush))
enum bch_sb_feature {
#define x(f, n) BCH_FEATURE_##f,
BCH_SB_FEATURES()
#undef x
BCH_FEATURE_NR,
};
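
/*
 * Illustrative sketch (hypothetical helper): feature bits are indices into
 * the little-endian features[] words declared in struct bch_sb earlier in
 * this header; all features defined so far fit in features[0]:
 */
static inline _Bool bch_sb_has_feature_example(const struct bch_sb *sb,
					       enum bch_sb_feature f)
{
	return (__le64_to_cpu(sb->features[0]) & (1ULL << f)) != 0;
}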
#define BCH_SB_COMPAT() \
x(alloc_info, 0) \
x(alloc_metadata, 1) \
x(extents_above_btree_updates_done, 2) \
x(bformat_overflow_done, 3)
enum bch_sb_compat {
#define x(f, n) BCH_COMPAT_##f,
BCH_SB_COMPAT()
#undef x
BCH_COMPAT_NR,
};
/* options: */
#define BCH_VERSION_UPGRADE_OPTS() \
x(compatible, 0) \
x(incompatible, 1) \
x(none, 2)
enum bch_version_upgrade_opts {
#define x(t, n) BCH_VERSION_UPGRADE_##t = n,
BCH_VERSION_UPGRADE_OPTS()
#undef x
};
#define BCH_REPLICAS_MAX 4U
#define BCH_BKEY_PTRS_MAX 16U
#define BCH_ERROR_ACTIONS() \
x(continue, 0) \
x(ro, 1) \
x(panic, 2)
enum bch_error_actions {
#define x(t, n) BCH_ON_ERROR_##t = n,
BCH_ERROR_ACTIONS()
#undef x
BCH_ON_ERROR_NR
};
#define BCH_STR_HASH_TYPES() \
x(crc32c, 0) \
x(crc64, 1) \
x(siphash_old, 2) \
x(siphash, 3)
enum bch_str_hash_type {
#define x(t, n) BCH_STR_HASH_##t = n,
BCH_STR_HASH_TYPES()
#undef x
BCH_STR_HASH_NR
};
#define BCH_STR_HASH_OPTS() \
x(crc32c, 0) \
x(crc64, 1) \
x(siphash, 2)
enum bch_str_hash_opts {
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
BCH_STR_HASH_OPTS()
#undef x
BCH_STR_HASH_OPT_NR
};
#define BCH_CSUM_TYPES() \
x(none, 0) \
x(crc32c_nonzero, 1) \
x(crc64_nonzero, 2) \
x(chacha20_poly1305_80, 3) \
x(chacha20_poly1305_128, 4) \
x(crc32c, 5) \
x(crc64, 6) \
x(xxhash, 7)
enum bch_csum_type {
#define x(t, n) BCH_CSUM_##t = n,
BCH_CSUM_TYPES()
#undef x
BCH_CSUM_NR
};
static const unsigned bch_crc_bytes[] = {
[BCH_CSUM_none] = 0,
[BCH_CSUM_crc32c_nonzero] = 4,
[BCH_CSUM_crc32c] = 4,
[BCH_CSUM_crc64_nonzero] = 8,
[BCH_CSUM_crc64] = 8,
[BCH_CSUM_xxhash] = 8,
[BCH_CSUM_chacha20_poly1305_80] = 10,
[BCH_CSUM_chacha20_poly1305_128] = 16,
};
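
/*
 * Illustrative sketch (hypothetical helper): bounds-checked lookup of the
 * stored checksum/MAC size, in bytes, for a given checksum type:
 */
static inline unsigned bch_crc_bytes_safe_example(enum bch_csum_type type)
{
	return type < BCH_CSUM_NR ? bch_crc_bytes[type] : 0;
}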
static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
{
switch (type) {
case BCH_CSUM_chacha20_poly1305_80:
case BCH_CSUM_chacha20_poly1305_128:
return true;
default:
return false;
}
}
#define BCH_CSUM_OPTS() \
x(none, 0) \
x(crc32c, 1) \
x(crc64, 2) \
x(xxhash, 3)
enum bch_csum_opts {
#define x(t, n) BCH_CSUM_OPT_##t = n,
BCH_CSUM_OPTS()
#undef x
BCH_CSUM_OPT_NR
};
#define BCH_COMPRESSION_TYPES() \
x(none, 0) \
x(lz4_old, 1) \
x(gzip, 2) \
x(lz4, 3) \
x(zstd, 4) \
x(incompressible, 5)
enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
BCH_COMPRESSION_TYPES()
#undef x
BCH_COMPRESSION_TYPE_NR
};
#define BCH_COMPRESSION_OPTS() \
x(none, 0) \
x(lz4, 1) \
x(gzip, 2) \
x(zstd, 3)
enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
BCH_COMPRESSION_OPTS()
#undef x
BCH_COMPRESSION_OPT_NR
};
/*
 * Magic numbers
 *
 * The various other data structures have their own magic numbers, which are
 * XORed with the first 64 bits of the filesystem's UUID
 */
#define BCACHE_MAGIC \
UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca, \
0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCHFS_MAGIC \
UUID_INIT(0xc68573f6, 0x66ce, 0x90a9, \
0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
#define BCACHEFS_STATFS_MAGIC 0xca451a4e
#define JSET_MAGIC __cpu_to_le64(0x245235c1a3625032ULL)
#define BSET_MAGIC __cpu_to_le64(0x90135c78b99e07f5ULL)
static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
__le64 ret;
memcpy(&ret, &sb->uuid, sizeof(ret));
return ret;
}
static inline __u64 __jset_magic(struct bch_sb *sb)
{
return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}
static inline __u64 __bset_magic(struct bch_sb *sb)
{
return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
/* Journal */
#define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
#define BCH_JSET_ENTRY_TYPES() \
x(btree_keys, 0) \
x(btree_root, 1) \
x(prio_ptrs, 2) \
x(blacklist, 3) \
x(blacklist_v2, 4) \
x(usage, 5) \
x(data_usage, 6) \
x(clock, 7) \
x(dev_usage, 8) \
x(log, 9) \
x(overwrite, 10)
enum {
#define x(f, nr) BCH_JSET_ENTRY_##f = nr,
BCH_JSET_ENTRY_TYPES()
#undef x
BCH_JSET_ENTRY_NR
};
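
/*
 * Illustrative sketch (hypothetical helper): journal entries are variable
 * length; entry->u64s counts the payload in 64-bit words, excluding the
 * header, so stepping from one entry to the next within a jset looks like:
 */
static inline struct jset_entry *jset_entry_next_example(struct jset_entry *e)
{
	return (struct jset_entry *)
		((__u64 *) e + JSET_KEYS_U64s + __le16_to_cpu(e->u64s));
}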
/*
* Journal sequence numbers can be blacklisted: bsets record the max sequence
* number of all the journal entries they contain updates for, so that on
 * recovery we can ignore those bsets that contain index updates newer than what
* made it into the journal.
*
* This means that we can't reuse that journal_seq - we have to skip it, and
* then record that we skipped it so that the next time we crash and recover we
* don't think there was a missing journal entry.
*/
struct jset_entry_blacklist {
struct jset_entry entry;
__le64 seq;
};
struct jset_entry_blacklist_v2 {
struct jset_entry entry;
__le64 start;
__le64 end;
};
#define BCH_FS_USAGE_TYPES() \
x(reserved, 0) \
x(inodes, 1) \
x(key_version, 2)
enum {
#define x(f, nr) BCH_FS_USAGE_##f = nr,
BCH_FS_USAGE_TYPES()
#undef x
BCH_FS_USAGE_NR
};
struct jset_entry_usage {
struct jset_entry entry;
__le64 v;
} __packed;
struct jset_entry_data_usage {
struct jset_entry entry;
__le64 v;
struct bch_replicas_entry r;
} __packed;
struct jset_entry_clock {
struct jset_entry entry;
__u8 rw;
__u8 pad[7];
__le64 time;
} __packed;
struct jset_entry_dev_usage_type {
__le64 buckets;
__le64 sectors;
__le64 fragmented;
} __packed;
struct jset_entry_dev_usage {
struct jset_entry entry;
__le32 dev;
__u32 pad;
__le64 buckets_ec;
__le64 _buckets_unavailable; /* No longer used */
struct jset_entry_dev_usage_type d[];
};
static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
{
	/* d[] entries fill whatever remains after the fixed-size header: */
	return (vstruct_bytes(&u->entry) - sizeof(struct jset_entry_dev_usage)) /
		sizeof(struct jset_entry_dev_usage_type);
}
struct jset_entry_log {
struct jset_entry entry;
	__u8			d[];
} __packed;
/*
* On disk format for a journal entry:
* seq is monotonically increasing; every journal entry has its own unique
* sequence number.
*
* last_seq is the oldest journal entry that still has keys the btree hasn't
* flushed to disk yet.
*
* version is for on disk format changes.
*/
struct jset {
struct bch_csum csum;
__le64 magic;
__le64 seq;
__le32 version;
__le32 flags;
__le32 u64s; /* size of d[] in u64s */
__u8 encrypted_start[0];
__le16 _read_clock; /* no longer used */
__le16 _write_clock;
/* Sequence number of oldest dirty journal entry */
__le64 last_seq;
struct jset_entry start[0];
__u64 _data[];
} __packed __aligned(8);
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
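
/*
 * Illustrative sketch (hypothetical helper): recovery can use the derived
 * per-filesystem magic to reject journal sectors belonging to a different
 * filesystem:
 */
static inline _Bool jset_magic_valid_example(struct bch_sb *sb, const struct jset *j)
{
	return __le64_to_cpu(j->magic) == __jset_magic(sb);
}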
#define BCH_JOURNAL_BUCKETS_MIN 8
/* Btree: */
enum btree_id_flags {
BTREE_ID_EXTENTS = BIT(0),
BTREE_ID_SNAPSHOTS = BIT(1),
BTREE_ID_DATA = BIT(2),
};
#define BCH_BTREE_IDS() \
x(extents, 0, BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_error)| \
BIT_ULL(KEY_TYPE_cookie)| \
BIT_ULL(KEY_TYPE_extent)| \
BIT_ULL(KEY_TYPE_reservation)| \
BIT_ULL(KEY_TYPE_reflink_p)| \
BIT_ULL(KEY_TYPE_inline_data)) \
x(inodes, 1, BTREE_ID_SNAPSHOTS, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_inode)| \
BIT_ULL(KEY_TYPE_inode_v2)| \
BIT_ULL(KEY_TYPE_inode_v3)| \
BIT_ULL(KEY_TYPE_inode_generation)) \
x(dirents, 2, BTREE_ID_SNAPSHOTS, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_hash_whiteout)| \
BIT_ULL(KEY_TYPE_dirent)) \
x(xattrs, 3, BTREE_ID_SNAPSHOTS, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_cookie)| \
BIT_ULL(KEY_TYPE_hash_whiteout)| \
BIT_ULL(KEY_TYPE_xattr)) \
x(alloc, 4, 0, \
BIT_ULL(KEY_TYPE_alloc)| \
BIT_ULL(KEY_TYPE_alloc_v2)| \
BIT_ULL(KEY_TYPE_alloc_v3)| \
BIT_ULL(KEY_TYPE_alloc_v4)) \
x(quotas, 5, 0, \
BIT_ULL(KEY_TYPE_quota)) \
x(stripes, 6, 0, \
BIT_ULL(KEY_TYPE_stripe)) \
x(reflink, 7, BTREE_ID_EXTENTS|BTREE_ID_DATA, \
BIT_ULL(KEY_TYPE_reflink_v)| \
BIT_ULL(KEY_TYPE_indirect_inline_data)) \
x(subvolumes, 8, 0, \
BIT_ULL(KEY_TYPE_subvolume)) \
x(snapshots, 9, 0, \
BIT_ULL(KEY_TYPE_snapshot)) \
x(lru, 10, 0, \
BIT_ULL(KEY_TYPE_set)) \
x(freespace, 11, BTREE_ID_EXTENTS, \
BIT_ULL(KEY_TYPE_set)) \
x(need_discard, 12, 0, \
BIT_ULL(KEY_TYPE_set)) \
x(backpointers, 13, 0, \
BIT_ULL(KEY_TYPE_backpointer)) \
x(bucket_gens, 14, 0, \
BIT_ULL(KEY_TYPE_bucket_gens)) \
x(snapshot_trees, 15, 0, \
BIT_ULL(KEY_TYPE_snapshot_tree)) \
x(deleted_inodes, 16, BTREE_ID_SNAPSHOTS, \
BIT_ULL(KEY_TYPE_set)) \
x(logged_ops, 17, 0, \
BIT_ULL(KEY_TYPE_logged_op_truncate)| \
BIT_ULL(KEY_TYPE_logged_op_finsert))
enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
BCH_BTREE_IDS()
#undef x
BTREE_ID_NR
};
#define BTREE_MAX_DEPTH 4U
/* Btree nodes */
/*
 * On disk a btree node is a list/log of bsets; within each bset the keys
 * are sorted.
 */
struct bset {
__le64 seq;
/*
* Highest journal entry this bset contains keys for.
* If on recovery we don't see that journal entry, this bset is ignored:
* this allows us to preserve the order of all index updates after a
* crash, since the journal records a total order of all index updates
* and anything that didn't make it to the journal doesn't get used.
*/
__le64 journal_seq;
__le32 flags;
__le16 version;
__le16 u64s; /* count of d[] in u64s */
struct bkey_packed start[0];
__u64 _data[];
} __packed __aligned(8);
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS,
struct bset, flags, 5, 6);
/* Sector offset within the btree node: */
LE32_BITMASK(BSET_OFFSET, struct bset, flags, 16, 32);
struct btree_node {
struct bch_csum csum;
__le64 magic;
/* this flags field is encrypted, unlike bset->flags: */
__le64 flags;
/* Closed interval: */
struct bpos min_key;
struct bpos max_key;
struct bch_extent_ptr _ptr; /* not used anymore */
struct bkey_format format;
union {
struct bset keys;
struct {
__u8 pad[22];
__le16 u64s;
__u64 _data[0];
};
};
} __packed __aligned(8);
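/* the btree ID outgrew its original 4-bit field, so it's now split in two: */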
LE64_BITMASK(BTREE_NODE_ID_LO, struct btree_node, flags, 0, 4);
LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
LE64_BITMASK(BTREE_NODE_NEW_EXTENT_OVERWRITE,
struct btree_node, flags, 8, 9);
LE64_BITMASK(BTREE_NODE_ID_HI, struct btree_node, flags, 9, 25);
/* 25-32 unused */
LE64_BITMASK(BTREE_NODE_SEQ, struct btree_node, flags, 32, 64);
static inline __u64 BTREE_NODE_ID(struct btree_node *n)
{
return BTREE_NODE_ID_LO(n) | (BTREE_NODE_ID_HI(n) << 4);
}
static inline void SET_BTREE_NODE_ID(struct btree_node *n, __u64 v)
{
SET_BTREE_NODE_ID_LO(n, v);
SET_BTREE_NODE_ID_HI(n, v >> 4);
}
struct btree_node_entry {
struct bch_csum csum;
union {
struct bset keys;
struct {
__u8 pad[22];
__le16 u64s;
__u64 _data[0];
};
};
} __packed __aligned(8);
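
/*
 * Illustrative sketch (hypothetical helper; the block-size padding is an
 * assumption about the write path, not something this header specifies):
 * a btree_node_entry's size is its header plus keys.u64s 64-bit words,
 * padded out to the filesystem block size on disk. block_bytes must be a
 * power of two here:
 */
static inline struct btree_node_entry *
btree_node_entry_next_example(struct btree_node_entry *bne, __u64 block_bytes)
{
	__u64 bytes = offsetof(struct btree_node_entry, keys._data) +
		__le16_to_cpu(bne->keys.u64s) * sizeof(__u64);

	bytes = (bytes + block_bytes - 1) & ~(block_bytes - 1);
	return (struct btree_node_entry *) ((__u8 *) bne + bytes);
}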
#endif /* _BCACHEFS_FORMAT_H */