This commit is contained in:
pepe 2023-05-13 00:20:51 +00:00
parent 8806dfe4f4
commit 77d2feb230
122 changed files with 1 addition and 17647 deletions

989
Cargo.lock generated
View File

@ -1,989 +0,0 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "aho-corasick"
version = "0.7.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7404febffaa47dac81aa44dba71523c9d069b1bdc50a77db41195149e17f68e5"
dependencies = [
"memchr",
]
[[package]]
name = "ansi_term"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
dependencies = [
"winapi",
]
[[package]]
name = "anyhow"
version = "1.0.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15af2628f6890fe2609a3b91bef4c83450512802e59489f9c1cb1fa5df064a61"
[[package]]
name = "arrayvec"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b"
[[package]]
name = "atty"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
dependencies = [
"hermit-abi",
"libc",
"winapi",
]
[[package]]
name = "autocfg"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "base64"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd"
[[package]]
name = "bitflags"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
[[package]]
name = "bitvec"
version = "0.19.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8942c8d352ae1838c9dda0b0ca2ab657696ef2232a20147cf1b30ae1a9cb4321"
dependencies = [
"funty",
"radium",
"tap",
"wyz",
]
[[package]]
name = "byteorder"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
[[package]]
name = "cassowary"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53"
[[package]]
name = "cc"
version = "1.0.68"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a72c244c1ff497a746a7e1fb3d14bd08420ecda70c8f25c7112f2781652d787"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "clap"
version = "2.33.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [
"ansi_term",
"atty",
"bitflags",
"strsim",
"textwrap",
"unicode-width",
"vec_map",
]
[[package]]
name = "console"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3993e6445baa160675931ec041a5e03ca84b9c6e32a056150d3aa2bdda0a1f45"
dependencies = [
"encode_unicode",
"lazy_static",
"libc",
"terminal_size",
"winapi",
]
[[package]]
name = "crc32c"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "210cdf933e6a81212bfabf90cd8762f471b5922e5f6b709547673ad8e04b9448"
dependencies = [
"rustc_version",
]
[[package]]
name = "crc32fast"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a"
dependencies = [
"cfg-if",
]
[[package]]
name = "data-encoding"
version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57"
[[package]]
name = "duct"
version = "0.13.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fc6a0a59ed0888e0041cf708e66357b7ae1a82f1c67247e1f93b5e0818f7d8d"
dependencies = [
"libc",
"once_cell",
"os_pipe",
"shared_child",
]
[[package]]
name = "encode_unicode"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f"
[[package]]
name = "env_logger"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
dependencies = [
"log",
"regex",
]
[[package]]
name = "fixedbitset"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e"
[[package]]
name = "flate2"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd3aec53de10fe96d7d8c565eb17f2c687bb5518a2ec453b5b1252964526abe0"
dependencies = [
"cfg-if",
"crc32fast",
"libc",
"miniz_oxide",
]
[[package]]
name = "funty"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7"
[[package]]
name = "futures"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0e7e43a803dae2fa37c1f6a8fe121e1f7bf9548b4dfc0522a42f34145dadfc27"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e682a68b29a882df0545c143dc3646daefe80ba479bcdede94d5a703de2871e2"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
name = "futures-core"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0402f765d8a89a26043b889b26ce3c4679d268fa6bb22cd7c6aad98340e179d1"
[[package]]
name = "futures-executor"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "badaa6a909fac9e7236d0620a2f57f7664640c56575b71a7552fbd68deafab79"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "acc499defb3b348f8d8f3f66415835a9131856ff7714bf10dadfc4ec4bdb29a1"
[[package]]
name = "futures-macro"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4c40298486cdf52cc00cd6d6987892ba502c7656a16a4192a9992b1ccedd121"
dependencies = [
"autocfg",
"proc-macro-hack",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "futures-sink"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a57bead0ceff0d6dde8f465ecd96c9338121bb7717d3e7b108059531870c4282"
[[package]]
name = "futures-task"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a16bef9fc1a4dddb5bee51c989e3fbba26569cbb0e31f5b303c184e3dd33dae"
[[package]]
name = "futures-util"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "feb5c238d27e2bf94ffdfd27b2c29e3df4a68c4193bb6427384259e2bf191967"
dependencies = [
"autocfg",
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"proc-macro-hack",
"proc-macro-nested",
"slab",
]
[[package]]
name = "getrandom"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce"
dependencies = [
"cfg-if",
"libc",
"wasi 0.9.0+wasi-snapshot-preview1",
]
[[package]]
name = "getrandom"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753"
dependencies = [
"cfg-if",
"libc",
"wasi 0.10.2+wasi-snapshot-preview1",
]
[[package]]
name = "hermit-abi"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b467343b94ba476dcb2500d242dadbb39557df889310ac77c5d99100aaac33"
dependencies = [
"libc",
]
[[package]]
name = "indicatif"
version = "0.16.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2d207dc617c7a380ab07ff572a6e52fa202a2a8f355860ac9c38e23f8196be1b"
dependencies = [
"console",
"lazy_static",
"number_prefix",
"regex",
]
[[package]]
name = "io-uring"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2f7589adca0ddd74f56ed83a5098b45e3abf264dc27e150a8bec3397fcc34338"
dependencies = [
"bitflags",
"libc",
]
[[package]]
name = "json"
version = "0.12.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd"
[[package]]
name = "lazy_static"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "lexical-core"
version = "0.7.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6607c62aa161d23d17a9072cc5da0be67cdfc89d3afb1e8d9c842bebc2525ffe"
dependencies = [
"arrayvec",
"bitflags",
"cfg-if",
"ryu",
"static_assertions",
]
[[package]]
name = "libc"
version = "0.2.102"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2a5ac8f984bfcf3a823267e5fde638acc3325f6496633a5da6bb6eb2171e103"
[[package]]
name = "log"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710"
dependencies = [
"cfg-if",
]
[[package]]
name = "memchr"
version = "2.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee1c47aaa256ecabcaea351eae4a9b01ef39ed810004e298d2511ed284b1525"
[[package]]
name = "memoffset"
version = "0.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9"
dependencies = [
"autocfg",
]
[[package]]
name = "miniz_oxide"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b"
dependencies = [
"adler",
"autocfg",
]
[[package]]
name = "nix"
version = "0.22.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7555d6c7164cc913be1ce7f95cbecdabda61eb2ccd89008524af306fb7f5031"
dependencies = [
"bitflags",
"cc",
"cfg-if",
"libc",
"memoffset",
]
[[package]]
name = "nom"
version = "6.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c5c51b9083a3c620fa67a2a635d1ce7d95b897e957d6b28ff9a5da960a103a6"
dependencies = [
"bitvec",
"funty",
"lexical-core",
"memchr",
"version_check",
]
[[package]]
name = "num-derive"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "876a53fff98e03a936a674b29568b0e605f06b29372c2489ff4de23f1949743d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "num-traits"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290"
dependencies = [
"autocfg",
]
[[package]]
name = "num_cpus"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3"
dependencies = [
"hermit-abi",
"libc",
]
[[package]]
name = "number_prefix"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3"
[[package]]
name = "numtoa"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b8f8bdf33df195859076e54ab11ee78a1b208382d3a26ec40d142ffc1ecc49ef"
[[package]]
name = "once_cell"
version = "1.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56"
[[package]]
name = "os_pipe"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb233f06c2307e1f5ce2ecad9f8121cffbbee2c95428f44ea85222e460d0d213"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "pin-project-lite"
version = "0.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc0e1f259c92177c30a4c9d177246edd0a3568b25756a977d0632cf8fa37e905"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "ppv-lite86"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857"
[[package]]
name = "proc-macro-hack"
version = "0.5.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5"
[[package]]
name = "proc-macro-nested"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086"
[[package]]
name = "proc-macro2"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d8caf72986c1a598726adc988bb5984792ef84f5ee5aa50209145ee8077038"
dependencies = [
"unicode-xid",
]
[[package]]
name = "quick-xml"
version = "0.22.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8533f14c8382aaad0d592c812ac3b826162128b65662331e1127b45c3d18536b"
dependencies = [
"memchr",
]
[[package]]
name = "quickcheck"
version = "0.9.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f"
dependencies = [
"env_logger",
"log",
"rand 0.7.3",
"rand_core 0.5.1",
]
[[package]]
name = "quickcheck_macros"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "608c156fd8e97febc07dc9c2e2c80bf74cfc6ef26893eae3daf8bc2bc94a4b7f"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "quote"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "radium"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "941ba9d78d8e2f7ce474c015eea4d9c6d25b6a3327f9832ee29a4de27f91bbb8"
[[package]]
name = "rand"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03"
dependencies = [
"getrandom 0.1.16",
"libc",
"rand_chacha 0.2.2",
"rand_core 0.5.1",
"rand_hc 0.2.0",
]
[[package]]
name = "rand"
version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8"
dependencies = [
"libc",
"rand_chacha 0.3.1",
"rand_core 0.6.3",
"rand_hc 0.3.1",
]
[[package]]
name = "rand_chacha"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402"
dependencies = [
"ppv-lite86",
"rand_core 0.5.1",
]
[[package]]
name = "rand_chacha"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88"
dependencies = [
"ppv-lite86",
"rand_core 0.6.3",
]
[[package]]
name = "rand_core"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19"
dependencies = [
"getrandom 0.1.16",
]
[[package]]
name = "rand_core"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7"
dependencies = [
"getrandom 0.2.3",
]
[[package]]
name = "rand_hc"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c"
dependencies = [
"rand_core 0.5.1",
]
[[package]]
name = "rand_hc"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7"
dependencies = [
"rand_core 0.6.3",
]
[[package]]
name = "redox_syscall"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ab49abadf3f9e1c4bc499e8845e152ad87d2ad2d30371841171169e9d75feee"
dependencies = [
"bitflags",
]
[[package]]
name = "redox_termios"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8440d8acb4fd3d277125b4bd01a6f38aee8d814b3b5fc09b3f2b825d37d3fe8f"
dependencies = [
"redox_syscall",
]
[[package]]
name = "regex"
version = "1.4.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a26af418b574bd56588335b3a3659a65725d4e636eb1016c2f9e3b38c7cc759"
dependencies = [
"aho-corasick",
"memchr",
"regex-syntax",
]
[[package]]
name = "regex-syntax"
version = "0.6.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b"
[[package]]
name = "remove_dir_all"
version = "0.5.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7"
dependencies = [
"winapi",
]
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
dependencies = [
"semver",
]
[[package]]
name = "ryu"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e"
[[package]]
name = "safemem"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072"
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
dependencies = [
"semver-parser",
]
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "shared_child"
version = "0.3.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6be9f7d5565b1483af3e72975e2dee33879b3b86bd48c0929fccf6585d79e65a"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "slab"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f173ac3d1a7e3b28003f40de0b5ce7fe2710f9b9dc3fc38664cebee46b3b6527"
[[package]]
name = "static_assertions"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
[[package]]
name = "strsim"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "syn"
version = "1.0.73"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f71489ff30030d2ae598524f61326b902466f72a0fb1a8564c001cc63425bcc7"
dependencies = [
"proc-macro2",
"quote",
"unicode-xid",
]
[[package]]
name = "tap"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tempfile"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22"
dependencies = [
"cfg-if",
"libc",
"rand 0.8.4",
"redox_syscall",
"remove_dir_all",
"winapi",
]
[[package]]
name = "terminal_size"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "633c1a546cee861a1a6d0dc69ebeca693bf4296661ba7852b9d21d159e0506df"
dependencies = [
"libc",
"winapi",
]
[[package]]
name = "termion"
version = "1.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "077185e2eac69c3f8379a4298e1e07cd36beb962290d4a51199acf0fdc10607e"
dependencies = [
"libc",
"numtoa",
"redox_syscall",
"redox_termios",
]
[[package]]
name = "textwrap"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
dependencies = [
"unicode-width",
]
[[package]]
name = "thinp"
version = "0.1.0"
dependencies = [
"anyhow",
"atty",
"base64",
"byteorder",
"clap",
"crc32c",
"data-encoding",
"duct",
"fixedbitset",
"flate2",
"futures",
"indicatif",
"io-uring",
"json",
"libc",
"nix",
"nom",
"num-derive",
"num-traits",
"num_cpus",
"quick-xml",
"quickcheck",
"quickcheck_macros",
"rand 0.8.4",
"safemem",
"tempfile",
"termion",
"thiserror",
"threadpool",
"tui",
]
[[package]]
name = "thiserror"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa6f76457f59514c7eeb4e59d891395fab0b2fd1d40723ae737d64153392e9c6"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a36768c0fbf1bb15eca10defa29526bda730a2376c2ab4393ccfa16fb1a318d"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "threadpool"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
dependencies = [
"num_cpus",
]
[[package]]
name = "tui"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ced152a8e9295a5b168adc254074525c17ac4a83c90b2716274cc38118bddc9"
dependencies = [
"bitflags",
"cassowary",
"termion",
"unicode-segmentation",
"unicode-width",
]
[[package]]
name = "unicode-segmentation"
version = "1.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bb0d2e7be6ae3a5fa87eed5fb451aff96f2573d2694942e40543ae0bbe19c796"
[[package]]
name = "unicode-width"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
[[package]]
name = "unicode-xid"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3"
[[package]]
name = "vec_map"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
[[package]]
name = "version_check"
version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe"
[[package]]
name = "wasi"
version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "wasi"
version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
dependencies = [
"winapi-i686-pc-windows-gnu",
"winapi-x86_64-pc-windows-gnu",
]
[[package]]
name = "winapi-i686-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "wyz"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214"

View File

@ -1,46 +0,0 @@
[package]
name = "thinp"
version = "0.1.0"
authors = ["Joe Thornber <ejt@redhat.com>"]
edition = "2018"
license = "GPL3"
[dependencies]
atty = "0.2"
anyhow = "1.0"
base64 = "0.13"
byteorder = "1.4"
clap = "2.33"
crc32c = "0.6"
data-encoding = "2.3"
duct = "0.13"
fixedbitset = "0.4"
futures = "0.3"
flate2 = "1.0"
io-uring = "0.4"
indicatif = "0.16"
libc = "0.2"
nix = "0.22"
nom = "6.2"
num_cpus = "1.13"
num-derive = "0.3"
num-traits = "0.2"
quick-xml = "0.22"
rand = "0.8"
safemem = "0.3"
tempfile = "3.2"
threadpool = "1.8"
thiserror = "1.0"
tui = "0.14"
termion = "1.5"
[dev-dependencies]
json = "0.12"
quickcheck = "0.9"
quickcheck_macros = "0.9"
[profile.release]
debug = true
[features]
rust_tests = []

View File

@ -344,20 +344,7 @@ install: bin/pdata_tools $(MANPAGES)
$(INSTALL_DATA) man8/era_invalidate.8 $(MANPATH)/man8
$(INSTALL_DATA) man8/thin_trim.8 $(MANPATH)/man8
.PHONY: install install-rust-tools rust-tools
rust-tools:
cargo build --release
install-rust-tools: man8/thin_metadata_pack.8 man8/thin_metadata_unpack.8 rust-tools
$(INSTALL_DIR) $(BINDIR)
$(INSTALL_PROGRAM) target/release/thin_metadata_pack $(BINDIR)
$(INSTALL_PROGRAM) target/release/thin_metadata_unpack $(BINDIR)
$(STRIP) $(BINDIR)/thin_metadata_pack
$(STRIP) $(BINDIR)/thin_metadata_unpack
$(INSTALL_DIR) $(MANPATH)/man8
$(INSTALL_DATA) man8/thin_metadata_pack.8 $(MANPATH)/man8
$(INSTALL_DATA) man8/thin_metadata_unpack.8 $(MANPATH)/man8
.PHONY: install
#----------------------------------------------------------------

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
../target/release/thin_metadata_pack

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
../target/release/thin_metadata_unpack

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1 +0,0 @@
pdata_tools_dev

View File

@ -1,79 +0,0 @@
use anyhow::{anyhow, ensure, Result};
use std::ffi::OsString;
use std::path::Path;
use std::process::exit;
use thinp::commands::*;
/// True when the invoked executable name `name` matches the command
/// string `cmd`, using `Path` equality (component-wise comparison).
fn name_eq(name: &Path, cmd: &str) -> bool {
    let cmd_as_path = Path::new(cmd);
    name == cmd_as_path
}
/// Dispatches to the tool named by argv[0] (busybox-style), or by argv[1]
/// when the binary is invoked under its multiplexer name `pdata_tools`.
///
/// Returns an error when no tool name can be resolved or the resolved name
/// matches no known command; each tool parses its own arguments.
fn main_() -> Result<()> {
    let mut args = std::env::args_os();
    ensure!(args.len() > 0);

    // Strip any directory components from argv[0] to get the invoked name.
    let mut os_name = args.next().unwrap();
    let mut name = Path::new(&os_name);
    name = Path::new(
        name.file_name()
            .ok_or_else(|| anyhow!("invalid executable name"))?,
    );

    // When run as the multiplexer, the real tool name is the next argument.
    // The previous code unwrapped here and panicked when argv[1] was absent;
    // report a normal error (and thus exit code 1) instead.
    if name == Path::new("pdata_tools") {
        os_name = args.next().ok_or_else(|| anyhow!("no command given"))?;
        name = Path::new(&os_name);
    }

    // Forward the remaining arguments with the tool name as argv[0].
    let mut new_args = vec![OsString::from(&name)];
    new_args.extend(args);

    // Same comparisons, in the same order, as the original else-if chain.
    match name {
        n if name_eq(n, "cache_check") => cache_check::run(&new_args),
        n if name_eq(n, "cache_dump") => cache_dump::run(&new_args),
        n if name_eq(n, "cache_metadata_size") => cache_metadata_size::run(&new_args),
        n if name_eq(n, "cache_repair") => cache_repair::run(&new_args),
        n if name_eq(n, "cache_restore") => cache_restore::run(&new_args),
        n if name_eq(n, "era_check") => era_check::run(&new_args),
        n if name_eq(n, "era_dump") => era_dump::run(&new_args),
        n if name_eq(n, "era_restore") => era_restore::run(&new_args),
        n if name_eq(n, "thin_check") => thin_check::run(&new_args),
        n if name_eq(n, "thin_dump") => thin_dump::run(&new_args),
        n if name_eq(n, "thin_metadata_pack") => thin_metadata_pack::run(&new_args),
        n if name_eq(n, "thin_metadata_size") => thin_metadata_size::run(&new_args),
        n if name_eq(n, "thin_metadata_unpack") => thin_metadata_unpack::run(&new_args),
        n if name_eq(n, "thin_repair") => thin_repair::run(&new_args),
        n if name_eq(n, "thin_restore") => thin_restore::run(&new_args),
        n if name_eq(n, "thin_shrink") => thin_shrink::run(&new_args),
        _ => return Err(anyhow!("unrecognised command")),
    }

    Ok(())
}
/// Process entry point: maps `main_`'s result onto the exit status.
fn main() {
    // Errors are deliberately not printed: -q may be set, and the
    // individual tools emit their own diagnostics.
    let code = if main_().is_err() { 1 } else { 0 };
    exit(code)
}

View File

@ -1,867 +0,0 @@
extern crate clap;
use anyhow::{anyhow, Result};
use clap::{App, Arg};
use std::fmt;
use std::io::{self, Write};
use std::path::Path;
use std::sync::mpsc;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::thread;
use std::time::Duration;
use termion::event::Key;
use termion::input::TermRead;
use termion::raw::IntoRawMode;
use tui::{
backend::TermionBackend,
buffer::Buffer,
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
terminal::Frame,
text::Span,
widgets::{Block, Borders, List, ListItem, ListState, Row, StatefulWidget, Table, Widget},
Terminal,
};
use thinp::io_engine::*;
use thinp::pdata::btree;
use thinp::pdata::unpack::*;
use thinp::thin::block_time::*;
use thinp::thin::device_detail::*;
use thinp::thin::superblock::*;
//------------------------------------
/// A message consumed by the UI event loop.
pub enum Event<I> {
    /// An input event carrying the input value (instantiated with
    /// `termion::event::Key` in this file).
    Input(I),
    /// Timer tick.  NOTE(review): no producer of this variant is visible
    /// in this file — confirm whether a tick thread exists elsewhere.
    Tick,
}
/// Event source backed by a thread that reads keys from stdin.
pub struct Events {
    /// Receiving end of the channel fed by the input thread.
    rx: mpsc::Receiver<Event<Key>>,
    /// Handle to the stdin-reading thread (kept alive with the struct).
    input_handle: thread::JoinHandle<()>,
    /// When true, the input thread keeps running after seeing the exit key.
    ignore_exit_key: Arc<AtomicBool>,
}
/// Settings for the input-event thread.
#[derive(Debug, Clone, Copy)]
pub struct Config {
    /// Key that terminates the input thread (unless exit handling is
    /// suppressed via `Events::disable_exit_key`).
    pub exit_key: Key,
    /// Tick interval.  NOTE(review): stored but not read by any code
    /// visible in this file — confirm intended use.
    pub tick_rate: Duration,
}
impl Default for Config {
    /// Defaults: quit on 'q', 250 ms tick interval.
    fn default() -> Config {
        let exit_key = Key::Char('q');
        let tick_rate = Duration::from_millis(250);
        Config { exit_key, tick_rate }
    }
}
impl Events {
    /// Builds an event source with the default configuration.
    pub fn new() -> Events {
        Self::with_config(Config::default())
    }

    /// Spawns a thread that reads keys from stdin and forwards each one on
    /// an internal channel.  The thread returns when the receiver is gone
    /// or when `config.exit_key` is seen while exit handling is enabled.
    pub fn with_config(config: Config) -> Events {
        let (tx, rx) = mpsc::channel();
        let ignore_exit_key = Arc::new(AtomicBool::new(false));
        let exit_flag = ignore_exit_key.clone();
        let input_handle = thread::spawn(move || {
            let stdin = io::stdin();
            for key in stdin.keys().flatten() {
                // Forward first; a closed channel ends the thread.
                if let Err(err) = tx.send(Event::Input(key)) {
                    eprintln!("{}", err);
                    return;
                }
                // Then stop on the exit key, unless suppressed.
                if !exit_flag.load(Ordering::Relaxed) && key == config.exit_key {
                    return;
                }
            }
        });

        Events {
            rx,
            input_handle,
            ignore_exit_key,
        }
    }

    /// Blocks until the next event arrives.
    pub fn next(&self) -> Result<Event<Key>, mpsc::RecvError> {
        self.rx.recv()
    }

    /// Keep the input thread alive even after the exit key is pressed.
    pub fn disable_exit_key(&mut self) {
        self.ignore_exit_key.store(true, Ordering::Relaxed);
    }

    /// Restore termination of the input thread on the exit key.
    pub fn enable_exit_key(&mut self) {
        self.ignore_exit_key.store(false, Ordering::Relaxed);
    }
}
impl Default for Events {
    /// Equivalent to `Events::new()`.
    fn default() -> Self {
        Self::new()
    }
}
//------------------------------------
/// Move the list selection one entry down, clamping at the last entry.
///
/// `max` is the number of entries in the list.  With no current
/// selection the first entry (index 0) is selected.  An empty list
/// clears the selection instead of underflowing `max - 1` (the
/// original code panicked in debug builds when `max == 0`).
fn ls_next(ls: &mut ListState, max: usize) {
    if max == 0 {
        // Nothing selectable; avoid the `max - 1` underflow.
        ls.select(None);
        return;
    }
    let i = match ls.selected() {
        Some(i) => std::cmp::min(i + 1, max - 1),
        None => 0,
    };
    ls.select(Some(i));
}
/// Move the list selection one entry up, clamping at the first entry.
/// With no current selection, the first entry is selected.
fn ls_previous(ls: &mut ListState) {
    let i = ls.selected().map_or(0, |i| i.saturating_sub(1));
    ls.select(Some(i));
}
//------------------------------------
/// Stateful widget showing the superblock fields as a table, plus a
/// two-entry list for choosing which btree to descend into.
struct SBWidget<'a> {
    sb: &'a Superblock,
}
impl<'a> StatefulWidget for SBWidget<'a> {
    type State = ListState;
    fn render(self, area: Rect, buf: &mut Buffer, state: &mut ListState) {
        // Top chunk: superblock field table; bottom chunk: entry list.
        let chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints([Constraint::Min(10), Constraint::Percentage(80)].as_ref())
            .split(area);
        let sb = self.sb;
        let flags = vec!["flags".to_string(), format!("{}", sb.flags)];
        let block = vec!["block".to_string(), format!("{}", sb.block)];
        // The uuid is not decoded here; a placeholder is shown.
        let uuid = vec!["uuid".to_string(), "-".to_string()];
        let version = vec!["version".to_string(), format!("{}", sb.version)];
        let time = vec!["time".to_string(), format!("{}", sb.time)];
        let transaction_id = vec![
            "transaction_id".to_string(),
            format!("{}", sb.transaction_id),
        ];
        let metadata_snap = vec![
            "metadata_snap".to_string(),
            // 0 is displayed as "-" (no metadata snapshot).
            if sb.metadata_snap == 0 {
                "-".to_string()
            } else {
                format!("{}", sb.metadata_snap)
            },
        ];
        let mapping_root = vec!["mapping root".to_string(), format!("{}", sb.mapping_root)];
        let details_root = vec!["details root".to_string(), format!("{}", sb.details_root)];
        let data_block_size = vec![
            "data block size".to_string(),
            // Presumably data_block_size is in 512-byte sectors, hence
            // *2 to display KiB — TODO confirm against thinp docs.
            format!("{}k", sb.data_block_size * 2),
        ];
        let table = Table::new(vec![
            Row::new(flags),
            Row::new(block),
            Row::new(uuid),
            Row::new(version),
            Row::new(time),
            Row::new(transaction_id),
            Row::new(metadata_snap),
            Row::new(mapping_root),
            Row::new(details_root),
            Row::new(data_block_size),
        ])
        .header(Row::new(vec!["Field", "Value"]).style(Style::default().fg(Color::Yellow)))
        .block(
            Block::default()
                .borders(Borders::ALL)
                .title("Superblock".to_string()),
        )
        .widths(&[Constraint::Length(20), Constraint::Length(60)])
        .style(Style::default().fg(Color::White))
        .column_spacing(1);
        Widget::render(table, chunks[0], buf);
        // The two entries correspond to the device-details tree and the
        // top-level mapping tree (see SBPanel::input).
        let items = vec![
            ListItem::new(Span::raw("Device tree".to_string())),
            ListItem::new(Span::raw("Mapping tree".to_string())),
        ];
        let items = List::new(items)
            .block(Block::default().borders(Borders::ALL).title("Entries"))
            .highlight_style(
                Style::default()
                    .bg(Color::LightGreen)
                    .add_modifier(Modifier::BOLD),
            );
        StatefulWidget::render(items, chunks[1], buf, state);
    }
}
//------------------------------------
/// Renders a btree node header as a field/value table under `title`.
struct HeaderWidget<'a> {
    title: String,
    hdr: &'a btree::NodeHeader,
}
impl<'a> Widget for HeaderWidget<'a> {
    fn render(self, area: Rect, buf: &mut Buffer) {
        let hdr = &self.hdr;
        let block = vec!["block".to_string(), format!("{}", hdr.block)];
        let kind = vec![
            "type".to_string(),
            match hdr.is_leaf {
                true => "LEAF".to_string(),
                false => "INTERNAL".to_string(),
            },
        ];
        let nr_entries = vec!["nr_entries".to_string(), format!("{}", hdr.nr_entries)];
        let max_entries = vec!["max_entries".to_string(), format!("{}", hdr.max_entries)];
        let value_size = vec!["value size".to_string(), format!("{}", hdr.value_size)];
        let table = Table::new(vec![
            Row::new(block),
            Row::new(kind),
            Row::new(nr_entries),
            Row::new(max_entries),
            Row::new(value_size),
        ])
        .header(Row::new(vec!["Field", "Value"]).style(Style::default().fg(Color::Yellow)))
        .block(Block::default().borders(Borders::ALL).title(self.title))
        .widths(&[Constraint::Length(20), Constraint::Length(60)])
        .style(Style::default().fg(Color::White))
        .column_spacing(1);
        Widget::render(table, area, buf);
    }
}
/// Read block `loc` through the io engine and unpack it as a btree node.
/// The detailed unpack error is discarded and replaced with a generic
/// anyhow error.
fn read_node<V: Unpack>(engine: &dyn IoEngine, loc: u64) -> Result<btree::Node<V>> {
    let b = engine.read(loc)?;
    // An empty path: we only care about this single node.
    let path = Vec::new();
    btree::unpack_node(&path, b.get_data(), true, false)
        .map_err(|_| anyhow!("couldn't unpack btree node"))
}
//------------------------------------
// For types that have the concept of adjacency, but not of a distance
// between values. For instance with a BlockTime there is no delta that
// will get between two values with different times.
trait Adjacent {
    /// True when `rhs` immediately follows `self`.
    fn adjacent(&self, rhs: &Self) -> bool;
}
impl Adjacent for u64 {
    /// Two keys are adjacent when `rhs` is exactly `self + 1`.
    /// `checked_add` avoids the debug-mode overflow panic the original
    /// `*self + 1` hit at `u64::MAX` (where the answer is simply false).
    fn adjacent(&self, rhs: &Self) -> bool {
        self.checked_add(1) == Some(*rhs)
    }
}
impl Adjacent for BlockTime {
    /// BlockTimes are adjacent when the times match and the blocks are
    /// consecutive.
    fn adjacent(&self, rhs: &Self) -> bool {
        self.time == rhs.time && self.block + 1 == rhs.block
    }
}
impl Adjacent for DeviceDetail {
    /// Device details never form runs.
    fn adjacent(&self, _rhs: &Self) -> bool {
        false
    }
}
impl<X: Adjacent, Y: Adjacent> Adjacent for (X, Y) {
    /// A pair is adjacent only when both components are adjacent.
    fn adjacent(&self, rhs: &Self) -> bool {
        self.0.adjacent(&rhs.0) && self.1.adjacent(&rhs.1)
    }
}
/// Compress a sequence into runs of adjacent values: each output pair
/// is (first value of the run, run length).  Preserves input order.
fn adjacent_runs<V: Adjacent + Copy>(ns: Vec<V>) -> Vec<(V, usize)> {
    let mut runs = Vec::new();
    let mut it = ns.into_iter();
    // An empty input produces no runs.
    let mut base = match it.next() {
        Some(v) => v,
        None => return runs,
    };
    let mut current = base;
    let mut len = 1;
    for v in it {
        if current.adjacent(&v) {
            // Extend the current run.
            current = v;
            len += 1;
        } else {
            // Close the run and start a new one at v.
            runs.push((base, len));
            base = v;
            current = v;
            len = 1;
        }
    }
    runs.push((base, len));
    runs
}
/// Pair keys with values and compress the pairs into adjacent runs.
fn mk_runs<V: Adjacent + Sized + Copy>(keys: &[u64], values: &[V]) -> Vec<((u64, V), usize)> {
    let pairs: Vec<(u64, V)> = keys
        .iter()
        .copied()
        .zip(values.iter().copied())
        .collect();
    adjacent_runs(pairs)
}
//------------------------------------
/// Stateful widget rendering a btree node: header table on top,
/// run-compressed entry list below.
struct NodeWidget<'a, V: Unpack + Adjacent + Clone> {
    title: String,
    node: &'a btree::Node<V>,
}
/// Render one run as a list item; runs longer than one entry show the
/// run length.
fn mk_item<'a, V: fmt::Display>(k: u64, v: &V, len: usize) -> ListItem<'a> {
    let text = if len > 1 {
        format!("{} x {} -> {}", k, len as u64, v)
    } else {
        format!("{} -> {}", k, v)
    };
    ListItem::new(Span::raw(text))
}
/// Build the entry list for a node, run-compressing everything except
/// the selected entry (which is kept as its own item so it can be
/// highlighted).  Returns the items and the selected item's index in
/// the compressed list.
///
/// NOTE(review): indexes `keys[selected]` directly — panics if
/// `selected >= keys.len()`; callers keep the selection within
/// nr_entries, but confirm a zero-entry node can't reach here.
fn mk_items<'a, V>(keys: &[u64], values: &[V], selected: usize) -> (Vec<ListItem<'a>>, usize)
where
    V: Adjacent + Copy + fmt::Display,
{
    let mut items = Vec::new();
    // Split both slices around the selected entry.
    let bkeys = &keys[0..selected];
    let key = keys[selected];
    let akeys = &keys[(selected + 1)..];
    let bvalues = &values[0..selected];
    let value = values[selected];
    let avalues = &values[(selected + 1)..];
    let bruns = mk_runs(bkeys, bvalues);
    let aruns = mk_runs(akeys, avalues);
    // The selected item lands right after the "before" runs.
    let i = bruns.len();
    for ((k, v), len) in bruns {
        items.push(mk_item(k, &v, len));
    }
    items.push(ListItem::new(Span::raw(format!("{} -> {}", key, value))));
    for ((k, v), len) in aruns {
        items.push(mk_item(k, &v, len));
    }
    (items, i)
}
impl<'a, V: Unpack + fmt::Display + Adjacent + Copy> StatefulWidget for NodeWidget<'a, V> {
    type State = ListState;
    fn render(self, area: Rect, buf: &mut Buffer, state: &mut ListState) {
        // Header table on top, entry list below.
        let chunks = Layout::default()
            .direction(Direction::Vertical)
            .constraints([Constraint::Min(10), Constraint::Percentage(80)].as_ref())
            .split(area);
        let hdr = HeaderWidget {
            title: self.title,
            hdr: self.node.get_header(),
        };
        hdr.render(chunks[0], buf);
        let items: Vec<ListItem>;
        let i: usize;
        // Panels always select Some(0) up front, so unwrap is safe for
        // those callers.
        let selected = state.selected().unwrap();
        // A fresh ListState is used because the highlight index refers
        // to the run-compressed list, not the raw entry index.
        let mut state = ListState::default();
        match self.node {
            btree::Node::Internal { keys, values, .. } => {
                let (items_, i_) = mk_items(keys, values, selected);
                items = items_;
                i = i_;
            }
            btree::Node::Leaf { keys, values, .. } => {
                let (items_, i_) = mk_items(keys, values, selected);
                items = items_;
                i = i_;
            }
        }
        state.select(Some(i));
        let items = List::new(items)
            .block(Block::default().borders(Borders::ALL).title("Entries"))
            .highlight_style(
                Style::default()
                    .bg(Color::LightGreen)
                    .add_modifier(Modifier::BOLD),
            );
        StatefulWidget::render(items, chunks[1], buf, &mut state);
    }
}
//------------------------------------
/// Navigation actions a panel can request from the main loop.
enum Action {
    /// Descend into a device-details node at the given block.
    PushDeviceDetail(u64),
    /// Descend into a top-level mapping node at the given block.
    PushTopLevel(u64),
    /// Descend into a bottom-level node (thin id, block).
    PushBottomLevel(u32, u64),
    /// Go back up one panel.
    PopPanel,
}
use Action::*;
// Shorthand for the concrete tui Frame type used throughout.
type Frame_<'a, 'b> = Frame<'a, TermionBackend<termion::raw::RawTerminal<std::io::StdoutLock<'b>>>>;
/// A navigable view in the explorer stack.
trait Panel {
    /// Draw this panel into `area`.
    fn render(&mut self, area: Rect, f: &mut Frame_);
    /// Handle a key press, optionally requesting a navigation action.
    fn input(&mut self, k: Key) -> Option<Action>;
    /// Given a child block from a node path, select it and return the
    /// action that descends into it (None if not found).
    fn path_action(&mut self, child: u64) -> Option<Action>;
}
//------------------------------------
/// Panel showing the superblock and its two entry points.
struct SBPanel {
    sb: Superblock,
    state: ListState,
}
impl SBPanel {
    fn new(sb: Superblock) -> SBPanel {
        // Start with the first entry selected.
        let mut state = ListState::default();
        state.select(Some(0));
        SBPanel { sb, state }
    }
}
impl Panel for SBPanel {
    fn render(&mut self, area: Rect, f: &mut Frame_) {
        let w = SBWidget { sb: &self.sb };
        f.render_stateful_widget(w, area, &mut self.state);
    }
    /// vi-style navigation: j/k move, l descends, h pops.
    fn input(&mut self, k: Key) -> Option<Action> {
        match k {
            Key::Char('j') | Key::Down => {
                // Only two entries: device tree and mapping tree.
                ls_next(&mut self.state, 2);
                None
            }
            Key::Char('k') | Key::Up => {
                ls_previous(&mut self.state);
                None
            }
            Key::Char('l') | Key::Right => {
                // Entry 0 = device details tree, entry 1 = mapping tree.
                if self.state.selected().unwrap() == 0 {
                    Some(PushDeviceDetail(self.sb.details_root))
                } else {
                    Some(PushTopLevel(self.sb.mapping_root))
                }
            }
            Key::Char('h') | Key::Left => Some(PopPanel),
            _ => None,
        }
    }
    fn path_action(&mut self, child: u64) -> Option<Action> {
        // From the superblock, a path child must be one of the two roots.
        if child == self.sb.mapping_root {
            Some(PushTopLevel(child))
        } else if child == self.sb.details_root {
            Some(PushDeviceDetail(child))
        } else {
            None
        }
    }
}
//------------------------------------
/// Panel showing one node of the device-details btree.
struct DeviceDetailPanel {
    node: btree::Node<DeviceDetail>,
    // Cached entry count, used to clamp the selection.
    nr_entries: usize,
    state: ListState,
}
impl DeviceDetailPanel {
    fn new(node: btree::Node<DeviceDetail>) -> DeviceDetailPanel {
        let nr_entries = node.get_header().nr_entries as usize;
        // Start with the first entry selected.
        let mut state = ListState::default();
        state.select(Some(0));
        DeviceDetailPanel {
            node,
            nr_entries,
            state,
        }
    }
}
impl Panel for DeviceDetailPanel {
    fn render(&mut self, area: Rect, f: &mut Frame_) {
        let w = NodeWidget {
            title: "Device Details".to_string(),
            node: &self.node,
        };
        f.render_stateful_widget(w, area, &mut self.state);
    }
    /// vi-style navigation: j/k move, l descends internal nodes, h pops.
    fn input(&mut self, k: Key) -> Option<Action> {
        match k {
            Key::Char('j') | Key::Down => {
                ls_next(&mut self.state, self.nr_entries);
                None
            }
            Key::Char('k') | Key::Up => {
                ls_previous(&mut self.state);
                None
            }
            Key::Char('l') | Key::Right => match &self.node {
                // Internal values are child block numbers.
                btree::Node::Internal { values, .. } => {
                    Some(PushDeviceDetail(values[self.state.selected().unwrap()]))
                }
                // Leaf entries are terminal; nothing to descend into.
                btree::Node::Leaf { .. } => None,
            },
            Key::Char('h') | Key::Left => Some(PopPanel),
            _ => None,
        }
    }
    fn path_action(&mut self, child: u64) -> Option<Action> {
        match &self.node {
            btree::Node::Internal { values, .. } => {
                // Find the child block, select it, and descend.
                for (i, v) in values.iter().enumerate() {
                    if *v == child {
                        self.state.select(Some(i));
                        return Some(PushDeviceDetail(child));
                    }
                }
                None
            }
            btree::Node::Leaf { .. } => None,
        }
    }
}
//------------------------------------
/// Panel showing one node of the top-level mapping btree
/// (thin-device id -> bottom-level tree root).
struct TopLevelPanel {
    node: btree::Node<u64>,
    // Cached entry count, used to clamp the selection.
    nr_entries: usize,
    state: ListState,
}
impl TopLevelPanel {
    fn new(node: btree::Node<u64>) -> TopLevelPanel {
        let nr_entries = node.get_header().nr_entries as usize;
        // Start with the first entry selected.
        let mut state = ListState::default();
        state.select(Some(0));
        TopLevelPanel {
            node,
            nr_entries,
            state,
        }
    }
}
impl Panel for TopLevelPanel {
    fn render(&mut self, area: Rect, f: &mut Frame_) {
        let w = NodeWidget {
            title: "Top Level".to_string(),
            node: &self.node,
        };
        f.render_stateful_widget(w, area, &mut self.state);
    }
    /// vi-style navigation: j/k move, l descends, h pops.
    fn input(&mut self, k: Key) -> Option<Action> {
        match k {
            Key::Char('j') | Key::Down => {
                ls_next(&mut self.state, self.nr_entries);
                None
            }
            Key::Char('k') | Key::Up => {
                ls_previous(&mut self.state);
                None
            }
            Key::Char('l') | Key::Right => match &self.node {
                // Internal values are child blocks within this tree.
                btree::Node::Internal { values, .. } => {
                    Some(PushTopLevel(values[self.state.selected().unwrap()]))
                }
                // Leaf entries map a thin-device id (key) to the root
                // block of that device's bottom-level tree (value).
                btree::Node::Leaf { values, keys, .. } => {
                    let index = self.state.selected().unwrap();
                    Some(PushBottomLevel(keys[index] as u32, values[index]))
                }
            },
            Key::Char('h') | Key::Left => Some(PopPanel),
            _ => None,
        }
    }
    fn path_action(&mut self, child: u64) -> Option<Action> {
        match &self.node {
            btree::Node::Internal { values, .. } => {
                // Find the child block, select it, and descend.
                for (i, v) in values.iter().enumerate() {
                    if *v == child {
                        self.state.select(Some(i));
                        return Some(PushTopLevel(child));
                    }
                }
                None
            }
            btree::Node::Leaf { keys, values, .. } => {
                // In a leaf, the child is a bottom-level tree root.
                for i in 0..values.len() {
                    if values[i] == child {
                        self.state.select(Some(i));
                        return Some(PushBottomLevel(keys[i] as u32, child));
                    }
                }
                None
            }
        }
    }
}
/// Panel showing one node of a thin device's bottom-level mapping tree.
struct BottomLevelPanel {
    // Thin device id, carried through so descents stay labelled.
    thin_id: u32,
    node: btree::Node<BlockTime>,
    // Cached entry count, used to clamp the selection.
    nr_entries: usize,
    state: ListState,
}
impl BottomLevelPanel {
    fn new(thin_id: u32, node: btree::Node<BlockTime>) -> BottomLevelPanel {
        let nr_entries = node.get_header().nr_entries as usize;
        // Start with the first entry selected.
        let mut state = ListState::default();
        state.select(Some(0));
        BottomLevelPanel {
            thin_id,
            node,
            nr_entries,
            state,
        }
    }
}
impl Panel for BottomLevelPanel {
    fn render(&mut self, area: Rect, f: &mut Frame_) {
        let w = NodeWidget {
            title: format!("Thin dev #{}", self.thin_id),
            node: &self.node,
        };
        f.render_stateful_widget(w, area, &mut self.state);
    }
    /// vi-style navigation: j/k move, l descends internal nodes, h pops.
    fn input(&mut self, k: Key) -> Option<Action> {
        match k {
            Key::Char('j') | Key::Down => {
                ls_next(&mut self.state, self.nr_entries);
                None
            }
            Key::Char('k') | Key::Up => {
                ls_previous(&mut self.state);
                None
            }
            Key::Char('l') | Key::Right => match &self.node {
                // Keep the same thin_id while descending.
                btree::Node::Internal { values, .. } => Some(PushBottomLevel(
                    self.thin_id,
                    values[self.state.selected().unwrap()],
                )),
                // Leaf entries (BlockTime) are terminal.
                _ => None,
            },
            Key::Char('h') | Key::Left => Some(PopPanel),
            _ => None,
        }
    }
    fn path_action(&mut self, child: u64) -> Option<Action> {
        match &self.node {
            btree::Node::Internal { values, .. } => {
                // Find the child block, select it, and descend.
                for (i, v) in values.iter().enumerate() {
                    if *v == child {
                        self.state.select(Some(i));
                        return Some(PushBottomLevel(self.thin_id, child));
                    }
                }
                None
            }
            btree::Node::Leaf { .. } => None,
        }
    }
}
//------------------------------------
/// Apply a navigation action to the panel stack: push reads the target
/// node and appends a panel; pop removes the top panel but never the
/// last one.
fn perform_action(
    panels: &mut Vec<Box<dyn Panel>>,
    engine: &dyn IoEngine,
    action: Action,
) -> Result<()> {
    match action {
        PushDeviceDetail(b) => {
            let node = read_node::<DeviceDetail>(engine, b)?;
            let panel = DeviceDetailPanel::new(node);
            panels.push(Box::new(panel));
        }
        PushTopLevel(b) => {
            let node = read_node::<u64>(engine, b)?;
            let panel = TopLevelPanel::new(node);
            panels.push(Box::new(panel));
        }
        PushBottomLevel(thin_id, b) => {
            let node = read_node::<BlockTime>(engine, b)?;
            let panel = BottomLevelPanel::new(thin_id, node);
            panels.push(Box::new(panel));
        }
        PopPanel => {
            // Always keep at least the superblock panel.
            if panels.len() > 1 {
                panels.pop();
            }
        }
    }
    Ok(())
}
/// Main interactive loop: open the metadata device, optionally replay a
/// node path to pre-populate the panel stack, then render and dispatch
/// keys until 'q'.
fn explore(path: &Path, node_path: Option<Vec<u64>>) -> Result<()> {
    let engine = SyncIoEngine::new(path, 1, false)?;
    let mut panels: Vec<Box<dyn Panel>> = Vec::new();
    if let Some(path) = node_path {
        eprintln!("using path: {:?}", path);
        // NOTE(review): panics on a user-supplied path that isn't rooted
        // at block 0 — an error return would be friendlier.
        assert_eq!(path[0], 0);
        let sb = read_superblock(&engine, path[0])?;
        panels.push(Box::new(SBPanel::new(sb)));
        // Replay the remaining blocks: each panel locates the child and
        // yields the action that descends into it.
        for b in &path[1..] {
            let action = panels.last_mut().unwrap().path_action(*b);
            if let Some(action) = action {
                perform_action(&mut panels, &engine, action)?;
            } else {
                return Err(anyhow!("bad node path: couldn't find child node {}", b));
            }
        }
    } else {
        let sb = read_superblock(&engine, 0)?;
        panels.push(Box::new(SBPanel::new(sb)));
    }
    let events = Events::new();
    let stdout = io::stdout();
    let mut stdout = stdout.lock().into_raw_mode()?;
    write!(stdout, "{}", termion::clear::All)?;
    let backend = TermionBackend::new(stdout);
    let mut terminal = Terminal::new(backend)?;
    'main: loop {
        // Show at most the top two panels side by side.
        let render_panels = |f: &mut Frame_| {
            let chunks = Layout::default()
                .direction(Direction::Horizontal)
                .constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
                .split(f.size());
            let mut base = panels.len();
            if base >= 2 {
                base -= 2;
            } else {
                base = 0;
            }
            for i in base..panels.len() {
                panels[i].render(chunks[i - base], f);
            }
        };
        terminal.draw(render_panels)?;
        // Keys go to the topmost panel; 'q' exits.
        let last = panels.len() - 1;
        let active_panel = &mut panels[last];
        if let Event::Input(key) = events.next()? {
            match key {
                Key::Char('q') => break 'main,
                _ => {
                    if let Some(action) = active_panel.input(key) {
                        perform_action(&mut panels, &engine, action)?;
                    }
                }
            }
        }
    }
    // The input thread exits after the (non-ignored) exit key is seen.
    events.input_handle.join().unwrap();
    Ok(())
}
//------------------------------------
/// Entry point: parse CLI arguments (clap v2 builder API) and launch
/// the explorer.
fn main() -> Result<()> {
    let parser = App::new("thin_explore")
        .version(thinp::version::tools_version())
        .about("A text user interface for examining thin metadata.")
        .arg(
            Arg::with_name("NODE_PATH")
                .help("Pass in a node path as output by thin_check")
                .short("p")
                .long("node-path")
                .value_name("NODE_PATH"),
        )
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to check")
                .required(true)
                .index(1),
        );
    let matches = parser.get_matches();
    // NOTE(review): unwrap panics on a malformed node path from the
    // user — consider reporting an error instead.
    let node_path = matches
        .value_of("NODE_PATH")
        .map(|text| btree::decode_node_path(text).unwrap());
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    explore(input_file, node_path)
}
//------------------------------------

373
src/cache/check.rs vendored
View File

@ -1,373 +0,0 @@
use anyhow::anyhow;
use std::collections::BTreeSet;
use std::path::Path;
use std::sync::{Arc, Mutex};
use crate::cache::hint::*;
use crate::cache::mapping::*;
use crate::cache::superblock::*;
use crate::commands::utils::*;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::array::{self, ArrayBlock, ArrayError};
use crate::pdata::array_walker::*;
use crate::pdata::bitset::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_checker::*;
use crate::pdata::space_map_common::*;
use crate::pdata::unpack::unpack;
use crate::report::*;
//------------------------------------------
// Upper bound on in-flight requests for the async io engine.
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
/// Bump the superblock location's reference count in the space map.
fn inc_superblock(sm: &ASpaceMap) -> anyhow::Result<()> {
    sm.lock().unwrap().inc(SUPERBLOCK_LOCATION, 1)?;
    Ok(())
}
//------------------------------------------
// Checking for format 1 metadata: the dirty bit lives in each mapping's
// flags.
mod format1 {
    use super::*;
    /// Validates format-1 mappings: flag sanity and origin-block
    /// uniqueness/bounds.
    pub struct MappingChecker {
        nr_origin_blocks: u64,
        // Origin blocks seen so far; guarded for concurrent visits.
        seen_oblocks: Mutex<BTreeSet<u64>>,
    }
    impl MappingChecker {
        /// `nr_origin_blocks` of None means the bound is unknown; fall
        /// back to the maximum the format allows.
        pub fn new(nr_origin_blocks: Option<u64>) -> MappingChecker {
            MappingChecker {
                nr_origin_blocks: if let Some(n) = nr_origin_blocks {
                    n
                } else {
                    MAX_ORIGIN_BLOCKS
                },
                seen_oblocks: Mutex::new(BTreeSet::new()),
            }
        }
        /// Only Valid and Dirty flags are legal, and Dirty requires Valid.
        fn check_flags(&self, m: &Mapping) -> array::Result<()> {
            if (m.flags & !(MappingFlags::Valid as u32 | MappingFlags::Dirty as u32)) != 0 {
                return Err(array::value_err(format!(
                    "unknown flags in mapping: {}",
                    m.flags
                )));
            }
            if !m.is_valid() && m.is_dirty() {
                return Err(array::value_err(
                    "dirty bit found on an unmapped block".to_string(),
                ));
            }
            Ok(())
        }
        /// An origin block must be unset when invalid, in range, and
        /// mapped at most once.
        fn check_oblock(&self, m: &Mapping) -> array::Result<()> {
            if !m.is_valid() {
                if m.oblock > 0 {
                    return Err(array::value_err("invalid block is mapped".to_string()));
                }
                return Ok(());
            }
            if m.oblock >= self.nr_origin_blocks {
                return Err(array::value_err(
                    "mapping beyond end of the origin device".to_string(),
                ));
            }
            let mut seen_oblocks = self.seen_oblocks.lock().unwrap();
            if seen_oblocks.contains(&m.oblock) {
                return Err(array::value_err("origin block already mapped".to_string()));
            }
            seen_oblocks.insert(m.oblock);
            Ok(())
        }
    }
    impl ArrayVisitor<Mapping> for MappingChecker {
        /// Check every mapping in the block, collecting all errors
        /// rather than stopping at the first.
        fn visit(&self, _index: u64, b: ArrayBlock<Mapping>) -> array::Result<()> {
            let mut errs: Vec<ArrayError> = Vec::new();
            for m in b.values.iter() {
                if let Err(e) = self.check_flags(m) {
                    errs.push(e);
                }
                if let Err(e) = self.check_oblock(m) {
                    errs.push(e);
                }
            }
            // FIXME: duplicate to BTreeWalker::build_aggregrate()
            match errs.len() {
                0 => Ok(()),
                1 => Err(errs[0].clone()),
                _ => Err(array::aggregate_error(errs)),
            }
        }
    }
}
// Checking for format 2 metadata: the dirty bits live in a separate
// bitset rather than in the mapping flags.
mod format2 {
    use super::*;
    /// Validates format-2 mappings against the on-disk dirty bitset.
    pub struct MappingChecker {
        nr_origin_blocks: u64,
        inner: Mutex<Inner>,
    }
    // Mutable state shared across visits, behind one lock.
    struct Inner {
        seen_oblocks: BTreeSet<u64>,
        dirty_bits: CheckedBitSet,
    }
    impl MappingChecker {
        /// `nr_origin_blocks` of None means the bound is unknown; fall
        /// back to the maximum the format allows.
        pub fn new(nr_origin_blocks: Option<u64>, dirty_bits: CheckedBitSet) -> MappingChecker {
            MappingChecker {
                nr_origin_blocks: if let Some(n) = nr_origin_blocks {
                    n
                } else {
                    MAX_ORIGIN_BLOCKS
                },
                inner: Mutex::new(Inner {
                    seen_oblocks: BTreeSet::new(),
                    dirty_bits,
                }),
            }
        }
        /// Only the Valid flag is legal in format 2; the dirty bit (if
        /// readable from the bitset) requires a valid mapping.
        fn check_flags(&self, m: &Mapping, dirty_bit: Option<bool>) -> array::Result<()> {
            if (m.flags & !(MappingFlags::Valid as u32)) != 0 {
                return Err(array::value_err(format!(
                    "unknown flags in mapping: {}",
                    m.flags
                )));
            }
            if !m.is_valid() && dirty_bit.is_some() && dirty_bit.unwrap() {
                return Err(array::value_err(
                    "dirty bit found on an unmapped block".to_string(),
                ));
            }
            Ok(())
        }
        /// An origin block must be unset when invalid, in range, and
        /// mapped at most once.
        fn check_oblock(&self, m: &Mapping, seen_oblocks: &mut BTreeSet<u64>) -> array::Result<()> {
            if !m.is_valid() {
                if m.oblock > 0 {
                    return Err(array::value_err("invalid mapped block".to_string()));
                }
                return Ok(());
            }
            if m.oblock >= self.nr_origin_blocks {
                return Err(array::value_err(
                    "mapping beyond end of the origin device".to_string(),
                ));
            }
            if seen_oblocks.contains(&m.oblock) {
                return Err(array::value_err("origin block already mapped".to_string()));
            }
            seen_oblocks.insert(m.oblock);
            Ok(())
        }
    }
    impl ArrayVisitor<Mapping> for MappingChecker {
        /// Check every mapping in the block, collecting all errors
        /// rather than stopping at the first.
        fn visit(&self, index: u64, b: ArrayBlock<Mapping>) -> array::Result<()> {
            let mut inner = self.inner.lock().unwrap();
            let mut errs: Vec<ArrayError> = Vec::new();
            // Cache-block numbers covered by this array block.
            let cbegin = index as u32 * b.header.max_entries;
            let cend = cbegin + b.header.nr_entries;
            for (m, cblock) in b.values.iter().zip(cbegin..cend) {
                if let Err(e) = self.check_flags(m, inner.dirty_bits.contains(cblock as usize)) {
                    errs.push(e);
                }
                if let Err(e) = self.check_oblock(m, &mut inner.seen_oblocks) {
                    errs.push(e);
                }
            }
            // FIXME: duplicate to BTreeWalker::build_aggregrate()
            match errs.len() {
                0 => Ok(()),
                1 => Err(errs[0].clone()),
                _ => Err(array::aggregate_error(errs)),
            }
        }
    }
}
//------------------------------------------
/// Placeholder visitor for the hint array; currently accepts anything.
struct HintChecker;
impl HintChecker {
    fn new() -> HintChecker {
        HintChecker
    }
}
impl ArrayVisitor<Hint> for HintChecker {
    fn visit(&self, _index: u64, _b: ArrayBlock<Hint>) -> array::Result<()> {
        // TODO: check hints
        Ok(())
    }
}
//------------------------------------------
// TODO: clear_needs_check, auto_repair
/// Options for `check`: which device to read, engine choice, and which
/// stages to skip.
pub struct CacheCheckOptions<'a> {
    pub dev: &'a Path,
    pub async_io: bool,
    // Stop after validating the superblock.
    pub sb_only: bool,
    pub skip_mappings: bool,
    pub skip_hints: bool,
    pub skip_discards: bool,
    pub ignore_non_fatal: bool,
    // When set, repair metadata space-map leaks.
    pub auto_repair: bool,
    pub report: Arc<Report>,
}
// TODO: thread pool
/// Shared pieces threaded through the check stages.
struct Context {
    report: Arc<Report>,
    engine: Arc<dyn IoEngine + Send + Sync>,
}
/// Build the checking Context: pick an async engine or a pool of sync
/// io threads, and carry the report through.
fn mk_context(opts: &CacheCheckOptions) -> anyhow::Result<Context> {
    // Assign via an if/else expression instead of the original
    // declare-then-mutate pattern.
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.dev, MAX_CONCURRENT_IO, false)?)
    } else {
        // At least 8 threads to keep a sync engine busy.
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.dev, nr_threads, false)?)
    };
    Ok(Context {
        report: opts.report.clone(),
        engine,
    })
}
/// Version 2 metadata must carry a dirty-bitset root.
fn check_superblock(sb: &Superblock) -> anyhow::Result<()> {
    // `is_none()` instead of `== None` (clippy idiom; avoids the
    // PartialEq requirement on the inner type).
    if sb.version >= 2 && sb.dirty_root.is_none() {
        return Err(anyhow!("dirty bitset not found"));
    }
    Ok(())
}
/// Validate cache metadata: superblock, mappings (per-format), hints,
/// discard bitset, and finally the metadata space map.
pub fn check(opts: CacheCheckOptions) -> anyhow::Result<()> {
    let ctx = mk_context(&opts)?;
    let engine = &ctx.engine;
    // In-core space map tracking expected reference counts.
    let metadata_sm = core_sm(engine.get_nr_blocks(), u8::MAX as u32);
    inc_superblock(&metadata_sm)?;
    let sb = match read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION) {
        Ok(sb) => sb,
        Err(e) => {
            // A common mistake is pointing the tool at an xml dump.
            check_not_xml(opts.dev, &opts.report);
            return Err(e);
        }
    };
    check_superblock(&sb)?;
    if opts.sb_only {
        return Ok(());
    }
    // The discard bitset is optional and could be updated during device suspension.
    // A restored metadata therefore comes with a zero-sized discard bitset,
    // and also zeroed discard_block_size and discard_nr_blocks.
    let nr_origin_blocks;
    if sb.flags.clean_shutdown && sb.discard_block_size > 0 && sb.discard_nr_blocks > 0 {
        let origin_sectors = sb.discard_block_size * sb.discard_nr_blocks;
        nr_origin_blocks = Some(origin_sectors / sb.data_block_size as u64);
    } else {
        nr_origin_blocks = None;
    }
    // TODO: factor out into check_mappings()
    if !opts.skip_mappings {
        let w =
            ArrayWalker::new_with_sm(engine.clone(), metadata_sm.clone(), opts.ignore_non_fatal)?;
        match sb.version {
            1 => {
                // Format 1: dirty bit in the mapping flags.
                let mut c = format1::MappingChecker::new(nr_origin_blocks);
                if let Err(e) = w.walk(&mut c, sb.mapping_root) {
                    ctx.report.fatal(&format!("{}", e));
                }
            }
            2 => {
                // Format 2: dirty bits in a separate bitset; read it
                // first.  dirty_root presence was verified above.
                let (dirty_bits, err) = read_bitset_with_sm(
                    engine.clone(),
                    sb.dirty_root.unwrap(),
                    sb.cache_blocks as usize,
                    metadata_sm.clone(),
                    opts.ignore_non_fatal,
                )?;
                if err.is_some() {
                    ctx.report.fatal(&format!("{}", err.unwrap()));
                }
                let mut c = format2::MappingChecker::new(nr_origin_blocks, dirty_bits);
                if let Err(e) = w.walk(&mut c, sb.mapping_root) {
                    ctx.report.fatal(&format!("{}", e));
                }
            }
            v => {
                return Err(anyhow!("unsupported metadata version {}", v));
            }
        }
    }
    if !opts.skip_hints && sb.hint_root != 0 && sb.policy_hint_size != 0 {
        if sb.policy_hint_size != 4 {
            return Err(anyhow!("cache_check only supports policy hint size of 4"));
        }
        let w =
            ArrayWalker::new_with_sm(engine.clone(), metadata_sm.clone(), opts.ignore_non_fatal)?;
        let mut c = HintChecker::new();
        if let Err(e) = w.walk(&mut c, sb.hint_root) {
            ctx.report.fatal(&format!("{}", e));
        }
    }
    // The discard bitset might not be available if the cache has never been suspended,
    // e.g., a crash of freshly created cache.
    if !opts.skip_discards && sb.discard_root != 0 {
        // Walked only to account for its blocks in the space map and to
        // surface read errors; the bits themselves aren't checked.
        let (_discard_bits, err) = read_bitset_with_sm(
            engine.clone(),
            sb.discard_root,
            sb.discard_nr_blocks as usize,
            metadata_sm.clone(),
            opts.ignore_non_fatal,
        )?;
        if err.is_some() {
            ctx.report.fatal(&format!("{}", err.unwrap()));
        }
    }
    // Compare the on-disk metadata space map against the counts we
    // accumulated while walking.
    let root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
    let metadata_leaks = check_metadata_space_map(
        engine.clone(),
        ctx.report.clone(),
        root,
        metadata_sm.clone(),
        opts.ignore_non_fatal,
    )?;
    if opts.auto_repair && !metadata_leaks.is_empty() {
        ctx.report.info("Repairing metadata leaks.");
        repair_space_map(ctx.engine.clone(), metadata_leaks, metadata_sm.clone())?;
    }
    Ok(())
}
//------------------------------------------

301
src/cache/dump.rs vendored
View File

@ -1,301 +0,0 @@
use anyhow::anyhow;
use fixedbitset::FixedBitSet;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::path::Path;
use std::sync::{Arc, Mutex};
use crate::cache::hint::Hint;
use crate::cache::ir::{self, MetadataVisitor};
use crate::cache::mapping::Mapping;
use crate::cache::superblock::*;
use crate::cache::xml;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::array::{self, ArrayBlock};
use crate::pdata::array_walker::*;
use crate::pdata::bitset::{read_bitset, CheckedBitSet};
//------------------------------------------
// Upper bound on in-flight requests for the async io engine.
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
// Dumping for format 1 metadata: the dirty bit is read from each
// mapping's flags.
mod format1 {
    use super::*;
    // Visitor state behind one lock: the output visitor and the set of
    // cache blocks with valid mappings (used later to filter hints).
    struct Inner<'a> {
        visitor: &'a mut dyn MetadataVisitor,
        valid_mappings: FixedBitSet,
    }
    /// Emits every valid mapping to the metadata visitor.
    pub struct MappingEmitter<'a> {
        inner: Mutex<Inner<'a>>,
    }
    impl<'a> MappingEmitter<'a> {
        pub fn new(nr_entries: usize, visitor: &'a mut dyn MetadataVisitor) -> MappingEmitter<'a> {
            MappingEmitter {
                inner: Mutex::new(Inner {
                    visitor,
                    valid_mappings: FixedBitSet::with_capacity(nr_entries),
                }),
            }
        }
        /// Consume the emitter, yielding the valid-mapping bitset.
        pub fn get_valid(self) -> FixedBitSet {
            let inner = self.inner.into_inner().unwrap();
            inner.valid_mappings
        }
    }
    impl<'a> ArrayVisitor<Mapping> for MappingEmitter<'a> {
        fn visit(&self, index: u64, b: ArrayBlock<Mapping>) -> array::Result<()> {
            // Cache-block numbers covered by this array block.
            let cbegin = index as u32 * b.header.max_entries;
            let cend = cbegin + b.header.nr_entries;
            for (map, cblock) in b.values.iter().zip(cbegin..cend) {
                if !map.is_valid() {
                    continue;
                }
                let m = ir::Map {
                    cblock,
                    oblock: map.oblock,
                    dirty: map.is_dirty(),
                };
                let mut inner = self.inner.lock().unwrap();
                inner.valid_mappings.set(cblock as usize, true);
                inner
                    .visitor
                    .mapping(&m)
                    .map_err(|e| array::value_err(format!("{}", e)))?;
            }
            Ok(())
        }
    }
}
//------------------------------------------
// Dumping for format 2 metadata: the dirty bits come from a separate
// bitset read before the walk.
mod format2 {
    use super::*;
    //-------------------
    // Mapping visitor
    // Visitor state behind one lock: output visitor, the pre-read dirty
    // bitset, and the set of cache blocks with valid mappings.
    struct Inner<'a> {
        visitor: &'a mut dyn MetadataVisitor,
        dirty_bits: CheckedBitSet,
        valid_mappings: FixedBitSet,
    }
    /// Emits every valid mapping to the metadata visitor.
    pub struct MappingEmitter<'a> {
        inner: Mutex<Inner<'a>>,
    }
    impl<'a> MappingEmitter<'a> {
        pub fn new(
            nr_entries: usize,
            dirty_bits: CheckedBitSet,
            visitor: &'a mut dyn MetadataVisitor,
        ) -> MappingEmitter<'a> {
            MappingEmitter {
                inner: Mutex::new(Inner {
                    visitor,
                    dirty_bits,
                    valid_mappings: FixedBitSet::with_capacity(nr_entries),
                }),
            }
        }
        /// Consume the emitter, yielding the valid-mapping bitset.
        pub fn get_valid(self) -> FixedBitSet {
            let inner = self.inner.into_inner().unwrap();
            inner.valid_mappings
        }
    }
    impl<'a> ArrayVisitor<Mapping> for MappingEmitter<'a> {
        fn visit(&self, index: u64, b: ArrayBlock<Mapping>) -> array::Result<()> {
            // Cache-block numbers covered by this array block.
            let cbegin = index as u32 * b.header.max_entries;
            let cend = cbegin + b.header.nr_entries;
            for (map, cblock) in b.values.iter().zip(cbegin..cend) {
                if !map.is_valid() {
                    continue;
                }
                let mut inner = self.inner.lock().unwrap();
                let dirty;
                if let Some(bit) = inner.dirty_bits.contains(cblock as usize) {
                    dirty = bit;
                } else {
                    // default to dirty if the bitset is damaged
                    dirty = true;
                }
                let m = ir::Map {
                    cblock,
                    oblock: map.oblock,
                    dirty,
                };
                inner.valid_mappings.set(cblock as usize, true);
                inner
                    .visitor
                    .mapping(&m)
                    .map_err(|e| array::value_err(format!("{}", e)))?;
            }
            Ok(())
        }
    }
}
//-----------------------------------------
/// Emits hints, but only for cache blocks that had a valid mapping.
struct HintEmitter<'a> {
    emitter: Mutex<&'a mut dyn MetadataVisitor>,
    valid_mappings: FixedBitSet,
}
impl<'a> HintEmitter<'a> {
    pub fn new(emitter: &'a mut dyn MetadataVisitor, valid_mappings: FixedBitSet) -> HintEmitter {
        HintEmitter {
            emitter: Mutex::new(emitter),
            valid_mappings,
        }
    }
}
impl<'a> ArrayVisitor<Hint> for HintEmitter<'a> {
    fn visit(&self, index: u64, b: ArrayBlock<Hint>) -> array::Result<()> {
        // Cache-block numbers covered by this array block.
        let cbegin = index as u32 * b.header.max_entries;
        let cend = cbegin + b.header.nr_entries;
        for (hint, cblock) in b.values.iter().zip(cbegin..cend) {
            // Skip hints for unmapped cache blocks.
            if !self.valid_mappings.contains(cblock as usize) {
                continue;
            }
            let h = ir::Hint {
                cblock,
                data: hint.hint.to_vec(),
            };
            self.emitter
                .lock()
                .unwrap()
                .hint(&h)
                .map_err(|e| array::value_err(format!("{}", e)))?;
        }
        Ok(())
    }
}
//------------------------------------------
/// Options for `dump`: input device, optional output file (stdout when
/// None), engine choice, and whether to continue past damage.
pub struct CacheDumpOptions<'a> {
    pub input: &'a Path,
    pub output: Option<&'a Path>,
    pub async_io: bool,
    pub repair: bool,
}
/// Shared pieces threaded through the dump stages.
struct Context {
    engine: Arc<dyn IoEngine + Send + Sync>,
}
/// Build the dump Context: pick an async engine or a pool of sync io
/// threads.
fn mk_context(opts: &CacheDumpOptions) -> anyhow::Result<Context> {
    // Assign via an if/else expression instead of the original
    // declare-then-mutate pattern.
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.input, MAX_CONCURRENT_IO, false)?)
    } else {
        // At least 8 threads to keep a sync engine busy.
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.input, nr_threads, false)?)
    };
    Ok(Context { engine })
}
/// Walk the cache metadata and replay it through a MetadataVisitor:
/// superblock, then mappings (per-format), then hints.
/// With `repair` set, bitset read errors are tolerated.
pub fn dump_metadata(
    engine: Arc<dyn IoEngine + Send + Sync>,
    out: &mut dyn MetadataVisitor,
    sb: &Superblock,
    repair: bool,
) -> anyhow::Result<()> {
    let xml_sb = ir::Superblock {
        // The uuid is not decoded; emitted empty.
        uuid: "".to_string(),
        block_size: sb.data_block_size,
        nr_cache_blocks: sb.cache_blocks,
        policy: std::str::from_utf8(&sb.policy_name)?.to_string(),
        hint_width: sb.policy_hint_size,
    };
    out.superblock_b(&xml_sb)?;
    out.mappings_b()?;
    // Emit the mappings, collecting which cache blocks were valid so
    // hints can be filtered below.
    let valid_mappings = match sb.version {
        1 => {
            let w = ArrayWalker::new(engine.clone(), repair);
            let mut emitter = format1::MappingEmitter::new(sb.cache_blocks as usize, out);
            w.walk(&mut emitter, sb.mapping_root)?;
            emitter.get_valid()
        }
        2 => {
            // We need to walk the dirty bitset first.
            let dirty_bits;
            if let Some(root) = sb.dirty_root {
                let (bits, errs) =
                    read_bitset(engine.clone(), root, sb.cache_blocks as usize, repair);
                if errs.is_some() && !repair {
                    return Err(anyhow!("errors in bitset {}", errs.unwrap()));
                }
                dirty_bits = bits;
            } else {
                // FIXME: is there a way this can legally happen? eg,
                // a crash of a freshly created cache?
                return Err(anyhow!("format 2 selected, but no dirty bitset present"));
            }
            let w = ArrayWalker::new(engine.clone(), repair);
            let mut emitter =
                format2::MappingEmitter::new(sb.cache_blocks as usize, dirty_bits, out);
            w.walk(&mut emitter, sb.mapping_root)?;
            emitter.get_valid()
        }
        v => {
            return Err(anyhow!("unsupported metadata version: {}", v));
        }
    };
    out.mappings_e()?;
    out.hints_b()?;
    {
        let w = ArrayWalker::new(engine.clone(), repair);
        let mut emitter = HintEmitter::new(out, valid_mappings);
        w.walk(&mut emitter, sb.hint_root)?;
    }
    out.hints_e()?;
    out.superblock_e()?;
    out.eof()?;
    Ok(())
}
/// Dump cache metadata as xml to the chosen output (stdout by default).
pub fn dump(opts: CacheDumpOptions) -> anyhow::Result<()> {
    let ctx = mk_context(&opts)?;
    let sb = read_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION)?;
    // `if let` replaces the original is_some()/unwrap() pair on the
    // same Option, and the writer is assigned as an expression.
    let writer: Box<dyn Write> = if let Some(output) = opts.output {
        Box::new(BufWriter::new(File::create(output)?))
    } else {
        Box::new(BufWriter::new(std::io::stdout()))
    };
    let mut out = xml::XmlWriter::new(writer);
    dump_metadata(ctx.engine, &mut out, &sb, opts.repair)
}
//------------------------------------------

46
src/cache/hint.rs vendored
View File

@ -1,46 +0,0 @@
use anyhow::Result;
use byteorder::WriteBytesExt;
use nom::IResult;
use std::convert::TryInto;
use crate::pdata::unpack::*;
//------------------------------------------
/// A fixed-width (4-byte) per-cache-block policy hint, as stored on disk.
#[derive(Clone, Copy)]
pub struct Hint {
    // Raw hint bytes; their interpretation is up to the cache policy.
    pub hint: [u8; 4],
}
impl Unpack for Hint {
fn disk_size() -> u32 {
4
}
fn unpack(i: &[u8]) -> IResult<&[u8], Hint> {
let size = 4;
Ok((
&i[size..],
Hint {
hint: i[0..size].try_into().unwrap(),
},
))
}
}
impl Pack for Hint {
    /// Serialises the hint by writing its 4 raw bytes verbatim.
    fn pack<W: WriteBytesExt>(&self, data: &mut W) -> Result<()> {
        // WriteBytesExt implies io::Write, so the whole array can be
        // written in one call instead of a byte-at-a-time loop.
        data.write_all(&self.hint)?;
        Ok(())
    }
}
impl Default for Hint {
fn default() -> Self {
Hint { hint: [0; 4] }
}
}
//------------------------------------------

60
src/cache/ir.rs vendored
View File

@ -1,60 +0,0 @@
use anyhow::Result;
//------------------------------------------
/// Superblock fields as they appear in the XML representation.
#[derive(Clone)]
pub struct Superblock {
    pub uuid: String,
    pub block_size: u32,
    pub nr_cache_blocks: u32,
    pub policy: String,
    pub hint_width: u32,
}

/// One cache-block -> origin-block mapping from the XML.
#[derive(Clone)]
pub struct Map {
    pub cblock: u32,
    pub oblock: u64,
    pub dirty: bool,
}

/// A per-cache-block policy hint from the XML.
#[derive(Clone)]
pub struct Hint {
    pub cblock: u32,
    // Raw hint bytes, base64-decoded from the XML attribute.
    pub data: Vec<u8>,
}

/// A discarded origin-block range from the XML.
#[derive(Clone)]
pub struct Discard {
    pub begin: u64,
    pub end: u64,
}
//------------------------------------------
/// Returned by each visitor callback to continue or abort the walk.
#[derive(Clone)]
pub enum Visit {
    Continue,
    Stop,
}

/// Callbacks invoked while walking the intermediate representation of
/// cache metadata (e.g. while parsing XML or dumping a device).
///
/// `*_b`/`*_e` pairs bracket a section; the singular methods are called
/// once per element inside it.
pub trait MetadataVisitor {
    fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit>;
    fn superblock_e(&mut self) -> Result<Visit>;

    fn mappings_b(&mut self) -> Result<Visit>;
    fn mappings_e(&mut self) -> Result<Visit>;
    fn mapping(&mut self, m: &Map) -> Result<Visit>;

    fn hints_b(&mut self) -> Result<Visit>;
    fn hints_e(&mut self) -> Result<Visit>;
    fn hint(&mut self, h: &Hint) -> Result<Visit>;

    fn discards_b(&mut self) -> Result<Visit>;
    fn discards_e(&mut self) -> Result<Visit>;
    fn discard(&mut self, d: &Discard) -> Result<Visit>;

    fn eof(&mut self) -> Result<Visit>;
}
//------------------------------------------

72
src/cache/mapping.rs vendored
View File

@ -1,72 +0,0 @@
use anyhow::Result;
use byteorder::WriteBytesExt;
use nom::number::complete::*;
use nom::IResult;
use crate::pdata::unpack::*;
//------------------------------------------
// Largest origin block number that fits in the 48 bits left after the
// 16 flag bits in the packed on-disk mapping word.
pub const MAX_ORIGIN_BLOCKS: u64 = 1 << 48;
// The low 16 bits of the packed mapping word hold the flags.
const FLAGS_MASK: u64 = (1 << 16) - 1;

//------------------------------------------

/// Flag bits stored in the low 16 bits of a packed mapping.
pub enum MappingFlags {
    Valid = 1,
    Dirty = 2,
}
/// An unpacked cache mapping: which origin block a cache block holds,
/// plus its `MappingFlags` bits.
#[derive(Clone, Copy)]
pub struct Mapping {
    pub oblock: u64,
    pub flags: u32,
}
impl Mapping {
pub fn is_valid(&self) -> bool {
(self.flags & MappingFlags::Valid as u32) != 0
}
pub fn is_dirty(&self) -> bool {
(self.flags & MappingFlags::Dirty as u32) != 0
}
}
impl Unpack for Mapping {
    fn disk_size() -> u32 {
        8
    }

    /// Splits the single on-disk u64 into origin block (high 48 bits)
    /// and flags (low 16 bits).
    fn unpack(i: &[u8]) -> IResult<&[u8], Mapping> {
        let (rest, word) = le_u64(i)?;
        let mapping = Mapping {
            oblock: word >> 16,
            flags: (word & FLAGS_MASK) as u32,
        };
        Ok((rest, mapping))
    }
}
impl Pack for Mapping {
    /// Packs origin block and flags back into the single on-disk u64.
    fn pack<W: WriteBytesExt>(&self, data: &mut W) -> Result<()> {
        let word = (self.oblock << 16) | u64::from(self.flags);
        word.pack(data)
    }
}
impl Default for Mapping {
fn default() -> Self {
Mapping {
oblock: 0,
flags: 0,
}
}
}
//------------------------------------------

View File

@ -1,19 +0,0 @@
use anyhow::Result;
/// Inputs to the metadata-size estimate.
pub struct CacheMetadataSizeOptions {
    // Number of cache blocks on the fast device.
    pub nr_blocks: u64,
    pub max_hint_width: u32, // bytes
}
/// Estimates, in 512-byte sectors, the metadata device size needed for a
/// cache with the given block count and hint width.
pub fn metadata_size(opts: &CacheMetadataSizeOptions) -> Result<u64> {
    const SECTOR_SHIFT: u64 = 9; // 512 bytes per sector
    const BYTES_PER_BLOCK_SHIFT: u64 = 4; // 16 bytes for key and value
    const TRANSACTION_OVERHEAD: u64 = 8192; // in sectors; 4 MB
    const HINT_OVERHEAD_PER_BLOCK: u64 = 8; // 8 bytes for the key

    let mapping_sectors = (opts.nr_blocks << BYTES_PER_BLOCK_SHIFT) >> SECTOR_SHIFT;
    let hint_bytes = opts.nr_blocks * (u64::from(opts.max_hint_width) + HINT_OVERHEAD_PER_BLOCK);
    let hint_sectors = hint_bytes >> SECTOR_SHIFT;
    Ok(TRANSACTION_OVERHEAD + mapping_sectors + hint_sectors)
}

10
src/cache/mod.rs vendored
View File

@ -1,10 +0,0 @@
//! Tools for checking, dumping, repairing and restoring dm-cache metadata.

pub mod check;
pub mod dump;
pub mod hint;
pub mod ir;
pub mod mapping;
pub mod metadata_size;
pub mod repair;
pub mod restore;
pub mod superblock;
pub mod xml;

68
src/cache/repair.rs vendored
View File

@ -1,68 +0,0 @@
use anyhow::Result;
use std::path::Path;
use std::sync::Arc;
use crate::cache::dump::*;
use crate::cache::restore::*;
use crate::cache::superblock::*;
use crate::io_engine::*;
use crate::pdata::space_map_metadata::*;
use crate::report::*;
use crate::write_batcher::*;
//------------------------------------------
/// Options for the cache_repair operation: read damaged metadata from
/// `input`, write repaired metadata to `output`.
pub struct CacheRepairOptions<'a> {
    pub input: &'a Path,
    pub output: &'a Path,
    pub async_io: bool,
    pub report: Arc<Report>,
}
/// Per-run state: the report sink plus the two io engines (source and
/// destination devices).
struct Context {
    _report: Arc<Report>,
    engine_in: Arc<dyn IoEngine + Send + Sync>,
    engine_out: Arc<dyn IoEngine + Send + Sync>,
}

// Queue depth used when the async (io_uring) engine is selected.
const MAX_CONCURRENT_IO: u32 = 1024;
/// Builds the input/output io engine pair for a repair run.
///
/// Uses io_uring engines when `opts.async_io` is set, otherwise
/// thread-pooled synchronous engines.  The input engine is read-only;
/// the output engine is writable.
fn new_context(opts: &CacheRepairOptions) -> Result<Context> {
    // Bind both engines via one if-expression instead of the
    // declare-then-assign pattern.
    let (engine_in, engine_out) = if opts.async_io {
        (
            Arc::new(AsyncIoEngine::new(opts.input, MAX_CONCURRENT_IO, false)?)
                as Arc<dyn IoEngine + Send + Sync>,
            Arc::new(AsyncIoEngine::new(opts.output, MAX_CONCURRENT_IO, true)?)
                as Arc<dyn IoEngine + Send + Sync>,
        )
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        (
            Arc::new(SyncIoEngine::new(opts.input, nr_threads, false)?)
                as Arc<dyn IoEngine + Send + Sync>,
            Arc::new(SyncIoEngine::new(opts.output, nr_threads, true)?)
                as Arc<dyn IoEngine + Send + Sync>,
        )
    };

    Ok(Context {
        _report: opts.report.clone(),
        engine_in,
        engine_out,
    })
}
//------------------------------------------
/// Repairs cache metadata by dumping the (possibly damaged) source device
/// straight into a `Restorer` writing to the destination device.
pub fn repair(opts: CacheRepairOptions) -> Result<()> {
    let ctx = new_context(&opts)?;

    let sb = read_superblock(ctx.engine_in.as_ref(), SUPERBLOCK_LOCATION)?;

    let sm = core_metadata_sm(ctx.engine_out.get_nr_blocks(), u32::MAX);
    // `sm` isn't used again, so move it into the batcher rather than cloning.
    let batch_size = ctx.engine_out.get_batch_size();
    let mut w = WriteBatcher::new(ctx.engine_out.clone(), sm, batch_size);
    let mut restorer = Restorer::new(&mut w);

    dump_metadata(ctx.engine_in, &mut restorer, &sb, true)
}
//------------------------------------------

313
src/cache/restore.rs vendored
View File

@ -1,313 +0,0 @@
use anyhow::{anyhow, Result};
use std::convert::TryInto;
use std::fs::OpenOptions;
use std::path::Path;
use std::sync::Arc;
use crate::cache::hint::Hint;
use crate::cache::ir::{self, MetadataVisitor, Visit};
use crate::cache::mapping::{Mapping, MappingFlags};
use crate::cache::superblock::*;
use crate::cache::xml;
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::array_builder::*;
use crate::pdata::space_map_common::pack_root;
use crate::pdata::space_map_metadata::*;
use crate::report::*;
use crate::write_batcher::*;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
/// Options for the cache_restore operation: read XML from `input`, write
/// binary metadata to `output`.
pub struct CacheRestoreOptions<'a> {
    pub input: &'a Path,
    pub output: &'a Path,
    pub async_io: bool,
    pub report: Arc<Report>,
}
/// Per-run state: the report sink and the output io engine.
struct Context {
    _report: Arc<Report>,
    engine: Arc<dyn IoEngine + Send + Sync>,
}
/// Builds the writable io engine for the output device: io_uring when
/// `opts.async_io` is set, a thread-pooled synchronous engine otherwise.
fn mk_context(opts: &CacheRestoreOptions) -> anyhow::Result<Context> {
    // Bind via an if-expression instead of declare-then-assign.
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.output, MAX_CONCURRENT_IO, true)?)
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.output, nr_threads, true)?)
    };

    Ok(Context {
        _report: opts.report.clone(),
        engine,
    })
}
//------------------------------------------
/// Which part of the source document the restorer is currently inside;
/// used to reject out-of-order or duplicated sections.
#[derive(PartialEq)]
enum Section {
    None,
    Superblock,
    Mappings,
    Hints,
    Finalized,
}
/// A `MetadataVisitor` that rebuilds binary cache metadata from the
/// intermediate representation, writing through a `WriteBatcher`.
pub struct Restorer<'a> {
    write_batcher: &'a mut WriteBatcher,
    // Source superblock, captured at superblock_b and consumed by finalize.
    sb: Option<ir::Superblock>,
    // Array builders, present while their section is being filled.
    mapping_builder: Option<ArrayBuilder<Mapping>>,
    dirty_builder: Option<ArrayBuilder<u64>>,
    hint_builder: Option<ArrayBuilder<Hint>>,
    // Roots of the completed arrays, filled in by finalize.
    mapping_root: Option<u64>,
    dirty_root: Option<u64>,
    hint_root: Option<u64>,
    discard_root: Option<u64>,
    dirty_bits: (u32, u64), // (index in u64 array, value)
    in_section: Section,
}
impl<'a> Restorer<'a> {
    /// Creates a restorer that allocates and writes through `w`.
    pub fn new(w: &'a mut WriteBatcher) -> Restorer<'a> {
        Restorer {
            write_batcher: w,
            sb: None,
            mapping_builder: None,
            dirty_builder: None,
            hint_builder: None,
            mapping_root: None,
            dirty_root: None,
            hint_root: None,
            discard_root: None,
            dirty_bits: (0, 0),
            in_section: Section::None,
        }
    }

    /// Completes the mapping/dirty/hint arrays, builds the metadata space
    /// map, and writes the final superblock.
    ///
    /// Called from `superblock_e`; moves the restorer into
    /// `Section::Finalized` on success.
    fn finalize(&mut self) -> Result<()> {
        let src_sb;
        if let Some(sb) = self.sb.take() {
            src_sb = sb;
        } else {
            return Err(anyhow!("not in superblock"));
        }

        // complete the mapping array
        if let Some(builder) = self.mapping_builder.take() {
            self.mapping_root = Some(builder.complete(self.write_batcher)?);
        }

        // complete the dirty array
        if let Some(mut builder) = self.dirty_builder.take() {
            // push the buffered trailing bits
            builder.push_value(
                self.write_batcher,
                self.dirty_bits.0 as u64,
                self.dirty_bits.1,
            )?;

            self.dirty_root = Some(builder.complete(self.write_batcher)?);
        }

        // complete the hint array
        if let Some(builder) = self.hint_builder.take() {
            self.hint_root = Some(builder.complete(self.write_batcher)?);
        }

        // build metadata space map
        let metadata_sm_root = build_metadata_sm(self.write_batcher)?;

        let mapping_root = self.mapping_root.as_ref().unwrap();
        let hint_root = self.hint_root.as_ref().unwrap();
        let discard_root = self.discard_root.as_ref().unwrap();
        let sb = Superblock {
            flags: SuperblockFlags {
                clean_shutdown: true,
                needs_check: false,
            },
            block: SUPERBLOCK_LOCATION,
            version: 2,
            policy_name: src_sb.policy.as_bytes().to_vec(),
            policy_version: vec![2, 0, 0],
            policy_hint_size: src_sb.hint_width,
            metadata_sm_root,
            mapping_root: *mapping_root,
            dirty_root: self.dirty_root, // dirty_root is optional
            hint_root: *hint_root,
            discard_root: *discard_root,
            discard_block_size: 0,
            discard_nr_blocks: 0,
            data_block_size: src_sb.block_size,
            cache_blocks: src_sb.nr_cache_blocks,
            compat_flags: 0,
            compat_ro_flags: 0,
            incompat_flags: 0,
            read_hits: 0,
            // BUG FIX: freshly restored metadata starts with zeroed
            // statistics; this previously wrote 9.
            read_misses: 0,
            write_hits: 0,
            write_misses: 0,
        };
        write_superblock(self.write_batcher.engine.as_ref(), SUPERBLOCK_LOCATION, &sb)?;

        self.in_section = Section::Finalized;
        Ok(())
    }
}
impl<'a> MetadataVisitor for Restorer<'a> {
    /// Captures the source superblock, claims block 0, and sets up the
    /// array builders for mappings, dirty bits and hints.
    fn superblock_b(&mut self, sb: &ir::Superblock) -> Result<Visit> {
        if self.in_section != Section::None {
            return Err(anyhow!("duplicated superblock"));
        }
        self.sb = Some(sb.clone());
        // The first allocation must land on block 0, where the
        // superblock lives.
        let b = self.write_batcher.alloc()?;
        if b.loc != SUPERBLOCK_LOCATION {
            return Err(anyhow!("superblock was occupied"));
        }
        self.mapping_builder = Some(ArrayBuilder::new(sb.nr_cache_blocks as u64));
        self.dirty_builder = Some(ArrayBuilder::new(div_up(sb.nr_cache_blocks as u64, 64)));
        self.hint_builder = Some(ArrayBuilder::new(sb.nr_cache_blocks as u64));
        let discard_builder = ArrayBuilder::<u64>::new(0); // discard bitset is optional
        self.discard_root = Some(discard_builder.complete(self.write_batcher)?);
        self.in_section = Section::Superblock;
        Ok(Visit::Continue)
    }

    fn superblock_e(&mut self) -> Result<Visit> {
        self.finalize()?;
        Ok(Visit::Continue)
    }

    fn mappings_b(&mut self) -> Result<Visit> {
        if self.in_section != Section::Superblock {
            return Err(anyhow!("not in superblock"));
        }
        self.in_section = Section::Mappings;
        Ok(Visit::Continue)
    }

    fn mappings_e(&mut self) -> Result<Visit> {
        if self.in_section != Section::Mappings {
            return Err(anyhow!("not in mappings"));
        }
        self.in_section = Section::Superblock;
        Ok(Visit::Continue)
    }

    /// Appends one mapping, buffering dirty bits 64 at a time before
    /// pushing each completed word into the dirty bitset builder.
    fn mapping(&mut self, m: &ir::Map) -> Result<Visit> {
        let map = Mapping {
            oblock: m.oblock,
            flags: MappingFlags::Valid as u32,
        };
        let mapping_builder = self.mapping_builder.as_mut().unwrap();
        mapping_builder.push_value(self.write_batcher, m.cblock as u64, map)?;

        if m.dirty {
            let index = m.cblock >> 6;
            let mask = 1 << (m.cblock & 63);
            if index == self.dirty_bits.0 {
                self.dirty_bits.1 |= mask;
            } else {
                // Crossed into a new 64-bit word: flush the buffered word...
                let dirty_builder = self.dirty_builder.as_mut().unwrap();
                dirty_builder.push_value(
                    self.write_batcher,
                    self.dirty_bits.0 as u64,
                    self.dirty_bits.1,
                )?;
                // ...then start the new word with the current block's bit.
                // BUG FIX: this previously reset the word to 0, silently
                // dropping the dirty bit for this cache block.
                self.dirty_bits.0 = index;
                self.dirty_bits.1 = mask;
            }
        }
        Ok(Visit::Continue)
    }

    fn hints_b(&mut self) -> Result<Visit> {
        if self.in_section != Section::Superblock {
            return Err(anyhow!("not in superblock"));
        }
        self.in_section = Section::Hints;
        Ok(Visit::Continue)
    }

    fn hints_e(&mut self) -> Result<Visit> {
        if self.in_section != Section::Hints {
            return Err(anyhow!("not in hints"));
        }
        self.in_section = Section::Superblock;
        Ok(Visit::Continue)
    }

    fn hint(&mut self, h: &ir::Hint) -> Result<Visit> {
        let hint = Hint {
            hint: h.data[..].try_into().unwrap(),
        };
        let hint_builder = self.hint_builder.as_mut().unwrap();
        hint_builder.push_value(self.write_batcher, h.cblock as u64, hint)?;
        Ok(Visit::Continue)
    }

    // Discards are not restored; the discard bitset is rebuilt empty in
    // superblock_b, so these are deliberate no-ops.
    fn discards_b(&mut self) -> Result<Visit> {
        Ok(Visit::Continue)
    }

    fn discards_e(&mut self) -> Result<Visit> {
        Ok(Visit::Continue)
    }

    fn discard(&mut self, _d: &ir::Discard) -> Result<Visit> {
        Ok(Visit::Continue)
    }

    fn eof(&mut self) -> Result<Visit> {
        if self.in_section != Section::Finalized {
            return Err(anyhow!("incompleted source metadata"));
        }
        Ok(Visit::Continue)
    }
}
//------------------------------------------
/// Writes out the metadata space map and returns its packed root.
fn build_metadata_sm(w: &mut WriteBatcher) -> Result<Vec<u8>> {
    let sm_root = write_metadata_sm(w)?;
    pack_root(&sm_root, SPACE_MAP_ROOT_SIZE)
}
//------------------------------------------
/// Restores binary cache metadata onto `opts.output` from the XML file
/// `opts.input`.
pub fn restore(opts: CacheRestoreOptions) -> Result<()> {
    let input = OpenOptions::new()
        .read(true)
        .write(false)
        .open(opts.input)?;

    let ctx = mk_context(&opts)?;

    let sm = core_metadata_sm(ctx.engine.get_nr_blocks(), u32::MAX);
    // `sm` isn't used again, so move it into the batcher rather than cloning.
    let mut w = WriteBatcher::new(ctx.engine.clone(), sm, ctx.engine.get_batch_size());

    // build cache mappings
    let mut restorer = Restorer::new(&mut w);
    xml::read(input, &mut restorer)?;

    Ok(())
}
//------------------------------------------

View File

@ -1,224 +0,0 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{bytes::complete::*, number::complete::*, IResult};
use std::io::Cursor;
use crate::checksum::*;
use crate::io_engine::*;
//------------------------------------------
// Size in bytes of the packed space-map root embedded in the superblock.
pub const SPACE_MAP_ROOT_SIZE: usize = 128;
// The superblock always occupies metadata block 0.
pub const SUPERBLOCK_LOCATION: u64 = 0;

// Cache superblock magic number.
const MAGIC: u64 = 0o6142003; // 0x18c403 in hex
// Fixed widths of the policy-name and uuid fields on disk.
const POLICY_NAME_SIZE: usize = 16;
const UUID_SIZE: usize = 16;
//------------------------------------------
/// Flag bits held in the superblock's flags word.
#[derive(Debug, Clone)]
pub struct SuperblockFlags {
    pub clean_shutdown: bool,
    pub needs_check: bool,
}

/// An unpacked on-disk cache superblock.
#[derive(Debug, Clone)]
pub struct Superblock {
    pub flags: SuperblockFlags,
    pub block: u64,
    pub version: u32,
    // Fixed-width on disk; stored here trimmed at the first NUL.
    pub policy_name: Vec<u8>,
    pub policy_version: Vec<u32>,
    pub policy_hint_size: u32,

    pub metadata_sm_root: Vec<u8>,
    pub mapping_root: u64,
    pub dirty_root: Option<u64>, // format 2 only
    pub hint_root: u64,

    pub discard_root: u64,
    pub discard_block_size: u64,
    pub discard_nr_blocks: u64,

    pub data_block_size: u32,
    pub cache_blocks: u32,

    pub compat_flags: u32,
    pub compat_ro_flags: u32,
    pub incompat_flags: u32,

    // Policy statistics counters.
    pub read_hits: u32,
    pub read_misses: u32,
    pub write_hits: u32,
    pub write_misses: u32,
}
/// Parses an on-disk cache superblock.
///
/// The field order here must match the on-disk layout exactly; see
/// `pack_superblock` for the writer side.
fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
    let (i, _csum) = le_u32(data)?;
    let (i, flags) = le_u32(i)?;
    let (i, block) = le_u64(i)?;
    let (i, _uuid) = take(16usize)(i)?;
    let (i, _magic) = le_u64(i)?;
    let (i, version) = le_u32(i)?;
    let (i, policy_name) = take(POLICY_NAME_SIZE)(i)?;
    let (i, policy_hint_size) = le_u32(i)?;
    let (i, metadata_sm_root) = take(SPACE_MAP_ROOT_SIZE)(i)?;
    let (i, mapping_root) = le_u64(i)?;
    let (i, hint_root) = le_u64(i)?;
    let (i, discard_root) = le_u64(i)?;
    let (i, discard_block_size) = le_u64(i)?;
    let (i, discard_nr_blocks) = le_u64(i)?;
    let (i, data_block_size) = le_u32(i)?;
    let (i, _metadata_block_size) = le_u32(i)?;
    let (i, cache_blocks) = le_u32(i)?;
    let (i, compat_flags) = le_u32(i)?;
    let (i, compat_ro_flags) = le_u32(i)?;
    let (i, incompat_flags) = le_u32(i)?;
    let (i, read_hits) = le_u32(i)?;
    let (i, read_misses) = le_u32(i)?;
    let (i, write_hits) = le_u32(i)?;
    let (i, write_misses) = le_u32(i)?;
    let (i, vsn_major) = le_u32(i)?;
    let (i, vsn_minor) = le_u32(i)?;
    let (i, vsn_patch) = le_u32(i)?;

    // The dirty bitset root only exists in format 2 (and later) superblocks.
    let mut i = i;
    let mut dirty_root = None;
    if version >= 2 {
        let (m, root) = le_u64(i)?;
        dirty_root = Some(root);
        i = m;
    }

    Ok((
        i,
        Superblock {
            flags: SuperblockFlags {
                clean_shutdown: (flags & 0x1) != 0,
                needs_check: (flags & 0x2) != 0,
            },
            block,
            version,
            // Trim the fixed-width name at the first NUL byte.
            policy_name: policy_name.splitn(2, |c| *c == 0).next().unwrap().to_vec(),
            policy_version: vec![vsn_major, vsn_minor, vsn_patch],
            policy_hint_size,
            metadata_sm_root: metadata_sm_root.to_vec(),
            mapping_root,
            dirty_root,
            hint_root,
            discard_root,
            discard_block_size,
            discard_nr_blocks,
            data_block_size,
            cache_blocks,
            compat_flags,
            compat_ro_flags,
            incompat_flags,
            read_hits,
            read_misses,
            write_hits,
            write_misses,
        },
    ))
}
/// Reads and validates the superblock at `loc`, failing if the block's
/// checksum doesn't identify it as a cache superblock.
pub fn read_superblock(engine: &dyn IoEngine, loc: u64) -> Result<Superblock> {
    let b = engine.read(loc)?;

    if metadata_block_type(b.get_data()) != BT::CACHE_SUPERBLOCK {
        return Err(anyhow!("bad checksum in superblock"));
    }

    unpack(b.get_data())
        .map(|(_, sb)| sb)
        .map_err(|_| anyhow!("couldn't unpack superblock"))
}
//------------------------------------------
/// Serialises `sb` into `w` in the on-disk superblock layout.
///
/// The field order must mirror `unpack` exactly.  The checksum slot is
/// written as zero here; `write_superblock` fills it in afterwards.
fn pack_superblock<W: WriteBytesExt>(sb: &Superblock, w: &mut W) -> Result<()> {
    // checksum, which we don't know yet
    w.write_u32::<LittleEndian>(0)?;

    // flags
    let mut flags: u32 = 0;
    if sb.flags.clean_shutdown {
        flags |= 0x1;
    }
    if sb.flags.needs_check {
        flags |= 0x2;
    }
    w.write_u32::<LittleEndian>(flags)?;
    w.write_u64::<LittleEndian>(sb.block)?;
    w.write_all(&[0; UUID_SIZE])?;
    w.write_u64::<LittleEndian>(MAGIC)?;
    w.write_u32::<LittleEndian>(sb.version)?;

    // The policy name is a fixed-width, zero-padded field.
    let mut policy_name = [0u8; POLICY_NAME_SIZE];
    policy_name[..sb.policy_name.len()].copy_from_slice(&sb.policy_name[..]);
    w.write_all(&policy_name)?;

    w.write_u32::<LittleEndian>(sb.policy_hint_size)?;
    w.write_all(&sb.metadata_sm_root)?;
    w.write_u64::<LittleEndian>(sb.mapping_root)?;
    w.write_u64::<LittleEndian>(sb.hint_root)?;
    w.write_u64::<LittleEndian>(sb.discard_root)?;
    w.write_u64::<LittleEndian>(sb.discard_block_size)?;
    w.write_u64::<LittleEndian>(sb.discard_nr_blocks)?;
    w.write_u32::<LittleEndian>(sb.data_block_size)?;
    // metadata block size
    w.write_u32::<LittleEndian>((BLOCK_SIZE >> SECTOR_SHIFT) as u32)?;
    w.write_u32::<LittleEndian>(sb.cache_blocks)?;
    w.write_u32::<LittleEndian>(sb.compat_flags)?;
    w.write_u32::<LittleEndian>(sb.compat_ro_flags)?;
    w.write_u32::<LittleEndian>(sb.incompat_flags)?;
    w.write_u32::<LittleEndian>(sb.read_hits)?;
    w.write_u32::<LittleEndian>(sb.read_misses)?;
    w.write_u32::<LittleEndian>(sb.write_hits)?;
    w.write_u32::<LittleEndian>(sb.write_misses)?;
    w.write_u32::<LittleEndian>(sb.policy_version[0])?;
    w.write_u32::<LittleEndian>(sb.policy_version[1])?;
    w.write_u32::<LittleEndian>(sb.policy_version[2])?;

    // The dirty bitset root is only present in format 2 metadata.
    // if-let instead of is_some()/unwrap().
    if let Some(dirty_root) = sb.dirty_root {
        w.write_u64::<LittleEndian>(dirty_root)?;
    }

    Ok(())
}
/// Packs, checksums, and writes `sb` to the metadata device.
///
/// Note: the block is always written to SUPERBLOCK_LOCATION; the `_loc`
/// argument is currently ignored.
pub fn write_superblock(engine: &dyn IoEngine, _loc: u64, sb: &Superblock) -> Result<()> {
    let b = Block::zeroed(SUPERBLOCK_LOCATION);

    // pack the superblock
    {
        let mut cursor = Cursor::new(b.get_data());
        pack_superblock(sb, &mut cursor)?;
    }

    // calculate the checksum
    write_checksum(b.get_data(), BT::CACHE_SUPERBLOCK)?;

    // write
    engine.write(&b)?;
    Ok(())
}
//------------------------------------------

268
src/cache/xml.rs vendored
View File

@ -1,268 +0,0 @@
use anyhow::{anyhow, Result};
use base64::{decode, encode};
use std::io::{BufRead, BufReader};
use std::io::{Read, Write};
use quick_xml::events::{BytesEnd, BytesStart, Event};
use quick_xml::{Reader, Writer};
use crate::cache::ir::*;
use crate::xml::*;
//---------------------------------------
pub struct XmlWriter<W: Write> {
w: Writer<W>,
}
impl<W: Write> XmlWriter<W> {
pub fn new(w: W) -> XmlWriter<W> {
XmlWriter {
w: Writer::new_with_indent(w, 0x20, 2),
}
}
}
impl<W: Write> MetadataVisitor for XmlWriter<W> {
fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit> {
let tag = b"superblock";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"uuid", sb.uuid.clone()));
elem.push_attribute(mk_attr(b"block_size", sb.block_size));
elem.push_attribute(mk_attr(b"nr_cache_blocks", sb.nr_cache_blocks));
elem.push_attribute(mk_attr(b"policy", sb.policy.clone()));
elem.push_attribute(mk_attr(b"hint_width", sb.hint_width));
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn superblock_e(&mut self) -> Result<Visit> {
self.w
.write_event(Event::End(BytesEnd::borrowed(b"superblock")))?;
Ok(Visit::Continue)
}
fn mappings_b(&mut self) -> Result<Visit> {
let tag = b"mappings";
let elem = BytesStart::owned(tag.to_vec(), tag.len());
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn mappings_e(&mut self) -> Result<Visit> {
self.w
.write_event(Event::End(BytesEnd::borrowed(b"mappings")))?;
Ok(Visit::Continue)
}
fn mapping(&mut self, m: &Map) -> Result<Visit> {
let tag = b"mapping";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"cache_block", m.cblock));
elem.push_attribute(mk_attr(b"origin_block", m.oblock));
elem.push_attribute(mk_attr(b"dirty", m.dirty));
self.w.write_event(Event::Empty(elem))?;
Ok(Visit::Continue)
}
fn hints_b(&mut self) -> Result<Visit> {
let tag = b"hints";
let elem = BytesStart::owned(tag.to_vec(), tag.len());
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn hints_e(&mut self) -> Result<Visit> {
self.w
.write_event(Event::End(BytesEnd::borrowed(b"hints")))?;
Ok(Visit::Continue)
}
fn hint(&mut self, h: &Hint) -> Result<Visit> {
let tag = b"hint";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"cache_block", h.cblock));
elem.push_attribute(mk_attr(b"data", encode(&h.data[0..])));
self.w.write_event(Event::Empty(elem))?;
Ok(Visit::Continue)
}
fn discards_b(&mut self) -> Result<Visit> {
let tag = b"discards";
let elem = BytesStart::owned(tag.to_vec(), tag.len());
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn discards_e(&mut self) -> Result<Visit> {
self.w
.write_event(Event::End(BytesEnd::borrowed(b"discards")))?;
Ok(Visit::Continue)
}
fn discard(&mut self, d: &Discard) -> Result<Visit> {
let tag = b"discard";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"dbegin", d.begin));
elem.push_attribute(mk_attr(b"dend", d.end));
self.w.write_event(Event::Empty(elem))?;
Ok(Visit::Continue)
}
fn eof(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
}
//------------------------------------------
fn parse_superblock(e: &BytesStart) -> Result<Superblock> {
let mut uuid: Option<String> = None;
let mut block_size: Option<u32> = None;
let mut nr_cache_blocks: Option<u32> = None;
let mut policy: Option<String> = None;
let mut hint_width: Option<u32> = None;
for a in e.attributes() {
let kv = a.unwrap();
match kv.key {
b"uuid" => uuid = Some(string_val(&kv)),
b"block_size" => block_size = Some(u32_val(&kv)?),
b"nr_cache_blocks" => nr_cache_blocks = Some(u32_val(&kv)?),
b"policy" => policy = Some(string_val(&kv)),
b"hint_width" => hint_width = Some(u32_val(&kv)?),
_ => return bad_attr("superblock", kv.key),
}
}
let tag = "cache";
Ok(Superblock {
uuid: check_attr(tag, "uuid", uuid)?,
block_size: check_attr(tag, "block_size", block_size)?,
nr_cache_blocks: check_attr(tag, "nr_cache_blocks", nr_cache_blocks)?,
policy: check_attr(tag, "policy", policy)?,
hint_width: check_attr(tag, "hint_width", hint_width)?,
})
}
fn parse_mapping(e: &BytesStart) -> Result<Map> {
let mut cblock: Option<u32> = None;
let mut oblock: Option<u64> = None;
let mut dirty: Option<bool> = None;
for a in e.attributes() {
let kv = a.unwrap();
match kv.key {
b"cache_block" => cblock = Some(u32_val(&kv)?),
b"origin_block" => oblock = Some(u64_val(&kv)?),
b"dirty" => dirty = Some(bool_val(&kv)?),
_ => return bad_attr("mapping", kv.key),
}
}
let tag = "mapping";
Ok(Map {
cblock: check_attr(tag, "cache_block", cblock)?,
oblock: check_attr(tag, "origin_block", oblock)?,
dirty: check_attr(tag, "dirty", dirty)?,
})
}
fn parse_hint(e: &BytesStart) -> Result<Hint> {
let mut cblock: Option<u32> = None;
let mut data: Option<Vec<u8>> = None;
for a in e.attributes() {
let kv = a.unwrap();
match kv.key {
b"cache_block" => cblock = Some(u32_val(&kv)?),
b"data" => data = Some(decode(bytes_val(&kv))?),
_ => return bad_attr("mapping", kv.key),
}
}
let tag = "hint";
Ok(Hint {
cblock: check_attr(tag, "cache_block", cblock)?,
data: check_attr(tag, "data", data)?,
})
}
fn handle_event<R, M>(reader: &mut Reader<R>, buf: &mut Vec<u8>, visitor: &mut M) -> Result<Visit>
where
R: Read + BufRead,
M: MetadataVisitor,
{
match reader.read_event(buf) {
Ok(Event::Start(ref e)) => match e.name() {
b"superblock" => visitor.superblock_b(&parse_superblock(e)?),
b"mappings" => visitor.mappings_b(),
b"hints" => visitor.hints_b(),
_ => {
return Err(anyhow!(
"Parse error 1 at byte {}",
reader.buffer_position()
))
}
},
Ok(Event::End(ref e)) => match e.name() {
b"superblock" => visitor.superblock_e(),
b"mappings" => visitor.mappings_e(),
b"hints" => visitor.hints_e(),
_ => {
return Err(anyhow!(
"Parse error 2 at byte {}",
reader.buffer_position()
))
}
},
Ok(Event::Empty(ref e)) => match e.name() {
b"mapping" => visitor.mapping(&parse_mapping(e)?),
b"hint" => visitor.hint(&parse_hint(e)?),
_ => {
return Err(anyhow!(
"Parse error 3 at byte {}",
reader.buffer_position()
))
}
},
Ok(Event::Text(_)) => Ok(Visit::Continue),
Ok(Event::Comment(_)) => Ok(Visit::Continue),
Ok(Event::Eof) => {
visitor.eof()?;
Ok(Visit::Stop)
}
Ok(_) => {
return Err(anyhow!(
"Parse error 4 at byte {}",
reader.buffer_position()
))
}
Err(e) => {
return Err(anyhow!(
"Parse error 5 at byte {}: {:?}",
reader.buffer_position(),
e
))
}
}
}
pub fn read<R, M>(input: R, visitor: &mut M) -> Result<()>
where
R: Read,
M: MetadataVisitor,
{
let input = BufReader::new(input);
let mut reader = Reader::from_reader(input);
reader.trim_text(true);
let mut buf = Vec::new();
while let Visit::Continue = handle_event(&mut reader, &mut buf, visitor)? {}
Ok(())
}

View File

@ -1,80 +0,0 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use crc32c::crc32c;
use std::io::Cursor;
// Metadata blocks are always 4k.
const BLOCK_SIZE: u64 = 4096;
// Per-block-type salts XOR'd into the CRC, so a block checksummed as one
// type never validates as another.
const THIN_SUPERBLOCK_CSUM_XOR: u32 = 160774;
const CACHE_SUPERBLOCK_CSUM_XOR: u32 = 9031977;
const ERA_SUPERBLOCK_CSUM_XOR: u32 = 146538381;
const BITMAP_CSUM_XOR: u32 = 240779;
const INDEX_CSUM_XOR: u32 = 160478;
const BTREE_CSUM_XOR: u32 = 121107;
const ARRAY_CSUM_XOR: u32 = 595846735;
/// CRC32C of everything after the first 4 bytes (the stored checksum
/// itself), bit-inverted.  `!x` is identical to `x ^ 0xffffffff` for u32.
fn checksum(buf: &[u8]) -> u32 {
    !crc32c(&buf[4..])
}
/// The kinds of metadata block distinguished by their checksum salt.
#[derive(Debug, PartialEq)]
#[allow(clippy::upper_case_acronyms)]
#[allow(non_camel_case_types)]
pub enum BT {
    THIN_SUPERBLOCK,
    CACHE_SUPERBLOCK,
    ERA_SUPERBLOCK,
    NODE,
    INDEX,
    BITMAP,
    ARRAY,
    UNKNOWN,
}
/// Identifies a metadata block by XOR-ing its computed checksum with the
/// stored one; the remainder is the block-type salt (or garbage, giving
/// `BT::UNKNOWN`, when the block is corrupt or not 4k long).
pub fn metadata_block_type(buf: &[u8]) -> BT {
    if buf.len() != BLOCK_SIZE as usize {
        return BT::UNKNOWN;
    }

    // The checksum is always stored in the first u32 of the buffer.
    let stored = Cursor::new(buf).read_u32::<LittleEndian>().unwrap();

    match checksum(buf) ^ stored {
        THIN_SUPERBLOCK_CSUM_XOR => BT::THIN_SUPERBLOCK,
        CACHE_SUPERBLOCK_CSUM_XOR => BT::CACHE_SUPERBLOCK,
        ERA_SUPERBLOCK_CSUM_XOR => BT::ERA_SUPERBLOCK,
        BTREE_CSUM_XOR => BT::NODE,
        BITMAP_CSUM_XOR => BT::BITMAP,
        INDEX_CSUM_XOR => BT::INDEX,
        ARRAY_CSUM_XOR => BT::ARRAY,
        _ => BT::UNKNOWN,
    }
}
pub fn write_checksum(buf: &mut [u8], kind: BT) -> Result<()> {
if buf.len() != BLOCK_SIZE as usize {
return Err(anyhow!("block is wrong size"));
}
use BT::*;
let salt = match kind {
THIN_SUPERBLOCK => THIN_SUPERBLOCK_CSUM_XOR,
CACHE_SUPERBLOCK => CACHE_SUPERBLOCK_CSUM_XOR,
ERA_SUPERBLOCK => ERA_SUPERBLOCK_CSUM_XOR,
NODE => BTREE_CSUM_XOR,
BITMAP => BITMAP_CSUM_XOR,
INDEX => INDEX_CSUM_XOR,
ARRAY => ARRAY_CSUM_XOR,
UNKNOWN => {
return Err(anyhow!("Invalid block type"));
}
};
let csum = checksum(buf) ^ salt;
let mut out = std::io::Cursor::new(buf);
out.write_u32::<LittleEndian>(csum)?;
Ok(())
}

View File

@ -1,96 +0,0 @@
extern crate clap;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::cache::check::{check, CacheCheckOptions};
use crate::commands::utils::*;
use crate::report::*;
//------------------------------------------
/// Entry point for the `cache_check` command.
///
/// Parses `args`, validates the input file, runs the metadata check, and
/// exits the process with status 1 on failure.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("cache_check")
        .version(crate::version::tools_version())
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            Arg::with_name("AUTO_REPAIR")
                .help("Auto repair trivial issues.")
                .long("auto-repair"),
        )
        .arg(
            Arg::with_name("IGNORE_NON_FATAL")
                .help("Only return a non-zero exit code if a fatal error is found.")
                .long("ignore-non-fatal-errors"),
        )
        .arg(
            Arg::with_name("QUIET")
                .help("Suppress output messages, return only exit code.")
                .short("q")
                .long("quiet"),
        )
        .arg(
            Arg::with_name("SB_ONLY")
                .help("Only check the superblock.")
                .long("super-block-only"),
        )
        // BUG FIX: SKIP_MAPPINGS was queried below but never registered
        // with the parser, so --skip-mappings could never be enabled.
        .arg(
            Arg::with_name("SKIP_MAPPINGS")
                .help("Don't check the mapping array")
                .long("skip-mappings"),
        )
        .arg(
            Arg::with_name("SKIP_HINTS")
                .help("Don't check the hint array")
                .long("skip-hints"),
        )
        .arg(
            Arg::with_name("SKIP_DISCARDS")
                .help("Don't check the discard bitset")
                .long("skip-discards"),
        )
        // arguments
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to check")
                .required(true)
                .index(1),
        );

    let matches = parser.get_matches_from(args);
    let input_file = Path::new(matches.value_of("INPUT").unwrap());

    // Choose a reporter: quiet, progress bar (when on a tty), or plain.
    let report = if matches.is_present("QUIET") {
        std::sync::Arc::new(mk_quiet_report())
    } else if atty::is(Stream::Stdout) {
        std::sync::Arc::new(mk_progress_bar_report())
    } else {
        Arc::new(mk_simple_report())
    };

    check_input_file(input_file, &report);
    check_file_not_tiny(input_file, &report);

    let opts = CacheCheckOptions {
        dev: input_file,
        async_io: matches.is_present("ASYNC_IO"),
        sb_only: matches.is_present("SB_ONLY"),
        skip_mappings: matches.is_present("SKIP_MAPPINGS"),
        skip_hints: matches.is_present("SKIP_HINTS"),
        skip_discards: matches.is_present("SKIP_DISCARDS"),
        ignore_non_fatal: matches.is_present("IGNORE_NON_FATAL"),
        auto_repair: matches.is_present("AUTO_REPAIR"),
        report: report.clone(),
    };

    if let Err(reason) = check(opts) {
        report.fatal(&format!("{}", reason));
        process::exit(1);
    }
}
//------------------------------------------

View File

@ -1,73 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::cache::dump::{dump, CacheDumpOptions};
use crate::commands::utils::*;
//------------------------------------------
/// Entry point for the `cache_dump` command.
///
/// Parses `args`, validates the input device, and dumps its metadata as
/// XML, exiting the process with status 1 on failure.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("cache_dump")
        .version(crate::version::tools_version())
        .about("Dump the cache metadata to stdout in XML format")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            Arg::with_name("REPAIR")
                .help("Repair the metadata whilst dumping it")
                .short("r")
                .long("repair"),
        )
        // options
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output file rather than stdout")
                .short("o")
                .long("output")
                .value_name("FILE"),
        )
        // arguments
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to dump")
                .required(true)
                .index(1),
        );

    let matches = parser.get_matches_from(args);
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    // Option::map replaces the is_present()/unwrap() pair.
    let output_file = matches.value_of("OUTPUT").map(Path::new);

    // Create a temporary report just in case these checks
    // need to report anything.
    let report = std::sync::Arc::new(crate::report::mk_simple_report());
    check_input_file(input_file, &report);
    check_file_not_tiny(input_file, &report);
    drop(report);

    let opts = CacheDumpOptions {
        input: input_file,
        output: output_file,
        async_io: matches.is_present("ASYNC_IO"),
        repair: matches.is_present("REPAIR"),
    };

    if let Err(reason) = dump(opts) {
        eprintln!("{}", reason);
        process::exit(1);
    }
}
//------------------------------------------

View File

@ -1,88 +0,0 @@
extern crate clap;
use clap::{value_t_or_exit, App, Arg, ArgGroup};
use std::ffi::OsString;
use std::process;
use crate::cache::metadata_size::{metadata_size, CacheMetadataSizeOptions};
use crate::math::div_up;
//------------------------------------------
/// Parses the cache_metadata_size command line.
///
/// The block count may be given directly via --nr-blocks, or derived
/// from --device-size / --block-size (which require each other);
/// value_t_or_exit! terminates the process on malformed numbers.
fn parse_args<I, T>(args: I) -> CacheMetadataSizeOptions
where
    I: IntoIterator<Item = T>,
    T: Into<OsString> + Clone,
{
    let parser = App::new("cache_metadata_size")
        .version(crate::version::tools_version())
        .about("Estimate the size of the metadata device needed for a given configuration.")
        .usage("cache_metadata_size [OPTIONS] <--device-size <SECTORS> --block-size <SECTORS> | --nr-blocks <NUM>>")
        // options
        .arg(
            Arg::with_name("BLOCK_SIZE")
                .help("Specify the size of each cache block")
                .long("block-size")
                .requires("DEVICE_SIZE")
                .value_name("SECTORS"),
        )
        .arg(
            Arg::with_name("DEVICE_SIZE")
                .help("Specify total size of the fast device used in the cache")
                .long("device-size")
                .requires("BLOCK_SIZE")
                .value_name("SECTORS"),
        )
        .arg(
            Arg::with_name("NR_BLOCKS")
                .help("Specify the number of cache blocks")
                .long("nr-blocks")
                .value_name("NUM"),
        )
        .arg(
            Arg::with_name("MAX_HINT_WIDTH")
                // typo fix: "Specity" -> "Specify"
                .help("Specify the per-block hint width")
                .long("max-hint-width")
                .value_name("BYTES")
                .default_value("4"),
        )
        .group(
            ArgGroup::with_name("selection")
                .args(&["DEVICE_SIZE", "NR_BLOCKS"])
                .required(true)
        );

    let matches = parser.get_matches_from(args);

    let nr_blocks = matches.value_of("NR_BLOCKS").map_or_else(
        || {
            let device_size = value_t_or_exit!(matches.value_of("DEVICE_SIZE"), u64);
            let block_size = value_t_or_exit!(matches.value_of("BLOCK_SIZE"), u32);
            div_up(device_size, block_size as u64)
        },
        |_| value_t_or_exit!(matches.value_of("NR_BLOCKS"), u64),
    );

    let max_hint_width = value_t_or_exit!(matches.value_of("MAX_HINT_WIDTH"), u32);

    CacheMetadataSizeOptions {
        nr_blocks,
        max_hint_width,
    }
}
/// Entry point: parse the arguments, run the estimate, and print the
/// result in sectors.  Exits non-zero on failure.
pub fn run(args: &[std::ffi::OsString]) {
    let opts = parse_args(args);

    match metadata_size(&opts) {
        Err(reason) => {
            eprintln!("{}", reason);
            process::exit(1);
        }
        Ok(size) => println!("{} sectors", size),
    }
}
//------------------------------------------

View File

@ -1,73 +0,0 @@
extern crate clap;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::cache::repair::{repair, CacheRepairOptions};
use crate::commands::utils::*;
use crate::report::*;
pub fn run(args: &[std::ffi::OsString]) {
let parser = App::new("cache_repair")
.version(crate::version::tools_version())
.about("Repair binary cache metadata, and write it to a different device or file")
// flags
.arg(
Arg::with_name("ASYNC_IO")
.help("Force use of io_uring for synchronous io")
.long("async-io")
.hidden(true),
)
.arg(
Arg::with_name("QUIET")
.help("Suppress output messages, return only exit code.")
.short("q")
.long("quiet"),
)
// options
.arg(
Arg::with_name("INPUT")
.help("Specify the input device")
.short("i")
.long("input")
.value_name("FILE")
.required(true),
)
.arg(
Arg::with_name("OUTPUT")
.help("Specify the output device")
.short("o")
.long("output")
.value_name("FILE")
.required(true),
);
let matches = parser.get_matches_from(args);
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let output_file = Path::new(matches.value_of("OUTPUT").unwrap());
let report = if matches.is_present("QUIET") {
std::sync::Arc::new(mk_quiet_report())
} else if atty::is(Stream::Stdout) {
std::sync::Arc::new(mk_progress_bar_report())
} else {
Arc::new(mk_simple_report())
};
check_input_file(input_file, &report);
let opts = CacheRepairOptions {
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
};
if let Err(reason) = repair(opts) {
report.fatal(&format!("{}", reason));
process::exit(1);
}
}

View File

@ -1,64 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::cache::restore::{restore, CacheRestoreOptions};
use crate::commands::utils::*;
/// Command-line entry point for `cache_restore`.
///
/// Reads an XML description of cache metadata (`-i`) and writes the
/// equivalent binary metadata to the output device/file (`-o`).
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("cache_restore")
        .version(crate::version::tools_version())
        .about("Convert XML format metadata to binary.")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            Arg::with_name("QUIET")
                .help("Suppress output messages, return only exit code.")
                .short("q")
                .long("quiet"),
        )
        // options
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input xml")
                .short("i")
                .long("input")
                .value_name("FILE")
                .required(true),
        )
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output device to check")
                .short("o")
                .long("output")
                .value_name("FILE")
                .required(true),
        );

    let matches = parser.get_matches_from(args);
    // Both arguments are `required`, so unwrap cannot fail here.
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = Path::new(matches.value_of("OUTPUT").unwrap());

    let report = mk_report(matches.is_present("QUIET"));
    // Fail fast with clear messages before attempting the restore.
    check_input_file(input_file, &report);
    check_output_file(output_file, &report);

    let opts = CacheRestoreOptions {
        input: input_file,
        output: output_file,
        async_io: matches.is_present("ASYNC_IO"),
        report: report.clone(),
    };

    if let Err(reason) = restore(opts) {
        report.fatal(&format!("{}", reason));
        process::exit(1);
    }
}

View File

@ -1,78 +0,0 @@
extern crate clap;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::commands::utils::*;
use crate::era::check::{check, EraCheckOptions};
use crate::report::*;
//------------------------------------------
pub fn run(args: &[std::ffi::OsString]) {
let parser = App::new("era_check")
.version(crate::version::tools_version())
// flags
.arg(
Arg::with_name("ASYNC_IO")
.help("Force use of io_uring for synchronous io")
.long("async-io")
.hidden(true),
)
.arg(
Arg::with_name("IGNORE_NON_FATAL")
.help("Only return a non-zero exit code if a fatal error is found.")
.long("ignore-non-fatal-errors"),
)
.arg(
Arg::with_name("QUIET")
.help("Suppress output messages, return only exit code.")
.short("q")
.long("quiet"),
)
.arg(
Arg::with_name("SB_ONLY")
.help("Only check the superblock.")
.long("super-block-only"),
)
// arguments
.arg(
Arg::with_name("INPUT")
.help("Specify the input device to check")
.required(true)
.index(1),
);
let matches = parser.get_matches_from(args);
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let report = if matches.is_present("QUIET") {
std::sync::Arc::new(mk_quiet_report())
} else if atty::is(Stream::Stdout) {
std::sync::Arc::new(mk_progress_bar_report())
} else {
Arc::new(mk_simple_report())
};
check_input_file(input_file, &report);
check_file_not_tiny(input_file, &report);
check_not_xml(input_file, &report);
let opts = EraCheckOptions {
dev: input_file,
async_io: matches.is_present("ASYNC_IO"),
sb_only: matches.is_present("SB_ONLY"),
ignore_non_fatal: matches.is_present("IGNORE_NON_FATAL"),
report: report.clone(),
};
if let Err(reason) = check(&opts) {
report.fatal(&format!("{}", reason));
process::exit(1);
}
}
//------------------------------------------

View File

@ -1,79 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::commands::utils::*;
use crate::era::dump::{dump, EraDumpOptions};
//------------------------------------------
/// Command-line entry point for `era_dump`.
///
/// Dumps era metadata as XML to stdout (or `-o FILE`); `--repair`
/// attempts to repair whilst dumping and `--logical` folds unprocessed
/// write sets into the final era array.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("era_dump")
        .version(crate::version::tools_version())
        .about("Dump the era metadata to stdout in XML format")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            Arg::with_name("LOGICAL")
                .help("Fold any unprocessed write sets into the final era array")
                .long("logical"),
        )
        .arg(
            Arg::with_name("REPAIR")
                .help("Repair the metadata whilst dumping it")
                .short("r")
                .long("repair"),
        )
        // options
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output file rather than stdout")
                .short("o")
                .long("output")
                .value_name("FILE"),
        )
        // arguments
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to dump")
                .required(true)
                .index(1),
        );

    let matches = parser.get_matches_from(args);
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    // OUTPUT is optional; None means dump to stdout.
    let output_file = if matches.is_present("OUTPUT") {
        Some(Path::new(matches.value_of("OUTPUT").unwrap()))
    } else {
        None
    };

    // Create a temporary report just in case these checks
    // need to report anything.
    let report = std::sync::Arc::new(crate::report::mk_simple_report());
    check_input_file(input_file, &report);
    check_file_not_tiny(input_file, &report);
    drop(report);

    let opts = EraDumpOptions {
        input: input_file,
        output: output_file,
        async_io: matches.is_present("ASYNC_IO"),
        logical: matches.is_present("LOGICAL"),
        repair: matches.is_present("REPAIR"),
    };

    if let Err(reason) = dump(opts) {
        eprintln!("{}", reason);
        process::exit(1);
    }
}

View File

@ -1,85 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::commands::utils::*;
use crate::era::invalidate::{invalidate, EraInvalidateOptions};
//------------------------------------------
/// Command-line entry point for `era_invalidate`.
///
/// Lists blocks that may have been written since a given era, so a
/// backup tool can invalidate just those blocks.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("era_invalidate")
        .version(crate::version::tools_version())
        .about("List blocks that may have changed since a given era")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            // Fix: this flag was queried below via is_present() but was
            // never registered with the parser, so it could never be
            // enabled from the command line.
            Arg::with_name("METADATA_SNAP")
                .help("Use the metadata snapshot rather than the current superblock")
                .long("metadata-snapshot"),
        )
        // options
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output file rather than stdout")
                .short("o")
                .long("output")
                .value_name("FILE"),
        )
        // arguments
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to dump")
                .required(true)
                .index(1),
        )
        .arg(
            Arg::with_name("WRITTEN_SINCE")
                .help("Blocks written since the given era will be listed")
                .long("written-since")
                .required(true)
                .value_name("ERA"),
        );

    let matches = parser.get_matches_from(args);
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    // OUTPUT is optional; None means write to stdout.
    let output_file = if matches.is_present("OUTPUT") {
        Some(Path::new(matches.value_of("OUTPUT").unwrap()))
    } else {
        None
    };

    // Create a temporary report just in case these checks
    // need to report anything.
    let report = std::sync::Arc::new(crate::report::mk_simple_report());
    check_input_file(input_file, &report);
    check_file_not_tiny(input_file, &report);
    drop(report);

    // WRITTEN_SINCE is `required`, so the unwrap_or(0) default is only
    // a belt-and-braces fallback.
    let threshold = matches
        .value_of("WRITTEN_SINCE")
        .map(|s| {
            s.parse::<u32>().unwrap_or_else(|_| {
                eprintln!("Couldn't parse written_since");
                process::exit(1);
            })
        })
        .unwrap_or(0);

    let opts = EraInvalidateOptions {
        input: input_file,
        output: output_file,
        async_io: matches.is_present("ASYNC_IO"),
        threshold,
        metadata_snap: matches.is_present("METADATA_SNAP"),
    };

    if let Err(reason) = invalidate(&opts) {
        eprintln!("{}", reason);
        process::exit(1);
    }
}
//------------------------------------------

View File

@ -1,73 +0,0 @@
extern crate clap;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::commands::utils::*;
use crate::era::repair::{repair, EraRepairOptions};
use crate::report::*;
pub fn run(args: &[std::ffi::OsString]) {
let parser = App::new("era_repair")
.version(crate::version::tools_version())
.about("Repair binary era metadata, and write it to a different device or file")
// flags
.arg(
Arg::with_name("ASYNC_IO")
.help("Force use of io_uring for synchronous io")
.long("async-io")
.hidden(true),
)
.arg(
Arg::with_name("QUIET")
.help("Suppress output messages, return only exit code.")
.short("q")
.long("quiet"),
)
// options
.arg(
Arg::with_name("INPUT")
.help("Specify the input device")
.short("i")
.long("input")
.value_name("FILE")
.required(true),
)
.arg(
Arg::with_name("OUTPUT")
.help("Specify the output device")
.short("o")
.long("output")
.value_name("FILE")
.required(true),
);
let matches = parser.get_matches_from(args);
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let output_file = Path::new(matches.value_of("OUTPUT").unwrap());
let report = if matches.is_present("QUIET") {
std::sync::Arc::new(mk_quiet_report())
} else if atty::is(Stream::Stdout) {
std::sync::Arc::new(mk_progress_bar_report())
} else {
Arc::new(mk_simple_report())
};
check_input_file(input_file, &report);
let opts = EraRepairOptions {
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
};
if let Err(reason) = repair(opts) {
report.fatal(&format!("{}", reason));
process::exit(1);
}
}

View File

@ -1,64 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::commands::utils::*;
use crate::era::restore::{restore, EraRestoreOptions};
/// Command-line entry point for `era_restore`.
///
/// Reads an XML description of era metadata (`-i`) and writes the
/// equivalent binary metadata to the output device/file (`-o`).
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("era_restore")
        .version(crate::version::tools_version())
        .about("Convert XML format metadata to binary.")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            Arg::with_name("QUIET")
                .help("Suppress output messages, return only exit code.")
                .short("q")
                .long("quiet"),
        )
        // options
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input xml")
                .short("i")
                .long("input")
                .value_name("FILE")
                .required(true),
        )
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output device to check")
                .short("o")
                .long("output")
                .value_name("FILE")
                .required(true),
        );

    let matches = parser.get_matches_from(args);
    // Both arguments are `required`, so unwrap cannot fail here.
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = Path::new(matches.value_of("OUTPUT").unwrap());

    let report = mk_report(matches.is_present("QUIET"));
    // Fail fast with clear messages before attempting the restore.
    check_input_file(input_file, &report);
    check_output_file(output_file, &report);

    let opts = EraRestoreOptions {
        input: input_file,
        output: output_file,
        async_io: matches.is_present("ASYNC_IO"),
        report: report.clone(),
    };

    if let Err(reason) = restore(opts) {
        report.fatal(&format!("{}", reason));
        process::exit(1);
    }
}

View File

@ -1,19 +0,0 @@
// Cache tools.
pub mod cache_check;
pub mod cache_dump;
pub mod cache_metadata_size;
pub mod cache_repair;
pub mod cache_restore;
// Era tools.
pub mod era_check;
pub mod era_dump;
pub mod era_invalidate;
pub mod era_repair;
pub mod era_restore;
// Thin-provisioning tools.
pub mod thin_check;
pub mod thin_dump;
pub mod thin_metadata_pack;
pub mod thin_metadata_size;
pub mod thin_metadata_unpack;
pub mod thin_repair;
pub mod thin_restore;
pub mod thin_shrink;
// Helpers shared by the command front ends.
pub mod utils;

View File

@ -1,128 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::commands::utils::*;
use crate::io_engine::*;
use crate::thin::check::{check, ThinCheckOptions, MAX_CONCURRENT_IO};
/// Command-line entry point for `thin_check`.
///
/// Validates thin-provisioning metadata on a device or file.  The
/// write-enabled flags (--auto-repair, --clear-needs-check-flag) open
/// the engine writable; everything else is read-only.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("thin_check")
        .version(crate::version::tools_version())
        .about("Validates thin provisioning metadata on a device or file.")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            // Auto-repair cannot be combined with options that change
            // what is read or restrict the scope of the check.
            Arg::with_name("AUTO_REPAIR")
                .help("Auto repair trivial issues.")
                .long("auto-repair")
                .conflicts_with_all(&[
                    "IGNORE_NON_FATAL",
                    "METADATA_SNAPSHOT",
                    "OVERRIDE_MAPPING_ROOT",
                    "SB_ONLY",
                    "SKIP_MAPPINGS",
                ]),
        )
        .arg(
            // Using --clear-needs-check along with --skip-mappings is allowed
            // (but not recommended) for backward compatibility (commit 1fe8a0d)
            Arg::with_name("CLEAR_NEEDS_CHECK")
                .help("Clears the 'needs_check' flag in the superblock")
                .long("clear-needs-check-flag")
                .conflicts_with_all(&[
                    "IGNORE_NON_FATAL",
                    "METADATA_SNAPSHOT",
                    "OVERRIDE_MAPPING_ROOT",
                    "SB_ONLY",
                ]),
        )
        .arg(
            Arg::with_name("IGNORE_NON_FATAL")
                .help("Only return a non-zero exit code if a fatal error is found.")
                .long("ignore-non-fatal-errors"),
        )
        .arg(
            Arg::with_name("METADATA_SNAPSHOT")
                .help("Check the metadata snapshot on a live pool")
                .short("m")
                .long("metadata-snapshot"),
        )
        .arg(
            Arg::with_name("QUIET")
                .help("Suppress output messages, return only exit code.")
                .short("q")
                .long("quiet"),
        )
        .arg(
            Arg::with_name("SB_ONLY")
                .help("Only check the superblock.")
                .long("super-block-only"),
        )
        .arg(
            Arg::with_name("SKIP_MAPPINGS")
                .help("Don't check the mapping tree")
                .long("skip-mappings"),
        )
        // options
        .arg(
            Arg::with_name("OVERRIDE_MAPPING_ROOT")
                .help("Specify a mapping root to use")
                .long("override-mapping-root")
                .value_name("OVERRIDE_MAPPING_ROOT")
                .takes_value(true),
        )
        // arguments
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to check")
                .required(true)
                .index(1),
        );

    let matches = parser.get_matches_from(args.iter());
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let report = mk_report(matches.is_present("QUIET"));

    // Cheap sanity checks before opening the metadata proper.
    check_input_file(input_file, &report);
    check_file_not_tiny(input_file, &report);
    check_not_xml(input_file, &report);

    // Pick the IO engine: io_uring when requested, otherwise a pool of
    // synchronous reader threads.  The engine must be writable when a
    // repairing/clearing flag was given.
    let engine: Arc<dyn IoEngine + Send + Sync>;
    let writable = matches.is_present("AUTO_REPAIR") || matches.is_present("CLEAR_NEEDS_CHECK");

    if matches.is_present("ASYNC_IO") {
        engine = Arc::new(
            AsyncIoEngine::new(input_file, MAX_CONCURRENT_IO, writable)
                .expect("unable to open input file"),
        );
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        engine = Arc::new(
            SyncIoEngine::new(input_file, nr_threads, writable).expect("unable to open input file"),
        );
    }

    let opts = ThinCheckOptions {
        engine,
        sb_only: matches.is_present("SB_ONLY"),
        skip_mappings: matches.is_present("SKIP_MAPPINGS"),
        ignore_non_fatal: matches.is_present("IGNORE_NON_FATAL"),
        auto_repair: matches.is_present("AUTO_REPAIR"),
        clear_needs_check: matches.is_present("CLEAR_NEEDS_CHECK"),
        report: report.clone(),
    };

    if let Err(reason) = check(opts) {
        report.fatal(&format!("{}", reason));
        process::exit(1);
    }
}

View File

@ -1,142 +0,0 @@
extern crate clap;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::commands::utils::*;
use crate::report::*;
use crate::thin::dump::{dump, ThinDumpOptions};
use crate::thin::metadata_repair::SuperblockOverrides;
pub fn run(args: &[std::ffi::OsString]) {
let parser = App::new("thin_dump")
.version(crate::version::tools_version())
.about("Dump thin-provisioning metadata to stdout in XML format")
// flags
.arg(
Arg::with_name("ASYNC_IO")
.help("Force use of io_uring for synchronous io")
.long("async-io")
.hidden(true),
)
.arg(
Arg::with_name("QUIET")
.help("Suppress output messages, return only exit code.")
.short("q")
.long("quiet"),
)
.arg(
Arg::with_name("REPAIR")
.help("Repair the metadata whilst dumping it")
.short("r")
.long("repair"),
)
.arg(
Arg::with_name("SKIP_MAPPINGS")
.help("Do not dump the mappings")
.long("skip-mappings"),
)
// options
.arg(
Arg::with_name("DATA_BLOCK_SIZE")
.help("Provide the data block size for repairing")
.long("data-block-size")
.value_name("SECTORS"),
)
.arg(
Arg::with_name("METADATA_SNAPSHOT")
.help("Access the metadata snapshot on a live pool")
.short("m")
.long("metadata-snapshot")
.value_name("METADATA_SNAPSHOT"),
)
.arg(
Arg::with_name("NR_DATA_BLOCKS")
.help("Override the number of data blocks if needed")
.long("nr-data-blocks")
.value_name("NUM"),
)
.arg(
Arg::with_name("OUTPUT")
.help("Specify the output file rather than stdout")
.short("o")
.long("output")
.value_name("FILE"),
)
.arg(
Arg::with_name("TRANSACTION_ID")
.help("Override the transaction id if needed")
.long("transaction-id")
.value_name("NUM"),
)
// arguments
.arg(
Arg::with_name("INPUT")
.help("Specify the input device to dump")
.required(true)
.index(1),
);
let matches = parser.get_matches_from(args);
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let output_file = if matches.is_present("OUTPUT") {
Some(Path::new(matches.value_of("OUTPUT").unwrap()))
} else {
None
};
let report = std::sync::Arc::new(mk_simple_report());
check_input_file(input_file, &report);
let transaction_id = matches.value_of("TRANSACTION_ID").map(|s| {
s.parse::<u64>().unwrap_or_else(|_| {
eprintln!("Couldn't parse transaction_id");
process::exit(1);
})
});
let data_block_size = matches.value_of("DATA_BLOCK_SIZE").map(|s| {
s.parse::<u32>().unwrap_or_else(|_| {
eprintln!("Couldn't parse data_block_size");
process::exit(1);
})
});
let nr_data_blocks = matches.value_of("NR_DATA_BLOCKS").map(|s| {
s.parse::<u64>().unwrap_or_else(|_| {
eprintln!("Couldn't parse nr_data_blocks");
process::exit(1);
})
});
let report;
if matches.is_present("QUIET") {
report = std::sync::Arc::new(mk_quiet_report());
} else if atty::is(Stream::Stdout) {
report = std::sync::Arc::new(mk_progress_bar_report());
} else {
report = Arc::new(mk_simple_report());
}
let opts = ThinDumpOptions {
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
repair: matches.is_present("REPAIR"),
overrides: SuperblockOverrides {
transaction_id,
data_block_size,
nr_data_blocks,
},
};
if let Err(reason) = dump(opts) {
report.fatal(&format!("{}", reason));
process::exit(1);
}
}

View File

@ -1,38 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process::exit;
use crate::commands::utils::*;
use crate::report::*;
/// Command-line entry point for `thin_metadata_pack`.
///
/// Packs only the metadata blocks that are actually used, producing a
/// compressed file from the binary metadata device/file.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("thin_metadata_pack")
        .version(crate::version::tools_version())
        .about("Produces a compressed file of thin metadata.  Only packs metadata blocks that are actually used.")
        .arg(Arg::with_name("INPUT")
            .help("Specify thinp metadata binary device/file")
            .required(true)
            .short("i")
            .value_name("DEV")
            .takes_value(true))
        .arg(Arg::with_name("OUTPUT")
            .help("Specify packed output file")
            .required(true)
            .short("o")
            .value_name("FILE")
            .takes_value(true));

    let matches = parser.get_matches_from(args);
    // Both arguments are `required`, so unwrap cannot fail here.
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = Path::new(matches.value_of("OUTPUT").unwrap());

    let report = std::sync::Arc::new(mk_simple_report());
    check_input_file(input_file, &report);

    if let Err(reason) = crate::pack::toplevel::pack(input_file, output_file) {
        report.fatal(&format!("Application error: {}\n", reason));
        exit(1);
    }
}

View File

@ -1,100 +0,0 @@
extern crate clap;
use clap::{value_t_or_exit, App, Arg};
use std::ffi::OsString;
use std::process;
use crate::thin::metadata_size::{metadata_size, ThinMetadataSizeOptions};
use crate::units::*;
//------------------------------------------
fn parse_args<I, T>(args: I) -> (ThinMetadataSizeOptions, Units, bool)
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
{
let parser = App::new("thin_metadata_size")
.version(crate::version::tools_version())
.about("Estimate the size of the metadata device needed for a given configuration.")
// options
.arg(
Arg::with_name("BLOCK_SIZE")
.help("Specify the data block size")
.short("b")
.long("block-size")
.required(true)
.value_name("SECTORS"),
)
.arg(
Arg::with_name("POOL_SIZE")
.help("Specify the size of pool device")
.short("s")
.long("pool-size")
.required(true)
.value_name("SECTORS"),
)
.arg(
Arg::with_name("MAX_THINS")
.help("Maximum number of thin devices and snapshots")
.short("m")
.long("max-thins")
.required(true)
.value_name("NUM"),
)
.arg(
Arg::with_name("UNIT")
.help("Specify the output unit")
.short("u")
.long("unit")
.value_name("UNIT")
.default_value("sector"),
)
.arg(
Arg::with_name("NUMERIC_ONLY")
.help("Output numeric value only")
.short("n")
.long("numeric-only"),
);
let matches = parser.get_matches_from(args);
// TODO: handle unit suffix
let pool_size = value_t_or_exit!(matches.value_of("POOL_SIZE"), u64);
let block_size = value_t_or_exit!(matches.value_of("BLOCK_SIZE"), u32);
let max_thins = value_t_or_exit!(matches.value_of("MAX_THINS"), u64);
let unit = value_t_or_exit!(matches.value_of("UNIT"), Units);
let numeric_only = matches.is_present("NUMERIC_ONLY");
(
ThinMetadataSizeOptions {
nr_blocks: pool_size / block_size as u64,
max_thins,
},
unit,
numeric_only,
)
}
/// Entry point: compute the metadata-size estimate and print it in the
/// requested unit, optionally as a bare number.
pub fn run(args: &[std::ffi::OsString]) {
    let (opts, unit, numeric_only) = parse_args(args);

    let size = match metadata_size(&opts) {
        Ok(size) => size,
        Err(reason) => {
            eprintln!("{}", reason);
            process::exit(1);
        }
    };

    // The estimate is in 512-byte sectors; convert to bytes before
    // changing units.
    let value = to_units(size * 512, unit.clone());
    if numeric_only {
        println!("{}", value);
    } else {
        println!("{} {}s", value, unit.to_string());
    }
}
//------------------------------------------

View File

@ -1,44 +0,0 @@
extern crate clap;
use crate::file_utils;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::process::exit;
/// Command-line entry point for `thin_metadata_unpack`.
///
/// Expands a packed metadata file (`-i`) back into binary metadata
/// written to the output path (`-o`).
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("thin_metadata_unpack")
        .version(crate::version::tools_version())
        .about("Unpack a compressed file of thin metadata.")
        .arg(
            Arg::with_name("INPUT")
                .help("Specify thinp metadata binary device/file")
                .required(true)
                .short("i")
                .value_name("DEV")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify packed output file")
                .required(true)
                .short("o")
                .value_name("FILE")
                .takes_value(true),
        );

    let matches = parser.get_matches_from(args);
    let input = Path::new(matches.value_of("INPUT").unwrap());
    let output = Path::new(matches.value_of("OUTPUT").unwrap());

    if !file_utils::is_file(input) {
        eprintln!("Invalid input file '{}'.", input.display());
        exit(1);
    }

    match crate::pack::toplevel::unpack(input, output) {
        Ok(_) => (),
        Err(reason) => {
            eprintln!("Application error: {}", reason);
            process::exit(1);
        }
    }
}

View File

@ -1,109 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::commands::utils::*;
use crate::thin::metadata_repair::SuperblockOverrides;
use crate::thin::repair::{repair, ThinRepairOptions};
/// Command-line entry point for `thin_repair`.
///
/// Repairs thin-provisioning metadata read from `-i` and writes the
/// result to a different device/file (`-o`); the override options patch
/// superblock fields during the repair.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("thin_repair")
        .version(crate::version::tools_version())
        .about("Repair thin-provisioning metadata, and write it to different device or file")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            Arg::with_name("QUIET")
                .help("Suppress output messages, return only exit code.")
                .short("q")
                .long("quiet"),
        )
        // options
        .arg(
            Arg::with_name("DATA_BLOCK_SIZE")
                .help("Provide the data block size for repairing")
                .long("data-block-size")
                .value_name("SECTORS"),
        )
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device")
                .short("i")
                .long("input")
                .value_name("FILE")
                .required(true),
        )
        .arg(
            Arg::with_name("NR_DATA_BLOCKS")
                .help("Override the number of data blocks if needed")
                .long("nr-data-blocks")
                .value_name("NUM"),
        )
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output device")
                .short("o")
                .long("output")
                .value_name("FILE")
                .required(true),
        )
        .arg(
            Arg::with_name("TRANSACTION_ID")
                .help("Override the transaction id if needed")
                .long("transaction-id")
                .value_name("NUM"),
        );

    let matches = parser.get_matches_from(args);
    // Both file arguments are `required`, so unwrap cannot fail here.
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = Path::new(matches.value_of("OUTPUT").unwrap());
    let report = mk_report(matches.is_present("QUIET"));

    check_input_file(input_file, &report);
    check_output_file(output_file, &report);

    // Each override is optional; a malformed value is a fatal error.
    let transaction_id = matches.value_of("TRANSACTION_ID").map(|s| {
        s.parse::<u64>().unwrap_or_else(|_| {
            report.fatal("Couldn't parse transaction_id");
            process::exit(1);
        })
    });

    let data_block_size = matches.value_of("DATA_BLOCK_SIZE").map(|s| {
        s.parse::<u32>().unwrap_or_else(|_| {
            report.fatal("Couldn't parse data_block_size");
            process::exit(1);
        })
    });

    let nr_data_blocks = matches.value_of("NR_DATA_BLOCKS").map(|s| {
        s.parse::<u64>().unwrap_or_else(|_| {
            report.fatal("Couldn't parse nr_data_blocks");
            process::exit(1);
        })
    });

    let opts = ThinRepairOptions {
        input: input_file,
        output: output_file,
        async_io: matches.is_present("ASYNC_IO"),
        report: report.clone(),
        overrides: SuperblockOverrides {
            transaction_id,
            data_block_size,
            nr_data_blocks,
        },
    };

    if let Err(reason) = repair(opts) {
        report.fatal(&format!("{}", reason));
        process::exit(1);
    }
}

View File

@ -1,64 +0,0 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::commands::utils::*;
use crate::thin::restore::{restore, ThinRestoreOptions};
/// Command-line entry point for `thin_restore`.
///
/// Reads an XML description of thin metadata (`-i`) and writes the
/// equivalent binary metadata to the output device/file (`-o`).
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("thin_restore")
        .version(crate::version::tools_version())
        .about("Convert XML format metadata to binary.")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            Arg::with_name("QUIET")
                .help("Suppress output messages, return only exit code.")
                .short("q")
                .long("quiet"),
        )
        // options
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input xml")
                .short("i")
                .long("input")
                .value_name("FILE")
                .required(true),
        )
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output device")
                .short("o")
                .long("output")
                .value_name("FILE")
                .required(true),
        );

    let matches = parser.get_matches_from(args);
    // Both arguments are `required`, so unwrap cannot fail here.
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = Path::new(matches.value_of("OUTPUT").unwrap());

    let report = mk_report(matches.is_present("QUIET"));
    // Fail fast with clear messages before attempting the restore.
    check_input_file(input_file, &report);
    check_output_file(output_file, &report);

    let opts = ThinRestoreOptions {
        input: input_file,
        output: output_file,
        async_io: matches.is_present("ASYNC_IO"),
        report: report.clone(),
    };

    if let Err(reason) = restore(opts) {
        report.fatal(&format!("{}", reason));
        process::exit(1);
    }
}

View File

@ -1,78 +0,0 @@
// This work is based on the implementation by Nikhil Kshirsagar which
// can be found here:
// https://github.com/nkshirsagar/thinpool_shrink/blob/split_ranges/thin_shrink.py
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process::exit;
use crate::commands::utils::*;
/// Command-line entry point for `thin_shrink`.
///
/// Rewrites xml metadata and moves data so an inactive pool can be
/// shrunk to `--nr-blocks` data blocks; `--no-copy` skips the data
/// move (useful for benchmarking).
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("thin_shrink")
        .version(crate::version::tools_version())
        .about("Rewrite xml metadata and move data in an inactive pool.")
        .arg(
            Arg::with_name("INPUT")
                .help("Specify thinp metadata xml file")
                .required(true)
                .short("i")
                .long("input")
                .value_name("FILE")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify output xml file")
                .required(true)
                .short("o")
                .long("output")
                .value_name("FILE")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("DATA")
                .help("Specify pool data device where data will be moved")
                .required(true)
                .long("data")
                .value_name("DATA")
                .takes_value(true),
        )
        .arg(
            Arg::with_name("NOCOPY")
                .help("Skip the copying of data, useful for benchmarking")
                .required(false)
                .long("no-copy")
                .value_name("NOCOPY")
                .takes_value(false),
        )
        .arg(
            Arg::with_name("SIZE")
                .help("Specify new size for the pool (in data blocks)")
                .required(true)
                .long("nr-blocks")
                .value_name("SIZE")
                .takes_value(true),
        );

    let matches = parser.get_matches_from(args);

    // FIXME: check these look like xml
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = Path::new(matches.value_of("OUTPUT").unwrap());

    // Fix: a malformed --nr-blocks used to panic via unwrap(); report
    // a clean error and exit like the other tools instead.
    let size = matches
        .value_of("SIZE")
        .unwrap()
        .parse::<u64>()
        .unwrap_or_else(|_| {
            eprintln!("Couldn't parse nr-blocks");
            exit(1);
        });

    let data_file = Path::new(matches.value_of("DATA").unwrap());
    let do_copy = !matches.is_present("NOCOPY");

    let report = mk_report(false);
    check_input_file(input_file, &report);

    if let Err(reason) =
        crate::shrink::toplevel::shrink(input_file, output_file, data_file, size, do_copy)
    {
        eprintln!("Application error: {}\n", reason);
        exit(1);
    }
}

View File

@ -1,84 +0,0 @@
use anyhow::Result;
use atty::Stream;
use std::fs::OpenOptions;
use std::io::Read;
use std::path::Path;
use std::process::exit;
use crate::file_utils;
use crate::report::*;
/// Validate that `input_file` exists and is a regular file or block
/// device; report a fatal error and exit(1) otherwise.
pub fn check_input_file(input_file: &Path, report: &Report) {
    // Existence is checked first so the user gets the more specific
    // message when the path is simply missing.
    let complaint = if !file_utils::file_exists(input_file) {
        Some(format!("Couldn't find input file '{:?}'.", &input_file))
    } else if !file_utils::is_file_or_blk(input_file) {
        Some(format!(
            "Not a block device or regular file '{:?}'.",
            &input_file
        ))
    } else {
        None
    };

    if let Some(msg) = complaint {
        report.fatal(&msg);
        exit(1);
    }
}
/// Reject inputs smaller than a single 4k metadata block.
///
/// Fix: an unreadable input used to panic via `expect`; any error from
/// `file_size` is now reported through `report` and the process exits
/// cleanly, matching the other `check_*` helpers.
pub fn check_file_not_tiny(input_file: &Path, report: &Report) {
    match file_utils::file_size(input_file) {
        Ok(size) => {
            if size < 4096 {
                report.fatal("Metadata device/file too small. Is this binary metadata?");
                exit(1);
            }
        }
        Err(e) => {
            report.fatal(&format!("couldn't get input size: {}", e));
            exit(1);
        }
    }
}
/// Ensure the output file is large enough to hold restored metadata;
/// report a fatal error and exit(1) if it is too small or unreadable.
pub fn check_output_file(path: &Path, report: &Report) {
    // minimal thin metadata size is 10 blocks, with one device
    match file_utils::file_size(path) {
        Err(e) => {
            report.fatal(&format!("{}", e));
            exit(1);
        }
        Ok(size) if size < 40960 => {
            report.fatal("Output file too small.");
            exit(1);
        }
        Ok(_) => (),
    }
}
/// Build the reporting backend for a command: quiet mode suppresses
/// all messages, a tty gets a progress bar, and anything else gets
/// plain line output.
pub fn mk_report(quiet: bool) -> std::sync::Arc<Report> {
    use std::sync::Arc;

    if quiet {
        return Arc::new(mk_quiet_report());
    }

    if atty::is(Stream::Stdout) {
        Arc::new(mk_progress_bar_report())
    } else {
        Arc::new(mk_simple_report())
    }
}
/// Heuristic test for XML metadata: does the buffer begin with one of
/// the openings an XML dump can legitimately start with?
///
/// Fix: the declaration test previously looked for `?xml`, which can
/// never match at offset zero — an XML declaration begins with `<?xml`.
fn is_xml(line: &[u8]) -> bool {
    line.starts_with(b"<superblock")
        || line.starts_with(b"<?xml")
        || line.starts_with(b"<!DOCTYPE")
}
/// Read the first 16 bytes of `input_file` and report a fatal error if
/// they look like XML (these tools only operate on binary metadata).
///
/// Returns Err if the file cannot be opened or holds fewer than 16
/// bytes; the `check_not_xml` wrapper swallows such errors.
///
/// NOTE(review): unlike the other check_* helpers this reports a fatal
/// error without calling exit(1), so the caller carries on — confirm
/// that is intentional.
pub fn check_not_xml_(input_file: &Path, report: &Report) -> Result<()> {
    let mut file = OpenOptions::new().read(true).open(input_file)?;
    let mut data = vec![0; 16];
    file.read_exact(&mut data)?;

    if is_xml(&data) {
        report.fatal("This looks like XML. This tool only checks the binary metadata format.");
    }

    Ok(())
}
/// This tries to read the start of input_path to see
/// if it's xml. If there are any problems reading the file
/// then it fails silently.
pub fn check_not_xml(input_file: &Path, report: &Report) {
    // Best-effort: discard any I/O error from the underlying check.
    let _ = check_not_xml_(input_file, report);
}
//---------------------------------------

View File

@ -1,152 +0,0 @@
use anyhow::{anyhow, Result};
use std::path::Path;
use std::sync::Arc;
use crate::era::superblock::*;
use crate::era::writeset::*;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::array::{self, ArrayBlock, ArrayError};
use crate::pdata::array_walker::*;
use crate::pdata::bitset::*;
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::report::*;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
/// Accounts for the superblock's own metadata block in the space map.
fn inc_superblock(sm: &ASpaceMap) -> anyhow::Result<()> {
    sm.lock().unwrap().inc(SUPERBLOCK_LOCATION, 1)?;
    Ok(())
}
//------------------------------------------
/// Array visitor that validates every era value against the superblock's
/// current era.
struct EraChecker {
    // Upper bound: no block may carry an era newer than this.
    current_era: u32,
}
impl EraChecker {
    pub fn new(current_era: u32) -> EraChecker {
        EraChecker { current_era }
    }
}
impl ArrayVisitor<u32> for EraChecker {
    /// Checks all era values in one array block; collects every violation so
    /// a single bad block reports all of its out-of-range entries.
    fn visit(&self, index: u64, b: ArrayBlock<u32>) -> array::Result<()> {
        let mut errs: Vec<ArrayError> = Vec::new();
        // Data-block numbers covered by this array block.
        let dbegin = index as u32 * b.header.max_entries;
        let dend = dbegin + b.header.max_entries;
        for (era, dblock) in b.values.iter().zip(dbegin..dend) {
            if era > &self.current_era {
                errs.push(array::value_err(format!(
                    "invalid era value at data block {}: {}",
                    dblock, era
                )));
            }
        }
        // Return the single error directly; aggregate only when needed.
        match errs.len() {
            0 => Ok(()),
            1 => Err(errs[0].clone()),
            _ => Err(array::aggregate_error(errs)),
        }
    }
}
//------------------------------------------
/// Options controlling `era_check`.
pub struct EraCheckOptions<'a> {
    // Path to the metadata device/file being checked.
    pub dev: &'a Path,
    // Use the async (io_uring) engine instead of the threaded sync engine.
    pub async_io: bool,
    // Stop after validating the superblock.
    pub sb_only: bool,
    // Downgrade non-fatal inconsistencies to warnings.
    pub ignore_non_fatal: bool,
    pub report: Arc<Report>,
}
/// Shared state for a check run: the reporter and the chosen io engine.
struct Context {
    report: Arc<Report>,
    engine: Arc<dyn IoEngine + Send + Sync>,
}
/// Builds a check context, selecting the io engine from the options.
fn mk_context(opts: &EraCheckOptions) -> anyhow::Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.dev, MAX_CONCURRENT_IO, false)?)
    } else {
        // Sync engine: one thread per in-flight request, at least 8.
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.dev, nr_threads, false)?)
    };
    Ok(Context {
        report: opts.report.clone(),
        engine,
    })
}
/// Rejects superblocks with an on-disk format version we don't understand.
fn check_superblock(sb: &Superblock) -> anyhow::Result<()> {
    match sb.version {
        0 | 1 => Ok(()),
        _ => Err(anyhow!("unknown superblock version")),
    }
}
/// Top-level era_check: validates the superblock, every archived writeset
/// bitset, and the era array. Returns an error if any fatal issue was found.
pub fn check(opts: &EraCheckOptions) -> Result<()> {
    let ctx = mk_context(opts)?;
    let engine = &ctx.engine;
    let report = &ctx.report;
    let mut fatal = false;
    report.set_title("Checking era metadata");
    // In-core space map tracking metadata block reference counts (max 255).
    let metadata_sm = core_sm(engine.get_nr_blocks(), u8::MAX as u32);
    inc_superblock(&metadata_sm)?;
    let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;
    check_superblock(&sb)?;
    if opts.sb_only {
        return Ok(());
    }
    let mut path = vec![0];
    let writesets = btree_to_map::<Writeset>(
        &mut path,
        engine.clone(),
        opts.ignore_non_fatal,
        sb.writeset_tree_root,
    )?;
    // Walk each archived writeset's bitset; errors are fatal but we keep
    // going so all problems are reported in one run.
    for ws in writesets.values() {
        let (_bs, err) = read_bitset_with_sm(
            engine.clone(),
            ws.root,
            ws.nr_bits as usize,
            metadata_sm.clone(),
            opts.ignore_non_fatal,
        )?;
        if err.is_some() {
            ctx.report.fatal(&format!("{}", err.unwrap()));
            fatal = true;
        }
    }
    // Validate every era value in the era array against the current era.
    let w = ArrayWalker::new_with_sm(engine.clone(), metadata_sm.clone(), opts.ignore_non_fatal)?;
    let mut c = EraChecker::new(sb.current_era);
    if let Err(e) = w.walk(&mut c, sb.era_array_root) {
        ctx.report.fatal(&format!("{}", e));
        fatal = true;
    }
    if fatal {
        Err(anyhow!("fatal errors in metadata"))
    } else {
        Ok(())
    }
}

View File

@ -1,420 +0,0 @@
use anyhow::{anyhow, Result};
use fixedbitset::FixedBitSet;
use std::convert::TryFrom;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::Deref;
use std::path::Path;
use std::sync::{Arc, Mutex};
use crate::era::ir::{self, MetadataVisitor};
use crate::era::superblock::*;
use crate::era::writeset::Writeset;
use crate::era::xml;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::array::{self, ArrayBlock};
use crate::pdata::array_walker::*;
use crate::pdata::bitset::read_bitset_no_err;
use crate::pdata::btree_walker::btree_to_map;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//-----------------------------------------
/// Adapts a `MetadataVisitor` to the `ArrayVisitor` interface so the era
/// array walk can emit one `ir::Era` per entry. The mutex serialises access
/// because the walker may visit blocks from multiple threads.
struct EraEmitter<'a> {
    emitter: Mutex<&'a mut dyn MetadataVisitor>,
}
impl<'a> EraEmitter<'a> {
    pub fn new(emitter: &'a mut dyn MetadataVisitor) -> EraEmitter {
        EraEmitter {
            emitter: Mutex::new(emitter),
        }
    }
}
impl<'a> ArrayVisitor<u32> for EraEmitter<'a> {
    /// Emits an `ir::Era` for each populated entry in the array block.
    fn visit(&self, index: u64, b: ArrayBlock<u32>) -> array::Result<()> {
        // First block number covered by this array block; only nr_entries
        // (not max_entries) values are valid.
        let begin = index as u32 * b.header.max_entries;
        let end = begin + b.header.nr_entries;
        for (v, block) in b.values.iter().zip(begin..end) {
            let era = ir::Era { block, era: *v };
            self.emitter
                .lock()
                .unwrap()
                .era(&era)
                .map_err(|e| array::value_err(format!("{}", e)))?;
        }
        Ok(())
    }
}
//------------------------------------------
/// Key/value store of per-block era deltas, abstracting over the delta width.
trait Archive {
    fn set(&mut self, key: u32, value: u32) -> Result<()>;
    fn get(&self, key: u32) -> Option<u32>;
}
// In-core archive of writeset eras.
// The actual era for a given block is `digested_era + deltas[b]` if `deltas[b]` is non-zero.
struct EraArchive<T> {
    digested_era: u32, // maximum possible era in the era array
    deltas: Vec<T>,
}
/// Allocates an era archive, choosing the narrowest delta type that can hold
/// `nr_writesets + 1` distinct values. The width only affects memory use.
fn new_era_archive(nr_blocks: u32, archived_begin: u32, nr_writesets: u32) -> Box<dyn Archive> {
    let digested_era = archived_begin.wrapping_sub(1);
    let len = nr_blocks as usize;
    match nr_writesets + 1 {
        0..=255 => Box::new(EraArchive {
            digested_era,
            deltas: vec![0u8; len],
        }),
        256..=65535 => Box::new(EraArchive {
            digested_era,
            deltas: vec![0u16; len],
        }),
        _ => Box::new(EraArchive {
            digested_era,
            deltas: vec![0u32; len],
        }),
    }
}
// Idiom fix: the inline `T: std::convert::TryFrom<u32>` bound duplicated the
// `TryFrom<u32>` bound already stated in the where clause; declare it once.
impl<T> Archive for EraArchive<T>
where
    T: Copy + Into<u32> + TryFrom<u32>,
    <T as TryFrom<u32>>::Error: std::fmt::Debug,
{
    /// Records the era delta for `block`.
    /// Panics if `block` is out of range or `delta` doesn't fit in `T` —
    /// callers size the archive and delta width up front.
    fn set(&mut self, block: u32, delta: u32) -> Result<()> {
        self.deltas[block as usize] = T::try_from(delta).unwrap();
        Ok(())
    }
    /// Returns the archived era for `block`, or `None` when no writeset
    /// recorded it (delta of zero means "use the era array value").
    fn get(&self, block: u32) -> Option<u32> {
        if let Some(&delta) = self.deltas.get(block as usize) {
            let d: u32 = delta.into();
            if d == 0 {
                None
            } else {
                Some(self.digested_era.wrapping_add(d))
            }
        } else {
            None
        }
    }
}
//------------------------------------------
/// Mutable state shared by the logical emitter: the downstream visitor and
/// the collated writeset archive.
struct Inner<'a> {
    emitter: &'a mut dyn MetadataVisitor,
    era_archive: &'a dyn Archive,
}
/// Array visitor that emits "logical" eras: the archive (built from the
/// writesets) overrides the digested value stored in the era array.
struct LogicalEraEmitter<'a> {
    inner: Mutex<Inner<'a>>,
}
impl<'a> LogicalEraEmitter<'a> {
    pub fn new(
        emitter: &'a mut dyn MetadataVisitor,
        era_archive: &'a dyn Archive,
    ) -> LogicalEraEmitter<'a> {
        LogicalEraEmitter {
            inner: Mutex::new(Inner {
                emitter,
                era_archive,
            }),
        }
    }
}
impl<'a> ArrayVisitor<u32> for LogicalEraEmitter<'a> {
    /// Emits one era entry per populated value, preferring the archived
    /// (writeset-derived) era over the on-disk value when present.
    ///
    /// Idiom fix: replaced the deferred `let era; if let … else …` with a
    /// single `match` expression; behaviour is unchanged.
    fn visit(&self, index: u64, b: ArrayBlock<u32>) -> array::Result<()> {
        let mut inner = self.inner.lock().unwrap();
        let begin = index as u32 * b.header.max_entries;
        let end = begin + b.header.nr_entries;
        for (v, block) in b.values.iter().zip(begin..end) {
            // The archive wins: it holds eras newer than the digested value.
            let era = match inner.era_archive.get(block) {
                Some(archived) => ir::Era {
                    block,
                    era: archived,
                },
                None => ir::Era { block, era: *v },
            };
            inner
                .emitter
                .era(&era)
                .map_err(|e| array::value_err(format!("{}", e)))?;
        }
        Ok(())
    }
}
//------------------------------------------
/// Options controlling `era_dump`.
pub struct EraDumpOptions<'a> {
    pub input: &'a Path,
    // None means dump to stdout.
    pub output: Option<&'a Path>,
    pub async_io: bool,
    // Collate writesets into the era values instead of dumping them raw.
    pub logical: bool,
    // Keep going past recoverable metadata damage.
    pub repair: bool,
}
/// Per-run state: just the io engine for dumping.
struct Context {
    engine: Arc<dyn IoEngine + Send + Sync>,
}
/// Builds a dump context, selecting the io engine from the options.
fn mk_context(opts: &EraDumpOptions) -> anyhow::Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.input, MAX_CONCURRENT_IO, false)?)
    } else {
        // Sync engine: at least 8 worker threads, scaled with the CPU count.
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.input, nr_threads, false)?)
    };
    Ok(Context { engine })
}
// notify the visitor about the marked blocks only
/// Reads one writeset's bitset and emits it as runs of marked blocks,
/// compressing consecutive set bits into `MarkedBlocks { begin, len }`.
fn dump_writeset(
    engine: Arc<dyn IoEngine + Send + Sync>,
    out: &mut dyn MetadataVisitor,
    era: u32,
    ws: &Writeset,
    repair: bool,
) -> anyhow::Result<()> {
    // TODO: deal with broken writeset
    let bits = read_bitset_no_err(engine.clone(), ws.root, ws.nr_bits as usize, repair)?;
    out.writeset_b(&ir::Writeset {
        era,
        nr_bits: ws.nr_bits,
    })?;
    // [begin, end) denotes the range of set bits.
    let mut begin: u32 = 0;
    let mut end: u32 = 0;
    for (index, entry) in bits.as_slice().iter().enumerate() {
        let mut n = *entry;
        if n == u32::MAX {
            // Fully-set word: extend the current run (clamped to nr_bits).
            end = std::cmp::min(end + 32, ws.nr_bits);
            continue;
        }
        // Consume the word in alternating runs of zeros and ones.
        while n > 0 {
            let zeros = n.trailing_zeros();
            if zeros > 0 {
                // A gap terminates the open run: flush it.
                if end > begin {
                    let m = ir::MarkedBlocks {
                        begin,
                        len: end - begin,
                    };
                    out.writeset_blocks(&m)?;
                }
                n >>= zeros;
                end += zeros;
                begin = end;
            }
            let ones = n.trailing_ones();
            n >>= ones;
            end = std::cmp::min(end + ones, ws.nr_bits);
        }
        // emit the range if it ends before the entry boundary
        let endpos = ((index as u32) << 5) + 32;
        if end < endpos {
            if end > begin {
                let m = ir::MarkedBlocks {
                    begin,
                    len: end - begin,
                };
                out.writeset_blocks(&m)?;
            }
            begin = endpos;
            end = begin;
        }
    }
    // Flush any run still open at the end of the bitset.
    if end > begin {
        let m = ir::MarkedBlocks {
            begin,
            len: end - begin,
        };
        out.writeset_blocks(&m)?;
    }
    out.writeset_e()?;
    Ok(())
}
/// Raw dump: emits the superblock, each archived writeset in era order, then
/// the era array exactly as stored on disk.
pub fn dump_metadata(
    engine: Arc<dyn IoEngine + Send + Sync>,
    out: &mut dyn MetadataVisitor,
    sb: &Superblock,
    repair: bool,
) -> anyhow::Result<()> {
    // The on-disk format doesn't store a uuid in a dumpable form.
    let xml_sb = ir::Superblock {
        uuid: "".to_string(),
        block_size: sb.data_block_size,
        nr_blocks: sb.nr_blocks,
        current_era: sb.current_era,
    };
    out.superblock_b(&xml_sb)?;
    let writesets = get_writesets_ordered(engine.clone(), sb, repair)?;
    for (era, ws) in writesets.iter() {
        dump_writeset(engine.clone(), out, *era as u32, ws, repair)?;
    }
    out.era_b()?;
    let w = ArrayWalker::new(engine.clone(), repair);
    let mut emitter = EraEmitter::new(out);
    w.walk(&mut emitter, sb.era_array_root)?;
    out.era_e()?;
    out.superblock_e()?;
    out.eof()?;
    Ok(())
}
//-----------------------------------------
/// Collects all writesets (the archived tree plus the in-flight
/// current_writeset, if any) and returns them ordered by era. Errors if the
/// current writeset's era collides with an archived one, or if the expected
/// contiguous run of eras has a hole.
fn get_writesets_ordered(
    engine: Arc<dyn IoEngine + Send + Sync>,
    sb: &Superblock,
    repair: bool,
) -> Result<Vec<(u32, Writeset)>> {
    let mut path = vec![0];
    let mut writesets =
        btree_to_map::<Writeset>(&mut path, engine.clone(), repair, sb.writeset_tree_root)?;
    // A non-zero root means there's an open writeset for the current era.
    if sb.current_writeset.root != 0 {
        if writesets.contains_key(&(sb.current_era as u64)) {
            return Err(anyhow!(
                "Duplicated era found in current_writeset and the writeset tree"
            ));
        }
        writesets.insert(sb.current_era as u64, sb.current_writeset);
    }
    if writesets.is_empty() {
        return Ok(Vec::new());
    }
    // Writeset eras must form a contiguous run ending at current_era.
    let mut v = Vec::<(u32, Writeset)>::new();
    let era_begin = sb.current_era.wrapping_sub((writesets.len() - 1) as u32);
    for era in era_begin..=sb.current_era {
        if let Some(ws) = writesets.get(&(era as u64)) {
            v.push((era, *ws));
        } else {
            return Err(anyhow!("Writeset of era {} is not present", era));
        }
    }
    Ok(v)
}
/// Folds one writeset's bitset into the archive: every set bit records that
/// its block was written in the writeset's era (encoded as `index + 1` so
/// zero can mean "not archived").
fn collate_writeset(index: u32, bitset: &FixedBitSet, archive: &mut dyn Archive) -> Result<()> {
    let era_delta = index + 1;
    for (i, entry) in bitset.as_slice().iter().enumerate() {
        // bi is the block number of this word's least significant bit.
        let mut bi = (i << 5) as u32;
        let mut n = *entry;
        while n > 0 {
            if n & 0x1 > 0 {
                archive.set(bi, era_delta)?;
            }
            n >>= 1;
            bi += 1;
        }
    }
    Ok(())
}
/// Builds the era archive by folding every writeset (oldest first) into it;
/// later writesets overwrite earlier deltas so each block ends up with its
/// newest era.
fn collate_writesets(
    engine: Arc<dyn IoEngine + Send + Sync>,
    sb: &Superblock,
    repair: bool,
) -> Result<Box<dyn Archive>> {
    let writesets = get_writesets_ordered(engine.clone(), sb, repair)?;
    // Era of the oldest archived writeset (0 when there are none).
    let archived_begin = writesets.get(0).map_or(0u32, |(era, _ws)| *era);
    let mut archive = new_era_archive(sb.nr_blocks, archived_begin, writesets.len() as u32);
    for (index, (_era, ws)) in writesets.iter().enumerate() {
        let bitset = read_bitset_no_err(engine.clone(), ws.root, ws.nr_bits as usize, repair)?;
        collate_writeset(index as u32, &bitset, archive.as_mut())?;
    }
    Ok(archive)
}
/// Logical dump: collates the writesets into an era archive first, then
/// walks the era array emitting the effective (archive-overridden) era per
/// block. No writeset elements appear in the output.
pub fn dump_metadata_logical(
    engine: Arc<dyn IoEngine + Send + Sync>,
    out: &mut dyn MetadataVisitor,
    sb: &Superblock,
    repair: bool,
) -> anyhow::Result<()> {
    let era_archive = collate_writesets(engine.clone(), sb, repair)?;
    let xml_sb = ir::Superblock {
        uuid: "".to_string(),
        block_size: sb.data_block_size,
        nr_blocks: sb.nr_blocks,
        current_era: sb.current_era,
    };
    out.superblock_b(&xml_sb)?;
    out.era_b()?;
    let w = ArrayWalker::new(engine, repair);
    let mut emitter = LogicalEraEmitter::new(out, era_archive.deref());
    w.walk(&mut emitter, sb.era_array_root)?;
    out.era_e()?;
    out.superblock_e()?;
    out.eof()?;
    Ok(())
}
//-----------------------------------------
/// Entry point for era_dump: writes an XML dump of the era metadata to the
/// requested file, or stdout when no output path was given.
///
/// Idiom fix: the `opts.output.is_some()` + `unwrap()` pair is replaced with
/// a `match` on the Option; behaviour is unchanged.
pub fn dump(opts: EraDumpOptions) -> anyhow::Result<()> {
    let ctx = mk_context(&opts)?;
    let sb = read_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION)?;
    let writer: Box<dyn Write> = match opts.output {
        Some(path) => Box::new(BufWriter::new(File::create(path)?)),
        None => Box::new(BufWriter::new(std::io::stdout())),
    };
    let mut out = xml::XmlWriter::new(writer, false);
    // A logical dump needs writesets to collate; fall back to a raw dump.
    let writesets = get_writesets_ordered(ctx.engine.clone(), &sb, opts.repair)?;
    if opts.logical && !writesets.is_empty() {
        dump_metadata_logical(ctx.engine, &mut out, &sb, opts.repair)
    } else {
        dump_metadata(ctx.engine, &mut out, &sb, opts.repair)
    }
}
//------------------------------------------

View File

@ -1,285 +0,0 @@
use anyhow::Result;
use quick_xml::events::{BytesEnd, BytesStart, Event};
use quick_xml::Writer;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::path::Path;
use std::sync::{Arc, Mutex};
use crate::era::superblock::*;
use crate::era::writeset::*;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::math::div_up;
use crate::pdata::array::{self, value_err, ArrayBlock};
use crate::pdata::array_walker::*;
use crate::pdata::btree_walker::*;
use crate::xml::mk_attr;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
/// Array visitor that ORs writeset bitmap words into a shared accumulator.
struct BitsetCollator<'a> {
    composed_bits: Box<Mutex<&'a mut [u64]>>,
}
impl<'a> BitsetCollator<'a> {
    fn new(bitset: &mut [u64]) -> BitsetCollator {
        BitsetCollator {
            composed_bits: Box::new(Mutex::new(bitset)),
        }
    }
}
impl<'a> ArrayVisitor<u64> for BitsetCollator<'a> {
    /// ORs this array block's words into the accumulator at their position.
    fn visit(&self, index: u64, b: ArrayBlock<u64>) -> array::Result<()> {
        let mut bitset = self.composed_bits.lock().unwrap();
        let idx = index as usize * b.header.max_entries as usize; // index of u64 in bitset array
        for (entry, dest) in b.values.iter().zip(bitset.iter_mut().skip(idx)) {
            *dest |= entry;
        }
        Ok(())
    }
}
//------------------------------------------
/// Array visitor that sets a bit for every data block whose era value is at
/// or above the threshold.
struct EraArrayCollator<'a> {
    composed_bits: Box<Mutex<&'a mut [u64]>>,
    // Eras below this are ignored.
    threshold: u32,
}
impl<'a> EraArrayCollator<'a> {
    fn new(bitset: &mut [u64], threshold: u32) -> EraArrayCollator {
        EraArrayCollator {
            composed_bits: Box::new(Mutex::new(bitset)),
            threshold,
        }
    }
}
impl<'a> ArrayVisitor<u32> for EraArrayCollator<'a> {
    /// Marks blocks whose era >= threshold. Walks the accumulator with a
    /// buffered word (`buf`) so each u64 is written back only once per
    /// array block, stepping the iterator forward as blocks advance.
    fn visit(&self, index: u64, b: ArrayBlock<u32>) -> array::Result<()> {
        let blk_begin = index as usize * b.header.max_entries as usize; // range of data blocks
        let blk_end = blk_begin + b.header.max_entries as usize;
        let mut bitset = self.composed_bits.lock().unwrap();
        let mut bitset_iter = bitset.iter_mut();
        let mut idx = blk_begin >> 6; // index of u64 in bitset array
        let mut dest = bitset_iter
            .nth(idx)
            .ok_or_else(|| value_err("array index out of bounds".to_string()))?;
        let mut buf = *dest;
        for (era, blk) in b.values.iter().zip(blk_begin..blk_end) {
            if *era < self.threshold {
                continue;
            }
            // Crossing into a later word: flush the buffer and advance.
            let steps = (blk >> 6) - idx;
            if steps > 0 {
                *dest = buf;
                idx += steps;
                // nth(steps - 1) advances past the already-consumed word.
                dest = bitset_iter
                    .nth(steps - 1)
                    .ok_or_else(|| value_err("array index out of bounds".to_string()))?;
                buf = *dest;
            }
            buf |= 1 << (blk & 0x3F);
        }
        // Flush the final buffered word.
        *dest = buf;
        Ok(())
    }
}
//------------------------------------------
/// ORs the writeset bitmap rooted at `writeset_root` into `marked_bits`.
fn collate_writeset(
    engine: Arc<dyn IoEngine + Send + Sync>,
    writeset_root: u64,
    marked_bits: &mut [u64],
) -> Result<()> {
    let mut collator = BitsetCollator::new(marked_bits);
    ArrayWalker::new(engine, false).walk(&mut collator, writeset_root)?;
    Ok(())
}
/// Marks in `marked_bits` every block whose era-array value is at or above
/// `threshold`.
fn collate_era_array(
    engine: Arc<dyn IoEngine + Send + Sync>,
    era_array_root: u64,
    marked_bits: &mut [u64],
    threshold: u32,
) -> Result<()> {
    let mut collator = EraArrayCollator::new(marked_bits, threshold);
    ArrayWalker::new(engine, false).walk(&mut collator, era_array_root)?;
    Ok(())
}
/// Builds a bitmap (one bit per data block) of blocks written in any era at
/// or after `threshold`, combining the archived writesets with the era array.
///
/// Idiom fix: `Vec::new()` + `resize` replaced with `vec![0u64; n]`;
/// behaviour is unchanged.
fn mark_blocks_since(
    engine: Arc<dyn IoEngine + Send + Sync>,
    sb: &Superblock,
    threshold: u32,
) -> Result<Vec<u64>> {
    let mut marked_bits = vec![0u64; div_up(sb.nr_blocks as usize, 64)];
    let mut path = vec![0];
    let wsets = btree_to_map::<Writeset>(&mut path, engine.clone(), false, sb.writeset_tree_root)?;
    // Fold in every writeset from a qualifying era.
    for (era, ws) in wsets.iter() {
        if (*era as u32) < threshold {
            continue;
        }
        collate_writeset(engine.clone(), ws.root, &mut marked_bits)?;
    }
    // Eras older than the first archived writeset only survive in the era
    // array, so scan it too when the threshold predates the archive.
    if let Some(archived_begin) = wsets.keys().next() {
        if *archived_begin as u32 > threshold {
            collate_era_array(
                engine.clone(),
                sb.era_array_root,
                &mut marked_bits,
                threshold,
            )?;
        }
    }
    Ok(marked_bits)
}
/// Opens the root <blocks> element.
fn emit_start<W: Write>(w: &mut Writer<W>) -> Result<()> {
    let elem = BytesStart::owned_name(b"blocks".to_vec());
    w.write_event(Event::Start(elem))?;
    Ok(())
}
/// Closes the root </blocks> element.
fn emit_end<W: Write>(w: &mut Writer<W>) -> Result<()> {
    let elem = BytesEnd::borrowed(b"blocks");
    w.write_event(Event::End(elem))?;
    Ok(())
}
/// Emits the half-open range [begin, end): a <range> element for multiple
/// blocks, a <block> element for a single block, nothing when empty.
fn emit_range<W: Write>(w: &mut Writer<W>, begin: u32, end: u32) -> Result<()> {
    if end > begin + 1 {
        let mut elem = BytesStart::owned_name(b"range".to_vec());
        elem.push_attribute(mk_attr(b"begin", begin));
        elem.push_attribute(mk_attr(b"end", end));
        w.write_event(Event::Empty(elem))?;
    } else if end > begin {
        let mut elem = BytesStart::owned_name(b"block".to_vec());
        elem.push_attribute(mk_attr(b"block", begin));
        w.write_event(Event::Empty(elem))?;
    }
    Ok(())
}
/// Writes the marked-blocks bitmap as XML, compressing consecutive set bits
/// into range elements.
///
/// Idiom fix: `u64::max_value()` is deprecated; use `u64::MAX`, matching the
/// `u32::MAX`/`u64::MAX` usage elsewhere in this codebase.
fn emit_blocks<W: Write>(marked_bits: &[u64], nr_blocks: u32, w: &mut Writer<W>) -> Result<()> {
    // [begin, end) is the run of set bits currently being accumulated.
    let mut begin: u32 = 0;
    let mut end: u32 = 0;
    emit_start(w)?;
    for (index, entry) in marked_bits.iter().enumerate() {
        let mut n = *entry;
        if n == u64::MAX {
            // Fully-set word: extend the run (clamped to nr_blocks).
            end = std::cmp::min(end + 64, nr_blocks);
            continue;
        }
        // Consume the word in alternating runs of zeros and ones.
        while n > 0 {
            let zeros = n.trailing_zeros();
            if zeros > 0 {
                // A gap terminates the open run: flush it.
                if end > begin {
                    emit_range(w, begin, end)?;
                }
                n >>= zeros;
                end += zeros;
                begin = end;
            }
            let ones = n.trailing_ones();
            n >>= ones;
            end = std::cmp::min(end + ones, nr_blocks);
        }
        // Flush a run that ends before the word boundary.
        let endpos = (index << 6) as u32 + 64;
        if end < endpos {
            if end > begin {
                emit_range(w, begin, end)?;
            }
            begin = endpos;
            end = begin;
        }
    }
    // Flush any run still open at the end of the bitmap.
    if end > begin {
        emit_range(w, begin, end)?;
    }
    emit_end(w)?;
    Ok(())
}
//------------------------------------------
/// Options controlling `era_invalidate`.
pub struct EraInvalidateOptions<'a> {
    pub input: &'a Path,
    // None means write to stdout.
    pub output: Option<&'a Path>,
    pub async_io: bool,
    // Report blocks written in this era or later.
    pub threshold: u32,
    // Read from the metadata snapshot instead of the live superblock.
    pub metadata_snap: bool,
}
/// Per-run state: just the io engine.
struct Context {
    engine: Arc<dyn IoEngine + Send + Sync>,
}
/// Builds an invalidate context, selecting the io engine from the options.
fn mk_context(opts: &EraInvalidateOptions) -> anyhow::Result<Context> {
    // NOTE(review): the final `new_with` argument appears to gate exclusive
    // access; it's disabled when reading a metadata snapshot (the device may
    // be live) — confirm against the IoEngine constructors.
    let excl = !opts.metadata_snap;
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new_with(
            opts.input,
            MAX_CONCURRENT_IO,
            false,
            excl,
        )?)
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new_with(opts.input, nr_threads, false, excl)?)
    };
    Ok(Context { engine })
}
/// Entry point for era_invalidate: writes, as XML, the set of blocks written
/// in era `threshold` or later, to the requested file or stdout.
///
/// Idiom fix: the `opts.output.is_some()` + `unwrap()` pair is replaced with
/// a `match` on the Option; behaviour is unchanged.
pub fn invalidate(opts: &EraInvalidateOptions) -> Result<()> {
    let ctx = mk_context(opts)?;
    let mut sb = read_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION)?;
    // Optionally chase the snapshot pointer and work from that superblock.
    if opts.metadata_snap {
        sb = read_superblock(ctx.engine.as_ref(), sb.metadata_snap)?;
    }
    let w: Box<dyn Write> = match opts.output {
        Some(path) => Box::new(BufWriter::new(File::create(path)?)),
        None => Box::new(BufWriter::new(std::io::stdout())),
    };
    // 0x20 is the space character; indent two spaces per nesting level.
    let mut writer = Writer::new_with_indent(w, 0x20, 2);
    let marked_bits = mark_blocks_since(ctx.engine, &sb, opts.threshold)?;
    emit_blocks(&marked_bits, sb.nr_blocks, &mut writer)
}
//------------------------------------------

View File

@ -1,54 +0,0 @@
use anyhow::Result;
//------------------------------------------
/// Intermediate representation of an era superblock as it appears in dumps.
#[derive(Clone)]
pub struct Superblock {
    pub uuid: String,
    pub block_size: u32,
    pub nr_blocks: u32,
    pub current_era: u32,
}
/// One writeset: the era it belongs to and the number of bits it tracks.
#[derive(Clone)]
pub struct Writeset {
    pub era: u32,
    pub nr_bits: u32,
}
/// A run of `len` consecutive marked blocks starting at `begin`.
#[derive(Clone)]
pub struct MarkedBlocks {
    pub begin: u32,
    pub len: u32,
}
/// One era-array entry: the era in which `block` was last written.
#[derive(Clone)]
pub struct Era {
    pub block: u32,
    pub era: u32,
}
//------------------------------------------
/// Flow-control value returned by `MetadataVisitor` callbacks.
///
/// Improvement: added `Copy`, `Debug`, `PartialEq` and `Eq` derives — the
/// enum is a trivial two-variant flag, and the extra derives are backward
/// compatible while making it testable and debuggable.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Visit {
    /// Keep walking the metadata.
    Continue,
    /// Stop the walk early.
    Stop,
}
/// SAX-style visitor over era metadata; implemented by the XML writer and by
/// `Restorer`. The `_b`/`_e` pairs bracket nested elements.
pub trait MetadataVisitor {
    fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit>;
    fn superblock_e(&mut self) -> Result<Visit>;
    fn writeset_b(&mut self, ws: &Writeset) -> Result<Visit>;
    fn writeset_e(&mut self) -> Result<Visit>;
    // Called between writeset_b/writeset_e with each run of marked blocks.
    fn writeset_blocks(&mut self, blocks: &MarkedBlocks) -> Result<Visit>;
    fn era_b(&mut self) -> Result<Visit>;
    fn era_e(&mut self) -> Result<Visit>;
    // Called between era_b/era_e with each era-array entry.
    fn era(&mut self, era: &Era) -> Result<Visit>;
    fn eof(&mut self) -> Result<Visit>;
}
//------------------------------------------

View File

@ -1,9 +0,0 @@
// Era metadata tooling: each submodule implements one era_* command or a
// shared part of the on-disk/XML formats.
pub mod check;
pub mod dump;
pub mod invalidate;
// Intermediate representation shared by dump/restore/xml.
pub mod ir;
pub mod repair;
pub mod restore;
// On-disk superblock pack/unpack.
pub mod superblock;
pub mod writeset;
pub mod xml;

View File

@ -1,68 +0,0 @@
use anyhow::Result;
use std::path::Path;
use std::sync::Arc;
use crate::era::dump::*;
use crate::era::restore::*;
use crate::era::superblock::*;
use crate::io_engine::*;
use crate::pdata::space_map_metadata::*;
use crate::report::*;
use crate::write_batcher::*;
//------------------------------------------
/// Options controlling `era_repair`.
pub struct EraRepairOptions<'a> {
    pub input: &'a Path,
    pub output: &'a Path,
    pub async_io: bool,
    pub report: Arc<Report>,
}
/// Per-run state: separate engines for the damaged input and fresh output.
struct Context {
    _report: Arc<Report>,
    engine_in: Arc<dyn IoEngine + Send + Sync>,
    engine_out: Arc<dyn IoEngine + Send + Sync>,
}
const MAX_CONCURRENT_IO: u32 = 1024;
/// Builds a repair context with a read engine for the input and a write
/// engine for the output.
fn new_context(opts: &EraRepairOptions) -> Result<Context> {
    // NOTE(review): the final constructor argument appears to select
    // writability (false for input, true for output) — confirm against the
    // IoEngine constructors.
    let (engine_in, engine_out): (
        Arc<dyn IoEngine + Send + Sync>,
        Arc<dyn IoEngine + Send + Sync>,
    ) = if opts.async_io {
        (
            Arc::new(AsyncIoEngine::new(opts.input, MAX_CONCURRENT_IO, false)?),
            Arc::new(AsyncIoEngine::new(opts.output, MAX_CONCURRENT_IO, true)?),
        )
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        (
            Arc::new(SyncIoEngine::new(opts.input, nr_threads, false)?),
            Arc::new(SyncIoEngine::new(opts.output, nr_threads, true)?),
        )
    };
    Ok(Context {
        _report: opts.report.clone(),
        engine_in,
        engine_out,
    })
}
//------------------------------------------
/// Entry point for era_repair: dumps the (possibly damaged) input metadata
/// straight into a `Restorer`, rebuilding clean metadata on the output
/// device. The `true` passed to dump_metadata enables best-effort repair.
pub fn repair(opts: EraRepairOptions) -> Result<()> {
    let ctx = new_context(&opts)?;
    let sb = read_superblock(ctx.engine_in.as_ref(), SUPERBLOCK_LOCATION)?;
    let sm = core_metadata_sm(ctx.engine_out.get_nr_blocks(), u32::MAX);
    let mut w = WriteBatcher::new(
        ctx.engine_out.clone(),
        sm.clone(),
        ctx.engine_out.get_batch_size(),
    );
    let mut restorer = Restorer::new(&mut w);
    dump_metadata(ctx.engine_in, &mut restorer, &sb, true)
}
//------------------------------------------

View File

@ -1,313 +0,0 @@
use anyhow::{anyhow, Result};
use std::collections::BTreeMap;
use std::fs::OpenOptions;
use std::path::Path;
use std::sync::Arc;
use crate::era::ir::{self, MetadataVisitor, Visit};
use crate::era::superblock::*;
use crate::era::writeset::Writeset;
use crate::era::xml;
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::array_builder::*;
use crate::pdata::btree_builder::*;
use crate::pdata::space_map_common::pack_root;
use crate::pdata::space_map_metadata::*;
use crate::report::*;
use crate::write_batcher::*;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
/// Options controlling `era_restore`.
pub struct EraRestoreOptions<'a> {
    // XML dump to read.
    pub input: &'a Path,
    // Device/file to write restored binary metadata to.
    pub output: &'a Path,
    pub async_io: bool,
    pub report: Arc<Report>,
}
/// Per-run state: the (write-mode) engine for the output device.
struct Context {
    _report: Arc<Report>,
    engine: Arc<dyn IoEngine + Send + Sync>,
}
/// Builds a restore context; the engine opens the *output* device for writing.
fn mk_context(opts: &EraRestoreOptions) -> anyhow::Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.output, MAX_CONCURRENT_IO, true)?)
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.output, nr_threads, true)?)
    };
    Ok(Context {
        _report: opts.report.clone(),
        engine,
    })
}
//------------------------------------------
/// Parser state: which XML element we are currently inside.
#[derive(PartialEq)]
enum Section {
    None,
    Superblock,
    Writeset,
    EraArray,
    Finalized,
}
/// `MetadataVisitor` that rebuilds binary era metadata from a dump stream.
pub struct Restorer<'a> {
    w: &'a mut WriteBatcher,
    sb: Option<ir::Superblock>,
    // Completed writesets keyed by era, inserted as each one closes.
    writesets: BTreeMap<u32, Writeset>,
    writeset_builder: Option<ArrayBuilder<u64>>, // bitset
    current_writeset: Option<ir::Writeset>,
    era_array_builder: Option<ArrayBuilder<u32>>,
    // 64-bit word of writeset bits being accumulated before being pushed.
    writeset_entry: u64,
    // Index of that word within the writeset's bitset array.
    entry_index: u32,
    in_section: Section,
}
impl<'a> Restorer<'a> {
    /// Creates a restorer that writes metadata blocks through `w`.
    pub fn new(w: &'a mut WriteBatcher) -> Restorer<'a> {
        Restorer {
            w,
            sb: None,
            writesets: BTreeMap::new(),
            writeset_builder: None,
            current_writeset: None,
            era_array_builder: None,
            writeset_entry: 0,
            entry_index: 0,
            in_section: Section::None,
        }
    }
    /// Builds the writeset tree, completes the era array, writes the space
    /// map, and finally writes the superblock. Called when the source
    /// superblock element closes.
    fn finalize(&mut self) -> Result<()> {
        let src_sb;
        if let Some(sb) = self.sb.take() {
            src_sb = sb;
        } else {
            return Err(anyhow!("not in superblock"));
        }
        // build the writeset tree
        let mut tree_builder = BTreeBuilder::<Writeset>::new(Box::new(NoopRC {}));
        let mut writesets = BTreeMap::<u32, Writeset>::new();
        std::mem::swap(&mut self.writesets, &mut writesets);
        for (era, ws) in writesets {
            tree_builder.push_value(self.w, era as u64, ws)?;
        }
        let writeset_tree_root = tree_builder.complete(self.w)?;
        // complete the era array
        let era_array_root;
        if let Some(builder) = self.era_array_builder.take() {
            era_array_root = builder.complete(self.w)?;
        } else {
            return Err(anyhow!("internal error. couldn't find era array"));
        }
        // build metadata space map
        let metadata_sm_root = build_metadata_sm(self.w)?;
        let sb = Superblock {
            flags: SuperblockFlags {
                clean_shutdown: true,
            },
            block: SUPERBLOCK_LOCATION,
            version: 1,
            metadata_sm_root,
            data_block_size: src_sb.block_size,
            nr_blocks: src_sb.nr_blocks,
            current_era: src_sb.current_era,
            // Restored metadata has no open writeset (root == 0).
            current_writeset: Writeset {
                nr_bits: src_sb.nr_blocks,
                root: 0,
            },
            writeset_tree_root,
            era_array_root,
            metadata_snap: 0,
        };
        write_superblock(self.w.engine.as_ref(), SUPERBLOCK_LOCATION, &sb)?;
        self.in_section = Section::Finalized;
        Ok(())
    }
}
impl<'a> MetadataVisitor for Restorer<'a> {
    /// Opens the superblock section: reserves block 0 and sets up the era
    /// array builder.
    fn superblock_b(&mut self, sb: &ir::Superblock) -> Result<Visit> {
        if self.in_section != Section::None {
            return Err(anyhow!("duplicated superblock"));
        }
        self.sb = Some(sb.clone());
        // The very first allocation must land on the superblock location.
        let b = self.w.alloc()?;
        if b.loc != SUPERBLOCK_LOCATION {
            return Err(anyhow!("superblock was occupied"));
        }
        self.writeset_builder = None;
        self.era_array_builder = Some(ArrayBuilder::new(sb.nr_blocks as u64));
        self.in_section = Section::Superblock;
        Ok(Visit::Continue)
    }
    fn superblock_e(&mut self) -> Result<Visit> {
        self.finalize()?;
        Ok(Visit::Continue)
    }
    /// Opens a writeset: starts a fresh bitset builder and clears the
    /// buffered word.
    fn writeset_b(&mut self, ws: &ir::Writeset) -> Result<Visit> {
        if self.in_section != Section::Superblock {
            return Err(anyhow!("not in superblock"));
        }
        self.writeset_builder = Some(ArrayBuilder::new(div_up(ws.nr_bits as u64, 64)));
        self.entry_index = 0;
        self.writeset_entry = 0;
        self.current_writeset = Some(ws.clone());
        self.in_section = Section::Writeset;
        Ok(Visit::Continue)
    }
    /// Closes a writeset: flushes the buffered word, completes the bitset
    /// array and records its root keyed by era.
    fn writeset_e(&mut self) -> Result<Visit> {
        if self.in_section != Section::Writeset {
            return Err(anyhow!("not in writeset"));
        }
        if let Some(mut builder) = self.writeset_builder.take() {
            if let Some(ws) = self.current_writeset.take() {
                // push the trailing bits
                builder.push_value(self.w, self.entry_index as u64, self.writeset_entry)?;
                let root = builder.complete(self.w)?;
                self.writesets.insert(
                    ws.era,
                    Writeset {
                        root,
                        nr_bits: ws.nr_bits,
                    },
                );
                self.in_section = Section::Superblock;
            } else {
                return Err(anyhow!("internal error. couldn't find writeset"));
            }
        } else {
            return Err(anyhow!("internal error. couldn't find writeset"));
        }
        Ok(Visit::Continue)
    }
    /// Sets the bits [begin, begin+len) in the writeset bitset, buffering one
    /// 64-bit word at a time and pushing completed words to the builder.
    fn writeset_blocks(&mut self, blocks: &ir::MarkedBlocks) -> Result<Visit> {
        let first = blocks.begin;
        let last = first + blocks.len - 1; // inclusive
        let mut idx = first >> 6;
        let last_idx = last >> 6; // inclusive
        let builder = self.writeset_builder.as_mut().unwrap();
        // emit the bufferred bits
        if idx > self.entry_index {
            builder.push_value(self.w, self.entry_index as u64, self.writeset_entry)?;
            self.entry_index = idx;
            self.writeset_entry = 0;
        }
        // buffer the bits of the first entry
        let bi_first = first & 63;
        if idx == last_idx {
            // Range falls within a single word: mask bits [bi_first, bi_last].
            let bi_last = last & 63;
            let mask = 1u64 << bi_last;
            self.writeset_entry |= (mask ^ mask.wrapping_sub(1)) & (u64::MAX << bi_first);
            return Ok(Visit::Continue);
        }
        self.writeset_entry |= u64::MAX << bi_first;
        // emit the all-1 entries if necessary
        while idx < last_idx {
            builder.push_value(self.w, self.entry_index as u64, self.writeset_entry)?;
            self.entry_index += 1;
            self.writeset_entry = u64::MAX;
            idx += 1;
        }
        // buffer the bits of the last entry
        builder.push_value(self.w, self.entry_index as u64, self.writeset_entry)?;
        let bi_last = last & 63;
        let mask = 1u64 << bi_last;
        self.entry_index += 1;
        self.writeset_entry |= mask ^ mask.wrapping_sub(1);
        Ok(Visit::Continue)
    }
    fn era_b(&mut self) -> Result<Visit> {
        if self.in_section != Section::Superblock {
            return Err(anyhow!("not in superblock"));
        }
        self.in_section = Section::EraArray;
        Ok(Visit::Continue)
    }
    fn era_e(&mut self) -> Result<Visit> {
        if self.in_section != Section::EraArray {
            return Err(anyhow!("not in era array"));
        }
        self.in_section = Section::Superblock;
        Ok(Visit::Continue)
    }
    /// Appends one era-array entry.
    fn era(&mut self, era: &ir::Era) -> Result<Visit> {
        let builder = self.era_array_builder.as_mut().unwrap();
        builder.push_value(self.w, era.block as u64, era.era)?;
        Ok(Visit::Continue)
    }
    /// Verifies the stream ended after a complete, finalized superblock.
    fn eof(&mut self) -> Result<Visit> {
        if self.in_section != Section::Finalized {
            return Err(anyhow!("incompleted source metadata"));
        }
        Ok(Visit::Continue)
    }
}
//------------------------------------------
/// Writes the metadata space map and packs its root into the fixed-size
/// buffer the superblock reserves for it.
fn build_metadata_sm(w: &mut WriteBatcher) -> Result<Vec<u8>> {
    let root = write_metadata_sm(w)?;
    let packed = pack_root(&root, SPACE_MAP_ROOT_SIZE)?;
    Ok(packed)
}
//------------------------------------------
/// Entry point for era_restore: parses the XML dump at `opts.input` and
/// rebuilds binary metadata on `opts.output` via a `Restorer`.
pub fn restore(opts: EraRestoreOptions) -> Result<()> {
    let input = OpenOptions::new()
        .read(true)
        .write(false)
        .open(opts.input)?;
    let ctx = mk_context(&opts)?;
    let sm = core_metadata_sm(ctx.engine.get_nr_blocks(), u32::MAX);
    let mut w = WriteBatcher::new(ctx.engine.clone(), sm.clone(), ctx.engine.get_batch_size());
    let mut restorer = Restorer::new(&mut w);
    xml::read(input, &mut restorer)?;
    Ok(())
}
//------------------------------------------

View File

@ -1,153 +0,0 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{bytes::complete::*, number::complete::*, IResult};
use std::io::Cursor;
use crate::checksum::*;
use crate::era::writeset::Writeset;
use crate::io_engine::*;
//------------------------------------------
// Size of the packed space-map root embedded in the superblock.
pub const SPACE_MAP_ROOT_SIZE: usize = 128;
pub const SUPERBLOCK_LOCATION: u64 = 0;
// On-disk magic identifying an era superblock.
const MAGIC: u64 = 0o17660203573; // 0x7EC1077B in hex
const UUID_SIZE: usize = 16;
//------------------------------------------
/// Flags stored in the superblock's flags word (only bit 0 is used).
#[derive(Debug, Clone)]
pub struct SuperblockFlags {
    pub clean_shutdown: bool,
}
/// In-core form of the on-disk era superblock; field order mirrors the
/// on-disk layout parsed by `unpack`.
#[derive(Debug, Clone)]
pub struct Superblock {
    pub flags: SuperblockFlags,
    pub block: u64,
    pub version: u32,
    // Opaque packed space-map root, SPACE_MAP_ROOT_SIZE bytes.
    pub metadata_sm_root: Vec<u8>,
    pub data_block_size: u32,
    pub nr_blocks: u32,
    pub current_era: u32,
    // The open writeset; root == 0 means there isn't one.
    pub current_writeset: Writeset,
    pub writeset_tree_root: u64,
    pub era_array_root: u64,
    pub metadata_snap: u64,
}
/// Parses the on-disk superblock layout (little endian). The field order
/// here *is* the format specification — do not reorder.
fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
    let (i, _csum) = le_u32(data)?;
    let (i, flags) = le_u32(i)?;
    let (i, block) = le_u64(i)?;
    let (i, _uuid) = take(16usize)(i)?;
    let (i, _magic) = le_u64(i)?;
    let (i, version) = le_u32(i)?;
    let (i, metadata_sm_root) = take(SPACE_MAP_ROOT_SIZE)(i)?;
    let (i, data_block_size) = le_u32(i)?;
    let (i, _metadata_block_size) = le_u32(i)?;
    let (i, nr_blocks) = le_u32(i)?;
    let (i, current_era) = le_u32(i)?;
    // nr_bits + root form the current (open) writeset.
    let (i, nr_bits) = le_u32(i)?;
    let (i, root) = le_u64(i)?;
    let (i, writeset_tree_root) = le_u64(i)?;
    let (i, era_array_root) = le_u64(i)?;
    let (i, metadata_snap) = le_u64(i)?;
    Ok((
        i,
        Superblock {
            flags: SuperblockFlags {
                // Only bit 0 (clean shutdown) is defined.
                clean_shutdown: (flags & 0x1) != 0,
            },
            block,
            version,
            metadata_sm_root: metadata_sm_root.to_vec(),
            data_block_size,
            nr_blocks,
            current_era,
            current_writeset: Writeset { nr_bits, root },
            writeset_tree_root,
            era_array_root,
            metadata_snap,
        },
    ))
}
/// Reads and validates the superblock at `loc`, checking the block type
/// (which covers the checksum) before unpacking.
///
/// Idiom fix: the `if let Ok(…) … else` tail is replaced by a
/// `map`/`map_err` chain; behaviour is unchanged.
pub fn read_superblock(engine: &dyn IoEngine, loc: u64) -> Result<Superblock> {
    let b = engine.read(loc)?;
    if metadata_block_type(b.get_data()) != BT::ERA_SUPERBLOCK {
        return Err(anyhow!("bad checksum in superblock"));
    }
    unpack(b.get_data())
        .map(|(_, sb)| sb)
        .map_err(|_| anyhow!("couldn't unpack superblock"))
}
//------------------------------------------
/// Serialises `sb` in the on-disk layout (inverse of `unpack`).  The
/// write order mirrors the on-disk layout exactly — do not reorder.
fn pack_superblock<W: WriteBytesExt>(sb: &Superblock, w: &mut W) -> Result<()> {
    // checksum, which we don't know yet
    w.write_u32::<LittleEndian>(0)?;
    // flags
    let mut flags: u32 = 0;
    if sb.flags.clean_shutdown {
        flags |= 0x1;
    }
    w.write_u32::<LittleEndian>(flags)?;
    w.write_u64::<LittleEndian>(sb.block)?;
    // The uuid is always written as zeroes.
    w.write_all(&[0; UUID_SIZE])?;
    w.write_u64::<LittleEndian>(MAGIC)?;
    w.write_u32::<LittleEndian>(sb.version)?;
    w.write_all(&sb.metadata_sm_root)?;
    w.write_u32::<LittleEndian>(sb.data_block_size)?;
    // metadata block size (in sectors)
    w.write_u32::<LittleEndian>((BLOCK_SIZE >> SECTOR_SHIFT) as u32)?;
    w.write_u32::<LittleEndian>(sb.nr_blocks)?;
    w.write_u32::<LittleEndian>(sb.current_era)?;
    w.write_u32::<LittleEndian>(sb.current_writeset.nr_bits)?;
    w.write_u64::<LittleEndian>(sb.current_writeset.root)?;
    w.write_u64::<LittleEndian>(sb.writeset_tree_root)?;
    w.write_u64::<LittleEndian>(sb.era_array_root)?;
    w.write_u64::<LittleEndian>(sb.metadata_snap)?;
    Ok(())
}
/// Packs, checksums and writes `sb` out.
///
/// NOTE(review): the `_loc` argument is ignored — the block is always
/// written at SUPERBLOCK_LOCATION.  Confirm callers never pass
/// anything else.
pub fn write_superblock(engine: &dyn IoEngine, _loc: u64, sb: &Superblock) -> Result<()> {
    let b = Block::zeroed(SUPERBLOCK_LOCATION);
    // pack the superblock
    {
        let mut cursor = Cursor::new(b.get_data());
        pack_superblock(sb, &mut cursor)?;
    }
    // calculate the checksum
    write_checksum(b.get_data(), BT::ERA_SUPERBLOCK)?;
    // write
    engine.write(&b)?;
    Ok(())
}
//------------------------------------------

View File

@ -1,35 +0,0 @@
use anyhow::Result;
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use crate::pdata::unpack::*;
//------------------------------------------
/// An era writeset: a bitset (rooted at `root`) recording which blocks
/// were written during a particular era.
#[derive(Clone, Copy, Debug)]
pub struct Writeset {
    pub nr_bits: u32,
    pub root: u64,
}
impl Unpack for Writeset {
    fn disk_size() -> u32 {
        // 4 bytes nr_bits + 8 bytes root.
        12
    }
    fn unpack(i: &[u8]) -> IResult<&[u8], Writeset> {
        let (i, nr_bits) = le_u32(i)?;
        let (i, root) = le_u64(i)?;
        Ok((i, Writeset { nr_bits, root }))
    }
}
impl Pack for Writeset {
    fn pack<W: WriteBytesExt>(&self, w: &mut W) -> Result<()> {
        w.write_u32::<LittleEndian>(self.nr_bits)?;
        w.write_u64::<LittleEndian>(self.root)?;
        Ok(())
    }
}
//------------------------------------------

View File

@ -1,313 +0,0 @@
use anyhow::{anyhow, Result};
use quick_xml::events::{BytesEnd, BytesStart, Event};
use quick_xml::{Reader, Writer};
use std::io::{BufRead, BufReader};
use std::io::{Read, Write};
use crate::era::ir::*;
use crate::xml::*;
//---------------------------------------
/// Emits era metadata as XML via the MetadataVisitor callbacks.
pub struct XmlWriter<W: Write> {
    w: Writer<W>,
    // When true, runs of marked blocks are emitted as <marked> range
    // elements; otherwise every block gets an individual <bit>.
    compact: bool,
    // Captured from the superblock; used to pad trailing false bits.
    nr_blocks: u32,
    // How many <bit> elements have been emitted for the current
    // writeset (non-compact mode only).
    emitted_blocks: u32,
}
impl<W: Write> XmlWriter<W> {
    pub fn new(w: W, compact: bool) -> XmlWriter<W> {
        XmlWriter {
            // 0x20 == ' '; indent two spaces per nesting level.
            w: Writer::new_with_indent(w, 0x20, 2),
            compact,
            nr_blocks: 0,
            emitted_blocks: 0,
        }
    }
}
impl<W: Write> MetadataVisitor for XmlWriter<W> {
    // Opens the <superblock> element and records nr_blocks for later
    // padding in writeset_e.
    fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit> {
        let tag = b"superblock";
        let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
        elem.push_attribute(mk_attr(b"uuid", sb.uuid.clone()));
        elem.push_attribute(mk_attr(b"block_size", sb.block_size));
        elem.push_attribute(mk_attr(b"nr_blocks", sb.nr_blocks));
        elem.push_attribute(mk_attr(b"current_era", sb.current_era));
        self.w.write_event(Event::Start(elem))?;
        self.nr_blocks = sb.nr_blocks;
        Ok(Visit::Continue)
    }
    fn superblock_e(&mut self) -> Result<Visit> {
        self.w
            .write_event(Event::End(BytesEnd::borrowed(b"superblock")))?;
        Ok(Visit::Continue)
    }
    // Opens a <writeset> and resets the emitted-bit counter.
    fn writeset_b(&mut self, ws: &Writeset) -> Result<Visit> {
        let tag = b"writeset";
        let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
        elem.push_attribute(mk_attr(b"era", ws.era));
        elem.push_attribute(mk_attr(b"nr_bits", ws.nr_bits));
        self.w.write_event(Event::Start(elem))?;
        self.emitted_blocks = 0;
        Ok(Visit::Continue)
    }
    fn writeset_e(&mut self) -> Result<Visit> {
        // In expanded mode every block must appear, so emit false bits
        // for everything after the last marked run.
        if !self.compact {
            for b in self.emitted_blocks..self.nr_blocks {
                let tag = b"bit";
                let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
                elem.push_attribute(mk_attr(b"block", b));
                elem.push_attribute(mk_attr(b"value", "false"));
                self.w.write_event(Event::Empty(elem))?;
            }
        }
        self.w
            .write_event(Event::End(BytesEnd::borrowed(b"writeset")))?;
        Ok(Visit::Continue)
    }
    fn writeset_blocks(&mut self, blocks: &MarkedBlocks) -> Result<Visit> {
        if self.compact {
            // A single <marked> element covers the whole run.
            let tag = b"marked";
            let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
            elem.push_attribute(mk_attr(b"block_begin", blocks.begin));
            elem.push_attribute(mk_attr(b"len", blocks.len));
            self.w.write_event(Event::Empty(elem))?;
        } else {
            // Emit false bits up to the start of the run...
            for b in self.emitted_blocks..blocks.begin {
                let tag = b"bit";
                let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
                elem.push_attribute(mk_attr(b"block", b));
                elem.push_attribute(mk_attr(b"value", "false"));
                self.w.write_event(Event::Empty(elem))?;
            }
            // ...then true bits for the run itself.
            let end = blocks.begin + blocks.len;
            for b in blocks.begin..end {
                let tag = b"bit";
                let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
                elem.push_attribute(mk_attr(b"block", b));
                elem.push_attribute(mk_attr(b"value", "true"));
                self.w.write_event(Event::Empty(elem))?;
            }
            self.emitted_blocks = end;
        }
        Ok(Visit::Continue)
    }
    fn era_b(&mut self) -> Result<Visit> {
        let tag = b"era_array";
        let elem = BytesStart::owned(tag.to_vec(), tag.len());
        self.w.write_event(Event::Start(elem))?;
        Ok(Visit::Continue)
    }
    fn era_e(&mut self) -> Result<Visit> {
        self.w
            .write_event(Event::End(BytesEnd::borrowed(b"era_array")))?;
        Ok(Visit::Continue)
    }
    // One <era> element per (block, era) pair.
    fn era(&mut self, era: &Era) -> Result<Visit> {
        let tag = b"era";
        let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
        elem.push_attribute(mk_attr(b"block", era.block));
        elem.push_attribute(mk_attr(b"era", era.era));
        self.w.write_event(Event::Empty(elem))?;
        Ok(Visit::Continue)
    }
    fn eof(&mut self) -> Result<Visit> {
        Ok(Visit::Continue)
    }
}
//------------------------------------------
/// Parses a `<superblock>` element's attributes.  Errors on unknown
/// attributes or missing required ones.
fn parse_superblock(e: &BytesStart) -> Result<Superblock> {
    let tag = "superblock";
    let mut uuid: Option<String> = None;
    let mut block_size: Option<u32> = None;
    let mut nr_blocks: Option<u32> = None;
    let mut current_era: Option<u32> = None;
    for a in e.attributes() {
        let kv = a.unwrap();
        match kv.key {
            b"uuid" => uuid = Some(string_val(&kv)),
            b"block_size" => block_size = Some(u32_val(&kv)?),
            b"nr_blocks" => nr_blocks = Some(u32_val(&kv)?),
            b"current_era" => current_era = Some(u32_val(&kv)?),
            _ => return bad_attr(tag, kv.key),
        }
    }
    Ok(Superblock {
        uuid: check_attr(tag, "uuid", uuid)?,
        block_size: check_attr(tag, "block_size", block_size)?,
        // Fixed: the missing-attribute error used to name
        // "nr_cache_blocks" (copy/paste from the cache tools); the era
        // attribute is "nr_blocks".
        nr_blocks: check_attr(tag, "nr_blocks", nr_blocks)?,
        current_era: check_attr(tag, "current_era", current_era)?,
    })
}
/// Parses a `<writeset>` element's attributes.
fn parse_writeset(e: &BytesStart) -> Result<Writeset> {
    let tag = "writeset";
    let mut era: Option<u32> = None;
    let mut nr_bits: Option<u32> = None;

    for attr in e.attributes() {
        let kv = attr.unwrap();
        match kv.key {
            b"era" => era = Some(u32_val(&kv)?),
            b"nr_bits" => nr_bits = Some(u32_val(&kv)?),
            _ => return bad_attr(tag, kv.key),
        }
    }

    // Both attributes are required.
    let era = check_attr(tag, "era", era)?;
    let nr_bits = check_attr(tag, "nr_bits", nr_bits)?;
    Ok(Writeset { era, nr_bits })
}
/// Parses a single `<bit>` element.  Returns a one-block marked run
/// when the bit is set, None for a clear bit.
fn parse_writeset_bit(e: &BytesStart) -> Result<Option<MarkedBlocks>> {
    let tag = "bit";
    let mut block: Option<u32> = None;
    let mut value: Option<bool> = None;

    for attr in e.attributes() {
        let kv = attr.unwrap();
        match kv.key {
            b"block" => block = Some(u32_val(&kv)?),
            b"value" => value = Some(bool_val(&kv)?),
            _ => return bad_attr(tag, kv.key),
        }
    }

    // Both attributes are mandatory, even for clear bits.
    let begin = check_attr(tag, "block", block)?;
    let set = check_attr(tag, "value", value)?;

    if set {
        Ok(Some(MarkedBlocks { begin, len: 1 }))
    } else {
        Ok(None)
    }
}
/// Parses a compact `<marked>` element describing a run of set bits.
fn parse_writeset_blocks(e: &BytesStart) -> Result<MarkedBlocks> {
    let tag = "marked";
    let mut begin: Option<u32> = None;
    let mut len: Option<u32> = None;

    for attr in e.attributes() {
        let kv = attr.unwrap();
        match kv.key {
            b"block_begin" => begin = Some(u32_val(&kv)?),
            b"len" => len = Some(u32_val(&kv)?),
            _ => return bad_attr(tag, kv.key),
        }
    }

    let begin = check_attr(tag, "block_begin", begin)?;
    let len = check_attr(tag, "len", len)?;
    Ok(MarkedBlocks { begin, len })
}
/// Parses an `<era>` element (a block → era mapping).
fn parse_era(e: &BytesStart) -> Result<Era> {
    let tag = "era";
    let mut block: Option<u32> = None;
    let mut era: Option<u32> = None;

    for attr in e.attributes() {
        let kv = attr.unwrap();
        match kv.key {
            b"block" => block = Some(u32_val(&kv)?),
            b"era" => era = Some(u32_val(&kv)?),
            _ => return bad_attr(tag, kv.key),
        }
    }

    let block = check_attr(tag, "block", block)?;
    let era = check_attr(tag, "era", era)?;
    Ok(Era { block, era })
}
// Reads a single XML event and dispatches it to the visitor.  Returns
// Visit::Stop at end of input; anything unrecognised is a parse error.
fn handle_event<R, M>(reader: &mut Reader<R>, buf: &mut Vec<u8>, visitor: &mut M) -> Result<Visit>
where
    R: Read + BufRead,
    M: MetadataVisitor,
{
    match reader.read_event(buf) {
        // Opening tags.
        Ok(Event::Start(ref e)) => match e.name() {
            b"superblock" => visitor.superblock_b(&parse_superblock(e)?),
            b"writeset" => visitor.writeset_b(&parse_writeset(e)?),
            b"era_array" => visitor.era_b(),
            _ => return Err(anyhow!("Parse error at byte {}", reader.buffer_position())),
        },
        // Closing tags.
        Ok(Event::End(ref e)) => match e.name() {
            b"superblock" => visitor.superblock_e(),
            b"writeset" => visitor.writeset_e(),
            b"era_array" => visitor.era_e(),
            _ => return Err(anyhow!("Parse error at byte {}", reader.buffer_position())),
        },
        // Self-closing elements carry the actual data.
        Ok(Event::Empty(ref e)) => match e.name() {
            b"bit" => {
                // Clear bits parse to None and are simply skipped.
                if let Some(b) = parse_writeset_bit(e)? {
                    visitor.writeset_blocks(&b)
                } else {
                    Ok(Visit::Continue)
                }
            }
            b"marked" => visitor.writeset_blocks(&parse_writeset_blocks(e)?),
            b"era" => visitor.era(&parse_era(e)?),
            _ => return Err(anyhow!("Parse error at byte {}", reader.buffer_position())),
        },
        // Whitespace/comments between elements are ignored.
        Ok(Event::Text(_)) => Ok(Visit::Continue),
        Ok(Event::Comment(_)) => Ok(Visit::Continue),
        Ok(Event::Eof) => {
            visitor.eof()?;
            Ok(Visit::Stop)
        }
        Ok(_) => return Err(anyhow!("Parse error at byte {}", reader.buffer_position())),
        Err(e) => {
            return Err(anyhow!(
                "Parse error at byte {}: {:?}",
                reader.buffer_position(),
                e
            ))
        }
    }
}
/// Drives `visitor` with events parsed from the era XML in `input`.
pub fn read<R, M>(input: R, visitor: &mut M) -> Result<()>
where
    R: Read,
    M: MetadataVisitor,
{
    let mut reader = Reader::from_reader(BufReader::new(input));
    reader.trim_text(true);

    let mut buf = Vec::new();
    loop {
        // handle_event returns Stop once Eof has been seen.
        match handle_event(&mut reader, &mut buf, visitor)? {
            Visit::Continue => (),
            _ => return Ok(()),
        }
    }
}
//------------------------------------------

View File

@ -1,106 +0,0 @@
use nix::sys::stat;
use nix::sys::stat::{FileStat, SFlag};
use std::fs::{File, OpenOptions};
use std::io;
use std::io::{Seek, Write};
use std::os::unix::io::AsRawFd;
use std::path::Path;
use tempfile::tempfile;
//---------------------------------------
// True if `flag` is set in the stat st_mode bits.
fn test_bit(mode: u32, flag: SFlag) -> bool {
    SFlag::from_bits_truncate(mode).contains(flag)
}
// True if the stat info describes a regular file or a block device.
pub fn is_file_or_blk_(info: FileStat) -> bool {
    test_bit(info.st_mode, SFlag::S_IFBLK) || test_bit(info.st_mode, SFlag::S_IFREG)
}
/// True if `path` exists (i.e. stat succeeds).
pub fn file_exists(path: &Path) -> bool {
    // `matches!(…, Ok(_))` was a roundabout Result::is_ok.
    stat::stat(path).is_ok()
}
pub fn is_file_or_blk(path: &Path) -> bool {
match stat::stat(path) {
Ok(info) => is_file_or_blk_(info),
_ => false,
}
}
/// True if `path` stats as a regular file; false on any stat failure.
pub fn is_file(path: &Path) -> bool {
    stat::stat(path)
        .map(|info| test_bit(info.st_mode, SFlag::S_IFREG))
        .unwrap_or(false)
}
//---------------------------------------
// ioctl type/sequence numbers for BLKGETSIZE64, which reports a block
// device's size in bytes.
const BLKGETSIZE64_CODE: u8 = 0x12;
const BLKGETSIZE64_SEQ: u8 = 114;
// Generates the `ioctl_blkgetsize64` wrapper used by get_device_size.
ioctl_read!(ioctl_blkgetsize64, BLKGETSIZE64_CODE, BLKGETSIZE64_SEQ, u64);
/// Convenience for building an `ErrorKind::Other` io error result.
pub fn fail<T>(msg: &str) -> io::Result<T> {
    Err(io::Error::new(io::ErrorKind::Other, msg))
}
// Queries a block device's size in bytes via the BLKGETSIZE64 ioctl.
fn get_device_size(path: &Path) -> io::Result<u64> {
    let file = File::open(path)?;
    let fd = file.as_raw_fd();
    let mut cap = 0u64;
    // SAFETY: fd is a valid, open descriptor and `cap` is a valid u64
    // out-parameter for this ioctl.
    unsafe {
        match ioctl_blkgetsize64(fd, &mut cap) {
            Ok(_) => Ok(cap),
            _ => fail("BLKGETSIZE64 ioctl failed"),
        }
    }
}
/// Returns the size in bytes of a regular file or block device;
/// errors for anything else.
pub fn file_size(path: &Path) -> io::Result<u64> {
    match stat::stat(path) {
        Ok(info) => {
            if test_bit(info.st_mode, SFlag::S_IFREG) {
                Ok(info.st_size as u64)
            } else if test_bit(info.st_mode, SFlag::S_IFBLK) {
                // st_size isn't meaningful for block devices; ask the
                // driver instead.
                get_device_size(path)
            } else {
                fail("Not a block device or regular file")
            }
        }
        _ => fail("stat failed"),
    }
}
//---------------------------------------
/// Extends `w` to exactly `nr_bytes` by seeking to the final byte and
/// writing a single zero (sparse on filesystems that support it).
/// A size of zero leaves the writer untouched.
fn set_size<W: Write + Seek>(w: &mut W, nr_bytes: u64) -> io::Result<()> {
    if nr_bytes == 0 {
        return Ok(());
    }
    w.seek(io::SeekFrom::Start(nr_bytes - 1))?;
    w.write_all(&[0u8])?;
    Ok(())
}
/// Creates an anonymous temporary file extended to `nr_bytes`.
pub fn temp_file_sized(nr_bytes: u64) -> io::Result<std::fs::File> {
    let mut file = tempfile()?;
    set_size(&mut file, nr_bytes)?;
    Ok(file)
}
/// Creates (or truncates) `path` and extends it to `nr_bytes`.
pub fn create_sized_file(path: &Path, nr_bytes: u64) -> io::Result<std::fs::File> {
    let mut file = OpenOptions::new()
        .read(false)
        .write(true)
        .create(true)
        .truncate(true)
        .open(path)?;
    set_size(&mut file, nr_bytes)?;
    Ok(file)
}
//---------------------------------------

View File

@ -1,497 +0,0 @@
use io_uring::opcode::{self, types};
use io_uring::IoUring;
use safemem::write_bytes;
use std::alloc::{alloc, dealloc, Layout};
use std::fs::File;
use std::fs::OpenOptions;
use std::io::Result;
use std::io::{self, Read, Seek, Write};
use std::ops::{Deref, DerefMut};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;
use std::sync::{Arc, Condvar, Mutex};
use crate::file_utils;
//------------------------------------------
// Metadata blocks are always 4k.
pub const BLOCK_SIZE: usize = 4096;
// log2 of the 512 byte sector size.
pub const SECTOR_SHIFT: usize = 9;
// Buffer alignment; 4k-aligned buffers suit the O_DIRECT io done by
// AsyncIoEngine.
const ALIGN: usize = 4096;
/// A single metadata block: its location plus a heap buffer aligned
/// for O_DIRECT io.
#[derive(Debug)]
pub struct Block {
    pub loc: u64,
    data: *mut u8,
}

impl Block {
    // Creates a new block that corresponds to the given location.  The
    // memory is not initialised.
    pub fn new(loc: u64) -> Block {
        let layout = Layout::from_size_align(BLOCK_SIZE, ALIGN).unwrap();
        // SAFETY: layout has a non-zero size.
        let ptr = unsafe { alloc(layout) };
        assert!(!ptr.is_null(), "out of memory");
        Block { loc, data: ptr }
    }

    /// Creates a new block with a zeroed buffer.
    pub fn zeroed(loc: u64) -> Block {
        let mut r = Self::new(loc);
        // Zero via `zero` (std::ptr::write_bytes) rather than the
        // safemem crate; behaviour is identical and it drops the
        // extra dependency.
        r.zero();
        r
    }

    /// Returns the block's data as a mutable slice.
    ///
    /// NOTE(review): the unconstrained lifetime `'a` allows several
    /// aliasing &mut slices to coexist.  Existing callers rely on this
    /// signature (e.g. while the block is borrowed elsewhere), so it's
    /// kept — handle with care.
    pub fn get_data<'a>(&self) -> &'a mut [u8] {
        // SAFETY: data points at a live BLOCK_SIZE byte allocation
        // owned by self (aliasing caveat above notwithstanding).
        unsafe { std::slice::from_raw_parts_mut::<'a>(self.data, BLOCK_SIZE) }
    }

    /// Zeroes the buffer in place.
    pub fn zero(&mut self) {
        // SAFETY: data points at BLOCK_SIZE writable bytes.
        unsafe {
            std::ptr::write_bytes(self.data, 0, BLOCK_SIZE);
        }
    }
}

// Fixed: Clone used to be derived, which copied the raw `data`
// pointer; the clone and the original would then both free the same
// allocation on drop (double free), and writes would alias.  Clone now
// deep-copies the buffer.
impl Clone for Block {
    fn clone(&self) -> Block {
        let b = Block::new(self.loc);
        b.get_data().copy_from_slice(self.get_data());
        b
    }
}
impl Drop for Block {
    fn drop(&mut self) {
        // Must match the layout used by the alloc in Block::new.
        let layout = Layout::from_size_align(BLOCK_SIZE, ALIGN).unwrap();
        // SAFETY: data was allocated in Block::new with this layout
        // and is freed exactly once, here.
        unsafe {
            dealloc(self.data, layout);
        }
    }
}
// SAFETY: Block owns its buffer outright, so moving it between
// threads is fine.
unsafe impl Send for Block {}
//------------------------------------------
/// Abstraction over block io on the metadata device; implemented by
/// SyncIoEngine and AsyncIoEngine below.
pub trait IoEngine {
    fn get_nr_blocks(&self) -> u64;
    // Preferred number of blocks per read_many()/write_many() call.
    fn get_batch_size(&self) -> usize;
    fn read(&self, b: u64) -> Result<Block>;
    // The whole io could fail, or individual blocks
    fn read_many(&self, blocks: &[u64]) -> Result<Vec<Result<Block>>>;
    fn write(&self, block: &Block) -> Result<()>;
    // The whole io could fail, or individual blocks
    fn write_many(&self, blocks: &[Block]) -> Result<Vec<Result<()>>>;
}
/// Number of whole metadata blocks that fit in the file/device.
fn get_nr_blocks(path: &Path) -> io::Result<u64> {
    let bytes = file_utils::file_size(path)?;
    Ok(bytes / BLOCK_SIZE as u64)
}
//------------------------------------------
/// Blocking io engine backed by a pool of file handles so several
/// threads can issue io concurrently.
pub struct SyncIoEngine {
    nr_blocks: u64,
    // Idle handles; the condvar signals when one is returned by put().
    files: Mutex<Vec<File>>,
    cvar: Condvar,
}
// RAII loan of a file handle from the pool; returned on drop.
struct FileGuard<'a> {
    engine: &'a SyncIoEngine,
    // Always Some while the guard is live; taken in Drop.
    file: Option<File>,
}
impl<'a> FileGuard<'a> {
    fn new(engine: &'a SyncIoEngine, file: File) -> FileGuard<'a> {
        FileGuard {
            engine,
            file: Some(file),
        }
    }
}
impl<'a> Deref for FileGuard<'a> {
    type Target = File;
    fn deref(&self) -> &File {
        self.file.as_ref().expect("empty file guard")
    }
}
impl<'a> DerefMut for FileGuard<'a> {
    fn deref_mut(&mut self) -> &mut File {
        // Mirrors Deref above: the Option is only None after Drop has
        // taken the file, so a live guard always holds Some.  (This
        // replaces a leftover `todo!()` in the None arm, which would
        // have panicked with "not yet implemented".)
        self.file.as_mut().expect("empty file guard")
    }
}
impl<'a> Drop for FileGuard<'a> {
    fn drop(&mut self) {
        // Hand the file back to the engine's pool (wakes one waiter).
        self.engine.put(self.file.take().expect("empty file guard"));
    }
}
impl SyncIoEngine {
    /// Opens the metadata device, optionally writable and O_EXCL.
    fn open_file(path: &Path, writable: bool, excl: bool) -> Result<File> {
        let file = OpenOptions::new()
            .read(true)
            .write(writable)
            .custom_flags(if excl { libc::O_EXCL } else { 0 })
            .open(path)?;

        Ok(file)
    }

    /// Creates an engine with `nr_files` pooled handles, opened O_EXCL.
    pub fn new(path: &Path, nr_files: usize, writable: bool) -> Result<SyncIoEngine> {
        SyncIoEngine::new_with(path, nr_files, writable, true)
    }

    /// As `new`, but O_EXCL is optional.
    pub fn new_with(
        path: &Path,
        nr_files: usize,
        writable: bool,
        excl: bool,
    ) -> Result<SyncIoEngine> {
        let nr_blocks = get_nr_blocks(path)?; // check file mode earlier
        let mut files = Vec::with_capacity(nr_files);
        for _n in 0..nr_files {
            files.push(SyncIoEngine::open_file(path, writable, excl)?);
        }

        Ok(SyncIoEngine {
            nr_blocks,
            files: Mutex::new(files),
            cvar: Condvar::new(),
        })
    }

    /// Borrows a handle from the pool, blocking until one is free.
    fn get(&self) -> FileGuard {
        let mut files = self.files.lock().unwrap();

        while files.is_empty() {
            files = self.cvar.wait(files).unwrap();
        }

        FileGuard::new(self, files.pop().unwrap())
    }

    /// Returns a handle to the pool and wakes one waiter.
    fn put(&self, f: File) {
        let mut files = self.files.lock().unwrap();
        files.push(f);
        self.cvar.notify_one();
    }

    /// Reads the 4k block at `loc` with seek + read_exact.
    fn read_(input: &mut File, loc: u64) -> Result<Block> {
        let b = Block::new(loc);
        input.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
        input.read_exact(b.get_data())?;
        Ok(b)
    }

    /// Writes the block back to its location.
    fn write_(output: &mut File, b: &Block) -> Result<()> {
        output.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
        output.write_all(b.get_data())?;
        Ok(())
    }
}
impl IoEngine for SyncIoEngine {
    fn get_nr_blocks(&self) -> u64 {
        self.nr_blocks
    }

    fn get_batch_size(&self) -> usize {
        1
    }

    fn read(&self, loc: u64) -> Result<Block> {
        let mut file = self.get();
        SyncIoEngine::read_(&mut file, loc)
    }

    fn read_many(&self, blocks: &[u64]) -> Result<Vec<Result<Block>>> {
        // Hold a single pooled handle for the whole batch.
        let mut file = self.get();
        Ok(blocks
            .iter()
            .map(|loc| SyncIoEngine::read_(&mut file, *loc))
            .collect())
    }

    fn write(&self, b: &Block) -> Result<()> {
        let mut file = self.get();
        SyncIoEngine::write_(&mut file, b)
    }

    fn write_many(&self, blocks: &[Block]) -> Result<Vec<Result<()>>> {
        let mut file = self.get();
        Ok(blocks
            .iter()
            .map(|b| SyncIoEngine::write_(&mut file, b))
            .collect())
    }
}
//------------------------------------------
// Inner state of AsyncIoEngine, kept behind a mutex.
pub struct AsyncIoEngine_ {
    queue_len: u32,
    ring: IoUring,
    nr_blocks: u64,
    // NOTE(review): mirrors input.as_raw_fd(); the methods below
    // re-derive the fd from `input`, so this field looks redundant —
    // confirm before removing.
    fd: RawFd,
    // Shared with clones of the engine.
    input: Arc<File>,
}
/// io_uring based engine; the device is opened O_DIRECT.
pub struct AsyncIoEngine {
    inner: Mutex<AsyncIoEngine_>,
}
impl AsyncIoEngine {
    /// Opens `path` O_DIRECT|O_EXCL with a uring of `queue_len` entries.
    pub fn new(path: &Path, queue_len: u32, writable: bool) -> Result<AsyncIoEngine> {
        AsyncIoEngine::new_with(path, queue_len, writable, true)
    }

    /// As `new`, but O_EXCL is optional.
    pub fn new_with(
        path: &Path,
        queue_len: u32,
        writable: bool,
        excl: bool,
    ) -> Result<AsyncIoEngine> {
        let nr_blocks = get_nr_blocks(path)?; // check file mode earlier
        let mut flags = libc::O_DIRECT;

        if excl {
            flags |= libc::O_EXCL;
        }

        let input = OpenOptions::new()
            .read(true)
            .write(writable)
            .custom_flags(flags)
            .open(path)?;

        Ok(AsyncIoEngine {
            inner: Mutex::new(AsyncIoEngine_ {
                queue_len,
                ring: IoUring::new(queue_len)?,
                nr_blocks,
                fd: input.as_raw_fd(),
                input: Arc::new(input),
            }),
        })
    }

    // FIXME: refactor next two fns
    /// Submits one read per block in a single uring batch, waits for
    /// all completions, and returns per-block results in submission
    /// order.
    fn read_many_(&self, blocks: Vec<Block>) -> Result<Vec<Result<Block>>> {
        use std::io::*;
        let mut inner = self.inner.lock().unwrap();
        let count = blocks.len();
        let fd_inner = inner.input.as_raw_fd();

        for (i, b) in blocks.iter().enumerate() {
            let read_e = opcode::Read::new(types::Fd(fd_inner), b.data, BLOCK_SIZE as u32)
                .offset(b.loc as i64 * BLOCK_SIZE as i64);

            // SAFETY: b.data stays valid until the matching completion
            // is reaped below (blocks outlives submit_and_wait).
            unsafe {
                let mut queue = inner.ring.submission().available();
                queue
                    .push(read_e.build().user_data(i as u64))
                    .ok()
                    .expect("queue is full");
            }
        }

        inner.ring.submit_and_wait(count)?;

        let mut cqes = inner.ring.completion().available().collect::<Vec<_>>();

        if cqes.len() != count {
            return Err(Error::new(
                ErrorKind::Other,
                "insufficient io_uring completions",
            ));
        }

        // Match completions back up with submissions via user_data.
        cqes.sort_by_key(|c| c.user_data());

        let mut rs = Vec::new();
        for (i, b) in blocks.into_iter().enumerate() {
            let c = &cqes[i];

            let r = c.result();
            if r < 0 {
                let error = Error::from_raw_os_error(-r);
                rs.push(Err(error));
            } else if c.result() != BLOCK_SIZE as i32 {
                rs.push(Err(Error::new(ErrorKind::UnexpectedEof, "short read")));
            } else {
                rs.push(Ok(b));
            }
        }
        Ok(rs)
    }

    /// Write-side twin of read_many_.
    fn write_many_(&self, blocks: &[Block]) -> Result<Vec<Result<()>>> {
        use std::io::*;
        let mut inner = self.inner.lock().unwrap();
        let count = blocks.len();
        let fd_inner = inner.input.as_raw_fd();

        for (i, b) in blocks.iter().enumerate() {
            let write_e = opcode::Write::new(types::Fd(fd_inner), b.data, BLOCK_SIZE as u32)
                .offset(b.loc as i64 * BLOCK_SIZE as i64);

            // SAFETY: b.data stays valid until the matching completion
            // is reaped below.
            unsafe {
                let mut queue = inner.ring.submission().available();
                queue
                    .push(write_e.build().user_data(i as u64))
                    .ok()
                    .expect("queue is full");
            }
        }

        inner.ring.submit_and_wait(count)?;

        let mut cqes = inner.ring.completion().available().collect::<Vec<_>>();

        // Fixed: a missing completion previously went unnoticed and the
        // result vec silently came back short; fail loudly instead,
        // matching read_many_.
        if cqes.len() != count {
            return Err(Error::new(
                ErrorKind::Other,
                "insufficient io_uring completions",
            ));
        }

        // Match completions back up with submissions via user_data.
        cqes.sort_by_key(|c| c.user_data());

        let mut rs = Vec::new();
        for c in cqes {
            let r = c.result();

            if r < 0 {
                let error = Error::from_raw_os_error(-r);
                rs.push(Err(error));
            } else if r != BLOCK_SIZE as i32 {
                rs.push(Err(Error::new(ErrorKind::UnexpectedEof, "short write")));
            } else {
                rs.push(Ok(()));
            }
        }
        Ok(rs)
    }
}
impl Clone for AsyncIoEngine {
    fn clone(&self) -> AsyncIoEngine {
        let inner = self.inner.lock().unwrap();
        // (Removed a stray debug eprintln of queue_len left in here.)
        // Each clone gets its own io_uring, but shares the underlying
        // File with the original via the Arc.
        AsyncIoEngine {
            inner: Mutex::new(AsyncIoEngine_ {
                queue_len: inner.queue_len,
                ring: IoUring::new(inner.queue_len).expect("couldn't create uring"),
                nr_blocks: inner.nr_blocks,
                fd: inner.fd,
                input: inner.input.clone(),
            }),
        }
    }
}
impl IoEngine for AsyncIoEngine {
    fn get_nr_blocks(&self) -> u64 {
        let inner = self.inner.lock().unwrap();
        inner.nr_blocks
    }

    fn get_batch_size(&self) -> usize {
        self.inner.lock().unwrap().queue_len as usize
    }

    /// Reads a single block through the uring.
    fn read(&self, b: u64) -> Result<Block> {
        let mut inner = self.inner.lock().unwrap();
        let fd = types::Fd(inner.input.as_raw_fd());
        let b = Block::new(b);

        let read_e = opcode::Read::new(fd, b.data, BLOCK_SIZE as u32)
            .offset(b.loc as i64 * BLOCK_SIZE as i64);

        // SAFETY: b.data remains valid until the completion is reaped.
        unsafe {
            let mut queue = inner.ring.submission().available();
            queue
                .push(read_e.build().user_data(0))
                .ok()
                .expect("queue is full");
        }

        inner.ring.submit_and_wait(1)?;

        let cqes = inner.ring.completion().available().collect::<Vec<_>>();

        let r = cqes[0].result();
        use std::io::*;
        if r < 0 {
            let error = Error::from_raw_os_error(-r);
            Err(error)
        } else if r != BLOCK_SIZE as i32 {
            // Fixed: this is the read path; the message used to say
            // "short write".
            Err(Error::new(ErrorKind::UnexpectedEof, "short read"))
        } else {
            Ok(b)
        }
    }

    /// Reads blocks in queue-sized batches via read_many_.
    fn read_many(&self, blocks: &[u64]) -> Result<Vec<Result<Block>>> {
        let inner = self.inner.lock().unwrap();
        let queue_len = inner.queue_len as usize;
        drop(inner);

        let mut results = Vec::new();
        for cs in blocks.chunks(queue_len) {
            let mut bs = Vec::new();
            for b in cs {
                bs.push(Block::new(*b));
            }

            results.append(&mut self.read_many_(bs)?);
        }

        Ok(results)
    }

    /// Writes a single block through the uring.
    fn write(&self, b: &Block) -> Result<()> {
        let mut inner = self.inner.lock().unwrap();
        let fd = types::Fd(inner.input.as_raw_fd());

        let write_e = opcode::Write::new(fd, b.data, BLOCK_SIZE as u32)
            .offset(b.loc as i64 * BLOCK_SIZE as i64);

        // SAFETY: b.data remains valid until the completion is reaped.
        unsafe {
            let mut queue = inner.ring.submission().available();
            queue
                .push(write_e.build().user_data(0))
                .ok()
                .expect("queue is full");
        }

        inner.ring.submit_and_wait(1)?;

        let cqes = inner.ring.completion().available().collect::<Vec<_>>();

        let r = cqes[0].result();
        use std::io::*;
        if r < 0 {
            let error = Error::from_raw_os_error(-r);
            Err(error)
        } else if r != BLOCK_SIZE as i32 {
            Err(Error::new(ErrorKind::UnexpectedEof, "short write"))
        } else {
            Ok(())
        }
    }

    /// Writes blocks in queue-sized batches via write_many_.
    fn write_many(&self, blocks: &[Block]) -> Result<Vec<Result<()>>> {
        let inner = self.inner.lock().unwrap();
        let queue_len = inner.queue_len as usize;
        drop(inner);

        let mut results = Vec::new();
        let mut done = 0;
        while done != blocks.len() {
            let len = usize::min(blocks.len() - done, queue_len);
            results.append(&mut self.write_many_(&blocks[done..(done + len)])?);
            done += len;
        }

        Ok(results)
    }
}
//------------------------------------------

View File

@ -1,33 +0,0 @@
extern crate anyhow;
extern crate byteorder;
extern crate crc32c;
extern crate flate2;
extern crate nom;
extern crate num_cpus;
#[macro_use]
extern crate nix;
#[cfg(test)]
extern crate quickcheck;
#[cfg(test)]
#[macro_use(quickcheck)]
#[cfg(test)]
extern crate quickcheck_macros;
pub mod cache;
pub mod checksum;
pub mod commands;
pub mod era;
pub mod file_utils;
pub mod io_engine;
pub mod math;
pub mod pack;
pub mod pdata;
pub mod report;
pub mod shrink;
pub mod thin;
pub mod units;
pub mod version;
pub mod write_batcher;
pub mod xml;

View File

@ -1,47 +0,0 @@
use std::cmp::PartialEq;
use std::ops::{Add, Div, Rem};
//-----------------------------------------
/// Minimal integer abstraction for the division helpers below.
pub trait Integer:
    Sized + Copy + Add<Output = Self> + Div<Output = Self> + Rem<Output = Self> + PartialEq
{
    fn zero() -> Self;
    fn one() -> Self;
}

/// Divides `v` by `divisor`, rounding up.
pub fn div_up<T: Integer>(v: T, divisor: T) -> T {
    let quotient = v / divisor;
    if v % divisor == Integer::zero() {
        quotient
    } else {
        quotient + Integer::one()
    }
}

/// Divides `v` by `divisor`, rounding down (truncating).
pub fn div_down<T: Integer>(v: T, divisor: T) -> T {
    v / divisor
}

//-----------------------------------------

impl Integer for usize {
    fn zero() -> Self {
        0
    }
    fn one() -> Self {
        1
    }
}

impl Integer for u64 {
    fn zero() -> Self {
        0
    }
    fn one() -> Self {
        1
    }
}

//-----------------------------------------

View File

@ -1,169 +0,0 @@
//-------------------------------------------------
/// Run-length/delta encoded representation of a u64 sequence.
#[derive(PartialEq, Debug, Clone)]
pub enum Delta {
    Base { n: u64 },
    Const { count: u64 },
    Pos { delta: u64, count: u64 },
    Neg { delta: u64, count: u64 },
}

use Delta::*;

/// Compresses `ns` into a delta encoding: an initial Base value
/// followed by runs of constant, rising or falling values with a
/// fixed step.
pub fn to_delta(ns: &[u64]) -> Vec<Delta> {
    use std::cmp::Ordering::*;

    let mut ds = Vec::with_capacity(ns.len());
    if ns.is_empty() {
        return ds;
    }

    let mut base = ns[0];
    ds.push(Base { n: base });

    let mut i = 1;
    while i < ns.len() {
        match ns[i].cmp(&base) {
            Less => {
                // Falling run with a fixed step.
                let step = base - ns[i];
                let mut run = 1;
                while i < ns.len() && (ns[i] + (run * step) == base) {
                    i += 1;
                    run += 1;
                }
                run -= 1;
                ds.push(Neg { delta: step, count: run });
                base -= step * run;
            }
            Equal => {
                // Run of repeats of the current base.
                let mut run = 1;
                while i < ns.len() && ns[i] == base {
                    i += 1;
                    run += 1;
                }
                run -= 1;
                ds.push(Const { count: run });
            }
            Greater => {
                // Rising run with a fixed step.
                let step = ns[i] - base;
                let mut run = 1;
                while i < ns.len() && (ns[i] == (base + (run * step))) {
                    i += 1;
                    run += 1;
                }
                run -= 1;
                ds.push(Pos { delta: step, count: run });
                base += step * run;
            }
        }
    }
    ds
}
#[cfg(test)]
mod tests {
    use super::*;
    // Expands a delta encoding back to the raw values — the inverse of
    // to_delta, used to round-trip the cases below.
    fn from_delta(ds: &[Delta]) -> Vec<u64> {
        let mut ns: Vec<u64> = Vec::new();
        let mut base = 0u64;
        for d in ds {
            match d {
                Base { n } => {
                    ns.push(*n);
                    base = *n;
                }
                Const { count } => {
                    for _ in 0..*count {
                        ns.push(base);
                    }
                }
                Pos { delta, count } => {
                    for _ in 0..*count {
                        base += delta;
                        ns.push(base);
                    }
                }
                Neg { delta, count } => {
                    for _ in 0..*count {
                        assert!(base >= *delta);
                        base -= delta;
                        ns.push(base);
                    }
                }
            }
        }
        ns
    }
    // Checks both directions: encode and decode must round-trip.
    #[test]
    fn test_to_delta() {
        struct TestCase(Vec<u64>, Vec<Delta>);
        let cases = [
            TestCase(vec![], vec![]),
            TestCase(vec![1], vec![Base { n: 1 }]),
            TestCase(vec![1, 2], vec![Base { n: 1 }, Pos { delta: 1, count: 1 }]),
            TestCase(
                vec![1, 2, 3, 4],
                vec![Base { n: 1 }, Pos { delta: 1, count: 3 }],
            ),
            TestCase(
                vec![2, 4, 6, 8],
                vec![Base { n: 2 }, Pos { delta: 2, count: 3 }],
            ),
            TestCase(
                vec![7, 14, 21, 28],
                vec![Base { n: 7 }, Pos { delta: 7, count: 3 }],
            ),
            TestCase(
                vec![10, 9],
                vec![Base { n: 10 }, Neg { delta: 1, count: 1 }],
            ),
            TestCase(
                vec![10, 9, 8, 7],
                vec![Base { n: 10 }, Neg { delta: 1, count: 3 }],
            ),
            TestCase(
                vec![10, 8, 6, 4],
                vec![Base { n: 10 }, Neg { delta: 2, count: 3 }],
            ),
            TestCase(
                vec![28, 21, 14, 7],
                vec![Base { n: 28 }, Neg { delta: 7, count: 3 }],
            ),
            TestCase(
                vec![42, 42, 42, 42],
                vec![Base { n: 42 }, Const { count: 3 }],
            ),
            TestCase(
                vec![1, 2, 3, 10, 20, 30, 40, 38, 36, 34, 0, 0, 0, 0],
                vec![
                    Base { n: 1 },
                    Pos { delta: 1, count: 2 },
                    Pos { delta: 7, count: 1 },
                    Pos {
                        delta: 10,
                        count: 3,
                    },
                    Neg { delta: 2, count: 3 },
                    Neg {
                        delta: 34,
                        count: 1,
                    },
                    Const { count: 3 },
                ],
            ),
        ];
        for t in &cases {
            assert_eq!(to_delta(&t.0), t.1);
            assert_eq!(from_delta(&t.1), t.0);
        }
    }
}
//-------------------------------------------------

View File

@ -1,5 +0,0 @@
pub mod node_encode;
pub mod toplevel;
pub mod vm;
mod delta_list;

View File

@ -1,118 +0,0 @@
use std::{io, io::Write};
use thiserror::Error;
use nom::{bytes::complete::*, number::complete::*, IResult};
use crate::pack::vm::*;
//-------------------------------------------
/// Errors produced while packing metadata blocks.
#[derive(Error, Debug)]
pub enum PackError {
    #[error("Couldn't parse binary data")]
    ParseError,
    #[error("Write error")]
    WriteError { source: std::io::Error },
}
// Result alias used throughout the pack code.
pub type PResult<T> = Result<T, PackError>;
// Converts a nom result into a PResult, discarding nom's error detail.
fn nom_to_pr<T>(r: IResult<&[u8], T>) -> PResult<(&[u8], T)> {
    r.map_err(|_source| PackError::ParseError)
}
// Wraps an io error as PackError::WriteError.
fn io_to_pr<T>(r: io::Result<T>) -> PResult<T> {
    r.map_err(|source| PackError::WriteError { source })
}
//-------------------------------------------
// Parses exactly `count` little-endian u64s.
fn run64(i: &[u8], count: usize) -> IResult<&[u8], Vec<u64>> {
    nom::multi::many_m_n(count, count, le_u64)(i)
}
// The header fields needed to decide how to pack a btree node.
struct NodeSummary {
    is_leaf: bool,
    max_entries: usize,
    value_size: usize,
}
// Parses just the 32 byte btree node header (the field widths below
// sum to 32, matching the take(32) in pack_btree_node).
fn summarise_node(data: &[u8]) -> IResult<&[u8], NodeSummary> {
    let (i, _csum) = le_u32(data)?;
    let (i, flags) = le_u32(i)?;
    let (i, _blocknr) = le_u64(i)?;
    let (i, _nr_entries) = le_u32(i)?;
    let (i, max_entries) = le_u32(i)?;
    let (i, value_size) = le_u32(i)?;
    let (i, _padding) = le_u32(i)?;
    Ok((
        i,
        NodeSummary {
            // flags == 2 marks a leaf; anything else is treated as an
            // internal node.
            is_leaf: flags == 2,
            max_entries: max_entries as usize,
            value_size: value_size as usize,
        },
    ))
}
// Packs a btree node: the 32 byte header goes out as a literal, then
// the keys (and, where the values are u64s, the values too) go through
// the u64 packers from pack::vm.
pub fn pack_btree_node<W: Write>(w: &mut W, data: &[u8]) -> PResult<()> {
    let (_, info) = nom_to_pr(summarise_node(data))?;
    let (i, hdr) = nom_to_pr(take(32usize)(data))?;
    let (i, keys) = nom_to_pr(run64(i, info.max_entries))?;
    if info.is_leaf {
        if info.value_size == std::mem::size_of::<u64>() {
            let (tail, values) = nom_to_pr(run64(i, info.max_entries))?;
            io_to_pr(pack_literal(w, hdr))?;
            io_to_pr(pack_u64s(w, &keys))?;
            // u64 leaf values use the shifted encoding (see
            // pack::vm::pack_shifted_u64s).
            io_to_pr(pack_shifted_u64s(w, &values))?;
            if !tail.is_empty() {
                io_to_pr(pack_literal(w, tail))?;
            }
            Ok(())
        } else {
            // We don't bother packing the values if they aren't u64
            let tail = i;
            io_to_pr(pack_literal(w, hdr))?;
            io_to_pr(pack_u64s(w, &keys))?;
            io_to_pr(pack_literal(w, tail))?;
            Ok(())
        }
    } else {
        // Internal node, values are also u64s
        let (tail, values) = nom_to_pr(run64(i, info.max_entries))?;
        io_to_pr(pack_literal(w, hdr))?;
        io_to_pr(pack_u64s(w, &keys))?;
        io_to_pr(pack_u64s(w, &values))?;
        if !tail.is_empty() {
            io_to_pr(pack_literal(w, tail))?;
        }
        Ok(())
    }
}
// These block types are passed straight through as literals (see
// pack::vm::pack_literal).
pub fn pack_superblock<W: Write>(w: &mut W, bytes: &[u8]) -> PResult<()> {
    io_to_pr(pack_literal(w, bytes))
}
pub fn pack_bitmap<W: Write>(w: &mut W, bytes: &[u8]) -> PResult<()> {
    io_to_pr(pack_literal(w, bytes))
}
pub fn pack_index<W: Write>(w: &mut W, bytes: &[u8]) -> PResult<()> {
    io_to_pr(pack_literal(w, bytes))
}
pub fn pack_array<W: Write>(w: &mut W, bytes: &[u8]) -> PResult<()> {
    io_to_pr(pack_literal(w, bytes))
}
//-------------------------------------

View File

@ -1,321 +0,0 @@
use anyhow::{anyhow, Context, Result};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use flate2::{read::ZlibDecoder, write::ZlibEncoder, Compression};
use std::os::unix::fs::OpenOptionsExt;
use std::{
error::Error,
fs::OpenOptions,
io,
io::prelude::*,
io::Write,
ops::DerefMut,
path::Path,
sync::{Arc, Mutex},
thread::spawn,
};
use rand::prelude::*;
use std::sync::mpsc::{sync_channel, Receiver};
use crate::checksum::*;
use crate::file_utils;
use crate::pack::node_encode::*;
// Metadata block size used when scanning the input device.
const BLOCK_SIZE: u64 = 4096;
// Magic number identifying a pack file (written by write_header).
const MAGIC: u64 = 0xa537a0aa6309ef77;
// Pack stream format version.
const PACK_VERSION: u64 = 3;
// Randomly permutes `v` in place.
fn shuffle<T>(v: &mut Vec<T>) {
    v.shuffle(&mut rand::thread_rng());
}
// Each thread processes multiple contiguous runs of blocks, called
// chunks.  Chunks are shuffled so each thread gets chunks spread
// across the dev in case there are large regions that don't contain
// metadata.
fn mk_chunk_vecs(nr_blocks: u64, nr_jobs: u64) -> Vec<Vec<(u64, u64)>> {
    use std::cmp::{max, min};

    // Chunk size is clamped to [128, 4096] blocks.
    let chunk_size = min(4 * 1024u64, max(128u64, nr_blocks / (nr_jobs * 64)));
    let nr_chunks = nr_blocks / chunk_size;

    let mut chunks = Vec::with_capacity(nr_chunks as usize);
    for i in 0..nr_chunks {
        chunks.push((i * chunk_size, (i + 1) * chunk_size));
    }

    // there may be a smaller chunk at the back of the file.
    if nr_chunks * chunk_size < nr_blocks {
        chunks.push((nr_chunks * chunk_size, nr_blocks));
    }

    shuffle(&mut chunks);

    // Deal the chunks round-robin to the jobs.  Fixed: this loop used
    // to iterate over 0..nr_chunks, silently dropping the trailing
    // partial chunk (and, for a device smaller than one chunk, every
    // block), so those blocks were never packed.
    let mut vs = Vec::with_capacity(nr_jobs as usize);
    for _ in 0..nr_jobs {
        vs.push(Vec::new());
    }
    for (c, chunk) in chunks.into_iter().enumerate() {
        vs[c % nr_jobs as usize].push(chunk);
    }
    vs
}
/// Compresses the metadata on `input_file` into a pack file at
/// `output_file`, using one worker thread per cpu (capped so each job
/// has at least 128 blocks).
pub fn pack(input_file: &Path, output_file: &Path) -> Result<(), Box<dyn Error>> {
    let nr_blocks = get_nr_blocks(input_file)?;
    let nr_jobs = std::cmp::max(1, std::cmp::min(num_cpus::get() as u64, nr_blocks / 128));
    let chunk_vecs = mk_chunk_vecs(nr_blocks, nr_jobs);
    // O_EXCL guards against packing a live device.
    let input = OpenOptions::new()
        .read(true)
        .write(false)
        .custom_flags(libc::O_EXCL)
        .open(input_file)?;
    let output = OpenOptions::new()
        .read(false)
        .write(true)
        .create(true)
        .truncate(true)
        .open(output_file)?;
    write_header(&output, nr_blocks).context("unable to write pack file header")?;
    // Both handles are shared by the worker threads.
    let sync_input = Arc::new(Mutex::new(input));
    let sync_output = Arc::new(Mutex::new(output));
    let mut threads = Vec::new();
    for job in 0..nr_jobs {
        let sync_input = Arc::clone(&sync_input);
        let sync_output = Arc::clone(&sync_output);
        let chunks = chunk_vecs[job as usize].clone();
        threads.push(spawn(move || crunch(sync_input, sync_output, chunks)));
    }
    // Join all workers; fail on the first worker error encountered.
    for t in threads {
        t.join().unwrap()?;
    }
    Ok(())
}
/// Worker: scan the given block ranges, pack every recognised metadata
/// block into a zlib stream, and append length-prefixed compressed
/// batches to the shared output.
fn crunch<R, W>(input: Arc<Mutex<R>>, output: Arc<Mutex<W>>, ranges: Vec<(u64, u64)>) -> Result<()>
where
    R: Read + Seek,
    W: Write,
{
    // Number of packed blocks in the current (unflushed) batch.
    let mut written = 0u64;
    let mut z = ZlibEncoder::new(Vec::new(), Compression::default());
    for (lo, hi) in ranges {
        // We read multiple blocks at once to reduce contention
        // on input.
        let mut input = input.lock().unwrap();
        let big_data = read_blocks(input.deref_mut(), lo, hi - lo)?;
        drop(input);

        for b in lo..hi {
            let block_start = ((b - lo) * BLOCK_SIZE) as usize;
            let data = &big_data[block_start..(block_start + BLOCK_SIZE as usize)];
            let kind = metadata_block_type(data);
            if kind != BT::UNKNOWN {
                // Record layout: block number, then the packed payload.
                z.write_u64::<LittleEndian>(b)?;
                pack_block(&mut z, kind, data)?;

                written += 1;
                // Flush a batch every 1024 packed blocks.  Batches are
                // length-prefixed so unpack can read them one at a time.
                if written == 1024 {
                    let compressed = z.reset(Vec::new())?;

                    let mut output = output.lock().unwrap();
                    output.write_u64::<LittleEndian>(compressed.len() as u64)?;
                    output.write_all(&compressed)?;
                    written = 0;
                }
            }
        }
    }

    // Flush the final, partial batch.
    if written > 0 {
        let compressed = z.finish()?;
        let mut output = output.lock().unwrap();
        output.write_u64::<LittleEndian>(compressed.len() as u64)?;
        output.write_all(&compressed)?;
    }
    Ok(())
}
fn write_header<W>(mut w: W, nr_blocks: u64) -> io::Result<()>
where
W: byteorder::WriteBytesExt,
{
w.write_u64::<LittleEndian>(MAGIC)?;
w.write_u64::<LittleEndian>(PACK_VERSION)?;
w.write_u64::<LittleEndian>(4096)?;
w.write_u64::<LittleEndian>(nr_blocks)?;
Ok(())
}
/// Read and validate the pack file header, returning the block count.
///
/// # Errors
/// `InvalidData` on wrong magic, unsupported version, or a block size
/// that does not match BLOCK_SIZE.
fn read_header<R>(mut r: R) -> io::Result<u64>
where
    R: byteorder::ReadBytesExt,
{
    let magic = r.read_u64::<LittleEndian>()?;
    if magic != MAGIC {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "Not a pack file",
        ));
    }

    let version = r.read_u64::<LittleEndian>()?;
    if version != PACK_VERSION {
        // Report the version actually found in the file (the old message
        // printed PACK_VERSION, i.e. our own supported version).
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!(
                "unsupported pack file version ({}), expected {}.",
                version, PACK_VERSION
            ),
        ));
    }

    let block_size = r.read_u64::<LittleEndian>()?;
    if block_size != BLOCK_SIZE {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            format!("block size is not {}", BLOCK_SIZE),
        ));
    }

    r.read_u64::<LittleEndian>()
}
/// Number of whole BLOCK_SIZE-sized blocks in the file at `path`.
fn get_nr_blocks(path: &Path) -> io::Result<u64> {
    let len = file_utils::file_size(path)?;
    Ok(len / (BLOCK_SIZE as u64))
}
/// Read `count` contiguous blocks starting at block `b`.
fn read_blocks<R>(rdr: &mut R, b: u64, count: u64) -> io::Result<Vec<u8>>
where
    R: io::Read + io::Seek,
{
    let mut data = vec![0u8; (BLOCK_SIZE * count) as usize];
    rdr.seek(io::SeekFrom::Start(b * BLOCK_SIZE))?;
    rdr.read_exact(&mut data)?;
    Ok(data)
}
/// Dispatch a metadata block to the packer for its type.
///
/// Packing an UNKNOWN block is a caller error and is reported as such.
fn pack_block<W: Write>(w: &mut W, kind: BT, buf: &[u8]) -> Result<()> {
    match kind {
        BT::THIN_SUPERBLOCK | BT::CACHE_SUPERBLOCK | BT::ERA_SUPERBLOCK => {
            pack_superblock(w, buf).context("unable to pack superblock")?;
        }
        BT::NODE => {
            pack_btree_node(w, buf).context("unable to pack btree node")?;
        }
        BT::INDEX => {
            pack_index(w, buf).context("unable to pack space map index")?;
        }
        BT::BITMAP => {
            pack_bitmap(w, buf).context("unable to pack space map bitmap")?;
        }
        BT::ARRAY => {
            pack_array(w, buf).context("unable to pack array block")?;
        }
        BT::UNKNOWN => {
            return Err(anyhow!("asked to pack an unknown block type"));
        }
    }
    Ok(())
}
/// Overwrite block `b` with zeroes (also used to size the output file).
fn write_zero_block<W>(w: &mut W, b: u64) -> io::Result<()>
where
    W: Write + Seek,
{
    w.seek(io::SeekFrom::Start(b * BLOCK_SIZE))?;
    w.write_all(&vec![0u8; BLOCK_SIZE as usize])?;
    Ok(())
}
/// Drain the queued (block-number, data) pairs, writing each block at its
/// original position.  Takes the output lock once for the whole batch.
fn write_blocks<W>(w: &Arc<Mutex<W>>, blocks: &mut Vec<(u64, Vec<u8>)>) -> io::Result<()>
where
    W: Write + Seek,
{
    let mut out = w.lock().unwrap();
    // drain(..).rev() consumes from the back, matching the original
    // pop() order exactly.
    for (b, data) in blocks.drain(..).rev() {
        out.seek(io::SeekFrom::Start(b * BLOCK_SIZE))?;
        out.write_all(&data)?;
    }
    Ok(())
}
/// Unpack worker: receives compressed batches over the channel,
/// decompresses and decodes them, and writes the restored blocks back to
/// their original positions.  Exits when the channel is closed.
fn decode_worker<W>(rx: Receiver<Vec<u8>>, w: Arc<Mutex<W>>) -> io::Result<()>
where
    W: Write + Seek,
{
    let mut blocks = Vec::new();

    while let Ok(bytes) = rx.recv() {
        let mut z = ZlibDecoder::new(&bytes[0..]);

        // Each record is a block number followed by a packed block;
        // read_u64 failing (EOF) ends the batch.
        while let Ok(b) = z.read_u64::<LittleEndian>() {
            let block = crate::pack::vm::unpack(&mut z, BLOCK_SIZE as usize).unwrap();
            assert!(metadata_block_type(&block[0..]) != BT::UNKNOWN);
            blocks.push((b, block));

            // Write in batches to amortise locking the shared output.
            if blocks.len() >= 32 {
                write_blocks(&w, &mut blocks)?;
            }
        }
    }

    // Flush whatever is left after the channel closes.
    write_blocks(&w, &mut blocks)?;
    Ok(())
}
/// Restore a metadata device from a pack file produced by `pack`.
pub fn unpack(input_file: &Path, output_file: &Path) -> Result<(), Box<dyn Error>> {
    let mut input = OpenOptions::new()
        .read(true)
        .write(false)
        .open(input_file)?;

    // Validates magic/version/block size and yields the block count.
    let nr_blocks = read_header(&input)?;

    let mut output = OpenOptions::new()
        .read(false)
        .write(true)
        .create(true)
        .truncate(true)
        .open(output_file)?;

    // zero the last block to size the file
    // NOTE(review): underflows if nr_blocks == 0 (empty pack) — confirm
    // the header can never legitimately carry a zero block count.
    write_zero_block(&mut output, nr_blocks - 1)?;

    // Run until we hit the end

    let output = Arc::new(Mutex::new(output));

    // kick off the workers
    let nr_jobs = num_cpus::get();
    let mut senders = Vec::new();
    let mut threads = Vec::new();

    for _ in 0..nr_jobs {
        let (tx, rx) = sync_channel(1);
        let output = Arc::clone(&output);
        senders.push(tx);
        threads.push(spawn(move || decode_worker(rx, output)));
    }

    // Read z compressed chunk, and hand to worker thread.
    let mut next_worker = 0;
    while let Ok(len) = input.read_u64::<LittleEndian>() {
        let mut bytes = vec![0; len as usize];
        input.read_exact(&mut bytes)?;
        senders[next_worker].send(bytes).unwrap();
        next_worker = (next_worker + 1) % nr_jobs;
    }

    // Dropping the senders closes the channels, letting workers drain
    // their queues and exit.
    for s in senders {
        drop(s);
    }

    for t in threads {
        t.join().unwrap()?;
    }

    Ok(())
}

View File

@ -1,496 +0,0 @@
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use std::io;
use std::io::{Cursor, Read, Write};
use num_derive::FromPrimitive;
use num_traits::FromPrimitive;
use crate::pack::delta_list::*;
//-------------------------------------------------
// Deltas are converted to instructions. A delta may not fit
// into a single instruction.
// Instruction opcodes, stored in the high nibble of the instruction byte.
// NOTE: discriminant order is part of the wire format — FromPrimitive maps
// the nibble back to these variants, so they must not be reordered.
#[derive(Debug, FromPrimitive)]
enum Tag {
    Set,    // Operand width given in nibble
    Pos,    // Delta in nibble
    PosW,   // Delta in operand, whose width is in nibble
    Neg,    // Delta in nibble
    NegW,   // Delta in operand, whose width is in nibble
    Const,  // Count in nibble
    Const8, // count = (nibble << 8) | byte
    // Controls how many times the next instruction is applied.
    // Not applicable to Const instructions which hold their own count.
    Count,  // count stored in nibble
    Count8, // count = (nibble << 8) | byte
    Lit,    // len in nibble
    LitW,   // len in operand, whose width is in nibble
    ShiftedRun, // len = (nibble << 8) | byte; high then low streams follow
}
/// Emit a single instruction byte: tag in the high nibble, operand low.
fn pack_tag<W: Write>(w: &mut W, t: Tag, nibble: u8) -> io::Result<()> {
    assert!(nibble < 16);
    let tag = t as u8;
    assert!(tag < 16);
    w.write_u8((tag << 4) | nibble)
}
/// Emit a repeat count for the following instruction.  A count of one is
/// implicit, so nothing is written; counts must be below 4096.
fn pack_count<W>(w: &mut W, count: u64) -> io::Result<()>
where
    W: Write,
{
    match count {
        1 => Ok(()),
        c if c < 16 => pack_tag(w, Tag::Count, c as u8),
        c => {
            assert!(c < 4096);
            let nibble = (c >> 8) as u8;
            assert!(nibble < 16);
            pack_tag(w, Tag::Count8, nibble)?;
            w.write_u8((c & 0xff) as u8)
        }
    }
}
fn pack_delta<W: Write>(w: &mut W, d: &Delta) -> io::Result<()> {
use Tag::*;
match d {
Delta::Base { n } => {
if *n <= std::u8::MAX as u64 {
pack_tag(w, Set, 1)?;
w.write_u8(*n as u8)
} else if *n <= std::u16::MAX as u64 {
pack_tag(w, Set, 2)?;
w.write_u16::<LittleEndian>(*n as u16)
} else if *n <= u32::MAX as u64 {
pack_tag(w, Set, 4)?;
w.write_u32::<LittleEndian>(*n as u32)
} else {
pack_tag(w, Set, 8)?;
w.write_u64::<LittleEndian>(*n)
}
}
Delta::Pos { delta, count } => {
pack_count(w, *count)?;
if *delta < 16 {
pack_tag(w, Tag::Pos, *delta as u8)
} else if *delta <= u8::MAX as u64 {
pack_tag(w, PosW, 1)?;
w.write_u8(*delta as u8)
} else if *delta <= u16::MAX as u64 {
pack_tag(w, PosW, 2)?;
w.write_u16::<LittleEndian>(*delta as u16)
} else if *delta <= u32::MAX as u64 {
pack_tag(w, PosW, 4)?;
w.write_u32::<LittleEndian>(*delta as u32)
} else {
pack_tag(w, PosW, 8)?;
w.write_u64::<LittleEndian>(*delta as u64)
}
}
Delta::Neg { delta, count } => {
pack_count(w, *count)?;
if *delta < 16 {
pack_tag(w, Neg, *delta as u8)
} else if *delta <= u8::MAX as u64 {
pack_tag(w, NegW, 1)?;
w.write_u8(*delta as u8)
} else if *delta <= u16::MAX as u64 {
pack_tag(w, NegW, 2)?;
w.write_u16::<LittleEndian>(*delta as u16)
} else if *delta <= u32::MAX as u64 {
pack_tag(w, NegW, 4)?;
w.write_u32::<LittleEndian>(*delta as u32)
} else {
pack_tag(w, NegW, 8)?;
w.write_u64::<LittleEndian>(*delta as u64)
}
}
Delta::Const { count } => {
if *count < 16 {
pack_tag(w, Tag::Const, *count as u8)
} else {
assert!(*count < 4096);
let nibble = *count >> 8;
assert!(nibble < 16);
pack_tag(w, Tag::Const8, nibble as u8)?;
w.write_u8((*count & 0xff) as u8)
}
}
}
}
/// Pack a slice of deltas back to back.
fn pack_deltas<W: Write>(w: &mut W, ds: &[Delta]) -> io::Result<()> {
    ds.iter().try_for_each(|d| pack_delta(w, d))
}
//-------------------------------------------------
/// Delta-compress a run of u64s and pack the resulting instructions.
pub fn pack_u64s<W: Write>(w: &mut W, ns: &[u64]) -> io::Result<()> {
    pack_deltas(w, &to_delta(ns))
}
/// Split each value into its high part (`n >> shift`) and its low `shift`
/// bits, returned as two parallel vectors.
fn unshift_nrs(shift: usize, ns: &[u64]) -> (Vec<u64>, Vec<u64>) {
    let mask = (1u64 << shift) - 1;
    ns.iter().map(|&n| (n >> shift, n & mask)).unzip()
}
/// Pack u64s as two separate streams: the bits above 24 and the low 24
/// bits.  A length header (nibble + byte) precedes the streams.
pub fn pack_shifted_u64s<W: Write>(w: &mut W, ns: &[u64]) -> io::Result<()> {
    let len = ns.len();
    let nibble = (len >> 8) as u8;
    assert!(nibble < 16);
    pack_tag(w, Tag::ShiftedRun, nibble)?;
    w.write_u8((len & 0xff) as u8)?;

    let (high, low) = unshift_nrs(24, ns);
    pack_u64s(w, &high)?;
    pack_u64s(w, &low)
}
/// Pack a raw byte run: a length header followed by the bytes verbatim.
/// Short runs store the length in the nibble; longer ones use a
/// width-tagged length operand.
pub fn pack_literal<W: Write>(w: &mut W, bs: &[u8]) -> io::Result<()> {
    use Tag::{Lit, LitW};
    let len = bs.len() as u64;
    match len {
        n if n < 16 => pack_tag(w, Lit, n as u8)?,
        n if n <= u8::MAX as u64 => {
            pack_tag(w, LitW, 1)?;
            w.write_u8(n as u8)?;
        }
        n if n <= u16::MAX as u64 => {
            pack_tag(w, LitW, 2)?;
            w.write_u16::<LittleEndian>(n as u16)?;
        }
        n if n <= u32::MAX as u64 => {
            pack_tag(w, LitW, 4)?;
            w.write_u32::<LittleEndian>(n as u32)?;
        }
        n => {
            pack_tag(w, LitW, 8)?;
            w.write_u64::<LittleEndian>(n)?;
        }
    }
    w.write_all(bs)
}
//-------------------------------------------------
/// Read a little-endian value whose width in bytes is given by `nibble`.
///
/// # Errors
/// Returns `InvalidData` (rather than panicking, as the previous version
/// did) on an unsupported width: the nibble comes straight from the pack
/// stream, so a corrupt file must not abort the process.
fn unpack_with_width<R: Read>(r: &mut R, nibble: u8) -> io::Result<u64> {
    let v = match nibble {
        1 => r.read_u8()? as u64,
        2 => r.read_u16::<LittleEndian>()? as u64,
        4 => r.read_u32::<LittleEndian>()? as u64,
        8 => r.read_u64::<LittleEndian>()?,
        _ => {
            return Err(io::Error::new(
                io::ErrorKind::InvalidData,
                format!("bad operand width ({})", nibble),
            ));
        }
    };
    Ok(v)
}
/// Read `count` raw little-endian u64s from `r`.
pub fn unpack_u64s<R: Read>(r: &mut R, count: usize) -> io::Result<Vec<u64>> {
    (0..count)
        .map(|_| r.read_u64::<LittleEndian>())
        .collect()
}
/// Decoder state for the instruction stream produced by the pack_* fns.
pub struct VM {
    // Current value; Set replaces it, Pos/Neg-style instructions adjust it.
    base: u64,
    // Total bytes emitted so far; exec() uses this to know when to stop.
    bytes_written: usize,
}
impl VM {
    pub fn new() -> VM {
        VM {
            base: 0,
            bytes_written: 0,
        }
    }

    // Emit `n` little-endian, tracking the byte count.
    fn emit_u64<W: Write>(&mut self, w: &mut W, n: u64) -> io::Result<()> {
        w.write_u64::<LittleEndian>(n)?;
        self.bytes_written += 8;
        Ok(())
    }

    // Emit the current base value.
    fn emit_base<W: Write>(&mut self, w: &mut W) -> io::Result<()> {
        self.emit_u64(w, self.base)
    }

    // Emit raw bytes, tracking the byte count.
    fn emit_bytes<W: Write>(&mut self, w: &mut W, bytes: &[u8]) -> io::Result<()> {
        let len = bytes.len();
        w.write_all(bytes)?;
        self.bytes_written += len;
        Ok(())
    }

    // Decode and execute a single instruction.  `count` is a repeat count
    // set by a preceding Count/Count8 instruction (1 otherwise); Count
    // recurses here with the decoded repeat.
    fn unpack_instr<R: Read, W: Write>(
        &mut self,
        r: &mut R,
        w: &mut W,
        count: usize,
    ) -> io::Result<()> {
        use Tag::*;
        let b = r.read_u8()?;
        // High nibble is the opcode, low nibble its inline operand.
        let kind: Tag = match Tag::from_u8(b >> 4) {
            Some(k) => k,
            None => {
                panic!("bad tag");
            }
        };
        let nibble = b & 0xf;

        match kind {
            Set => {
                self.base = unpack_with_width(r, nibble)?;
                for _ in 0..count {
                    self.emit_base(w)?;
                }
            }
            Pos => {
                for _ in 0..count {
                    self.base += nibble as u64;
                    self.emit_base(w)?;
                }
            }
            PosW => {
                let delta = unpack_with_width(r, nibble)?;
                for _ in 0..count {
                    self.base += delta;
                    self.emit_base(w)?;
                }
            }
            Neg => {
                for _ in 0..count {
                    self.base -= nibble as u64;
                    self.emit_base(w)?;
                }
            }
            NegW => {
                let delta = unpack_with_width(r, nibble)?;
                for _ in 0..count {
                    self.base -= delta;
                    self.emit_base(w)?;
                }
            }
            Const => {
                // Const carries its own repeat in the nibble; an outer
                // Count must not apply to it.
                assert_eq!(count, 1);
                for _ in 0..nibble as usize {
                    self.emit_base(w)?;
                }
            }
            Const8 => {
                assert_eq!(count, 1);
                let count = ((nibble as usize) << 8) | (r.read_u8()? as usize);
                for _ in 0..count {
                    self.emit_base(w)?;
                }
            }
            Count => {
                // Apply the next instruction `nibble` times.
                self.unpack_instr(r, w, nibble as usize)?;
            }
            Count8 => {
                let count = ((nibble as usize) << 8) | (r.read_u8()? as usize);
                self.unpack_instr(r, w, count as usize)?;
            }
            Lit => {
                assert_eq!(count, 1);
                let len = nibble as usize;
                let mut bytes = vec![0; len];
                r.read_exact(&mut bytes[0..])?;
                self.emit_bytes(w, &bytes)?;
            }
            LitW => {
                assert_eq!(count, 1);
                let len = unpack_with_width(r, nibble)? as usize;
                let mut bytes = vec![0; len];
                r.read_exact(&mut bytes[0..])?;
                self.emit_bytes(w, &bytes)?;
            }
            ShiftedRun => {
                // FIXME: repeated unpack, pack, unpack
                // Decode the two sub-streams (high and low parts) into
                // temporary buffers, then recombine: (high << 24) | low.
                let len = ((nibble as usize) << 8) | (r.read_u8()? as usize);
                let nr_bytes = (len as usize) * std::mem::size_of::<u64>() as usize;

                let mut high_bytes: Vec<u8> = Vec::with_capacity(nr_bytes);
                let written = self.exec(r, &mut high_bytes, nr_bytes)?;
                self.bytes_written -= written; // hack
                let mut high_r = Cursor::new(high_bytes);
                let high = unpack_u64s(&mut high_r, len)?;

                let mut low_bytes: Vec<u8> = Vec::with_capacity(nr_bytes);
                let written = self.exec(r, &mut low_bytes, nr_bytes)?;
                self.bytes_written -= written; // hack
                let mut low_r = Cursor::new(low_bytes);
                let low = unpack_u64s(&mut low_r, len)?;

                let mask = (1 << 24) - 1;
                for i in 0..len {
                    self.emit_u64(w, (high[i] << 24) | (low[i] & mask))?;
                }
            }
        }

        Ok(())
    }

    // Runs until at least a number of bytes have been emitted. Returns nr emitted.
    pub fn exec<R: Read, W: Write>(
        &mut self,
        r: &mut R,
        w: &mut W,
        emit_bytes: usize,
    ) -> io::Result<usize> {
        let begin = self.bytes_written;
        while (self.bytes_written - begin) < emit_bytes {
            self.unpack_instr(r, w, 1)?;
        }
        Ok(self.bytes_written - begin)
    }
}
impl Default for VM {
fn default() -> Self {
Self::new()
}
}
/// Decode exactly `count` bytes of output from the instruction stream.
///
/// Panics (asserts) if the stream does not decode to exactly `count`
/// bytes — callers pass the known block size.
pub fn unpack<R: Read>(r: &mut R, count: usize) -> io::Result<Vec<u8>> {
    let mut w = Vec::with_capacity(4096);
    let mut cursor = Cursor::new(&mut w);
    let mut vm = VM::new();
    let written = vm.exec(r, &mut cursor, count)?;

    assert_eq!(w.len(), count);
    assert_eq!(written, count);

    Ok(w)
}
//-------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trip literals of assorted sizes through pack_literal/unpack.
    #[test]
    fn test_pack_literals() {
        struct TestCase(Vec<u8>);

        let cases = [
            // This is a bad test case, because unpack will not exec
            // any instructions.
            TestCase(b"".to_vec()),
            TestCase(b"foo".to_vec()),
            TestCase(vec![42; 15]),
            TestCase(vec![42; 256]),
            TestCase(vec![42; 4096]),
        ];

        for t in &cases {
            let mut bs = Vec::with_capacity(4096);

            let mut w = Cursor::new(&mut bs);
            pack_literal(&mut w, &t.0[0..]).unwrap();

            let mut r = Cursor::new(&mut bs);
            let unpacked = unpack(&mut r, t.0.len()).unwrap();

            assert_eq!(&t.0[0..], &unpacked[0..]);
        }
    }

    // True iff `bytes` is exactly `ns` serialised as little-endian u64s.
    fn check_u64s_match(ns: &[u64], bytes: &[u8]) -> bool {
        let mut packed = Vec::with_capacity(ns.len() * 8);
        let mut w = Cursor::new(&mut packed);
        for n in ns {
            w.write_u64::<LittleEndian>(*n).unwrap();
        }
        packed == bytes
    }

    // Round-trip a run of u64s through pack_u64s/unpack.
    fn check_pack_u64s(ns: &[u64]) -> bool {
        println!("packing {:?}", &ns);
        let mut bs = Vec::with_capacity(4096);

        let mut w = Cursor::new(&mut bs);
        pack_u64s(&mut w, &ns[0..]).unwrap();
        println!("unpacked len = {}, packed len = {}", ns.len() * 8, bs.len());

        let mut r = Cursor::new(&mut bs);
        let unpacked = unpack(&mut r, ns.len() * 8).unwrap();

        check_u64s_match(ns, &unpacked[0..])
    }

    #[test]
    fn test_pack_u64s() {
        let cases = [
            vec![0],
            vec![1, 5, 9, 10],
            b"the quick brown fox jumps over the lazy dog"
                .iter()
                .map(|b| *b as u64)
                .collect(),
        ];

        for t in &cases {
            assert!(check_pack_u64s(t));
        }
    }

    #[quickcheck]
    fn prop_pack_u64s(mut ns: Vec<u64>) -> bool {
        ns.push(42); // We don't handle empty vecs
        check_pack_u64s(&ns)
    }

    // Round-trip (high, low) pairs through pack_shifted_u64s/unpack.
    fn check_pack_shifted_u64s(ns: &[(u64, u64)]) -> bool {
        let shifted: Vec<u64> = ns
            .iter()
            .map(|(h, l)| (h << 24) | (l & ((1 << 24) - 1)))
            .collect();

        println!("packing {:?}", &ns);
        let mut bs = Vec::with_capacity(4096);

        let mut w = Cursor::new(&mut bs);
        pack_shifted_u64s(&mut w, &shifted[0..]).unwrap();
        println!("unpacked len = {}, packed len = {}", ns.len() * 8, bs.len());

        let mut r = Cursor::new(&mut bs);
        let unpacked = unpack(&mut r, ns.len() * 8).unwrap();

        check_u64s_match(&shifted, &unpacked[0..])
    }

    #[quickcheck]
    fn prop_pack_shifted_u64s(mut ns: Vec<(u64, u64)>) -> bool {
        ns.push((42, 42));
        check_pack_shifted_u64s(&ns)
    }
}
//-------------------------------------------------

View File

@ -1,199 +0,0 @@
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{multi::count, number::complete::*, IResult};
use std::fmt;
use thiserror::Error;
use crate::checksum;
use crate::io_engine::BLOCK_SIZE;
use crate::pdata::btree;
use crate::pdata::unpack::{Pack, Unpack};
//------------------------------------------
const ARRAY_BLOCK_HEADER_SIZE: u32 = 24;
/// On-disk header of an array block (24 bytes: four u32s and a u64).
pub struct ArrayBlockHeader {
    pub csum: u32,        // block checksum (0 when packed; fixed up later)
    pub max_entries: u32, // capacity of this block in values
    pub nr_entries: u32,  // values actually present
    pub value_size: u32,  // serialised size of one value
    pub blocknr: u64,     // location of this block on the device
}
impl Unpack for ArrayBlockHeader {
    fn disk_size() -> u32 {
        ARRAY_BLOCK_HEADER_SIZE
    }

    /// Parse the header fields in on-disk order: csum, max_entries,
    /// nr_entries, value_size, blocknr — all little-endian.
    fn unpack(data: &[u8]) -> IResult<&[u8], ArrayBlockHeader> {
        let (i, csum) = le_u32(data)?;
        let (i, max_entries) = le_u32(i)?;
        let (i, nr_entries) = le_u32(i)?;
        let (i, value_size) = le_u32(i)?;
        let (i, blocknr) = le_u64(i)?;

        Ok((
            i,
            ArrayBlockHeader {
                csum,
                max_entries,
                nr_entries,
                value_size,
                blocknr,
            },
        ))
    }
}
impl Pack for ArrayBlockHeader {
    /// Serialise the header; the checksum field is written as zero.
    fn pack<W: WriteBytesExt>(&self, w: &mut W) -> anyhow::Result<()> {
        // csum needs to be calculated right for the whole metadata block.
        w.write_u32::<LittleEndian>(0)?;
        w.write_u32::<LittleEndian>(self.max_entries)?;
        w.write_u32::<LittleEndian>(self.nr_entries)?;
        w.write_u32::<LittleEndian>(self.value_size)?;
        w.write_u64::<LittleEndian>(self.blocknr)?;
        Ok(())
    }
}
//------------------------------------------
/// An in-memory array block: its header plus the decoded values.
pub struct ArrayBlock<V: Unpack> {
    pub header: ArrayBlockHeader,
    pub values: Vec<V>,
}

impl<V: Unpack> ArrayBlock<V> {
    /// Record the block's on-device location in the header.
    pub fn set_block(&mut self, b: u64) {
        self.header.blocknr = b;
    }
}
//------------------------------------------
/// Errors arising while walking/parsing array blocks.  Display is
/// implemented manually below, hence the commented-out #[error] attrs.
#[derive(Error, Clone, Debug)]
pub enum ArrayError {
    //#[error("io_error {0}")]
    IoError(u64),

    //#[error("block error: {0}")]
    BlockError(String),

    //#[error("value error: {0}")]
    ValueError(String),

    //#[error("index: {0:?}")]
    IndexContext(u64, Box<ArrayError>),

    //#[error("aggregate: {0:?}")]
    Aggregate(Vec<ArrayError>),

    //#[error("{0:?}, {1}")]
    Path(Vec<u64>, Box<ArrayError>),

    #[error(transparent)]
    BTreeError(#[from] btree::BTreeError),
}
/// Human-readable rendering; Path and IndexContext wrap an inner error
/// with location information.
impl fmt::Display for ArrayError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            ArrayError::IoError(b) => write!(f, "io error {}", b),
            ArrayError::BlockError(msg) => write!(f, "block error: {}", msg),
            ArrayError::ValueError(msg) => write!(f, "value error: {}", msg),
            // Spelling fix: "effecting" -> "affecting".
            ArrayError::IndexContext(idx, e) => write!(f, "{}, affecting index {}", e, idx),
            ArrayError::Aggregate(errs) => {
                for e in errs {
                    write!(f, "{}", e)?
                }
                Ok(())
            }
            ArrayError::Path(path, e) => write!(f, "{} {}", e, btree::encode_node_path(path)),
            ArrayError::BTreeError(e) => write!(f, "{}", e),
        }
    }
}
/// An IO failure at `blocknr`, wrapped with the btree path leading to it.
pub fn io_err(path: &[u64], blocknr: u64) -> ArrayError {
    ArrayError::Path(path.to_vec(), Box::new(ArrayError::IoError(blocknr)))
}

/// A block-level failure, wrapped with the btree path leading to it.
pub fn array_block_err(path: &[u64], msg: &str) -> ArrayError {
    let inner = ArrayError::BlockError(msg.to_string());
    ArrayError::Path(path.to_vec(), Box::new(inner))
}

/// A failure interpreting a single value.
pub fn value_err(msg: String) -> ArrayError {
    ArrayError::ValueError(msg)
}

/// Collect several errors into one aggregate error.
pub fn aggregate_error(errs: Vec<ArrayError>) -> ArrayError {
    ArrayError::Aggregate(errs)
}
impl ArrayError {
    /// Annotate this error with the array index it relates to.
    pub fn index_context(self, index: u64) -> ArrayError {
        ArrayError::IndexContext(index, Box::new(self))
    }
}

// Local Result alias used throughout the array code.
pub type Result<T> = std::result::Result<T, ArrayError>;
//------------------------------------------
// Map a nom parse failure into an ArrayError carrying the btree path.
fn convert_result<'a, V>(path: &[u64], r: IResult<&'a [u8], V>) -> Result<(&'a [u8], V)> {
    r.map_err(|_| array_block_err(path, "parse error"))
}
/// Parse and validate one array block: checksum/type tag, header sanity
/// (value_size, max_entries), then the value list.
pub fn unpack_array_block<V: Unpack>(path: &[u64], data: &[u8]) -> Result<ArrayBlock<V>> {
    // The checksum routine also identifies the block type.
    let bt = checksum::metadata_block_type(data);
    if bt != checksum::BT::ARRAY {
        return Err(array_block_err(
            path,
            &format!(
                "checksum failed for array block {}, {:?}",
                path.last().unwrap(),
                bt
            ),
        ));
    }

    let (i, header) = ArrayBlockHeader::unpack(data)
        .map_err(|_| array_block_err(path, "Couldn't parse array block header"))?;

    // check value_size
    if header.value_size != V::disk_size() {
        return Err(array_block_err(
            path,
            &format!(
                "value_size mismatch: expected {}, was {}",
                V::disk_size(),
                header.value_size
            ),
        ));
    }

    // check max_entries: the claimed payload must fit within the block.
    if header.value_size * header.max_entries + ARRAY_BLOCK_HEADER_SIZE > BLOCK_SIZE as u32 {
        return Err(array_block_err(
            path,
            &format!("max_entries is too large ({})", header.max_entries),
        ));
    }

    // TODO: check nr_entries < max_entries
    // TODO: check block_nr

    let (_i, values) = convert_result(path, count(V::unpack, header.nr_entries as usize)(i))?;

    Ok(ArrayBlock { header, values })
}
//------------------------------------------

View File

@ -1,190 +0,0 @@
use anyhow::{anyhow, Result};
use byteorder::WriteBytesExt;
use std::io::Cursor;
use crate::checksum;
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::array::*;
use crate::pdata::btree_builder::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;
//------------------------------------------
/// Builds the sequence of array blocks for a fixed-size array.
pub struct ArrayBlockBuilder<V: Unpack + Pack> {
    array_io: ArrayIO<V>,
    nr_entries: u64, // size of the array
    entries_per_block: usize,
    array_blocks: Vec<u64>, // emitted array blocks
    values: Vec<V>,         // internal buffer
}

/// Builds a complete array: the blocks plus the btree indexing them.
pub struct ArrayBuilder<V: Unpack + Pack> {
    block_builder: ArrayBlockBuilder<V>,
}

// Zero-sized helper that serialises array blocks via a WriteBatcher.
struct ArrayIO<V: Unpack + Pack> {
    dummy: std::marker::PhantomData<V>,
}

// Location of a freshly written block.
struct WriteResult {
    loc: u64,
}
//------------------------------------------
// How many V values fit in one block after the array block header.
fn calc_max_entries<V: Unpack>() -> usize {
    (BLOCK_SIZE - ArrayBlockHeader::disk_size() as usize) / V::disk_size() as usize
}
//------------------------------------------
impl<V: Unpack + Pack + Clone + Default> ArrayBlockBuilder<V> {
    pub fn new(nr_entries: u64) -> ArrayBlockBuilder<V> {
        let entries_per_block = calc_max_entries::<V>();
        let nr_blocks = div_up(nr_entries, entries_per_block as u64) as usize;
        let next_cap = std::cmp::min(nr_entries, entries_per_block as u64) as usize;

        ArrayBlockBuilder {
            array_io: ArrayIO::new(),
            nr_entries,
            entries_per_block,
            array_blocks: Vec::with_capacity(nr_blocks),
            values: Vec::<V>::with_capacity(next_cap),
        }
    }

    /// Total number of array blocks this builder will emit.
    ///
    /// Computed from nr_entries rather than read back from
    /// `array_blocks.capacity()` (as the previous code did):
    /// `Vec::with_capacity` only guarantees a *minimum* capacity, so
    /// capacity is not a reliable stand-in for the block count.
    fn nr_blocks(&self) -> usize {
        div_up(self.nr_entries, self.entries_per_block as u64) as usize
    }

    /// Append the value at `index`.  Indices must arrive in ascending
    /// order; gaps are filled with `V::default()`.
    pub fn push_value(&mut self, w: &mut WriteBatcher, index: u64, v: V) -> Result<()> {
        let bi = index / self.entries_per_block as u64;
        let i = (index % self.entries_per_block as u64) as usize;

        if index >= self.nr_entries {
            return Err(anyhow!("array index out of bounds"));
        }

        // Flush every complete block before the one this value lands in.
        while (self.array_blocks.len() as u64) < bi {
            self.emit_block(w)?;
        }

        if bi < self.array_blocks.len() as u64 || i < self.values.len() {
            return Err(anyhow!("unordered array index"));
        }

        // Gap-fill skipped slots within the current block.
        if i > self.values.len() {
            self.values.resize_with(i, Default::default);
        }
        self.values.push(v);

        Ok(())
    }

    /// Emit all remaining blocks and return their locations.
    pub fn complete(mut self, w: &mut WriteBatcher) -> Result<Vec<u64>> {
        // Emit all the remaining queued values
        let nr_blocks = self.nr_blocks();
        while self.array_blocks.len() < nr_blocks {
            self.emit_block(w)?;
        }
        Ok(self.array_blocks)
    }

    /// Emit the next array block (only the final block may be partial).
    fn emit_block(&mut self, w: &mut WriteBatcher) -> Result<()> {
        let nr_blocks = self.nr_blocks();
        let cur_bi = self.array_blocks.len();

        // Buffer capacity for the *next* block: a full block's worth for
        // interior blocks, the remainder for the final block, zero once
        // everything has been emitted.
        let next_cap = if cur_bi + 1 < nr_blocks {
            let next_begin = (cur_bi as u64 + 1) * self.entries_per_block as u64;
            std::cmp::min(self.nr_entries - next_begin, self.entries_per_block as u64) as usize
        } else {
            0
        };

        let mut values = Vec::<V>::with_capacity(next_cap);
        std::mem::swap(&mut self.values, &mut values);

        // Pad the outgoing block to its full length with defaults.  The
        // length is computed explicitly instead of using
        // `values.capacity()`, which is only a lower bound.
        let begin = cur_bi as u64 * self.entries_per_block as u64;
        let len =
            std::cmp::min(self.nr_entries - begin, self.entries_per_block as u64) as usize;
        values.resize_with(len, Default::default);

        let wresult = self.array_io.write(w, values)?;
        self.array_blocks.push(wresult.loc);

        Ok(())
    }
}
//------------------------------------------
impl<V: Unpack + Pack + Clone + Default> ArrayBuilder<V> {
    pub fn new(nr_entries: u64) -> ArrayBuilder<V> {
        ArrayBuilder {
            block_builder: ArrayBlockBuilder::<V>::new(nr_entries),
        }
    }

    /// Forward a value to the block builder; indices must be ascending.
    pub fn push_value(&mut self, w: &mut WriteBatcher, index: u64, v: V) -> Result<()> {
        self.block_builder.push_value(w, index, v)
    }

    /// Flush all blocks, then build the btree that maps block index to
    /// block location.  Returns the root of that btree.
    pub fn complete(self, w: &mut WriteBatcher) -> Result<u64> {
        let blocks = self.block_builder.complete(w)?;
        let mut index_builder = BTreeBuilder::<u64>::new(Box::new(NoopRC {}));
        for (i, b) in blocks.iter().enumerate() {
            index_builder.push_value(w, i as u64, *b)?;
        }
        index_builder.complete(w)
    }
}
//------------------------------------------
impl<V: Unpack + Pack> ArrayIO<V> {
    pub fn new() -> ArrayIO<V> {
        ArrayIO {
            dummy: std::marker::PhantomData,
        }
    }

    /// Wrap `values` in a fresh header (csum/blocknr filled in at write
    /// time) and write the block out.
    fn write(&self, w: &mut WriteBatcher, values: Vec<V>) -> Result<WriteResult> {
        let header = ArrayBlockHeader {
            csum: 0,
            max_entries: calc_max_entries::<V>() as u32,
            nr_entries: values.len() as u32,
            value_size: V::disk_size(),
            blocknr: 0,
        };

        let ablock = ArrayBlock { header, values };
        write_array_block(w, ablock)
    }
}
/// Allocate a block from the batcher, stamp its location into the header,
/// serialise the array block, and submit it for writing (with checksum).
fn write_array_block<V: Unpack + Pack>(
    w: &mut WriteBatcher,
    mut ablock: ArrayBlock<V>,
) -> Result<WriteResult> {
    let b = w.alloc()?;
    ablock.set_block(b.loc);

    let mut cursor = Cursor::new(b.get_data());
    pack_array_block(&ablock, &mut cursor)?;
    let loc = b.loc;
    w.write(b, checksum::BT::ARRAY)?;

    Ok(WriteResult { loc })
}
/// Serialise an array block: header first, then each value in order.
fn pack_array_block<W: WriteBytesExt, V: Pack + Unpack>(
    ablock: &ArrayBlock<V>,
    w: &mut W,
) -> Result<()> {
    ablock.header.pack(w)?;
    ablock.values.iter().try_for_each(|v| v.pack(w))
}
//------------------------------------------

View File

@ -1,177 +0,0 @@
use std::sync::{Arc, Mutex};
use crate::io_engine::*;
use crate::pdata::array::{self, *};
use crate::pdata::btree::{self, *};
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
//------------------------------------------
/// Walks every array block reachable from an array's index btree.
pub struct ArrayWalker {
    engine: Arc<dyn IoEngine + Send + Sync>,
    // Space map tracking which blocks have been visited/referenced.
    sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
    ignore_non_fatal: bool,
}

/// Callback invoked for each successfully unpacked array block.
pub trait ArrayVisitor<V: Unpack> {
    fn visit(&self, index: u64, b: ArrayBlock<V>) -> array::Result<()>;
}
//------------------------------------------
// FIXME: Eliminate this structure by impl NodeVisitor for ArrayWalker?
struct BlockValueVisitor<'a, V> {
engine: Arc<dyn IoEngine + Send + Sync>,
array_visitor: &'a mut dyn ArrayVisitor<V>,
sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
array_errs: Mutex<Vec<ArrayError>>,
}
impl<'a, V: Unpack + Copy> BlockValueVisitor<'a, V> {
pub fn new(
e: Arc<dyn IoEngine + Send + Sync>,
sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
v: &'a mut dyn ArrayVisitor<V>,
) -> BlockValueVisitor<'a, V> {
BlockValueVisitor {
engine: e,
array_visitor: v,
sm,
array_errs: Mutex::new(Vec::new()),
}
}
}
impl<'a, V: Unpack + Copy> NodeVisitor<u64> for BlockValueVisitor<'a, V> {
    /// Visit one leaf of the index btree: `values` hold array block
    /// locations, `keys` their positions in the array.  IO and unpack
    /// failures are accumulated rather than aborting the walk.
    fn visit(
        &self,
        path: &[u64],
        kr: &KeyRange,
        _h: &NodeHeader,
        keys: &[u64],
        values: &[u64],
    ) -> btree::Result<()> {
        if keys.is_empty() {
            return Ok(());
        }

        // The ordering of array indices had been verified in unpack_node(),
        // thus checking the upper bound implies key continuity among siblings.
        // (Spelling fix in messages: "indicies" -> "indices".)
        if *keys.first().unwrap() + keys.len() as u64 != *keys.last().unwrap() + 1 {
            return Err(btree::value_err("gaps in array indices".to_string()));
        }
        if let Some(end) = kr.end {
            if *keys.last().unwrap() + 1 != end {
                return Err(btree::value_err(
                    "gaps or overlaps in array indices".to_string(),
                ));
            }
        }

        // FIXME: will the returned blocks be reordered?
        match self.engine.read_many(values) {
            Err(_) => {
                // IO completely failed on all the child blocks
                for (i, b) in values.iter().enumerate() {
                    // TODO: report indices of array entries based on the type size
                    let mut array_errs = self.array_errs.lock().unwrap();
                    array_errs.push(array::io_err(path, *b).index_context(keys[i]));
                }
            }
            Ok(rblocks) => {
                for (i, rb) in rblocks.into_iter().enumerate() {
                    match rb {
                        Err(_) => {
                            let mut array_errs = self.array_errs.lock().unwrap();
                            array_errs.push(array::io_err(path, values[i]).index_context(keys[i]));
                        }
                        Ok(b) => {
                            let mut path = path.to_vec();
                            path.push(b.loc);
                            match unpack_array_block::<V>(&path, b.get_data()) {
                                Ok(array_block) => {
                                    if let Err(e) = self.array_visitor.visit(keys[i], array_block) {
                                        self.array_errs.lock().unwrap().push(e);
                                    }
                                    // Count the reference to this block.
                                    let mut sm = self.sm.lock().unwrap();
                                    sm.inc(b.loc, 1).unwrap();
                                }
                                Err(e) => {
                                    self.array_errs.lock().unwrap().push(e);
                                }
                            }
                            path.pop();
                        }
                    }
                }
            }
        }

        Ok(())
    }

    fn visit_again(&self, _path: &[u64], _b: u64) -> btree::Result<()> {
        Ok(())
    }

    fn end_walk(&self) -> btree::Result<()> {
        Ok(())
    }
}
//------------------------------------------
impl ArrayWalker {
    /// Build a walker with its own restricted space map sized to the
    /// engine's device.
    pub fn new(engine: Arc<dyn IoEngine + Send + Sync>, ignore_non_fatal: bool) -> ArrayWalker {
        let nr_blocks = engine.get_nr_blocks() as u64;
        ArrayWalker {
            engine,
            sm: Arc::new(Mutex::new(RestrictedSpaceMap::new(nr_blocks))),
            ignore_non_fatal,
        }
    }

    /// Build a walker sharing the caller's space map, which must cover
    /// the same number of blocks as the engine (asserted).
    pub fn new_with_sm(
        engine: Arc<dyn IoEngine + Send + Sync>,
        sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
        ignore_non_fatal: bool,
    ) -> array::Result<ArrayWalker> {
        {
            let sm = sm.lock().unwrap();
            assert_eq!(sm.get_nr_blocks().unwrap(), engine.get_nr_blocks());
        }

        Ok(ArrayWalker {
            engine,
            sm,
            ignore_non_fatal,
        })
    }

    /// Walk the array index btree at `root`, handing each decoded array
    /// block to `visitor`.  Btree and array errors are collected and
    /// reported together: none -> Ok, one -> that error, several ->
    /// Aggregate.
    pub fn walk<V>(&self, visitor: &mut dyn ArrayVisitor<V>, root: u64) -> array::Result<()>
    where
        V: Unpack + Copy,
    {
        let w =
            BTreeWalker::new_with_sm(self.engine.clone(), self.sm.clone(), self.ignore_non_fatal)?;
        let v = BlockValueVisitor::<V>::new(self.engine.clone(), self.sm.clone(), visitor);

        let mut path = vec![0];
        let walk_result = w.walk(&mut path, &v, root);

        let mut errs = v.array_errs.into_inner().unwrap();
        if let Err(e) = walk_result {
            errs.push(ArrayError::BTreeError(e));
        }

        match errs.len() {
            0 => Ok(()),
            1 => Err(errs[0].clone()),
            _ => Err(ArrayError::Aggregate(errs)),
        }
    }
}
//------------------------------------------

View File

@ -1,174 +0,0 @@
use fixedbitset::FixedBitSet;
use std::sync::{Arc, Mutex};
use crate::io_engine::IoEngine;
use crate::math::div_up;
use crate::pdata::array::{self, ArrayBlock};
use crate::pdata::array_walker::{ArrayVisitor, ArrayWalker};
use crate::pdata::space_map::*;
//------------------------------------------
/// A bitset that also records whether each bit has been explicitly set.
/// Two underlying bits per logical bit: the even bit means "value known",
/// the odd bit holds the value itself.
pub struct CheckedBitSet {
    bits: FixedBitSet,
}

impl CheckedBitSet {
    pub fn with_capacity(bits: usize) -> CheckedBitSet {
        CheckedBitSet {
            // Double capacity: (known, value) pair per logical bit.
            bits: FixedBitSet::with_capacity(bits << 1),
        }
    }

    /// Set logical `bit` to `enabled`, marking it as known.
    pub fn set(&mut self, bit: usize, enabled: bool) {
        self.bits.set(bit << 1, true);
        self.bits.set((bit << 1) + 1, enabled);
    }

    /// Returns the bit's value, or None if it was never set.
    pub fn contains(&self, bit: usize) -> Option<bool> {
        if !self.bits.contains(bit << 1) {
            return None;
        }
        Some(self.bits.contains((bit << 1) + 1))
    }
}
//------------------------------------------
// Array visitor that accumulates the on-disk bitset (stored as u64
// values) into a CheckedBitSet.
struct BitsetVisitor {
    nr_bits: usize,
    bits: Mutex<CheckedBitSet>,
}

impl BitsetVisitor {
    pub fn new(nr_bits: usize) -> Self {
        BitsetVisitor {
            nr_bits,
            bits: Mutex::new(CheckedBitSet::with_capacity(nr_bits)),
        }
    }

    /// Consume the visitor, yielding the accumulated bitset.
    pub fn get_bitset(self) -> CheckedBitSet {
        self.bits.into_inner().unwrap()
    }
}
impl ArrayVisitor<u64> for BitsetVisitor {
    /// Record the bits of one array block.  Each u64 value carries 64
    /// bits; `index` is the block's position in the array.
    fn visit(&self, index: u64, b: ArrayBlock<u64>) -> array::Result<()> {
        let mut begin = (index as usize * (b.header.max_entries as usize)) << 6;
        if begin >= self.nr_bits {
            return Err(array::value_err(format!(
                "bitset size exceeds limit: {} bits",
                self.nr_bits
            )));
        }

        // Take the lock once per block — the previous code locked the
        // mutex for every single bit inside the inner loop.
        let mut bitset = self.bits.lock().unwrap();
        for bits in b.values.iter() {
            let end = std::cmp::min(begin + 64, self.nr_bits);
            let mut mask = 1;

            for bi in begin..end {
                bitset.set(bi, bits & mask != 0);
                mask <<= 1;
            }
            begin += 64;
        }
        Ok(())
    }
}
//------------------------------------------
// Array visitor that collects the raw bits into a plain FixedBitSet,
// without tracking which bits were present (contrast BitsetVisitor).
struct BitsetCollector {
    bits: Mutex<FixedBitSet>,
    nr_bits: usize,
}

impl BitsetCollector {
    fn new(nr_bits: usize) -> BitsetCollector {
        BitsetCollector {
            bits: Mutex::new(FixedBitSet::with_capacity(nr_bits)),
            nr_bits,
        }
    }

    /// Consume the collector, yielding the accumulated bitset.
    pub fn get_bitset(self) -> FixedBitSet {
        self.bits.into_inner().unwrap()
    }
}
impl ArrayVisitor<u64> for BitsetCollector {
fn visit(&self, index: u64, b: ArrayBlock<u64>) -> array::Result<()> {
let mut bitset = self.bits.lock().unwrap();
let mut idx = (index as usize * b.header.max_entries as usize) << 1; // index of u32 in bitset array
let idx_end = div_up(self.nr_bits, 32);
let mut dest = bitset.as_mut_slice().iter_mut().skip(idx);
for entry in b.values.iter() {
let lower = (*entry & (u32::MAX as u64)) as u32;
*(dest.next().ok_or_else(|| {
array::value_err(format!("bitset size exceeds limit: {} bits", self.nr_bits))
})?) = lower;
idx += 1;
if idx == idx_end {
break;
}
let upper = (*entry >> 32) as u32;
*(dest.next().ok_or_else(|| {
array::value_err(format!("bitset size exceeds limit: {} bits", self.nr_bits))
})?) = upper;
idx += 1;
}
Ok(())
}
}
//------------------------------------------
// TODO: multi-threaded is possible
/// Reads the bitset rooted at `root`, returning the collected bits
/// together with the first walk error encountered (if any).
pub fn read_bitset(
    engine: Arc<dyn IoEngine + Send + Sync>,
    root: u64,
    nr_bits: usize,
    ignore_none_fatal: bool,
) -> (CheckedBitSet, Option<array::ArrayError>) {
    let walker = ArrayWalker::new(engine, ignore_none_fatal);
    let mut visitor = BitsetVisitor::new(nr_bits);
    let err = walker.walk(&mut visitor, root).err();
    (visitor.get_bitset(), err)
}
// TODO: multi-threaded is possible
/// As read_bitset, but also records visited blocks in the supplied
/// space map via the walker.
pub fn read_bitset_with_sm(
    engine: Arc<dyn IoEngine + Send + Sync>,
    root: u64,
    nr_bits: usize,
    sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
    ignore_none_fatal: bool,
) -> array::Result<(CheckedBitSet, Option<array::ArrayError>)> {
    let walker = ArrayWalker::new_with_sm(engine, sm, ignore_none_fatal)?;
    let mut visitor = BitsetVisitor::new(nr_bits);
    let err = walker.walk(&mut visitor, root).err();
    Ok((visitor.get_bitset(), err))
}
/// Reads the bitset rooted at `root`, failing outright on any walk error
/// rather than returning partial results.
pub fn read_bitset_no_err(
    engine: Arc<dyn IoEngine + Send + Sync>,
    root: u64,
    nr_bits: usize,
    ignore_none_fatal: bool,
) -> array::Result<FixedBitSet> {
    let walker = ArrayWalker::new(engine, ignore_none_fatal);
    let mut collector = BitsetCollector::new(nr_bits);
    walker.walk(&mut collector, root)?;
    Ok(collector.get_bitset())
}

View File

@ -1,594 +0,0 @@
use anyhow::anyhow;
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use data_encoding::BASE64;
use nom::{number::complete::*, IResult};
use std::fmt;
use thiserror::Error;
use crate::io_engine::*;
use crate::pack::vm;
use crate::pdata::unpack::*;
//------------------------------------------
/// A half-open interval of btree keys; a `None` bound is unbounded.
#[derive(Clone, Debug, PartialEq)]
pub struct KeyRange {
    pub start: Option<u64>,
    pub end: Option<u64>, // This is the one-past-the-end value
}

impl KeyRange {
    /// An unbounded range covering every key.
    pub fn new() -> KeyRange {
        KeyRange {
            start: None,
            end: None,
        }
    }

    /// Splits the range at `n`, yielding ([start..n), [n..end)).
    /// None will be returned if either half would be zero length,
    /// i.e. unless start < n (when bounded) and n < end (when bounded).
    fn split(&self, n: u64) -> Option<(KeyRange, KeyRange)> {
        let above_start = self.start.map_or(true, |s| s < n);
        let below_end = self.end.map_or(true, |e| n < e);
        if above_start && below_end {
            let lhs = KeyRange {
                start: self.start,
                end: Some(n),
            };
            let rhs = KeyRange {
                start: Some(n),
                end: self.end,
            };
            Some((lhs, rhs))
        } else {
            None
        }
    }
}

impl Default for KeyRange {
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Display for KeyRange {
    /// Renders as "[s..e]", omitting unbounded ends, e.g. "[..]", "[5..]".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[")?;
        if let Some(s) = self.start {
            write!(f, "{}", s)?;
        }
        write!(f, "..")?;
        if let Some(e) = self.end {
            write!(f, "{}", e)?;
        }
        write!(f, "]")
    }
}
#[test]
fn test_split_range() {
    // Each case: (start, end, split point, expected halves as raw bounds);
    // None means the split should fail.
    type Bounds = (Option<u64>, Option<u64>, Option<u64>, Option<u64>);
    let cases: &[(Option<u64>, Option<u64>, u64, Option<Bounds>)] = &[
        (None, None, 100, Some((None, Some(100), Some(100), None))),
        (None, Some(100), 1000, None),
        (None, Some(100), 50, Some((None, Some(50), Some(50), Some(100)))),
        (None, Some(100), 100, None),
        (Some(100), None, 50, None),
        (Some(100), None, 150, Some((Some(100), Some(150), Some(150), None))),
        (Some(100), Some(200), 50, None),
        (Some(100), Some(200), 250, None),
        (
            Some(100),
            Some(200),
            150,
            Some((Some(100), Some(150), Some(150), Some(200))),
        ),
    ];
    for &(start, end, n, expected) in cases {
        let kr = KeyRange { start, end };
        let expected = expected.map(|(s1, e1, s2, e2)| {
            (
                KeyRange { start: s1, end: e1 },
                KeyRange { start: s2, end: e2 },
            )
        });
        assert_eq!(kr.split(n), expected);
    }
}
/// Splits `kr` at `k`, mapping a failed split onto a node error at `path`.
fn split_one(path: &[u64], kr: &KeyRange, k: u64) -> Result<(KeyRange, KeyRange)> {
    kr.split(k)
        .ok_or_else(|| node_err(path, &format!("couldn't split key range {} at {}", kr, k)))
}
/// Derives one key range per child from the keys of an internal node.
/// `kr` bounds the node as a whole; `keys` are the children's first keys.
/// Errors if `keys` is empty or any split would produce an empty range.
pub fn split_key_ranges(path: &[u64], kr: &KeyRange, keys: &[u64]) -> Result<Vec<KeyRange>> {
    if keys.is_empty() {
        return Err(node_err(path, "split_key_ranges: no keys present"));
    }
    // The first key gives the lower bound
    let mut current = KeyRange {
        start: Some(keys[0]),
        end: kr.end,
    };
    let mut krs = Vec::with_capacity(keys.len());
    for &k in &keys[1..] {
        let (head, tail) = split_one(path, &current, k)?;
        krs.push(head);
        current = tail;
    }
    krs.push(current);
    Ok(krs)
}
//------------------------------------------
// We compress and base64 encode paths to make them easy to
// cut and paste between programs (eg, thin_explore -p <path>)
/// Encodes a block path as base64. The header byte holds
/// (entry count << 1) | explicit-first-entry flag; a leading superblock
/// entry (0) is implied rather than stored.
pub fn encode_node_path(path: &[u64]) -> String {
    let mut buffer: Vec<u8> = Vec::with_capacity(128);
    let mut cursor = std::io::Cursor::new(&mut buffer);
    assert!(path.len() < 256);
    // The first entry is normally the superblock (0); drop it and clear
    // the low bit of the count byte to signal the implicit zero.
    let implicit_zero = !path.is_empty() && path[0] == 0;
    let tail = if implicit_zero { &path[1..] } else { path };
    let count = ((tail.len() as u8) << 1) | (!implicit_zero as u8);
    cursor.write_u8(count).unwrap();
    vm::pack_u64s(&mut cursor, tail).unwrap();
    BASE64.encode(&buffer)
}
/// Decodes a node path produced by encode_node_path.
pub fn decode_node_path(text: &str) -> anyhow::Result<Vec<u64>> {
    let mut buffer = vec![0; 128];
    // NOTE(review): decode_len() could exceed the fixed 128-byte buffer for
    // very long input, in which case this slice would panic rather than
    // return an error — confirm inputs are bounded upstream.
    let bytes = &mut buffer[0..BASE64.decode_len(text.len()).unwrap()];
    BASE64
        .decode_mut(text.as_bytes(), &mut bytes[0..])
        .map_err(|_| anyhow!("bad node path. Unable to base64 decode."))?;
    let mut input = std::io::Cursor::new(bytes);
    // Header byte: (entry count << 1) | explicit-first-entry flag.
    let mut count = input.read_u8()?;
    let mut prepend_zero = false;
    if (count & 0x1) == 0 {
        // Implicit 0 as first entry
        prepend_zero = true;
    }
    count >>= 1;
    let count = count as usize;
    let mut path;
    if count == 0 {
        path = vec![];
    } else {
        // Run the pack VM to expand the compressed stream into count * 8
        // bytes, then unpack those bytes back into u64s.
        let mut output = Vec::with_capacity(count * 8);
        let mut cursor = std::io::Cursor::new(&mut output);
        let mut vm = vm::VM::new();
        let written = vm.exec(&mut input, &mut cursor, count * 8)?;
        assert_eq!(written, count * 8);
        let mut cursor = std::io::Cursor::new(&mut output);
        path = vm::unpack_u64s(&mut cursor, count)?;
    }
    if prepend_zero {
        let mut full_path = vec![0u64];
        full_path.append(&mut path);
        Ok(full_path)
    } else {
        Ok(path)
    }
}
#[test]
fn test_encode_path() {
    // Round-trip: encoding then decoding must reproduce the original path,
    // including paths with and without the implicit leading superblock (0).
    struct Test(Vec<u64>);
    let tests = vec![
        Test(vec![]),
        Test(vec![1]),
        Test(vec![1, 2]),
        Test(vec![1, 2, 3, 4]),
        Test(vec![0]),
        Test(vec![0, 0]),
        Test(vec![0, 1]),
        Test(vec![0, 1, 2]),
        Test(vec![0, 123, 201231, 3102983012]),
    ];
    for t in tests {
        let encoded = encode_node_path(&t.0[0..]);
        let decoded = decode_node_path(&encoded).unwrap();
        assert_eq!(decoded, &t.0[0..]);
    }
}
//------------------------------------------
// On-disk node header size in bytes; must agree with NodeHeader::disk_size().
const NODE_HEADER_SIZE: usize = 32;
/// Errors produced while reading or validating btree nodes. Display is
/// implemented manually below; the #[error] attributes are kept commented
/// out for reference.
#[derive(Error, Clone, Debug)]
pub enum BTreeError {
    // #[error("io error")]
    IoError, // (std::io::Error), // FIXME: we can't clone an io_error
    // #[error("node error: {0}")]
    NodeError(String),
    // #[error("value error: {0}")]
    ValueError(String),
    // Wraps an error with the key range it applies to.
    // #[error("keys: {0:?}")]
    KeyContext(KeyRange, Box<BTreeError>),
    // #[error("aggregate: {0:?}")]
    Aggregate(Vec<BTreeError>),
    // Wraps an error with the block path leading to it.
    // #[error("{0:?}, {1}")]
    Path(Vec<u64>, Box<BTreeError>),
}
impl fmt::Display for BTreeError {
    /// Human-readable rendering; wrapping variants print the inner error
    /// first, then their own context.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            BTreeError::IoError => write!(f, "io error"),
            BTreeError::NodeError(msg) => write!(f, "node error: {}", msg),
            BTreeError::ValueError(msg) => write!(f, "value error: {}", msg),
            // Fixed typo in user-facing text: was "effecting".
            BTreeError::KeyContext(kr, be) => write!(f, "{}, affecting keys {}", be, kr),
            BTreeError::Aggregate(errs) => {
                for e in errs {
                    write!(f, "{}", e)?
                }
                Ok(())
            }
            BTreeError::Path(path, e) => write!(f, "{} {}", e, encode_node_path(path)),
        }
    }
}
/// Wraps a NodeError in the block path that led to it.
pub fn node_err(path: &[u64], msg: &str) -> BTreeError {
    node_err_s(path, msg.to_string())
}

/// As node_err, but takes an owned message.
pub fn node_err_s(path: &[u64], msg: String) -> BTreeError {
    BTreeError::Path(path.to_vec(), Box::new(BTreeError::NodeError(msg)))
}

/// An IoError wrapped in the block path that led to it.
pub fn io_err(path: &[u64]) -> BTreeError {
    BTreeError::Path(path.to_vec(), Box::new(BTreeError::IoError))
}

pub fn value_err(msg: String) -> BTreeError {
    BTreeError::ValueError(msg)
}

pub fn aggregate_error(rs: Vec<BTreeError>) -> BTreeError {
    BTreeError::Aggregate(rs)
}

impl BTreeError {
    /// Annotates this error with the key range it affects.
    pub fn keys_context(self, keys: &KeyRange) -> BTreeError {
        BTreeError::KeyContext(keys.clone(), Box::new(self))
    }
}

/// Result type used throughout the btree code.
pub type Result<T> = std::result::Result<T, BTreeError>;
//------------------------------------------
/// The fixed-size header at the start of every btree node.
#[derive(Debug, Clone, Copy)]
pub struct NodeHeader {
    pub block: u64, // block number this node was read from / written to
    pub is_leaf: bool,
    pub nr_entries: u32,  // entries currently in use
    pub max_entries: u32, // capacity of the node
    pub value_size: u32,  // bytes per value on disk
}
// Values of the node-header flags field distinguishing node kinds.
#[allow(dead_code)]
const INTERNAL_NODE: u32 = 1;
const LEAF_NODE: u32 = 2;
impl Unpack for NodeHeader {
    fn disk_size() -> u32 {
        32
    }
    /// Parses the on-disk little-endian layout:
    /// csum, flags, block, nr_entries, max_entries, value_size, padding.
    /// The checksum is skipped here; it's validated elsewhere.
    fn unpack(data: &[u8]) -> IResult<&[u8], NodeHeader> {
        let (i, _csum) = le_u32(data)?;
        let (i, flags) = le_u32(i)?;
        let (i, block) = le_u64(i)?;
        let (i, nr_entries) = le_u32(i)?;
        let (i, max_entries) = le_u32(i)?;
        let (i, value_size) = le_u32(i)?;
        let (i, _padding) = le_u32(i)?;
        Ok((
            i,
            NodeHeader {
                block,
                // Any flags value other than LEAF_NODE is treated as internal.
                is_leaf: flags == LEAF_NODE,
                nr_entries,
                max_entries,
                value_size,
            },
        ))
    }
}
impl Pack for NodeHeader {
    /// Serializes the header in the little-endian on-disk format. The
    /// checksum field is written as zero and must be filled in later,
    /// once the whole metadata block is complete.
    fn pack<W: WriteBytesExt>(&self, w: &mut W) -> anyhow::Result<()> {
        // csum needs to be calculated right for the whole metadata block.
        w.write_u32::<LittleEndian>(0)?;
        // Expression form replaces the original's deferred `let flags;`.
        let flags = if self.is_leaf { LEAF_NODE } else { INTERNAL_NODE };
        w.write_u32::<LittleEndian>(flags)?;
        w.write_u64::<LittleEndian>(self.block)?;
        w.write_u32::<LittleEndian>(self.nr_entries)?;
        w.write_u32::<LittleEndian>(self.max_entries)?;
        w.write_u32::<LittleEndian>(self.value_size)?;
        w.write_u32::<LittleEndian>(0)?; // padding
        Ok(())
    }
}
/// A btree node: internal nodes hold child block numbers as values,
/// leaves hold the actual values of type V.
#[derive(Clone)]
pub enum Node<V: Unpack> {
    Internal {
        header: NodeHeader,
        keys: Vec<u64>,
        values: Vec<u64>, // child block numbers
    },
    Leaf {
        header: NodeHeader,
        keys: Vec<u64>,
        values: Vec<V>,
    },
}
impl<V: Unpack> Node<V> {
pub fn get_header(&self) -> &NodeHeader {
use Node::*;
match self {
Internal { header, .. } => header,
Leaf { header, .. } => header,
}
}
fn get_mut_header(&mut self) -> &mut NodeHeader {
use Node::*;
match self {
Internal { header, .. } => header,
Leaf { header, .. } => header,
}
}
pub fn get_keys(&self) -> &[u64] {
use Node::*;
match self {
Internal { keys, .. } => &keys[0..],
Leaf { keys, .. } => &keys[0..],
}
}
pub fn set_block(&mut self, b: u64) {
self.get_mut_header().block = b;
}
}
pub fn convert_result<'a, V>(path: &[u64], r: IResult<&'a [u8], V>) -> Result<(&'a [u8], V)> {
r.map_err(|_e| node_err(path, "parse error"))
}
pub fn convert_io_err<V>(path: &[u64], r: std::io::Result<V>) -> Result<V> {
r.map_err(|_| io_err(path))
}
/// Parses and validates a btree node from a raw metadata block.
///
/// Checks, in order: header parse, leaf value size against V, that the
/// entries fit in a block, nr_entries <= max_entries, (unless
/// ignore_non_fatal) max_entries divisible by 3, and strictly ascending
/// keys. `is_root` relaxes checks that don't apply to root nodes.
pub fn unpack_node<V: Unpack>(
    path: &[u64],
    data: &[u8],
    ignore_non_fatal: bool,
    is_root: bool,
) -> Result<Node<V>> {
    use nom::multi::count;
    let (i, header) =
        NodeHeader::unpack(data).map_err(|_e| node_err(path, "couldn't parse node header"))?;
    if header.is_leaf && header.value_size != V::disk_size() {
        return Err(node_err_s(
            path,
            format!(
                "value_size mismatch: expected {}, was {}",
                V::disk_size(),
                header.value_size
            ),
        ));
    }
    // Each entry is one 8-byte key plus one value.
    let elt_size = header.value_size + 8;
    if elt_size as usize * header.max_entries as usize + NODE_HEADER_SIZE > BLOCK_SIZE {
        return Err(node_err_s(
            path,
            format!("max_entries is too large ({})", header.max_entries),
        ));
    }
    if header.nr_entries > header.max_entries {
        return Err(node_err(path, "nr_entries > max_entries"));
    }
    if !ignore_non_fatal {
        if header.max_entries % 3 != 0 {
            return Err(node_err(path, "max_entries is not divisible by 3"));
        }
        if !is_root {
            // Minimum-occupancy check disabled; kept for reference.
            /*
            let min = header.max_entries / 3;
            if header.nr_entries < min {
                return Err(node_err_s(
                    path,
                    format!(
                        "too few entries {}, expected at least {}",
                        header.nr_entries, min
                    ),
                ));
            }
            */
        }
    }
    let (i, keys) = convert_result(path, count(le_u64, header.nr_entries as usize)(i))?;
    // Keys must be strictly increasing.
    let mut last = None;
    for k in &keys {
        if let Some(l) = last {
            if k <= l {
                return Err(node_err(
                    path,
                    &format!("keys out of order: {} <= {}", k, l),
                ));
            }
        }
        last = Some(k);
    }
    // Skip the unused key slots so `i` lands on the values.
    let nr_free = header.max_entries - header.nr_entries;
    let (i, _padding) = convert_result(path, count(le_u64, nr_free as usize)(i))?;
    if header.is_leaf {
        let (_i, values) = convert_result(path, count(V::unpack, header.nr_entries as usize)(i))?;
        Ok(Node::Leaf {
            header,
            keys,
            values,
        })
    } else {
        let (_i, values) = convert_result(path, count(le_u64, header.nr_entries as usize)(i))?;
        Ok(Node::Internal {
            header,
            keys,
            values,
        })
    }
}
//------------------------------------------

View File

@ -1,576 +0,0 @@
use anyhow::Result;
use byteorder::{LittleEndian, WriteBytesExt};
use std::collections::VecDeque;
use std::io::Cursor;
use std::sync::{Arc, Mutex};
use crate::checksum;
use crate::io_engine::*;
use crate::pdata::btree::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;
//------------------------------------------
/// A little ref counter abstraction. Used to manage counts for btree
/// values (eg, the block/time in a thin mapping tree).
pub trait RefCounter<Value> {
    /// Returns the current count for `v`.
    fn get(&self, v: &Value) -> Result<u32>;
    /// Increments the count for `v` by one.
    fn inc(&mut self, v: &Value) -> Result<()>;
    /// Decrements the count for `v` by one.
    fn dec(&mut self, v: &Value) -> Result<()>;
}
/// A RefCounter that tracks nothing; every count reads as zero.
pub struct NoopRC {}

impl<Value> RefCounter<Value> for NoopRC {
    fn get(&self, _: &Value) -> Result<u32> {
        Ok(0)
    }

    fn inc(&mut self, _: &Value) -> Result<()> {
        Ok(())
    }

    fn dec(&mut self, _: &Value) -> Result<()> {
        Ok(())
    }
}
/// Wraps a space map up to become a RefCounter.
pub struct SMRefCounter {
    sm: Arc<Mutex<dyn SpaceMap>>,
}

impl SMRefCounter {
    pub fn new(sm: Arc<Mutex<dyn SpaceMap>>) -> SMRefCounter {
        SMRefCounter { sm }
    }
}

impl RefCounter<u64> for SMRefCounter {
    fn get(&self, block: &u64) -> Result<u32> {
        self.sm.lock().unwrap().get(*block)
    }

    fn inc(&mut self, block: &u64) -> Result<()> {
        self.sm.lock().unwrap().inc(*block, 1)
    }

    fn dec(&mut self, block: &u64) -> Result<()> {
        // SpaceMap::dec returns a value; it's not needed here.
        self.sm.lock().unwrap().dec(*block)?;
        Ok(())
    }
}
//------------------------------------------
// Building a btree for a given set of values is straight forward.
// But often we want to merge shared subtrees into the btree we're
// building, which _is_ complicated. Requiring rebalancing of nodes,
// and careful copy-on-write operations so we don't disturb the shared
// subtree.
//
// To avoid these problems this code never produces shared internal nodes.
// With the large fan out of btrees this isn't really a problem; we'll
// allocate more nodes than optimum, but not many compared to the number
// of leaves. Also we can pack the leaves much better than the kernel
// does due to out of order insertions.
//
// There are thus two stages to building a btree.
//
// i) Produce a list of populated leaves. These leaves may well be shared.
// ii) Build the upper levels of the btree above the leaves.
//------------------------------------------
/// Pack the given node ready to write to disk.
pub fn pack_node<W: WriteBytesExt, V: Pack + Unpack>(node: &Node<V>, w: &mut W) -> Result<()> {
match node {
Node::Internal {
header,
keys,
values,
} => {
header.pack(w)?;
for k in keys {
w.write_u64::<LittleEndian>(*k)?;
}
// pad with zeroes
for _i in keys.len()..header.max_entries as usize {
w.write_u64::<LittleEndian>(0)?;
}
for v in values {
v.pack(w)?;
}
}
Node::Leaf {
header,
keys,
values,
} => {
header.pack(w)?;
for k in keys {
w.write_u64::<LittleEndian>(*k)?;
}
// pad with zeroes
for _i in keys.len()..header.max_entries as usize {
w.write_u64::<LittleEndian>(0)?;
}
for v in values {
v.pack(w)?;
}
}
}
Ok(())
}
//------------------------------------------
/// Number of entries a node can hold for value type V, rounded down to a
/// multiple of 3 (unpack_node rejects nodes whose max_entries isn't
/// divisible by 3).
pub fn calc_max_entries<V: Unpack>() -> usize {
    let elt_size = 8 + V::disk_size() as usize;
    let payload = BLOCK_SIZE - NodeHeader::disk_size() as usize;
    (payload / elt_size) / 3 * 3
}
/// Outcome of writing a node: where it landed and its lowest key.
pub struct WriteResult {
    first_key: u64, // first key in the node (0 if the node is empty)
    loc: u64,       // block the node was written to
}
/// Write a node to a free metadata block.
fn write_node_<V: Unpack + Pack>(w: &mut WriteBatcher, mut node: Node<V>) -> Result<WriteResult> {
    // An empty node reports first key 0.
    let first_key = node.get_keys().first().copied().unwrap_or(0);
    let b = w.alloc()?;
    let loc = b.loc;
    node.set_block(loc);
    let mut cursor = Cursor::new(b.get_data());
    pack_node(&node, &mut cursor)?;
    w.write(b, checksum::BT::NODE)?;
    Ok(WriteResult { first_key, loc })
}
/// A node writer takes a Vec of values and packs them into
/// a btree node. It's up to the specific implementation to
/// decide if it produces internal or leaf nodes.
pub trait NodeIO<V: Unpack + Pack> {
    /// Packs (keys, values) into a single node and writes it out.
    fn write(&self, w: &mut WriteBatcher, keys: Vec<u64>, values: Vec<V>) -> Result<WriteResult>;
    /// Reads a node back, returning its keys and values.
    fn read(&self, w: &mut WriteBatcher, block: u64) -> Result<(Vec<u64>, Vec<V>)>;
}
/// NodeIO implementation that produces/consumes leaf nodes.
pub struct LeafIO {}

impl<V: Unpack + Pack> NodeIO<V> for LeafIO {
    fn write(&self, w: &mut WriteBatcher, keys: Vec<u64>, values: Vec<V>) -> Result<WriteResult> {
        let nr_entries = keys.len() as u32;
        let node = Node::Leaf {
            header: NodeHeader {
                block: 0,
                is_leaf: true,
                nr_entries,
                max_entries: calc_max_entries::<V>() as u32,
                value_size: V::disk_size(),
            },
            keys,
            values,
        };
        write_node_(w, node)
    }

    fn read(&self, w: &mut WriteBatcher, block: u64) -> Result<(Vec<u64>, Vec<V>)> {
        let b = w.read(block)?;
        let path = Vec::new();
        match unpack_node::<V>(&path, b.get_data(), true, true)? {
            Node::Leaf { keys, values, .. } => Ok((keys, values)),
            Node::Internal { .. } => panic!("unexpected internal node"),
        }
    }
}
/// NodeIO implementation that produces/consumes internal nodes, whose
/// values are child block numbers.
struct InternalIO {}

impl NodeIO<u64> for InternalIO {
    fn write(&self, w: &mut WriteBatcher, keys: Vec<u64>, values: Vec<u64>) -> Result<WriteResult> {
        let nr_entries = keys.len() as u32;
        let node: Node<u64> = Node::Internal {
            header: NodeHeader {
                block: 0,
                is_leaf: false,
                nr_entries,
                max_entries: calc_max_entries::<u64>() as u32,
                value_size: u64::disk_size(),
            },
            keys,
            values,
        };
        write_node_(w, node)
    }

    fn read(&self, w: &mut WriteBatcher, block: u64) -> Result<(Vec<u64>, Vec<u64>)> {
        let b = w.read(block)?;
        let path = Vec::new();
        match unpack_node::<u64>(&path, b.get_data(), true, true)? {
            Node::Internal { keys, values, .. } => Ok((keys, values)),
            Node::Leaf { .. } => panic!("unexpected leaf node"),
        }
    }
}
//------------------------------------------
/// This takes a sequence of values or nodes, and builds a vector of leaf nodes.
/// Care is taken to make sure that all nodes are at least half full unless there's
/// only a single node.
pub struct NodeBuilder<V: Pack + Unpack> {
    nio: Box<dyn NodeIO<V>>,          // how nodes are read/written
    value_rc: Box<dyn RefCounter<V>>, // ref counting for the values
    max_entries_per_node: usize,
    values: VecDeque<(u64, V)>, // (key, value) pairs not yet emitted
    nodes: Vec<NodeSummary>,    // summaries of nodes emitted so far
    shared: bool,               // recorded on each emitted NodeSummary
}
/// When the builder is including pre-built nodes it has to decide whether
/// to use the node as given, or read it and import the values directly
/// for balancing reasons. This struct is used to stop us re-reading
/// the NodeHeaders of nodes that are shared multiple times.
#[derive(Clone)]
pub struct NodeSummary {
    block: u64, // where the node lives on disk
    key: u64,   // first key in the node
    nr_entries: usize,
    /// This node was passed in pre-built. Important for deciding if
    /// we need to adjust the ref counts if we unpack.
    shared: bool,
}
// NOTE(review): the lifetime parameter 'a appears unused throughout this
// impl — candidate for removal.
impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {
    /// Create a new NodeBuilder
    pub fn new(nio: Box<dyn NodeIO<V>>, value_rc: Box<dyn RefCounter<V>>, shared: bool) -> Self {
        NodeBuilder {
            nio,
            value_rc,
            max_entries_per_node: calc_max_entries::<V>(),
            values: VecDeque::new(),
            nodes: Vec::new(),
            shared,
        }
    }
    /// Push a single value. This may emit a new node, hence the Result
    /// return type. The value's ref count will be incremented.
    pub fn push_value(&mut self, w: &mut WriteBatcher, key: u64, val: V) -> Result<()> {
        // Unshift the previously pushed node since it is not the root
        let half_full = self.max_entries_per_node / 2;
        if self.nodes.len() == 1 && (self.nodes.last().unwrap().nr_entries < half_full) {
            self.unshift_node(w)?;
        }
        // Have we got enough values to emit a node? We try and keep
        // at least max_entries_per_node entries unflushed so we
        // can ensure the final node is balanced properly.
        else if self.values.len() == self.max_entries_per_node * 2 {
            self.emit_node(w)?;
        }
        self.value_rc.inc(&val)?;
        self.values.push_back((key, val));
        Ok(())
    }
    // To avoid writing an under populated node we have to grab some
    // values from the first of the shared nodes.
    fn append_values(&mut self, w: &mut WriteBatcher, node: &NodeSummary) -> Result<()> {
        let (keys, values) = self.read_node(w, node.block)?;
        // Unpacking takes new references to the values.
        for i in 0..keys.len() {
            self.value_rc.inc(&values[i])?;
            self.values.push_back((keys[i], values[i].clone()));
        }
        Ok(())
    }
    /// Push a number of prebuilt, shared nodes. The builder may decide to not
    /// use a shared node, instead reading the values and packing them
    /// directly. This may do IO to emit nodes, so returns a Result.
    /// Any shared nodes that are used have their block incremented in
    /// the space map. Will only increment the ref count for values
    /// contained in the nodes if it unpacks them.
    pub fn push_nodes(&mut self, w: &mut WriteBatcher, nodes: &[NodeSummary]) -> Result<()> {
        assert!(!nodes.is_empty());
        // Assume that the node is a shared root if it is the first comer.
        // A rooted leaf could have any number of entries.
        let maybe_root = (nodes.len() == 1) && self.nodes.is_empty() && self.values.is_empty();
        if maybe_root {
            let n = &nodes[0];
            w.sm.lock().unwrap().inc(n.block, 1)?;
            self.nodes.push(n.clone());
            return Ok(());
        }
        // As a sanity check we make sure that all the shared nodes contain the
        // minimum nr of entries.
        // A single shared node could be possibly under populated (less than half-full)
        // due to btree removal, or even underfull (<33% residency) due to kernel issues.
        // Those kinds of nodes will be merged into their siblings.
        let half_full = self.max_entries_per_node / 2;
        if nodes.len() > 1 {
            for n in nodes {
                if n.nr_entries < half_full {
                    panic!("under populated node");
                }
            }
        }
        // Unshift the previously pushed node since it is not the root
        if self.nodes.len() == 1 && (self.nodes.last().unwrap().nr_entries < half_full) {
            self.unshift_node(w)?;
        }
        // Decide if we're going to use the pre-built nodes.
        if !self.values.is_empty() && (self.values.len() < half_full) {
            // Too few pending values for a node of their own: merge the
            // first shared node's values in with them.
            let mut nodes_iter = nodes.iter();
            let n = nodes_iter.next();
            self.append_values(w, n.unwrap())?;
            // Do not flush if there's no succeeding nodes,
            // so that it could produce a more compact metadata.
            if nodes.len() > 1 {
                // Flush all the values.
                self.emit_all(w)?;
                // Add the remaining nodes.
                for n in nodes_iter {
                    w.sm.lock().unwrap().inc(n.block, 1)?;
                    self.nodes.push(n.clone());
                }
            }
        } else {
            // Flush all the values.
            self.emit_all(w)?;
            if nodes[0].nr_entries < half_full {
                // An under populated nodes[0] implies nodes.len() == 1,
                // and that has to be merged into their siblings.
                self.append_values(w, &nodes[0])?;
            } else {
                // Add the nodes.
                for n in nodes {
                    w.sm.lock().unwrap().inc(n.block, 1)?;
                    self.nodes.push(n.clone());
                }
            }
        }
        Ok(())
    }
    /// Signal that no more values or nodes will be pushed. Returns a
    /// vector of the built nodes. Consumes the builder.
    pub fn complete(mut self, w: &mut WriteBatcher) -> Result<Vec<NodeSummary>> {
        let half_full = self.max_entries_per_node / 2;
        if !self.values.is_empty() && (self.values.len() < half_full) && !self.nodes.is_empty() {
            // We don't have enough values to emit a node. So we're going to
            // have to rebalance with the previous node.
            self.unshift_node(w)?;
        }
        self.emit_all(w)?;
        // An empty tree still needs one (empty) leaf on disk.
        if self.nodes.is_empty() {
            self.emit_empty_leaf(w)?
        }
        Ok(self.nodes)
    }
    //-------------------------
    // We're only interested in the keys and values from the node, and
    // not whether it's a leaf or internal node.
    fn read_node(&self, w: &mut WriteBatcher, block: u64) -> Result<(Vec<u64>, Vec<V>)> {
        self.nio.read(w, block)
    }
    /// Writes a node with the first 'nr_entries' values.
    fn emit_values(&mut self, w: &mut WriteBatcher, nr_entries: usize) -> Result<()> {
        assert!(nr_entries <= self.values.len());
        // Write the node
        let mut keys = Vec::new();
        let mut values = Vec::new();
        for _i in 0..nr_entries {
            let (k, v) = self.values.pop_front().unwrap();
            keys.push(k);
            values.push(v);
        }
        let wresult = self.nio.write(w, keys, values)?;
        // Push a summary to the 'nodes' vector.
        self.nodes.push(NodeSummary {
            block: wresult.loc,
            key: wresult.first_key,
            nr_entries,
            shared: self.shared,
        });
        Ok(())
    }
    /// Writes a full node.
    fn emit_node(&mut self, w: &mut WriteBatcher) -> Result<()> {
        self.emit_values(w, self.max_entries_per_node)
    }
    /// Emits all remaining values. Panics if there are more than 2 *
    /// max_entries_per_node values.
    fn emit_all(&mut self, w: &mut WriteBatcher) -> Result<()> {
        match self.values.len() {
            0 => {
                // There's nothing to emit
                Ok(())
            }
            n if n <= self.max_entries_per_node => {
                // Emit a single node.
                self.emit_values(w, n)
            }
            n if n <= self.max_entries_per_node * 2 => {
                // Emit two nodes.
                let n1 = n / 2;
                let n2 = n - n1;
                self.emit_values(w, n1)?;
                self.emit_values(w, n2)
            }
            _ => {
                panic!("self.values shouldn't have more than 2 * max_entries_per_node entries");
            }
        }
    }
    fn emit_empty_leaf(&mut self, w: &mut WriteBatcher) -> Result<()> {
        self.emit_values(w, 0)
    }
    /// Pops the last node, and prepends it's values to 'self.values'. Used
    /// to rebalance when we have insufficient values for a final node. The
    /// node is decremented in the space map.
    fn unshift_node(&mut self, w: &mut WriteBatcher) -> Result<()> {
        let ls = self.nodes.pop().unwrap();
        let (keys, values) = self.read_node(w, ls.block)?;
        w.sm.lock().unwrap().dec(ls.block)?;
        let mut vals = VecDeque::new();
        for i in 0..keys.len() {
            // We only need to inc the values if the node was pre built.
            if ls.shared {
                self.value_rc.inc(&values[i])?;
            }
            vals.push_back((keys[i], values[i].clone()));
        }
        // Prepend the unpacked values to the pending queue.
        vals.append(&mut self.values);
        std::mem::swap(&mut self.values, &mut vals);
        Ok(())
    }
}
//------------------------------------------
/// Builds a complete btree from values and/or pre-built leaves pushed
/// in key order.
pub struct BTreeBuilder<V: Unpack + Pack> {
    leaf_builder: NodeBuilder<V>,
}

impl<V: Unpack + Pack + Clone> BTreeBuilder<V> {
    pub fn new(value_rc: Box<dyn RefCounter<V>>) -> BTreeBuilder<V> {
        let leaf_builder = NodeBuilder::new(Box::new(LeafIO {}), value_rc, false);
        BTreeBuilder { leaf_builder }
    }

    /// Appends a single (key, value) pair.
    pub fn push_value(&mut self, w: &mut WriteBatcher, k: u64, v: V) -> Result<()> {
        self.leaf_builder.push_value(w, k, v)
    }

    /// Appends a run of pre-built leaf nodes.
    pub fn push_leaves(&mut self, w: &mut WriteBatcher, leaves: &[NodeSummary]) -> Result<()> {
        self.leaf_builder.push_nodes(w, leaves)
    }

    /// Flushes the leaves, builds the internal levels and returns the
    /// root block. Consumes the builder.
    pub fn complete(self, w: &mut WriteBatcher) -> Result<u64> {
        let leaves = self.leaf_builder.complete(w)?;
        build_btree(w, leaves)
    }
}
//------------------------------------------
// Build a btree from a list of pre-built leaves
pub fn build_btree(w: &mut WriteBatcher, leaves: Vec<NodeSummary>) -> Result<u64> {
    // Repeatedly add a layer of internal nodes until a single root remains.
    let mut level = leaves;
    while level.len() > 1 {
        let mut builder = NodeBuilder::new(Box::new(InternalIO {}), Box::new(NoopRC {}), false);
        for n in level {
            builder.push_value(w, n.key, n.block)?;
        }
        level = builder.complete(w)?;
    }
    assert!(level.len() == 1);
    Ok(level[0].block)
}
//------------------------------------------
// The pre-built nodes and the contained values were initialized with
// a ref count 1, which is analogous to a "temporary snapshot" of
// potentially shared leaves. We have to drop those temporary references
// to pre-built nodes at the end of device building, and also decrease
// ref counts of the contained values if a pre-built leaf is no longer
// referenced.
pub fn release_leaves<V: Pack + Unpack>(
    w: &mut WriteBatcher,
    leaves: &[NodeSummary],
    value_rc: &mut dyn RefCounter<V>,
) -> Result<()> {
    let nio = LeafIO {};
    for leaf in leaves {
        // Only when the leaf itself is freed do its values lose a reference.
        let deleted = w.sm.lock().unwrap().dec(leaf.block)?;
        if deleted {
            let (_, values) = nio.read(w, leaf.block)?;
            for v in values {
                value_rc.dec(&v)?;
            }
        }
    }
    Ok(())
}
//------------------------------------------

View File

@ -1,232 +0,0 @@
use fixedbitset::FixedBitSet;
use std::sync::Arc;
use crate::checksum;
use crate::io_engine::*;
use crate::pdata::btree::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
//------------------------------------------
/// Callback interface for LeafWalker: invoked once per leaf block found.
pub trait LeafVisitor<V: Unpack> {
    /// Called for each leaf block `b` covering key range `kr`.
    fn visit(&mut self, kr: &KeyRange, b: u64) -> Result<()>;
    // Nodes may be shared and thus visited multiple times. The walker avoids
    // doing repeated IO, but it does call this method to keep the visitor up to
    // date. b may be an internal node obviously.
    // FIXME: remove this method?
    fn visit_again(&mut self, b: u64) -> Result<()>;
    /// Called after each node walk completes.
    fn end_walk(&mut self) -> Result<()>;
}
// This is useful if you just want to get the space map counts from the walk.
pub struct NoopLeafVisitor {}

impl<V: Unpack> LeafVisitor<V> for NoopLeafVisitor {
    fn visit(&mut self, _: &KeyRange, _: u64) -> Result<()> {
        Ok(())
    }

    fn visit_again(&mut self, _: u64) -> Result<()> {
        Ok(())
    }

    fn end_walk(&mut self) -> Result<()> {
        Ok(())
    }
}
/// Walks a btree down to (but not into) its leaves, recording which
/// blocks are leaves and incrementing space-map counts along the way.
pub struct LeafWalker<'a> {
    engine: Arc<dyn IoEngine + Send + Sync>,
    sm: &'a mut dyn SpaceMap,
    leaves: FixedBitSet, // blocks found to be leaves
    ignore_non_fatal: bool,
}
impl<'a> LeafWalker<'a> {
pub fn new(
engine: Arc<dyn IoEngine + Send + Sync>,
sm: &'a mut dyn SpaceMap,
ignore_non_fatal: bool,
) -> LeafWalker<'a> {
let nr_blocks = engine.get_nr_blocks() as usize;
LeafWalker {
engine,
sm,
leaves: FixedBitSet::with_capacity(nr_blocks),
ignore_non_fatal,
}
}
// Atomically increments the ref count, and returns the _old_ count.
fn sm_inc(&mut self, b: u64) -> u32 {
let sm = &mut self.sm;
let count = sm.get(b).unwrap();
sm.inc(b, 1).unwrap();
count
}
fn walk_nodes<LV, V>(
&mut self,
depth: usize,
path: &mut Vec<u64>,
visitor: &mut LV,
krs: &[KeyRange],
bs: &[u64],
) -> Result<()>
where
LV: LeafVisitor<V>,
V: Unpack,
{
assert_eq!(krs.len(), bs.len());
let mut blocks = Vec::with_capacity(bs.len());
let mut filtered_krs = Vec::with_capacity(krs.len());
for i in 0..bs.len() {
self.sm_inc(bs[i]);
blocks.push(bs[i]);
filtered_krs.push(krs[i].clone());
}
let rblocks = self
.engine
.read_many(&blocks[0..])
.map_err(|_e| io_err(path))?;
for (i, rb) in rblocks.into_iter().enumerate() {
match rb {
Err(_) => {
return Err(io_err(path).keys_context(&filtered_krs[i]));
}
Ok(b) => {
self.walk_node(depth - 1, path, visitor, &filtered_krs[i], &b, false)?;
}
}
}
Ok(())
}
fn walk_node_<LV, V>(
&mut self,
depth: usize,
path: &mut Vec<u64>,
visitor: &mut LV,
kr: &KeyRange,
b: &Block,
is_root: bool,
) -> Result<()>
where
LV: LeafVisitor<V>,
V: Unpack,
{
use Node::*;
let bt = checksum::metadata_block_type(b.get_data());
if bt != checksum::BT::NODE {
return Err(node_err_s(
path,
format!("checksum failed for node {}, {:?}", b.loc, bt),
)
.keys_context(kr));
}
let node = unpack_node::<V>(path, b.get_data(), self.ignore_non_fatal, is_root)?;
if let Internal { keys, values, .. } = node {
let krs = split_key_ranges(path, kr, &keys)?;
if depth == 0 {
// it is the lowest internal
for i in 0..krs.len() {
self.sm.inc(values[i], 1).expect("sm.inc() failed");
for v in &values {
self.leaves.insert(*v as usize);
}
visitor.visit(&krs[i], values[i])?;
}
Ok(())
} else {
self.walk_nodes(depth, path, visitor, &krs, &values)
}
} else {
Err(node_err(path, "btree nodes are not all at the same depth."))
}
}
fn walk_node<LV, V>(
&mut self,
depth: usize,
path: &mut Vec<u64>,
visitor: &mut LV,
kr: &KeyRange,
b: &Block,
is_root: bool,
) -> Result<()>
where
LV: LeafVisitor<V>,
V: Unpack,
{
path.push(b.loc);
let r = self.walk_node_(depth, path, visitor, kr, b, is_root);
path.pop();
visitor.end_walk()?;
r
}
    /// Returns the depth of the btree rooted at `root`: 0 for a leaf,
    /// otherwise 1 + the depth of the first child. Only the leftmost
    /// spine is read, since all leaves are expected to sit at the same
    /// depth (walk_node_ reports it as an error otherwise).
    fn get_depth<V: Unpack>(&self, path: &mut Vec<u64>, root: u64, is_root: bool) -> Result<usize> {
        use Node::*;
        let b = self.engine.read(root).map_err(|_| io_err(path))?;
        let bt = checksum::metadata_block_type(b.get_data());
        if bt != checksum::BT::NODE {
            return Err(node_err_s(
                path,
                format!("checksum failed for node {}, {:?}", root, bt),
            ));
        }
        let node = unpack_node::<V>(path, b.get_data(), self.ignore_non_fatal, is_root)?;
        match node {
            Internal { values, .. } => {
                // Descend through the first child only.
                let n = self.get_depth::<V>(path, values[0], false)?;
                Ok(n + 1)
            }
            Leaf { .. } => Ok(0),
        }
    }
    /// Walks the btree at `root`, calling the visitor for every leaf.
    /// The root's ref count is bumped; if the root is itself a leaf it
    /// is recorded in the leaves bitset and visited with an unbounded
    /// key range, otherwise the walk descends from `depth - 1`.
    pub fn walk<LV, V>(&mut self, path: &mut Vec<u64>, visitor: &mut LV, root: u64) -> Result<()>
    where
        LV: LeafVisitor<V>,
        V: Unpack,
    {
        // The root covers the whole key space.
        let kr = KeyRange {
            start: None,
            end: None,
        };
        let depth = self.get_depth::<V>(path, root, true)?;
        self.sm_inc(root);
        if depth == 0 {
            // root is a leaf
            self.leaves.insert(root as usize);
            visitor.visit(&kr, root)?;
            Ok(())
        } else {
            let root = self.engine.read(root).map_err(|_| io_err(path))?;
            self.walk_node(depth - 1, path, visitor, &kr, &root, true)
        }
    }
    /// Consumes the walker and returns the bitset of all leaf block
    /// numbers seen. Call this after the walking is done.
    pub fn get_leaves(self) -> FixedBitSet {
        self.leaves
    }
}
//------------------------------------------

View File

@ -1,134 +0,0 @@
use anyhow::Result;
use std::sync::{Arc, Mutex};
use crate::io_engine::*;
use crate::pdata::btree;
use crate::pdata::btree::*;
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;
//------------------------------------------
// The subtrees will often consist of a single under populated leaf node. Given this
// we're going to merge by:
// i) Building an ordered list of all leaf nodes across all subtrees.
// ii) Merge leaf nodes where they can be packed more efficiently (non destructively to original subtrees).
// iii) Build higher levels from scratch. There are very few of these internal nodes compared to leaves anyway.
/// Summary of a single leaf node gathered during the collection pass.
#[allow(dead_code)]
struct NodeSummary {
    block: u64,        // block number of the leaf
    nr_entries: usize, // number of entries it holds
    key_low: u64,      // lowest key in the leaf
    key_high: u64, // inclusive
}
/// Mutable state shared behind the LeafVisitor's mutex.
#[allow(dead_code)]
struct LVInner {
    last_key: Option<u64>,
    leaves: Vec<NodeSummary>,
}
/// Visitor that records an ordered list of leaf summaries.
struct LeafVisitor {
    inner: Mutex<LVInner>,
}
impl LeafVisitor {
    /// Creates a visitor with an empty leaf list and no key seen yet.
    fn new() -> LeafVisitor {
        let state = LVInner {
            last_key: None,
            leaves: Vec::new(),
        };
        LeafVisitor {
            inner: Mutex::new(state),
        }
    }
}
impl<V: Unpack> NodeVisitor<V> for LeafVisitor {
    /// Records a summary for each visited leaf, enforcing that leaves
    /// arrive in strictly ascending key order across all subtrees.
    fn visit(
        &self,
        path: &[u64],
        _kr: &KeyRange,
        _header: &NodeHeader,
        keys: &[u64],
        _values: &[V],
    ) -> btree::Result<()> {
        // ignore empty nodes
        if keys.is_empty() {
            return Ok(());
        }
        let mut inner = self.inner.lock().unwrap();
        // Check keys are ordered: this leaf's first key must be greater
        // than the highest key recorded so far.
        if !inner.leaves.is_empty() {
            let last_key = inner.leaves.last().unwrap().key_high;
            if keys[0] <= last_key {
                return Err(BTreeError::NodeError(
                    "unable to merge btrees: sub trees out of order".to_string(),
                ));
            }
        }
        // The leaf's block number is the last element of the walk path.
        let l = NodeSummary {
            block: *path.last().unwrap(),
            nr_entries: keys.len(),
            key_low: keys[0],
            key_high: *keys.last().unwrap(),
        };
        inner.leaves.push(l);
        Ok(())
    }
    fn visit_again(&self, _path: &[u64], _b: u64) -> btree::Result<()> {
        Ok(())
    }
    fn end_walk(&self) -> btree::Result<()> {
        Ok(())
    }
}
/// Shared, thread-safe IO engine handle.
pub type AEngine = Arc<dyn IoEngine + Send + Sync>;
/// Walks every subtree in `roots` and returns summaries of all their
/// leaves in key order. Fails if the subtrees' key ranges overlap or
/// are out of order (see LeafVisitor::visit).
fn collect_leaves<V: Unpack>(engine: AEngine, roots: &[u64]) -> Result<Vec<NodeSummary>> {
    let lv = LeafVisitor::new();
    let walker = BTreeWalker::new(engine, false);
    let mut path = Vec::new();
    for root in roots {
        walker.walk::<LeafVisitor, V>(&mut path, &lv, *root)?;
    }
    Ok(lv.inner.into_inner().unwrap().leaves)
}
//------------------------------------------
/// Repacks under-populated leaves into fewer, fuller ones.
/// Currently a stub that returns its input unchanged.
fn optimise_leaves<V: Unpack + Pack>(
    _batcher: &mut WriteBatcher,
    lvs: Vec<NodeSummary>,
) -> Result<Vec<NodeSummary>> {
    // FIXME: implement
    Ok(lvs)
}
//------------------------------------------
/// Merges several btrees into one by collecting all their leaves and
/// rebuilding the internal levels from scratch.
/// NOTE(review): unfinished — the rebuild step is still `todo!()`.
pub fn merge<V: Unpack + Pack>(
    engine: AEngine,
    sm: Arc<Mutex<dyn SpaceMap>>,
    roots: &[u64],
) -> Result<u64> {
    let lvs = collect_leaves::<V>(engine.clone(), roots)?;
    let mut batcher = WriteBatcher::new(engine, sm, 256);
    let _lvs = optimise_leaves::<V>(&mut batcher, lvs)?;
    todo!();
}
//------------------------------------------

View File

@ -1,666 +0,0 @@
use std::collections::{BTreeMap, BTreeSet};
use std::sync::{Arc, Mutex};
use threadpool::ThreadPool;
use crate::checksum;
use crate::io_engine::*;
use crate::pdata::btree::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
//------------------------------------------
/// Callback interface used by `BTreeWalker`: one method per event in a
/// depth-first traversal of a btree.
pub trait NodeVisitor<V: Unpack> {
    // &self is deliberately non mut to allow the walker to use multiple threads.
    /// Called once for each leaf node, with its parallel key/value slices.
    fn visit(
        &self,
        path: &[u64],
        kr: &KeyRange,
        header: &NodeHeader,
        keys: &[u64],
        values: &[V],
    ) -> Result<()>;
    // Nodes may be shared and thus visited multiple times. The walker avoids
    // doing repeated IO, but it does call this method to keep the visitor up to
    // date.
    fn visit_again(&self, path: &[u64], b: u64) -> Result<()>;
    /// Called when the walk of a node (and its subtree) completes.
    fn end_walk(&self) -> Result<()>;
}
/// Depth-first btree traversal engine. Cloneable and usable from many
/// threads: the visited-block set (`sm`) and the per-block error cache
/// (`fails`) live behind Arc/Mutex and are shared by all clones.
#[derive(Clone)]
pub struct BTreeWalker {
    engine: Arc<dyn IoEngine + Send + Sync>,
    sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
    // Errors found so far, keyed by block number.
    fails: Arc<Mutex<BTreeMap<u64, BTreeError>>>,
    ignore_non_fatal: bool,
}
impl BTreeWalker {
    /// Creates a walker with a private RestrictedSpaceMap (counts to one)
    /// sized to the engine's block count, used only to avoid re-visiting
    /// shared nodes.
    pub fn new(engine: Arc<dyn IoEngine + Send + Sync>, ignore_non_fatal: bool) -> BTreeWalker {
        let nr_blocks = engine.get_nr_blocks() as usize;
        // Return the struct expression directly; the previous annotated
        // `let r … ; r` binding was redundant.
        BTreeWalker {
            engine,
            sm: Arc::new(Mutex::new(RestrictedSpaceMap::new(nr_blocks as u64))),
            fails: Arc::new(Mutex::new(BTreeMap::new())),
            ignore_non_fatal,
        }
    }
    /// Creates a walker that shares `sm` for ref counting. The space map
    /// must cover exactly the engine's number of blocks.
    pub fn new_with_sm(
        engine: Arc<dyn IoEngine + Send + Sync>,
        sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
        ignore_non_fatal: bool,
    ) -> Result<BTreeWalker> {
        {
            let sm = sm.lock().unwrap();
            assert_eq!(sm.get_nr_blocks().unwrap(), engine.get_nr_blocks());
        }
        Ok(BTreeWalker {
            engine,
            sm,
            fails: Arc::new(Mutex::new(BTreeMap::new())),
            ignore_non_fatal,
        })
    }
    /// Returns the error previously recorded against block `b`, if any.
    fn failed(&self, b: u64) -> Option<BTreeError> {
        let fails = self.fails.lock().unwrap();
        fails.get(&b).cloned()
    }
    /// Records `err` against block `b` so later visits can report it
    /// without re-reading the node.
    fn set_fail(&self, b: u64, err: BTreeError) {
        // FIXME: should we monitor the size of fails, and abort if too many errors?
        let mut fails = self.fails.lock().unwrap();
        fails.insert(b, err);
    }
    // Atomically increments the ref count, and returns the _old_ count.
    fn sm_inc(&self, b: u64) -> u32 {
        let mut sm = self.sm.lock().unwrap();
        let count = sm.get(b).unwrap();
        sm.inc(b, 1).unwrap();
        count
    }
    /// Collapses child errors into one result: zero errors is Ok, one is
    /// returned as-is, several are wrapped into an aggregate. Whatever is
    /// returned is also recorded against block `b`.
    fn build_aggregate(&self, b: u64, errs: Vec<BTreeError>) -> Result<()> {
        match errs.len() {
            0 => Ok(()),
            1 => {
                let e = errs[0].clone();
                self.set_fail(b, e.clone());
                Err(e)
            }
            _ => {
                let e = aggregate_error(errs);
                self.set_fail(b, e.clone());
                Err(e)
            }
        }
    }
    /// Walks a set of sibling nodes. Previously seen nodes are handled via
    /// `visit_again` or the error cache; the rest are read in one batch and
    /// walked. All errors are gathered rather than short-circuiting, so the
    /// whole tree is examined.
    fn walk_nodes<NV, V>(
        &self,
        path: &mut Vec<u64>,
        visitor: &NV,
        krs: &[KeyRange],
        bs: &[u64],
    ) -> Vec<BTreeError>
    where
        NV: NodeVisitor<V>,
        V: Unpack,
    {
        assert_eq!(krs.len(), bs.len());
        let mut errs: Vec<BTreeError> = Vec::new();
        let mut blocks = Vec::with_capacity(bs.len());
        let mut filtered_krs = Vec::with_capacity(krs.len());
        for i in 0..bs.len() {
            if self.sm_inc(bs[i]) == 0 {
                // Node not yet seen
                blocks.push(bs[i]);
                filtered_krs.push(krs[i].clone());
            } else {
                // This node has already been checked ...
                match self.failed(bs[i]) {
                    None => {
                        // ... it was clean.
                        if let Err(e) = visitor.visit_again(path, bs[i]) {
                            // ... but the visitor isn't happy
                            errs.push(e.clone());
                        }
                    }
                    Some(e) => {
                        // ... there was an error
                        errs.push(e.clone());
                    }
                }
            }
        }
        match self.engine.read_many(&blocks[0..]) {
            Err(_) => {
                // IO completely failed, error every block
                for (i, b) in blocks.iter().enumerate() {
                    let e = io_err(path).keys_context(&filtered_krs[i]);
                    errs.push(e.clone());
                    self.set_fail(*b, e);
                }
            }
            Ok(rblocks) => {
                for (i, rb) in rblocks.into_iter().enumerate() {
                    match rb {
                        Err(_) => {
                            let e = io_err(path).keys_context(&filtered_krs[i]);
                            errs.push(e.clone());
                            self.set_fail(blocks[i], e);
                        }
                        Ok(b) => match self.walk_node(path, visitor, &filtered_krs[i], &b, false) {
                            Err(e) => {
                                errs.push(e);
                            }
                            Ok(()) => {}
                        },
                    }
                }
            }
        }
        errs
    }
    /// Checksums and unpacks one node, then recurses (internal) or calls
    /// the visitor (leaf). Leaf visitor errors are wrapped with the walk
    /// path and cached against the block.
    fn walk_node_<NV, V>(
        &self,
        path: &mut Vec<u64>,
        visitor: &NV,
        kr: &KeyRange,
        b: &Block,
        is_root: bool,
    ) -> Result<()>
    where
        NV: NodeVisitor<V>,
        V: Unpack,
    {
        use Node::*;
        let bt = checksum::metadata_block_type(b.get_data());
        if bt != checksum::BT::NODE {
            return Err(node_err_s(
                path,
                format!("checksum failed for node {}, {:?}", b.loc, bt),
            )
            .keys_context(kr));
        }
        let node = unpack_node::<V>(path, b.get_data(), self.ignore_non_fatal, is_root)?;
        match node {
            Internal { keys, values, .. } => {
                let krs = split_key_ranges(path, kr, &keys)?;
                let errs = self.walk_nodes(path, visitor, &krs, &values);
                return self.build_aggregate(b.loc, errs);
            }
            Leaf {
                header,
                keys,
                values,
            } => {
                if let Err(e) = visitor.visit(path, kr, &header, &keys, &values) {
                    let e = BTreeError::Path(path.clone(), Box::new(e));
                    self.set_fail(b.loc, e.clone());
                    return Err(e);
                }
            }
        }
        Ok(())
    }
    /// Walks one node: maintains `path` around the recursive call and
    /// always notifies the visitor via `end_walk`, even on failure.
    fn walk_node<NV, V>(
        &self,
        path: &mut Vec<u64>,
        visitor: &NV,
        kr: &KeyRange,
        b: &Block,
        is_root: bool,
    ) -> Result<()>
    where
        NV: NodeVisitor<V>,
        V: Unpack,
    {
        path.push(b.loc);
        let r = self.walk_node_(path, visitor, kr, b, is_root);
        path.pop();
        visitor.end_walk()?;
        r
    }
    /// Walks the btree at `root` with an unbounded key range. If the root
    /// was already visited, the cached error (if any) is returned and the
    /// visitor only gets `visit_again`.
    pub fn walk<NV, V>(&self, path: &mut Vec<u64>, visitor: &NV, root: u64) -> Result<()>
    where
        NV: NodeVisitor<V>,
        V: Unpack,
    {
        if self.sm_inc(root) > 0 {
            if let Some(e) = self.failed(root) {
                Err(e)
            } else {
                visitor.visit_again(path, root)
            }
        } else {
            let root = self.engine.read(root).map_err(|_| io_err(path))?;
            let kr = KeyRange {
                start: None,
                end: None,
            };
            self.walk_node(path, visitor, &kr, &root, true)
        }
    }
}
//--------------------------------
/// Threaded counterpart of `BTreeWalker::walk_node_`: internal children
/// are walked via `walk_nodes_threaded` on the pool; leaves are visited
/// inline on the current thread.
fn walk_node_threaded_<NV, V>(
    w: Arc<BTreeWalker>,
    path: &mut Vec<u64>,
    pool: &ThreadPool,
    visitor: Arc<NV>,
    kr: &KeyRange,
    b: &Block,
    is_root: bool,
) -> Result<()>
where
    NV: NodeVisitor<V> + Send + Sync + 'static,
    V: Unpack,
{
    use Node::*;
    let bt = checksum::metadata_block_type(b.get_data());
    if bt != checksum::BT::NODE {
        return Err(node_err_s(
            path,
            format!("checksum failed for node {}, {:?}", b.loc, bt),
        )
        .keys_context(kr));
    }
    let node = unpack_node::<V>(path, b.get_data(), w.ignore_non_fatal, is_root)?;
    match node {
        Internal { keys, values, .. } => {
            let krs = split_key_ranges(path, kr, &keys)?;
            let errs = walk_nodes_threaded(w.clone(), path, pool, visitor, &krs, &values);
            return w.build_aggregate(b.loc, errs);
        }
        Leaf {
            header,
            keys,
            values,
        } => {
            visitor.visit(path, kr, &header, &keys, &values)?;
        }
    }
    Ok(())
}
/// Wraps `walk_node_threaded_`, maintaining `path` around the call and
/// always notifying the visitor via `end_walk`, even on failure.
fn walk_node_threaded<NV, V>(
    w: Arc<BTreeWalker>,
    path: &mut Vec<u64>,
    pool: &ThreadPool,
    visitor: Arc<NV>,
    kr: &KeyRange,
    b: &Block,
    is_root: bool,
) -> Result<()>
where
    NV: NodeVisitor<V> + Send + Sync + 'static,
    V: Unpack,
{
    path.push(b.loc);
    let r = walk_node_threaded_(w, path, pool, visitor.clone(), kr, b, is_root);
    path.pop();
    visitor.end_walk()?;
    r
}
/// Threaded counterpart of `BTreeWalker::walk_nodes`: previously unseen
/// children are read in one batch and then walked on the thread pool.
/// Errors are gathered (not short-circuited) so every subtree is examined.
fn walk_nodes_threaded<NV, V>(
    w: Arc<BTreeWalker>,
    path: &mut Vec<u64>,
    pool: &ThreadPool,
    visitor: Arc<NV>,
    krs: &[KeyRange],
    bs: &[u64],
) -> Vec<BTreeError>
where
    NV: NodeVisitor<V> + Send + Sync + 'static,
    V: Unpack,
{
    assert_eq!(krs.len(), bs.len());
    let mut errs: Vec<BTreeError> = Vec::new();
    let mut blocks = Vec::with_capacity(bs.len());
    let mut filtered_krs = Vec::with_capacity(krs.len());
    for i in 0..bs.len() {
        if w.sm_inc(bs[i]) == 0 {
            // Node not yet seen
            blocks.push(bs[i]);
            filtered_krs.push(krs[i].clone());
        } else {
            // This node has already been checked ...
            match w.failed(bs[i]) {
                None => {
                    // ... it was clean.
                    if let Err(e) = visitor.visit_again(path, bs[i]) {
                        // ... but the visitor isn't happy
                        errs.push(e.clone());
                    }
                }
                Some(e) => {
                    // ... there was an error
                    errs.push(e.clone());
                }
            }
        }
    }
    match w.engine.read_many(&blocks[0..]) {
        Err(_) => {
            // IO completely failed; error every block.
            for (i, b) in blocks.iter().enumerate() {
                let e = io_err(path).keys_context(&filtered_krs[i]);
                errs.push(e.clone());
                w.set_fail(*b, e);
            }
        }
        Ok(rblocks) => {
            // Worker threads push their failures into this shared vec.
            let child_errs = Arc::new(Mutex::new(Vec::new()));
            for (i, rb) in rblocks.into_iter().enumerate() {
                match rb {
                    Err(_) => {
                        let e = io_err(path).keys_context(&filtered_krs[i]);
                        let mut errs = child_errs.lock().unwrap();
                        errs.push(e.clone());
                        w.set_fail(blocks[i], e);
                    }
                    Ok(b) => {
                        // Each task gets its own handles and its own copy
                        // of the path.
                        let w = w.clone();
                        let visitor = visitor.clone();
                        let kr = filtered_krs[i].clone();
                        let errs = child_errs.clone();
                        let mut path = path.clone();
                        pool.execute(move || {
                            match w.walk_node(&mut path, visitor.as_ref(), &kr, &b, false) {
                                Err(e) => {
                                    let mut errs = errs.lock().unwrap();
                                    errs.push(e);
                                }
                                Ok(()) => {}
                            }
                        });
                    }
                }
            }
            // Wait for every child before collecting their errors.
            pool.join();
            let mut child_errs = Arc::try_unwrap(child_errs).unwrap().into_inner().unwrap();
            errs.append(&mut child_errs);
        }
    }
    errs
}
/// Threaded entry point: walks the btree at `root` with an unbounded key
/// range, fanning out across `pool`. Mirrors `BTreeWalker::walk` for
/// already-visited roots.
pub fn walk_threaded<NV, V>(
    path: &mut Vec<u64>,
    w: Arc<BTreeWalker>,
    pool: &ThreadPool,
    visitor: Arc<NV>,
    root: u64,
) -> Result<()>
where
    NV: NodeVisitor<V> + Send + Sync + 'static,
    V: Unpack,
{
    if w.sm_inc(root) > 0 {
        // Root already seen: report the cached error or just re-notify.
        if let Some(e) = w.failed(root) {
            Err(e)
        } else {
            visitor.visit_again(path, root)
        }
    } else {
        let root = w.engine.read(root).map_err(|_| io_err(path))?;
        let kr = KeyRange {
            start: None,
            end: None,
        };
        walk_node_threaded(w, path, pool, visitor, &kr, &root, true)
    }
}
//------------------------------------------
/// Visitor that gathers every (key, value) pair seen during a walk.
struct ValueCollector<V> {
    values: Mutex<BTreeMap<u64, V>>,
}
impl<V> ValueCollector<V> {
    fn new() -> ValueCollector<V> {
        ValueCollector {
            values: Mutex::new(BTreeMap::new()),
        }
    }
}
// Values are constrained to Copy, so collecting them is a plain copy.
impl<V: Unpack + Copy> NodeVisitor<V> for ValueCollector<V> {
    fn visit(
        &self,
        _path: &[u64],
        _kr: &KeyRange,
        _h: &NodeHeader,
        keys: &[u64],
        values: &[V],
    ) -> Result<()> {
        let mut vals = self.values.lock().unwrap();
        // keys and values are parallel slices; zip avoids per-element
        // bounds checks of the old index loop.
        for (&k, &v) in keys.iter().zip(values.iter()) {
            vals.insert(k, v);
        }
        Ok(())
    }
    fn visit_again(&self, _path: &[u64], _b: u64) -> Result<()> {
        Ok(())
    }
    fn end_walk(&self) -> Result<()> {
        Ok(())
    }
}
/// Walks the btree at `root` and returns its contents as a key→value map.
pub fn btree_to_map<V: Unpack + Copy>(
    path: &mut Vec<u64>,
    engine: Arc<dyn IoEngine + Send + Sync>,
    ignore_non_fatal: bool,
    root: u64,
) -> Result<BTreeMap<u64, V>> {
    let walker = BTreeWalker::new(engine, ignore_non_fatal);
    let visitor = ValueCollector::<V>::new();
    walker.walk(path, &visitor, root)?;
    Ok(visitor.values.into_inner().unwrap())
}
/// As `btree_to_map`, but the walk also updates the supplied space map
/// with the ref counts of the metadata blocks it touches.
pub fn btree_to_map_with_sm<V: Unpack + Copy>(
    path: &mut Vec<u64>,
    engine: Arc<dyn IoEngine + Send + Sync>,
    sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
    ignore_non_fatal: bool,
    root: u64,
) -> Result<BTreeMap<u64, V>> {
    let walker = BTreeWalker::new_with_sm(engine, sm, ignore_non_fatal)?;
    let visitor = ValueCollector::<V>::new();
    walker.walk(path, &visitor, root)?;
    Ok(visitor.values.into_inner().unwrap())
}
//------------------------------------------
/// Visitor that gathers (key, (walk path, value)) for every entry seen.
struct ValuePathCollector<V> {
    values: Mutex<BTreeMap<u64, (Vec<u64>, V)>>,
}
impl<V> ValuePathCollector<V> {
    fn new() -> ValuePathCollector<V> {
        ValuePathCollector {
            values: Mutex::new(BTreeMap::new()),
        }
    }
}
impl<V: Unpack + Clone> NodeVisitor<V> for ValuePathCollector<V> {
    /// Stores each entry alongside a copy of the current walk path
    /// (one `to_vec` per entry — acceptable since paths are short).
    fn visit(
        &self,
        path: &[u64],
        _kr: &KeyRange,
        _h: &NodeHeader,
        keys: &[u64],
        values: &[V],
    ) -> Result<()> {
        let mut vals = self.values.lock().unwrap();
        for n in 0..keys.len() {
            vals.insert(keys[n], (path.to_vec(), values[n].clone()));
        }
        Ok(())
    }
    fn visit_again(&self, _path: &[u64], _b: u64) -> Result<()> {
        Ok(())
    }
    fn end_walk(&self) -> Result<()> {
        Ok(())
    }
}
/// As `btree_to_map_with_sm`, but every value is paired with the path of
/// block numbers leading to its leaf.
pub fn btree_to_map_with_path<V: Unpack + Copy>(
    path: &mut Vec<u64>,
    engine: Arc<dyn IoEngine + Send + Sync>,
    sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
    ignore_non_fatal: bool,
    root: u64,
) -> Result<BTreeMap<u64, (Vec<u64>, V)>> {
    let walker = BTreeWalker::new_with_sm(engine, sm, ignore_non_fatal)?;
    let visitor = ValuePathCollector::<V>::new();
    walker.walk(path, &visitor, root)?;
    Ok(visitor.values.into_inner().unwrap())
}
//------------------------------------------
/// Visitor that gathers every key seen during a walk into an ordered set.
struct KeyCollector {
    keys: Mutex<BTreeSet<u64>>,
}
impl KeyCollector {
    fn new() -> KeyCollector {
        let keys = Mutex::new(BTreeSet::new());
        KeyCollector { keys }
    }
}
impl<V: Unpack + Copy> NodeVisitor<V> for KeyCollector {
    fn visit(
        &self,
        _path: &[u64],
        _kr: &KeyRange,
        _h: &NodeHeader,
        keys: &[u64],
        _values: &[V],
    ) -> Result<()> {
        // Only the keys matter; values are ignored.
        self.keys.lock().unwrap().extend(keys.iter().copied());
        Ok(())
    }
    fn visit_again(&self, _path: &[u64], _b: u64) -> Result<()> {
        Ok(())
    }
    fn end_walk(&self) -> Result<()> {
        Ok(())
    }
}
/// Walks the btree at `root` and returns the set of all keys it holds.
pub fn btree_to_key_set<V: Unpack + Copy>(
    path: &mut Vec<u64>,
    engine: Arc<dyn IoEngine + Send + Sync>,
    ignore_non_fatal: bool,
    root: u64,
) -> Result<BTreeSet<u64>> {
    let walker = BTreeWalker::new(engine, ignore_non_fatal);
    let collector = KeyCollector::new();
    walker.walk::<_, V>(path, &collector, root)?;
    Ok(collector.keys.into_inner().unwrap())
}
//------------------------------------------
/// A NodeVisitor that does nothing; used when only the walk's side
/// effects (ref counting, error detection) are wanted.
struct NoopVisitor<V> {
    dummy: std::marker::PhantomData<V>,
}
impl<V> NoopVisitor<V> {
    pub fn new() -> NoopVisitor<V> {
        NoopVisitor {
            dummy: std::marker::PhantomData,
        }
    }
}
impl<V: Unpack> NodeVisitor<V> for NoopVisitor<V> {
    fn visit(
        &self,
        _path: &[u64],
        _kr: &KeyRange,
        _header: &NodeHeader,
        _keys: &[u64],
        _values: &[V],
    ) -> Result<()> {
        Ok(())
    }
    fn visit_again(&self, _path: &[u64], _b: u64) -> Result<()> {
        Ok(())
    }
    fn end_walk(&self) -> Result<()> {
        Ok(())
    }
}
/// Walks the btree at `root` purely so the walker records ref counts of
/// every metadata block into `metadata_sm`.
pub fn count_btree_blocks<V: Unpack>(
    engine: Arc<dyn IoEngine + Send + Sync>,
    path: &mut Vec<u64>,
    root: u64,
    metadata_sm: ASpaceMap,
    ignore_non_fatal: bool,
) -> Result<()> {
    let w = BTreeWalker::new_with_sm(engine, metadata_sm, ignore_non_fatal)?;
    let v = NoopVisitor::<V>::new();
    w.walk(path, &v, root)
}
//------------------------------------------

View File

@ -1,15 +0,0 @@
// Persistent-data (pdata) building blocks: btrees, space maps and their
// on-disk representations.
pub mod array;
pub mod array_builder;
pub mod array_walker;
pub mod bitset;
pub mod btree;
pub mod btree_builder;
pub mod btree_leaf_walker;
pub mod btree_merge;
pub mod btree_walker;
pub mod space_map;
pub mod space_map_checker;
pub mod space_map_common;
pub mod space_map_disk;
pub mod space_map_metadata;
pub mod unpack;

View File

@ -1,264 +0,0 @@
use anyhow::Result;
use fixedbitset::FixedBitSet;
use num_traits::Bounded;
use std::boxed::Box;
use std::convert::{TryFrom, TryInto};
use std::sync::{Arc, Mutex};
//------------------------------------------
/// Reference-count tracking for a contiguous range of blocks.
pub trait SpaceMap {
    /// Total number of blocks covered by this map.
    fn get_nr_blocks(&self) -> Result<u64>;
    /// Number of blocks with a non-zero count.
    fn get_nr_allocated(&self) -> Result<u64>;
    /// Current ref count of block `b`.
    fn get(&self, b: u64) -> Result<u32>;
    /// Returns the old ref count
    fn set(&mut self, b: u64, v: u32) -> Result<u32>;
    /// Increments the count of `len` consecutive blocks starting at `begin`.
    fn inc(&mut self, begin: u64, len: u64) -> Result<()>;
    /// Returns true if the block is now free
    fn dec(&mut self, b: u64) -> Result<bool> {
        let old = self.get(b)?;
        assert!(old > 0);
        self.set(b, old - 1)?;
        Ok(old == 1)
    }
    /// Finds a block with a zero reference count. Increments the count.
    /// Returns Ok(None) if no free block (ENOSPC)
    /// Returns Err on fatal error
    fn alloc(&mut self) -> Result<Option<u64>>;
    /// Finds a free block within the range
    fn find_free(&mut self, begin: u64, end: u64) -> Result<Option<u64>>;
    /// Returns the position where allocation starts
    fn get_alloc_begin(&self) -> Result<u64>;
}
/// Shared, thread-safe space map handle.
pub type ASpaceMap = Arc<Mutex<dyn SpaceMap + Sync + Send>>;
//------------------------------------------
/// In-core space map keeping one counter of type `T` per block.
pub struct CoreSpaceMap<T> {
    nr_allocated: u64,
    // Where the next alloc() scan starts; wraps to 0 when exhausted.
    alloc_begin: u64,
    counts: Vec<T>,
}
impl<V> CoreSpaceMap<V>
where
    V: Copy + Default + std::ops::AddAssign + From<u8>,
{
    /// Creates a map of `nr_entries` blocks, all with count zero.
    pub fn new(nr_entries: u64) -> CoreSpaceMap<V> {
        CoreSpaceMap {
            nr_allocated: 0,
            alloc_begin: 0,
            counts: vec![V::default(); nr_entries as usize],
        }
    }
}
impl<V> SpaceMap for CoreSpaceMap<V>
where
    V: Copy
        + Default
        + Eq
        + std::ops::AddAssign
        + From<u8>
        + Into<u32>
        + Bounded
        + TryFrom<u32>
        + std::cmp::PartialOrd,
    <V as TryFrom<u32>>::Error: std::fmt::Debug,
{
    fn get_nr_blocks(&self) -> Result<u64> {
        Ok(self.counts.len() as u64)
    }
    fn get_nr_allocated(&self) -> Result<u64> {
        Ok(self.nr_allocated)
    }
    fn get(&self, b: u64) -> Result<u32> {
        Ok(self.counts[b as usize].into())
    }
    /// Sets the count of `b`, keeping `nr_allocated` in step with
    /// zero/non-zero transitions; returns the old count.
    fn set(&mut self, b: u64, v: u32) -> Result<u32> {
        let old = self.counts[b as usize];
        // v must fit in the counter type V.
        assert!(v <= V::max_value().into());
        self.counts[b as usize] = v.try_into().unwrap(); // FIXME: do not panic
        if old == V::from(0u8) && v != 0 {
            self.nr_allocated += 1;
        } else if old != V::from(0u8) && v == 0 {
            self.nr_allocated -= 1;
        }
        Ok(old.into())
    }
    /// Increments `len` consecutive counters; panics on counter overflow.
    fn inc(&mut self, begin: u64, len: u64) -> Result<()> {
        for b in begin..(begin + len) {
            let c = &mut self.counts[b as usize];
            assert!(*c < V::max_value());
            if *c == V::from(0u8) {
                // FIXME: can we get a ref to save dereferencing counts twice?
                self.nr_allocated += 1;
                *c = V::from(1u8);
            } else {
                *c += V::from(1u8);
            }
        }
        Ok(())
    }
    /// Scans for a free block from `alloc_begin`, wrapping to the start
    /// if necessary; the found block gets count 1.
    fn alloc(&mut self) -> Result<Option<u64>> {
        let mut b = self.find_free(self.alloc_begin, self.counts.len() as u64)?;
        if b.is_none() {
            b = self.find_free(0, self.alloc_begin)?;
            if b.is_none() {
                return Ok(None);
            }
        }
        self.counts[b.unwrap() as usize] = V::from(1u8);
        self.nr_allocated += 1;
        self.alloc_begin = b.unwrap() + 1;
        Ok(b)
    }
    /// Linear scan for the first zero counter in [begin, end).
    fn find_free(&mut self, begin: u64, end: u64) -> Result<Option<u64>> {
        for b in begin..end {
            if self.counts[b as usize] == V::from(0u8) {
                return Ok(Some(b));
            }
        }
        Ok(None)
    }
    fn get_alloc_begin(&self) -> Result<u64> {
        Ok(self.alloc_begin as u64)
    }
}
/// Builds a thread-safe in-core space map, picking the narrowest counter
/// type (u8/u16/u32) that can represent `max_count`.
pub fn core_sm(nr_entries: u64, max_count: u32) -> Arc<Mutex<dyn SpaceMap + Send + Sync>> {
    match max_count {
        c if c <= u8::MAX as u32 => Arc::new(Mutex::new(CoreSpaceMap::<u8>::new(nr_entries))),
        c if c <= u16::MAX as u32 => Arc::new(Mutex::new(CoreSpaceMap::<u16>::new(nr_entries))),
        _ => Arc::new(Mutex::new(CoreSpaceMap::<u32>::new(nr_entries))),
    }
}
/// As `core_sm`, but without locking, for single-threaded use.
pub fn core_sm_without_mutex(nr_entries: u64, max_count: u32) -> Box<dyn SpaceMap> {
    match max_count {
        c if c <= u8::MAX as u32 => Box::new(CoreSpaceMap::<u8>::new(nr_entries)),
        c if c <= u16::MAX as u32 => Box::new(CoreSpaceMap::<u16>::new(nr_entries)),
        _ => Box::new(CoreSpaceMap::<u32>::new(nr_entries)),
    }
}
//------------------------------------------
// This in core space map can only count to one, useful when walking
// btrees when we want to avoid visiting a node more than once, but
// aren't interested in counting how many times we've visited.
pub struct RestrictedSpaceMap {
    nr_allocated: u64,
    // Where the next alloc() scan starts; wraps to 0 when exhausted.
    alloc_begin: usize,
    // One bit per block: set means "seen/allocated".
    counts: FixedBitSet,
}
impl RestrictedSpaceMap {
    /// Creates a map of `nr_entries` blocks, all unset.
    pub fn new(nr_entries: u64) -> RestrictedSpaceMap {
        RestrictedSpaceMap {
            nr_allocated: 0,
            counts: FixedBitSet::with_capacity(nr_entries as usize),
            alloc_begin: 0,
        }
    }
}
impl SpaceMap for RestrictedSpaceMap {
    fn get_nr_blocks(&self) -> Result<u64> {
        Ok(self.counts.len() as u64)
    }
    fn get_nr_allocated(&self) -> Result<u64> {
        Ok(self.nr_allocated)
    }
    /// Counts saturate at 1: a set bit reads as 1, a clear bit as 0.
    fn get(&self, b: u64) -> Result<u32> {
        if self.counts.contains(b as usize) {
            Ok(1)
        } else {
            Ok(0)
        }
    }
    /// Any non-zero `v` sets the bit; zero clears it. Returns the old
    /// (saturated) count.
    fn set(&mut self, b: u64, v: u32) -> Result<u32> {
        let old = self.counts.contains(b as usize);
        if v > 0 {
            if !old {
                self.nr_allocated += 1;
            }
            self.counts.insert(b as usize);
        } else {
            if old {
                self.nr_allocated -= 1;
            }
            self.counts.set(b as usize, false);
        }
        Ok(if old { 1 } else { 0 })
    }
    /// Sets each bit in the range; already-set bits are left alone.
    fn inc(&mut self, begin: u64, len: u64) -> Result<()> {
        for b in begin..(begin + len) {
            if !self.counts.contains(b as usize) {
                self.nr_allocated += 1;
                self.counts.insert(b as usize);
            }
        }
        Ok(())
    }
    /// Scans for an unset bit from `alloc_begin`, wrapping to the start
    /// if necessary, and sets it.
    fn alloc(&mut self) -> Result<Option<u64>> {
        let mut b = self.find_free(self.alloc_begin as u64, self.counts.len() as u64)?;
        if b.is_none() {
            b = self.find_free(0, self.alloc_begin as u64)?;
            if b.is_none() {
                return Ok(None);
            }
        }
        self.counts.insert(b.unwrap() as usize);
        self.nr_allocated += 1;
        self.alloc_begin = b.unwrap() as usize + 1;
        Ok(b)
    }
    /// Linear scan for the first clear bit in [begin, end).
    fn find_free(&mut self, begin: u64, end: u64) -> Result<Option<u64>> {
        for b in begin..end {
            if !self.counts.contains(b as usize) {
                return Ok(Some(b));
            }
        }
        Ok(None)
    }
    fn get_alloc_begin(&self) -> Result<u64> {
        Ok(self.alloc_begin as u64)
    }
}
//------------------------------------------

View File

@ -1,329 +0,0 @@
use anyhow::{anyhow, Result};
use std::io::Cursor;
use std::sync::Arc;
use crate::checksum;
use crate::io_engine::IoEngine;
use crate::pdata::btree::{self, *};
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_common::*;
use crate::pdata::space_map_metadata::*;
use crate::pdata::unpack::*;
use crate::report::Report;
//------------------------------------------
/// Location of a bitmap found to contain leaked blocks, kept so
/// auto-repair can rewrite it later (see `repair_space_map`).
pub struct BitmapLeak {
    blocknr: u64, // blocknr for the first entry in the bitmap
    loc: u64, // location of the bitmap
}
//------------------------------------------
/// Visitor that checks ref counts stored in the overflow btree against
/// the expected counts in `sm`.
struct OverflowChecker<'a> {
    kind: &'a str, // "data" or "metadata", used in error messages
    sm: &'a dyn SpaceMap,
}
impl<'a> OverflowChecker<'a> {
    fn new(kind: &'a str, sm: &'a dyn SpaceMap) -> OverflowChecker<'a> {
        OverflowChecker { kind, sm }
    }
}
impl<'a> NodeVisitor<u32> for OverflowChecker<'a> {
    /// Compares every (block, count) pair in a leaf against the count
    /// the space map expects for that block.
    fn visit(
        &self,
        _path: &[u64],
        _kr: &KeyRange,
        _h: &NodeHeader,
        keys: &[u64],
        values: &[u32],
    ) -> btree::Result<()> {
        for (&k, &v) in keys.iter().zip(values.iter()) {
            let expected = self.sm.get(k).unwrap();
            if expected != v {
                return Err(value_err(format!(
                    "Bad reference count for {} block {}. Expected {}, but space map contains {}.",
                    self.kind, k, expected, v
                )));
            }
        }
        Ok(())
    }
    fn visit_again(&self, _path: &[u64], _b: u64) -> btree::Result<()> {
        Ok(())
    }
    fn end_walk(&self) -> btree::Result<()> {
        Ok(())
    }
}
//------------------------------------------
/// Bumps the ref count of every index entry's bitmap block by one.
fn inc_entries(sm: &ASpaceMap, entries: &[IndexEntry]) -> Result<()> {
    // FIXME: checksumming bitmaps?
    let mut sm = sm.lock().unwrap();
    entries.iter().try_for_each(|ie| sm.inc(ie.blocknr, 1))
}
// Compare the reference counts in bitmaps against the expected values
//
// `sm` - The in-core space map of expected reference counts
//
// Returns the bitmaps that only differ by leaks (actual 1, expected 0);
// any other mismatch is fatal.
fn check_low_ref_counts(
    engine: Arc<dyn IoEngine + Send + Sync>,
    report: Arc<Report>,
    kind: &str,
    entries: Vec<IndexEntry>,
    sm: ASpaceMap,
) -> Result<Vec<BitmapLeak>> {
    // gathering bitmap blocknr
    let mut blocks = Vec::with_capacity(entries.len());
    for i in &entries {
        blocks.push(i.blocknr);
    }
    // read bitmap blocks
    // FIXME: we should do this in batches
    let blocks = engine.read_many(&blocks)?;
    // compare ref-counts in bitmap blocks
    let mut leaks = 0;
    let mut failed = false;
    // Running block number across the concatenated bitmaps.
    let mut blocknr = 0;
    let mut bitmap_leaks = Vec::new();
    let sm = sm.lock().unwrap();
    let nr_blocks = sm.get_nr_blocks()?;
    for b in blocks.iter().take(entries.len()) {
        match b {
            Err(_e) => {
                return Err(anyhow!("Unable to read bitmap block"));
            }
            Ok(b) => {
                if checksum::metadata_block_type(b.get_data()) != checksum::BT::BITMAP {
                    report.fatal(&format!(
                        "Index entry points to block ({}) that isn't a bitmap",
                        b.loc
                    ));
                    failed = true;
                    // FIXME: revert the ref-count at b.loc?
                }
                let bitmap = unpack::<Bitmap>(b.get_data())?;
                let first_blocknr = blocknr;
                let mut contains_leak = false;
                for e in bitmap.entries.iter() {
                    // The last bitmap may extend past the device's end.
                    if blocknr >= nr_blocks {
                        break;
                    }
                    match e {
                        BitmapEntry::Small(actual) => {
                            let expected = sm.get(blocknr)?;
                            // actual 1 / expected 0 is a leak (repairable);
                            // anything else is a fatal mismatch.
                            if *actual == 1 && expected == 0 {
                                leaks += 1;
                                contains_leak = true;
                            } else if *actual != expected as u8 {
                                report.fatal(&format!("Bad reference count for {} block {}. Expected {}, but space map contains {}.",
                                  kind, blocknr, expected, actual));
                                failed = true;
                            }
                        }
                        BitmapEntry::Overflow => {
                            // Overflow entries mean the count lives in the
                            // overflow btree and must be >= 3.
                            let expected = sm.get(blocknr)?;
                            if expected < 3 {
                                report.fatal(&format!("Bad reference count for {} block {}. Expected {}, but space map says it's >= 3.",
                                  kind, blocknr, expected));
                                failed = true;
                            }
                        }
                    }
                    blocknr += 1;
                }
                if contains_leak {
                    bitmap_leaks.push(BitmapLeak {
                        blocknr: first_blocknr,
                        loc: b.loc,
                    });
                }
            }
        }
    }
    if leaks > 0 {
        report.non_fatal(&format!("{} {} blocks have leaked.", leaks, kind));
    }
    if failed {
        Err(anyhow!("Fatal errors in {} space map", kind))
    } else {
        Ok(bitmap_leaks)
    }
}
/// Reads the data-device index entries from the bitmap-root btree,
/// counting the btree blocks and the bitmap blocks into `metadata_sm`.
fn gather_disk_index_entries(
    engine: Arc<dyn IoEngine + Send + Sync>,
    bitmap_root: u64,
    metadata_sm: ASpaceMap,
    ignore_non_fatal: bool,
) -> Result<Vec<IndexEntry>> {
    let entries_map = btree_to_map_with_sm::<IndexEntry>(
        &mut vec![0],
        engine,
        metadata_sm.clone(),
        ignore_non_fatal,
        bitmap_root,
    )?;
    let entries: Vec<IndexEntry> = entries_map.values().cloned().collect();
    inc_entries(&metadata_sm, &entries[0..])?;
    Ok(entries)
}
/// Reads the metadata-device index entries; unlike the data device these
/// live in a single MetadataIndex block rather than a btree.
fn gather_metadata_index_entries(
    engine: Arc<dyn IoEngine + Send + Sync>,
    bitmap_root: u64,
    metadata_sm: ASpaceMap,
) -> Result<Vec<IndexEntry>> {
    let b = engine.read(bitmap_root)?;
    let entries = unpack::<MetadataIndex>(b.get_data())?.indexes;
    metadata_sm.lock().unwrap().inc(bitmap_root, 1)?;
    inc_entries(&metadata_sm, &entries[0..])?;
    Ok(entries)
}
//------------------------------------------
// This checks the space map and returns any leak blocks for auto-repair to process.
//
// `disk_sm` - The in-core space map of expected data block ref-counts
// `metadata_sm` - The in-core space for storing ref-counts of verified blocks
pub fn check_disk_space_map(
    engine: Arc<dyn IoEngine + Send + Sync>,
    report: Arc<Report>,
    root: SMRoot,
    disk_sm: ASpaceMap,
    metadata_sm: ASpaceMap,
    ignore_non_fatal: bool,
) -> Result<Vec<BitmapLeak>> {
    let entries = gather_disk_index_entries(
        engine.clone(),
        root.bitmap_root,
        metadata_sm.clone(),
        ignore_non_fatal,
    )?;
    // check overflow ref-counts
    {
        let sm = disk_sm.lock().unwrap();
        let v = OverflowChecker::new("data", &*sm);
        let w = BTreeWalker::new_with_sm(engine.clone(), metadata_sm.clone(), false)?;
        w.walk(&mut vec![0], &v, root.ref_count_root)?;
    }
    // check low ref-counts in bitmaps
    check_low_ref_counts(engine, report, "data", entries, disk_sm)
}
// This checks the space map and returns any leak blocks for auto-repair to process.
//
// `metadata_sm`: The in-core space map of expected metadata block ref-counts
pub fn check_metadata_space_map(
    engine: Arc<dyn IoEngine + Send + Sync>,
    report: Arc<Report>,
    root: SMRoot,
    metadata_sm: ASpaceMap,
    ignore_non_fatal: bool,
) -> Result<Vec<BitmapLeak>> {
    // Count the ref-count btree's own blocks into metadata_sm first.
    // NOTE(review): `ignore_non_fatal` is hard-coded to false here while
    // the walk below honours the caller's flag — confirm this asymmetry
    // is intentional.
    count_btree_blocks::<u32>(
        engine.clone(),
        &mut vec![0],
        root.ref_count_root,
        metadata_sm.clone(),
        false,
    )?;
    let entries =
        gather_metadata_index_entries(engine.clone(), root.bitmap_root, metadata_sm.clone())?;
    // check overflow ref-counts
    {
        let sm = metadata_sm.lock().unwrap();
        let v = OverflowChecker::new("metadata", &*sm);
        let w = BTreeWalker::new(engine.clone(), ignore_non_fatal);
        w.walk(&mut vec![0], &v, root.ref_count_root)?;
    }
    // check low ref-counts in bitmaps
    check_low_ref_counts(engine, report, "metadata", entries, metadata_sm)
}
// This assumes the only errors in the space map are leaks. Entries should just be
// those that contain leaks.
pub fn repair_space_map(
    engine: Arc<dyn IoEngine + Send + Sync>,
    entries: Vec<BitmapLeak>,
    sm: ASpaceMap,
) -> Result<()> {
    let sm = sm.lock().unwrap();
    let mut blocks = Vec::with_capacity(entries.len());
    for i in &entries {
        blocks.push(i.loc);
    }
    // FIXME: we should do this in batches
    let rblocks = engine.read_many(&blocks[0..])?;
    let mut write_blocks = Vec::new();
    for (i, rb) in rblocks.into_iter().enumerate() {
        if let Ok(b) = rb {
            let be = &entries[i];
            let mut blocknr = be.blocknr;
            let mut bitmap = unpack::<Bitmap>(b.get_data())?;
            for e in bitmap.entries.iter_mut() {
                if blocknr >= sm.get_nr_blocks()? {
                    break;
                }
                // Clear leaked entries: on-disk count 1 where the
                // expected count is 0.
                if let BitmapEntry::Small(actual) = e {
                    let expected = sm.get(blocknr)?;
                    if *actual == 1 && expected == 0 {
                        *e = BitmapEntry::Small(0);
                    }
                }
                blocknr += 1;
            }
            // Re-pack the fixed bitmap into the block and re-checksum it.
            let mut out = Cursor::new(b.get_data());
            bitmap.pack(&mut out)?;
            checksum::write_checksum(b.get_data(), checksum::BT::BITMAP)?;
            write_blocks.push(b);
        } else {
            return Err(anyhow!("Unable to reread bitmap blocks for repair"));
        }
    }
    let results = engine.write_many(&write_blocks[0..])?;
    for ret in results {
        if ret.is_err() {
            return Err(anyhow!("Unable to repair space map: {:?}", ret));
        }
    }
    Ok(())
}
//------------------------------------------

View File

@ -1,358 +0,0 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use std::io::Cursor;
use crate::checksum;
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::btree_builder::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;
//------------------------------------------
// On-disk bitmap geometry: a block holds a 16-byte header followed by
// 64-bit words, each word packing 32 two-bit entries (4 per byte).
pub const ENTRIES_PER_BITMAP: usize = WORDS_PER_BITMAP * 8 * ENTRIES_PER_BYTE;
const WORDS_PER_BITMAP: usize = (BLOCK_SIZE - 16) / 8;
const ENTRIES_PER_BYTE: usize = 4;
//------------------------------------------
#[derive(Clone, Copy, Debug)]
pub struct IndexEntry {
pub blocknr: u64,
pub nr_free: u32,
pub none_free_before: u32,
}
impl Unpack for IndexEntry {
    fn disk_size() -> u32 {
        16
    }

    // On-disk layout (little-endian): u64 blocknr, u32 nr_free,
    // u32 none_free_before.
    fn unpack(data: &[u8]) -> IResult<&[u8], IndexEntry> {
        let (rest, blocknr) = le_u64(data)?;
        let (rest, nr_free) = le_u32(rest)?;
        let (rest, none_free_before) = le_u32(rest)?;

        let entry = IndexEntry {
            blocknr,
            nr_free,
            none_free_before,
        };
        Ok((rest, entry))
    }
}
impl Pack for IndexEntry {
    // Mirrors Unpack: blocknr, nr_free, none_free_before, all little-endian.
    fn pack<W: WriteBytesExt>(&self, out: &mut W) -> Result<()> {
        out.write_u64::<LittleEndian>(self.blocknr)?;
        out.write_u32::<LittleEndian>(self.nr_free)?;
        out.write_u32::<LittleEndian>(self.none_free_before)?;
        Ok(())
    }
}
//------------------------------------------
/// One two-bit reference-count slot: counts 0..=2 are held inline, anything
/// larger is recorded as Overflow and stored in a separate overflow btree.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BitmapEntry {
    Small(u8),
    Overflow,
}
/// An unpacked bitmap block: its own disk location plus one entry per block
/// it covers.
#[derive(Debug)]
pub struct Bitmap {
    pub blocknr: u64,
    pub entries: Vec<BitmapEntry>,
}
impl Unpack for Bitmap {
    fn disk_size() -> u32 {
        BLOCK_SIZE as u32
    }

    /// Parses a whole bitmap block: a 16-byte header (checksum, padding,
    /// blocknr) followed by 64-bit words, each holding 32 two-bit entries.
    fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
        // NOTE: the checksum is read but not verified here.
        let (i, _csum) = le_u32(data)?;
        let (i, _not_used) = le_u32(i)?;
        let (mut i, blocknr) = le_u64(i)?;
        let header_size = 16;
        let nr_words = (BLOCK_SIZE - header_size) / 8;
        // 32 two-bit entries per 64-bit word.
        let mut entries = Vec::with_capacity(nr_words * 32);
        for _w in 0..nr_words {
            let (tmp, mut word) = le_u64(i)?;
            for _b in 0..32 {
                // Consume the word two bits at a time, lowest bits first.
                let val = word & 0x3;
                word >>= 2;
                // The bits are stored with the high bit at b * 2 + 1,
                // and low at b *2. So we have to interpret this val:
                // raw 1 and raw 2 swap to give counts 2 and 1 respectively.
                entries.push(match val {
                    0 => BitmapEntry::Small(0),
                    1 => BitmapEntry::Small(2),
                    2 => BitmapEntry::Small(1),
                    _ => BitmapEntry::Overflow,
                });
            }
            i = tmp;
        }
        Ok((i, Bitmap { blocknr, entries }))
    }
}
impl Pack for Bitmap {
    /// Serialises the bitmap back to its on-disk form.  The checksum field
    /// is written as zero; callers fill it in afterwards (e.g. via
    /// checksum::write_checksum or when queueing the block for writing).
    fn pack<W: WriteBytesExt>(&self, out: &mut W) -> Result<()> {
        use BitmapEntry::*;
        out.write_u32::<LittleEndian>(0)?; // csum
        out.write_u32::<LittleEndian>(0)?; // padding
        out.write_u64::<LittleEndian>(self.blocknr)?;
        for chunk in self.entries.chunks(32) {
            let mut w = 0u64;
            for e in chunk {
                // Shift the accumulated values down, then insert the new
                // two-bit value at the top (bits 62-63); after the loop the
                // first entry of the chunk ends up in the lowest bits,
                // mirroring the order unpack() reads them in.
                w >>= 2;
                match e {
                    Small(0) => {}
                    Small(1) => {
                        w |= 0x2 << 62;
                    }
                    Small(2) => {
                        w |= 0x1 << 62;
                    }
                    Small(_) => {
                        // Counts > 2 must be represented as Overflow.
                        return Err(anyhow!("Bad small value in bitmap entry"));
                    }
                    Overflow => {
                        w |= 0x3 << 62;
                    }
                }
            }
            // Align the values to bit 0.  For a full 32-entry chunk this
            // shift is zero; a short final chunk has only filled the top
            // bits, so shift away the unused low positions.
            w >>= 64 - chunk.len() * 2;
            u64::pack(&w, out)?;
        }
        Ok(())
    }
}
//------------------------------------------
/// The root/summary of a space map.
#[derive(Debug)]
pub struct SMRoot {
    /// Total number of blocks managed by this space map.
    pub nr_blocks: u64,
    /// Number of blocks currently allocated.
    pub nr_allocated: u64,
    /// Root of the index of bitmap blocks (a btree of IndexEntry for the
    /// disk sm, or the index block location for the metadata sm).
    pub bitmap_root: u64,
    /// Root of the btree holding reference counts that overflow two bits.
    pub ref_count_root: u64,
}
impl Unpack for SMRoot {
    fn disk_size() -> u32 {
        32
    }

    // Four little-endian u64 fields in on-disk order.
    fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
        let (rest, nr_blocks) = le_u64(data)?;
        let (rest, nr_allocated) = le_u64(rest)?;
        let (rest, bitmap_root) = le_u64(rest)?;
        let (rest, ref_count_root) = le_u64(rest)?;

        let root = SMRoot {
            nr_blocks,
            nr_allocated,
            bitmap_root,
            ref_count_root,
        };
        Ok((rest, root))
    }
}
/// Parses an SMRoot from raw bytes, flattening any parse failure into an
/// anyhow error.
pub fn unpack_root(data: &[u8]) -> Result<SMRoot> {
    let (_rest, root) = SMRoot::unpack(data).map_err(|_| anyhow!("couldn't parse SMRoot"))?;
    Ok(root)
}
impl Pack for SMRoot {
    // Mirrors Unpack: four little-endian u64 fields.
    fn pack<W: WriteBytesExt>(&self, out: &mut W) -> Result<()> {
        out.write_u64::<LittleEndian>(self.nr_blocks)?;
        out.write_u64::<LittleEndian>(self.nr_allocated)?;
        out.write_u64::<LittleEndian>(self.bitmap_root)?;
        out.write_u64::<LittleEndian>(self.ref_count_root)?;
        Ok(())
    }
}
/// Packs `root` into a zero-padded buffer of exactly `size` bytes.
pub fn pack_root(root: &SMRoot, size: usize) -> Result<Vec<u8>> {
    let mut buf = vec![0u8; size];
    root.pack(&mut Cursor::new(&mut buf))?;
    Ok(buf)
}
//------------------------------------------
/// Writes the bitmaps for `sm` through the batcher, pushing any reference
/// count that doesn't fit in two bits into an overflow btree.  Returns the
/// index entries describing the bitmaps plus the overflow tree's root.
pub fn write_common(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<(Vec<IndexEntry>, u64)> {
    use BitmapEntry::*;

    let mut index_entries = Vec::new();
    let mut overflow_builder: BTreeBuilder<u32> = BTreeBuilder::new(Box::new(NoopRC {}));

    // One bitmap covers ENTRIES_PER_BITMAP blocks; the last may be partial.
    let nr_blocks = sm.get_nr_blocks()?;
    let nr_bitmaps = div_up(nr_blocks, ENTRIES_PER_BITMAP as u64) as usize;

    for bitmap_index in 0..nr_bitmaps {
        let first_block = bitmap_index as u64 * ENTRIES_PER_BITMAP as u64;
        let nr_entries = std::cmp::min(nr_blocks - first_block, ENTRIES_PER_BITMAP as u64);

        let mut entries = Vec::with_capacity(ENTRIES_PER_BITMAP);
        let mut first_free: Option<u32> = None;
        let mut nr_free: u32 = 0;

        for offset in 0..nr_entries {
            let block = first_block + offset;
            let count = sm.get(block)?;
            entries.push(match count {
                0 => {
                    nr_free += 1;
                    // Remember the first free slot only.
                    first_free.get_or_insert(offset as u32);
                    Small(0)
                }
                1 => Small(1),
                2 => Small(2),
                // Counts above 2 overflow into the separate btree.
                _ => {
                    overflow_builder.push_value(w, block, count)?;
                    Overflow
                }
            });
        }

        let blocknr = write_bitmap(w, entries)?;

        // Record where this bitmap lives and how full it is.
        index_entries.push(IndexEntry {
            blocknr,
            nr_free,
            none_free_before: first_free.unwrap_or(nr_entries as u32),
        });
    }

    let ref_count_root = overflow_builder.complete(w)?;
    Ok((index_entries, ref_count_root))
}
/// Like write_common(), but for the metadata space map held inside the
/// WriteBatcher itself.  Only blocks inside the batcher's reserved range
/// have meaningful counts at this point; blocks allocated while writing
/// the space map are patched up afterwards (see write_metadata_sm).
pub fn write_metadata_common(w: &mut WriteBatcher) -> Result<(Vec<IndexEntry>, u64)> {
    use BitmapEntry::*;
    let mut index_entries = Vec::new();
    let mut overflow_builder: BTreeBuilder<u32> = BTreeBuilder::new(Box::new(NoopRC {}));
    // how many bitmaps do we need?
    let nr_blocks = w.sm.lock().unwrap().get_nr_blocks()?;
    let nr_bitmaps = div_up(nr_blocks, ENTRIES_PER_BITMAP as u64) as usize;
    // how many blocks are allocated or reserved so far?
    let reserved = w.get_reserved_range();
    if reserved.end < reserved.start {
        return Err(anyhow!("unsupported allocation pattern"));
    }
    // Bitmaps that intersect the reserved range need their real counts;
    // everything past them is written as all-zeroes below.
    let nr_used_bitmaps = div_up(reserved.end, ENTRIES_PER_BITMAP as u64) as usize;
    for bm in 0..nr_used_bitmaps {
        let begin = bm as u64 * ENTRIES_PER_BITMAP as u64;
        let len = std::cmp::min(nr_blocks - begin, ENTRIES_PER_BITMAP as u64);
        let mut entries = Vec::with_capacity(ENTRIES_PER_BITMAP);
        let mut first_free: Option<u32> = None;
        // blocks beyond the limit won't be checked right now, thus are marked as freed
        let limit = std::cmp::min(reserved.end - begin, ENTRIES_PER_BITMAP as u64);
        // Start nr_free with the unchecked tail already counted as free.
        let mut nr_free: u32 = (len - limit) as u32;
        for i in 0..limit {
            let b = begin + i;
            // NOTE: the space map mutex is taken once per block here.
            let rc = w.sm.lock().unwrap().get(b)?;
            let e = match rc {
                0 => {
                    nr_free += 1;
                    if first_free.is_none() {
                        first_free = Some(i as u32);
                    }
                    Small(0)
                }
                1 => Small(1),
                2 => Small(2),
                // Counts above 2 overflow into the separate btree.
                _ => {
                    overflow_builder.push_value(w, b as u64, rc)?;
                    Overflow
                }
            };
            entries.push(e);
        }
        // Fill unused entries with zeros
        if limit < len {
            entries.resize_with(len as usize, || BitmapEntry::Small(0));
        }
        let blocknr = write_bitmap(w, entries)?;
        // Insert into the index list.  If no free slot was seen in the
        // checked prefix, the first free entry is at `limit` (the start of
        // the zero-filled tail), or `limit == len` if there is no tail.
        let ie = IndexEntry {
            blocknr,
            nr_free,
            none_free_before: first_free.unwrap_or(limit as u32),
        };
        index_entries.push(ie);
    }
    // Fill the rest of the bitmaps with zeros
    for bm in nr_used_bitmaps..nr_bitmaps {
        let begin = bm as u64 * ENTRIES_PER_BITMAP as u64;
        let len = std::cmp::min(nr_blocks - begin, ENTRIES_PER_BITMAP as u64);
        let entries = vec![BitmapEntry::Small(0); ENTRIES_PER_BITMAP];
        let blocknr = write_bitmap(w, entries)?;
        // Insert into the index list
        let ie = IndexEntry {
            blocknr,
            nr_free: len as u32,
            none_free_before: 0,
        };
        index_entries.push(ie);
    }
    let ref_count_root = overflow_builder.complete(w)?;
    Ok((index_entries, ref_count_root))
}
/// Allocates a fresh block through the batcher, packs `entries` into it as
/// a Bitmap, and queues it for writing.  Returns the block's location.
fn write_bitmap(w: &mut WriteBatcher, entries: Vec<BitmapEntry>) -> Result<u64> {
    // allocate a new block
    let b = w.alloc_zeroed()?;
    let blocknr = b.loc;

    // write the bitmap to it
    let bitmap = Bitmap { blocknr, entries };
    bitmap.pack(&mut Cursor::new(b.get_data()))?;

    // Queued with the BITMAP checksum tag for the batcher to handle.
    w.write(b, checksum::BT::BITMAP)?;
    Ok(blocknr)
}
//------------------------------------------

View File

@ -1,29 +0,0 @@
use anyhow::Result;
use crate::pdata::btree_builder::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_common::*;
use crate::write_batcher::*;
//------------------------------------------
/// Writes a disk space map: bitmaps plus overflow tree via write_common(),
/// then a btree of IndexEntry keyed by bitmap number.  The batcher is
/// flushed before the allocation stats are read for the returned root.
pub fn write_disk_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRoot> {
    let (index_entries, ref_count_root) = write_common(w, sm)?;

    let mut index_builder: BTreeBuilder<IndexEntry> = BTreeBuilder::new(Box::new(NoopRC {}));
    for (key, entry) in index_entries.iter().enumerate() {
        index_builder.push_value(w, key as u64, *entry)?;
    }
    let bitmap_root = index_builder.complete(w)?;

    w.flush()?;

    Ok(SMRoot {
        nr_blocks: sm.get_nr_blocks()?,
        nr_allocated: sm.get_nr_allocated()?,
        bitmap_root,
        ref_count_root,
    })
}
//------------------------------------------

View File

@ -1,166 +0,0 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use std::io::Cursor;
use std::sync::{Arc, Mutex};
use crate::checksum;
use crate::io_engine::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_common::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;
//------------------------------------------
// The metadata index block has room for a fixed number of IndexEntry slots,
// which caps the number of blocks a metadata space map can cover.
const MAX_METADATA_BITMAPS: usize = 255;
const MAX_METADATA_BLOCKS: usize = MAX_METADATA_BITMAPS * ENTRIES_PER_BITMAP;
//------------------------------------------
/// The unpacked metadata index block: the list of bitmaps making up the
/// metadata space map, plus the index block's own location.
pub struct MetadataIndex {
    pub blocknr: u64,
    pub indexes: Vec<IndexEntry>,
}
impl Unpack for MetadataIndex {
    fn disk_size() -> u32 {
        BLOCK_SIZE as u32
    }

    /// Parses the metadata index block: a header (csum, padding, blocknr)
    /// followed by a fixed array of MAX_METADATA_BITMAPS index entries.
    /// Trailing unused slots (blocknr == 0) are dropped.
    fn unpack(i: &[u8]) -> IResult<&[u8], MetadataIndex> {
        // FIXME: check the checksum
        let (i, _csum) = le_u32(i)?;
        let (i, _padding) = le_u32(i)?;
        let (i, blocknr) = le_u64(i)?;
        let (i, indexes) = nom::multi::count(IndexEntry::unpack, MAX_METADATA_BITMAPS)(i)?;

        // Filter out unused entries.  We own `indexes`, so consume it
        // directly instead of iterating by reference and cloning each entry.
        let indexes: Vec<IndexEntry> = indexes
            .into_iter()
            .take_while(|e| e.blocknr != 0)
            .collect();

        Ok((i, MetadataIndex { blocknr, indexes }))
    }
}
impl Pack for MetadataIndex {
    // Serialises the index block.  The checksum is written as zero here;
    // the tag passed when queueing the block selects the real checksum.
    fn pack<W: WriteBytesExt>(&self, out: &mut W) -> Result<()> {
        out.write_u32::<LittleEndian>(0)?; // csum
        out.write_u32::<LittleEndian>(0)?; // padding
        out.write_u64::<LittleEndian>(self.blocknr)?;

        assert!(self.indexes.len() <= MAX_METADATA_BITMAPS);
        for entry in &self.indexes {
            entry.pack(out)?;
        }
        Ok(())
    }
}
//------------------------------------------
// Maps a block number to the index of the bitmap that covers it.
fn block_to_bitmap(b: u64) -> usize {
    let bitmap = b / ENTRIES_PER_BITMAP as u64;
    bitmap as usize
}
/// Re-reads the bitmap referenced by `ie` and marks entries [begin, end)
/// as having reference count 1, returning an IndexEntry with updated free
/// statistics.  `begin`/`end` are offsets within this bitmap.  The caller
/// (write_metadata_sm) guarantees these entries all went from 0 to 1.
fn adjust_counts(
    w: &mut WriteBatcher,
    ie: &IndexEntry,
    begin: u64,
    end: u64,
) -> Result<IndexEntry> {
    use BitmapEntry::*;
    let mut first_free = ie.none_free_before;
    // Every adjusted entry was previously free, so subtract them all.
    let nr_free = ie.nr_free - (end - begin) as u32;
    // Read the bitmap
    let bitmap_block = w.read(ie.blocknr)?;
    let (_, mut bitmap) = Bitmap::unpack(bitmap_block.get_data())?;
    // Update all the entries
    for a in begin..end {
        // Push the first-free hint past any entry we are now allocating.
        if first_free == a as u32 {
            first_free = a as u32 + 1;
        }
        bitmap.entries[a as usize] = Small(1);
    }
    // Write the bitmap
    let mut cur = Cursor::new(bitmap_block.get_data());
    bitmap.pack(&mut cur)?;
    w.write(bitmap_block, checksum::BT::BITMAP)?;
    // Return the adjusted index entry
    Ok(IndexEntry {
        blocknr: ie.blocknr,
        nr_free,
        none_free_before: first_free,
    })
}
//------------------------------------------
/// Creates an in-core metadata space map, clamping the block count to the
/// maximum the metadata index block can address.
pub fn core_metadata_sm(nr_blocks: u64, max_count: u32) -> Arc<Mutex<dyn SpaceMap + Send + Sync>> {
    let capped = std::cmp::min(nr_blocks, MAX_METADATA_BLOCKS as u64);
    core_sm(capped, max_count)
}
/// Writes the metadata space map and returns its root.  The blocks the
/// batcher consumes while writing the space map itself are accounted for
/// afterwards by patching the affected bitmaps via adjust_counts().
pub fn write_metadata_sm(w: &mut WriteBatcher) -> Result<SMRoot> {
    // Snapshot the reserved range before writing the space map structures.
    let r1 = w.get_reserved_range();
    let (mut indexes, ref_count_root) = write_metadata_common(w)?;
    let bitmap_root = w.alloc_zeroed()?;
    // Now we need to patch up the counts for the metadata that was used for storing
    // the space map itself. These ref counts all went from 0 to 1.
    let r2 = w.get_reserved_range();
    if r2.end < r1.end {
        return Err(anyhow!("unsupported allocation pattern"));
    }
    // Bitmaps touched by the newly allocated range [r1.end, r2.end).
    let bi_begin = block_to_bitmap(r1.end);
    let bi_end = block_to_bitmap(r2.end) + 1;
    for (bm, ie) in indexes.iter_mut().enumerate().take(bi_end).skip(bi_begin) {
        // Only the first bitmap starts mid-way through its range, and only
        // the last one ends before ENTRIES_PER_BITMAP.
        let begin = if bm == bi_begin {
            r1.end % ENTRIES_PER_BITMAP as u64
        } else {
            0
        };
        let end = if bm == bi_end - 1 {
            r2.end % ENTRIES_PER_BITMAP as u64
        } else {
            ENTRIES_PER_BITMAP as u64
        };
        *ie = adjust_counts(w, ie, begin, end)?
    }
    // Write out the metadata index
    let metadata_index = MetadataIndex {
        blocknr: bitmap_root.loc,
        indexes,
    };
    let mut cur = Cursor::new(bitmap_root.get_data());
    metadata_index.pack(&mut cur)?;
    let loc = bitmap_root.loc;
    w.write(bitmap_root, checksum::BT::INDEX)?;
    w.flush()?;
    // Read the final allocation stats only after everything is flushed.
    let sm = w.sm.lock().unwrap();
    Ok(SMRoot {
        nr_blocks: sm.get_nr_blocks()?,
        nr_allocated: sm.get_nr_allocated()?,
        bitmap_root: loc,
        ref_count_root,
    })
}
//------------------------------------------

View File

@ -1,64 +0,0 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
//------------------------------------------
/// Types that can be parsed from their little-endian on-disk encoding.
pub trait Unpack {
    // The size of the value when on disk.
    fn disk_size() -> u32;

    /// Parses one value, returning the unconsumed remainder of the input.
    fn unpack(data: &[u8]) -> IResult<&[u8], Self>
    where
        Self: std::marker::Sized;
}
/// Parses a single `U` from `data`, discarding any trailing bytes and
/// flattening nom's parse error into an anyhow error.
pub fn unpack<U: Unpack>(data: &[u8]) -> Result<U> {
    match U::unpack(data) {
        // Report the actual target type; the previous message claimed
        // "SMRoot" regardless of which type failed to parse.
        Err(_e) => Err(anyhow!("couldn't parse {}", std::any::type_name::<U>())),
        Ok((_i, v)) => Ok(v),
    }
}
//------------------------------------------
/// Types that can be serialised to their on-disk encoding.
pub trait Pack {
    fn pack<W: WriteBytesExt>(&self, data: &mut W) -> Result<()>;
}
//------------------------------------------
// u64s are stored little-endian on disk.
impl Unpack for u64 {
    fn disk_size() -> u32 {
        8
    }

    fn unpack(data: &[u8]) -> IResult<&[u8], u64> {
        le_u64(data)
    }
}
impl Pack for u64 {
    fn pack<W: WriteBytesExt>(&self, dest: &mut W) -> Result<()> {
        Ok(dest.write_u64::<LittleEndian>(*self)?)
    }
}
// u32s are stored little-endian on disk.
impl Unpack for u32 {
    fn disk_size() -> u32 {
        4
    }

    fn unpack(data: &[u8]) -> IResult<&[u8], u32> {
        le_u32(data)
    }
}
impl Pack for u32 {
    fn pack<W: WriteBytesExt>(&self, dest: &mut W) -> Result<()> {
        Ok(dest.write_u32::<LittleEndian>(*self)?)
    }
}
//------------------------------------------

Some files were not shown because too many files have changed in this diff Show More