Merge branch '2020-06-13-thin-check-rewrite'

Joe Thornber 2020-08-12 11:18:28 +01:00
commit bf202d076b
40 changed files with 4282 additions and 1536 deletions

Cargo.lock (generated, 691 lines changed)
Diff suppressed because it is too large.

Cargo.toml

@@ -7,21 +7,29 @@ license = "GPL3"
[dependencies]
anyhow = "1.0"
base64 = "0.12"
byteorder = "1.3"
clap = "2.33"
crc32c = "0.4"
flate2 = "1.0"
duct = "0.13"
fixedbitset = "0.3"
futures = "0.3"
flate2 = "1.0"
io-uring = "0.3"
indicatif = "0.15"
libc = "0.2.71"
quick-xml = "0.18"
nix = "0.17"
nom = "5.1"
num_cpus = "1.13"
num-derive = "0.3"
num-traits = "0.2"
quick-xml = "0.18"
rand = "0.7"
tempfile = "3.1"
num-traits = "0.2"
num-derive = "0.3"
threadpool = "1.8"
thiserror = "1.0"
[dev-dependencies]
json = "0.12"
quickcheck = "0.9"
quickcheck_macros = "0.9"


@@ -49,105 +49,6 @@
;; to run.
(define (register-cache-tests) #t)
;;;-----------------------------------------------------------
;;; cache_check scenarios
;;;-----------------------------------------------------------
(define-scenario (cache-check v)
"cache_check -V"
(run-ok-rcv (stdout _) (cache-check "-V")
(assert-equal tools-version stdout)))
(define-scenario (cache-check version)
"cache_check --version"
(run-ok-rcv (stdout _) (cache-check "--version")
(assert-equal tools-version stdout)))
(define-scenario (cache-check h)
"cache_check -h"
(run-ok-rcv (stdout _) (cache-check "-h")
(assert-equal cache-check-help stdout)))
(define-scenario (cache-check help)
"cache_check --help"
(run-ok-rcv (stdout _) (cache-check "--help")
(assert-equal cache-check-help stdout)))
(define-scenario (cache-check must-specify-metadata)
"Metadata file must be specified"
(run-fail-rcv (_ stderr) (cache-check)
(assert-equal
(string-append "No input file provided.\n"
cache-check-help)
stderr)))
(define-scenario (cache-check no-such-metadata)
"Metadata file doesn't exist."
(let ((bad-path "/arbitrary/filename"))
(run-fail-rcv (_ stderr) (cache-check bad-path)
(assert-starts-with
(string-append bad-path ": No such file or directory")
stderr))))
(define-scenario (cache-check metadata-file-cannot-be-a-directory)
"Metadata file must not be a directory"
(let ((bad-path "/tmp"))
(run-fail-rcv (_ stderr) (cache-check bad-path)
(assert-starts-with
(string-append bad-path ": Not a block device or regular file")
stderr))))
(define-scenario (cache-check unreadable-metadata)
"Metadata file exists, but is unreadable."
(with-valid-metadata (md)
(run-ok "chmod" "-r" md)
(run-fail-rcv (_ stderr) (cache-check md)
(assert-starts-with "syscall 'open' failed: Permission denied" stderr))))
(define-scenario (cache-check fails-with-corrupt-metadata)
"Fail with corrupt superblock"
(with-corrupt-metadata (md)
(run-fail (cache-check md))))
(define-scenario (cache-check failing-q)
"Fail quietly with -q"
(with-corrupt-metadata (md)
(run-fail-rcv (stdout stderr) (cache-check "-q" md)
(assert-eof stdout)
(assert-eof stderr))))
(define-scenario (cache-check failing-quiet)
"Fail quietly with --quiet"
(with-corrupt-metadata (md)
(run-fail-rcv (stdout stderr) (cache-check "--quiet" md)
(assert-eof stdout)
(assert-eof stderr))))
(define-scenario (cache-check valid-metadata-passes)
"A valid metadata area passes"
(with-valid-metadata (md)
(run-ok (cache-check md))))
(define-scenario (cache-check bad-metadata-version)
"Invalid metadata version fails"
(with-cache-xml (xml)
(with-empty-metadata (md)
(cache-restore "-i" xml "-o" md "--debug-override-metadata-version" "12345")
(run-fail (cache-check md)))))
(define-scenario (cache-check tiny-metadata)
"Prints helpful message in case tiny metadata given"
(with-temp-file-sized ((md "cache.bin" 1024))
(run-fail-rcv (_ stderr) (cache-check md)
(assert-starts-with "Metadata device/file too small. Is this binary metadata?" stderr))))
(define-scenario (cache-check spot-accidental-xml-data)
"Prints helpful message if XML metadata given"
(with-cache-xml (xml)
(system (fmt #f "man bash >> " xml))
(run-fail-rcv (_ stderr) (cache-check xml)
(assert-matches ".*This looks like XML. cache_check only checks the binary metadata format." stderr))))
;;;-----------------------------------------------------------
;;; cache_restore scenarios
;;;-----------------------------------------------------------


@@ -1,10 +1,8 @@
(import (rnrs)
(test-runner)
(cache-functional-tests)
(era-functional-tests)
(thin-functional-tests))
(era-functional-tests))
(register-thin-tests)
(register-cache-tests)
(register-era-tests)


@@ -14,8 +14,7 @@
(only (srfi s1 lists) break)
(regex)
(srfi s8 receive)
(temp-file)
(thin-functional-tests))
(temp-file))
;;------------------------------------------------


@@ -71,30 +71,6 @@
;;; thin_check scenarios
;;;-----------------------------------------------------------
(define-scenario (thin-check v)
"thin_check -V"
(run-ok-rcv (stdout _) (thin-check "-V")
(assert-equal tools-version stdout)))
(define-scenario (thin-check version)
"thin_check --version"
(run-ok-rcv (stdout _) (thin-check "--version")
(assert-equal tools-version stdout)))
(define-scenario (thin-check h)
"print help (-h)"
(run-ok-rcv (stdout _) (thin-check "-h")
(assert-equal thin-check-help stdout)))
(define-scenario (thin-check help)
"print help (--help)"
(run-ok-rcv (stdout _) (thin-check "--help")
(assert-equal thin-check-help stdout)))
(define-scenario (thin-check bad-option)
"Unrecognised option should cause failure"
(run-fail (thin-check "--hedgehogs-only")))
(define-scenario (thin-check incompatible-options auto-repair)
"Incompatible options should cause failure"
(with-valid-metadata (md)
@@ -113,523 +89,9 @@
(run-fail (thin-check "--clear-needs-check-flag" "--skip-mappings" md))
(run-fail (thin-check "--clear-needs-check-flag" "--ignore-non-fatal-errors" md))))
(define-scenario (thin-check superblock-only-valid)
"--super-block-only check passes on valid metadata"
(with-valid-metadata (md)
(run-ok (thin-check "--super-block-only" md))))
(define-scenario (thin-check superblock-only-invalid)
"--super-block-only check fails with corrupt metadata"
(with-corrupt-metadata (md)
(run-fail (thin-check "--super-block-only" md))))
(define-scenario (thin-check skip-mappings-valid)
"--skip-mappings check passes on valid metadata"
(with-valid-metadata (md)
(run-ok (thin-check "--skip-mappings" md))))
(define-scenario (thin-check ignore-non-fatal-errors)
"--ignore-non-fatal-errors check passes on valid metadata"
(with-valid-metadata (md)
(run-ok (thin-check "--ignore-non-fatal-errors" md))))
(define-scenario (thin-check quiet)
"--quiet should give no output"
(with-valid-metadata (md)
(run-ok-rcv (stdout stderr) (thin-check "--quiet" md)
(assert-eof stdout)
(assert-eof stderr))))
(define-scenario (thin-check clear-needs-check-flag)
"Accepts --clear-needs-check-flag"
(with-valid-metadata (md)
(run-ok (thin-check "--clear-needs-check-flag" md))))
(define-scenario (thin-check auto-repair)
"Accepts --auto-repair"
(with-valid-metadata (md)
(run-ok (thin-check "--auto-repair" md))))
(define-scenario (thin-check tiny-metadata)
"Prints helpful message in case tiny metadata given"
(with-temp-file-sized ((md "thin.bin" 1024))
(run-fail-rcv (_ stderr) (thin-check md)
(assert-starts-with "Metadata device/file too small. Is this binary metadata?" stderr))))
(define-scenario (thin-check spot-accidental-xml-data)
"Prints helpful message if XML metadata given"
(with-thin-xml (xml)
(system (fmt #f "man bash >> " xml))
(run-fail-rcv (_ stderr) (thin-check xml)
(assert-matches ".*This looks like XML. thin_check only checks the binary metadata format." stderr))))
(define-scenario (thin-check info-fields)
"Outputs info fields"
(with-valid-metadata (md)
(run-ok-rcv (stdout stderr) (thin-check md)
(assert-matches ".*TRANSACTION_ID=[0-9]+.*" stdout)
(assert-matches ".*METADATA_FREE_BLOCKS=[0-9]+.*" stdout))))
;;;-----------------------------------------------------------
;;; thin_restore scenarios
;;;-----------------------------------------------------------
(define-scenario (thin-restore print-version-v)
"print help (-V)"
(run-ok-rcv (stdout _) (thin-restore "-V")
(assert-equal tools-version stdout)))
(define-scenario (thin-restore print-version-long)
"print help (--version)"
(run-ok-rcv (stdout _) (thin-restore "--version")
(assert-equal tools-version stdout)))
(define-scenario (thin-restore h)
"print help (-h)"
(run-ok-rcv (stdout _) (thin-restore "-h")
(assert-equal thin-restore-help stdout)))
(define-scenario (thin-restore help)
"print help (-h)"
(run-ok-rcv (stdout _) (thin-restore "--help")
(assert-equal thin-restore-help stdout)))
(define-scenario (thin-restore no-input-file)
"forget to specify an input file"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-restore "-o" md)
(assert-starts-with "No input file provided." stderr))))
(define-scenario (thin-restore missing-input-file)
"the input file can't be found"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-restore "-i no-such-file -o" md)
(assert-superblock-all-zeroes md)
(assert-starts-with "Couldn't stat file" stderr))))
(define-scenario (thin-restore garbage-input-file)
"the input file is just zeroes"
(with-empty-metadata (md)
(with-temp-file-sized ((xml "thin.xml" 4096))
(run-fail-rcv (_ stderr) (thin-restore "-i " xml "-o" md)
(assert-superblock-all-zeroes md)))))
(define-scenario (thin-restore missing-output-file)
"the output file can't be found"
(with-thin-xml (xml)
(run-fail-rcv (_ stderr) (thin-restore "-i " xml)
(assert-starts-with "No output file provided." stderr))))
(define-scenario (thin-restore tiny-output-file)
"Fails if the output file is too small."
(with-temp-file-sized ((md "thin.bin" 4096))
(with-thin-xml (xml)
(run-fail-rcv (_ stderr) (thin-restore "-i" xml "-o" md)
(assert-starts-with thin-restore-outfile-too-small-text stderr)))))
(define-scenario (thin-restore q)
"thin_restore accepts -q"
(with-empty-metadata (md)
(with-thin-xml (xml)
(run-ok-rcv (stdout _) (thin-restore "-i" xml "-o" md "-q")
(assert-eof stdout)))))
(define-scenario (thin-restore quiet)
"thin_restore accepts --quiet"
(with-empty-metadata (md)
(with-thin-xml (xml)
(run-ok-rcv (stdout _) (thin-restore "-i" xml "-o" md "--quiet")
(assert-eof stdout)))))
(define-scenario (thin-restore override transaction-id)
"thin_restore obeys the --transaction-id override"
(with-empty-metadata (md)
(with-thin-xml (xml)
(run-ok-rcv (stdout stderr) (thin-restore "--transaction-id 2345" "-i" xml "-o" md)
(assert-eof stderr))
(run-ok-rcv (stdout stderr) (thin-dump md)
(assert-matches ".*transaction=\"2345\"" stdout)))))
(define-scenario (thin-restore override data-block-size)
"thin_restore obeys the --data-block-size override"
(with-empty-metadata (md)
(with-thin-xml (xml)
(run-ok-rcv (stdout stderr) (thin-restore "--data-block-size 8192" "-i" xml "-o" md)
(assert-eof stderr))
(run-ok-rcv (stdout stderr) (thin-dump md)
(assert-matches ".*data_block_size=\"8192\"" stdout)))))
(define-scenario (thin-restore override nr-data-blocks)
"thin_restore obeys the --nr-data-blocks override"
(with-empty-metadata (md)
(with-thin-xml (xml)
(run-ok-rcv (stdout stderr) (thin-restore "--nr-data-blocks 234500" "-i" xml "-o" md)
(assert-eof stderr))
(run-ok-rcv (stdout stderr) (thin-dump md)
(assert-matches ".*nr_data_blocks=\"234500\"" stdout)))))
;;;-----------------------------------------------------------
;;; thin_dump scenarios
;;;-----------------------------------------------------------
(define-scenario (thin-dump small-input-file)
"Fails with small input file"
(with-temp-file-sized ((md "thin.bin" 512))
(run-fail (thin-dump md))))
(define-scenario (thin-dump restore-is-noop)
"thin_dump followed by thin_restore is a noop."
(with-valid-metadata (md)
(run-ok-rcv (d1-stdout _) (thin-dump md)
(with-temp-file-containing ((xml "thin.xml" d1-stdout))
(run-ok (thin-restore "-i" xml "-o" md))
(run-ok-rcv (d2-stdout _) (thin-dump md)
(assert-equal d1-stdout d2-stdout))))))
(define-scenario (thin-dump no-stderr)
"thin_dump of clean data does not output error messages to stderr"
(with-valid-metadata (md)
(run-ok-rcv (stdout stderr) (thin-dump md)
(assert-eof stderr))))
(define-scenario (thin-dump override transaction-id)
"thin_dump obeys the --transaction-id override"
(with-valid-metadata (md)
(run-ok-rcv (stdout stderr) (thin-dump "--transaction-id 2345" md)
(assert-eof stderr)
(assert-matches ".*transaction=\"2345\"" stdout))))
(define-scenario (thin-dump override data-block-size)
"thin_dump obeys the --data-block-size override"
(with-valid-metadata (md)
(run-ok-rcv (stdout stderr) (thin-dump "--data-block-size 8192" md)
(assert-eof stderr)
(assert-matches ".*data_block_size=\"8192\"" stdout))))
(define-scenario (thin-dump override nr-data-blocks)
"thin_dump obeys the --nr-data-blocks override"
(with-valid-metadata (md)
(run-ok-rcv (stdout stderr) (thin-dump "--nr-data-blocks 234500" md)
(assert-eof stderr)
(assert-matches ".*nr_data_blocks=\"234500\"" stdout))))
(define-scenario (thin-dump repair-superblock succeeds)
"thin_dump can restore a missing superblock"
(with-valid-metadata (md)
(run-ok-rcv (expected-xml stderr) (thin-dump "--transaction-id=5" "--data-block-size=128" "--nr-data-blocks=4096000" md)
(damage-superblock md)
(run-ok-rcv (repaired-xml stderr) (thin-dump "--repair" "--transaction-id=5" "--data-block-size=128" "--nr-data-blocks=4096000" md)
(assert-eof stderr)
(assert-equal expected-xml repaired-xml)))))
(define-scenario (thin-dump repair-superblock missing-transaction-id)
"--transaction-id is mandatory if the superblock is damaged"
(with-damaged-superblock (md)
(run-fail-rcv (_ stderr) (thin-dump "--repair" "--data-block-size=128" "--nr-data-blocks=4096000" md)
(assert-matches ".*transaction id.*" stderr))))
(define-scenario (thin-dump repair-superblock missing-data-block-size)
"--data-block-size is mandatory if the superblock is damaged"
(with-damaged-superblock (md)
(run-fail-rcv (_ stderr) (thin-dump "--repair" "--transaction-id=5" "--nr-data-blocks=4096000" md)
(assert-matches ".*data block size.*" stderr))))
(define-scenario (thin-dump repair-superblock missing-nr-data-blocks)
"--nr-data-blocks is mandatory if the superblock is damaged"
(with-damaged-superblock (md)
(run-fail-rcv (_ stderr) (thin-dump "--repair" "--transaction-id=5" "--data-block-size=128" md)
(assert-matches ".*nr data blocks.*" stderr))))
;;;-----------------------------------------------------------
;;; thin_rmap scenarios
;;;-----------------------------------------------------------
(define-scenario (thin-rmap v)
"thin_rmap accepts -V"
(run-ok-rcv (stdout _) (thin-rmap "-V")
(assert-equal tools-version stdout)))
(define-scenario (thin-rmap version)
"thin_rmap accepts --version"
(run-ok-rcv (stdout _) (thin-rmap "--version")
(assert-equal tools-version stdout)))
(define-scenario (thin-rmap h)
"thin_rmap accepts -h"
(run-ok-rcv (stdout _) (thin-rmap "-h")
(assert-equal thin-rmap-help stdout)))
(define-scenario (thin-rmap help)
"thin_rmap accepts --help"
(run-ok-rcv (stdout _) (thin-rmap "--help")
(assert-equal thin-rmap-help stdout)))
(define-scenario (thin-rmap unrecognised-flag)
"thin_rmap complains with bad flags."
(run-fail (thin-rmap "--unleash-the-hedgehogs")))
(define-scenario (thin-rmap valid-region-format-should-pass)
"thin_rmap with a valid region format should pass."
(with-valid-metadata (md)
(run-ok
(thin-rmap "--region 23..7890" md))))
(define-scenario (thin-rmap invalid-region-should-fail)
"thin_rmap with an invalid region format should fail."
(for-each (lambda (pattern)
(with-valid-metadata (md)
(run-fail (thin-rmap "--region" pattern md))))
'("23,7890" "23..six" "found..7890" "89..88" "89..89" "89.." "" "89...99")))
(define-scenario (thin-rmap multiple-regions-should-pass)
"thin_rmap should handle multiple regions."
(with-valid-metadata (md)
(run-ok (thin-rmap "--region 1..23 --region 45..78" md))))
(define-scenario (thin-rmap handles-junk-input)
"Fail gracefully if given nonsense"
(with-thin-xml (xml)
(run-fail-rcv (_ stderr) (thin-rmap "--region 0..-1" xml)
#t)))
;;;-----------------------------------------------------------
;;; thin_delta scenarios
;;;-----------------------------------------------------------
(define-scenario (thin-delta v)
"thin_delta accepts -V"
(run-ok-rcv (stdout _) (thin-delta "-V")
(assert-equal tools-version stdout)))
(define-scenario (thin-delta version)
"thin_delta accepts --version"
(run-ok-rcv (stdout _) (thin-delta "--version")
(assert-equal tools-version stdout)))
(define-scenario (thin-delta h)
"thin_delta accepts -h"
(run-ok-rcv (stdout _) (thin-delta "-h")
(assert-equal thin-delta-help stdout)))
(define-scenario (thin-delta help)
"thin_delta accepts --help"
(run-ok-rcv (stdout _) (thin-delta "--help")
(assert-equal thin-delta-help stdout)))
(define-scenario (thin-delta unrecognised-option)
"Unrecognised option should cause failure"
(with-valid-metadata (md)
(run-fail-rcv (stdout stderr) (thin-delta "--unleash-the-hedgehogs")
(assert-matches ".*thin_delta: unrecognized option '--unleash-the-hedgehogs" stderr))))
(define-scenario (thin-delta snap1-unspecified)
"Fails without --snap1 fails"
(run-fail-rcv (_ stderr) (thin-delta "--snap2 45 foo")
(assert-starts-with "--snap1 not specified." stderr)))
(define-scenario (thin-delta snap2-unspecified)
"Fails without --snap2 fails"
(run-fail-rcv (_ stderr) (thin-delta "--snap1 45 foo")
(assert-starts-with "--snap2 not specified." stderr)))
(define-scenario (thin-delta device-unspecified)
"Fails if no device given"
(run-fail-rcv (_ stderr) (thin-delta "--snap1 45 --snap2 46")
(assert-starts-with "No input device provided." stderr)))
;;;-----------------------------------------------------------
;;; thin_repair scenarios
;;;-----------------------------------------------------------
(define-scenario (thin-repair dont-repair-xml)
"Fails gracefully if run on XML rather than metadata"
(with-thin-xml (xml)
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-repair "-i" xml "-o" md)
#t))))
(define-scenario (thin-repair missing-input-file)
"the input file can't be found"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-repair "-i no-such-file -o" md)
(assert-superblock-all-zeroes md)
(assert-starts-with "Couldn't stat file" stderr))))
(define-scenario (thin-repair garbage-input-file)
"the input file is just zeroes"
(with-empty-metadata (md1)
(with-corrupt-metadata (md2)
(run-fail-rcv (_ stderr) (thin-repair "-i " md1 "-o" md2)
(assert-superblock-all-zeroes md2)))))
(define-scenario (thin-repair missing-output-file)
"the output file can't be found"
(with-thin-xml (xml)
(run-fail-rcv (_ stderr) (thin-repair "-i " xml)
(assert-starts-with "No output file provided." stderr))))
(define-scenario (thin-repair override transaction-id)
"thin_repair obeys the --transaction-id override"
(with-valid-metadata (md1)
(with-empty-metadata (md2)
(run-ok-rcv (stdout stderr) (thin-repair "--transaction-id 2345" "-i" md1 "-o" md2)
(assert-eof stderr))
(run-ok-rcv (stdout stderr) (thin-dump md2)
(assert-matches ".*transaction=\"2345\"" stdout)))))
(define-scenario (thin-repair override data-block-size)
"thin_repair obeys the --data-block-size override"
(with-valid-metadata (md1)
(with-empty-metadata (md2)
(run-ok-rcv (stdout stderr) (thin-repair "--data-block-size 8192" "-i" md1 "-o" md2)
(assert-eof stderr))
(run-ok-rcv (stdout stderr) (thin-dump md2)
(assert-matches ".*data_block_size=\"8192\"" stdout)))))
(define-scenario (thin-repair override nr-data-blocks)
"thin_repair obeys the --nr-data-blocks override"
(with-valid-metadata (md1)
(with-empty-metadata (md2)
(run-ok-rcv (stdout stderr) (thin-repair "--nr-data-blocks 234500" "-i" md1 "-o" md2)
(assert-eof stderr))
(run-ok-rcv (stdout stderr) (thin-dump md2)
(assert-matches ".*nr_data_blocks=\"234500\"" stdout)))))
(define-scenario (thin-repair superblock succeeds)
"thin_repair can restore a missing superblock"
(with-valid-metadata (md1)
(run-ok-rcv (expected-xml stderr) (thin-dump "--transaction-id=5" "--data-block-size=128" "--nr-data-blocks=4096000" md1)
(damage-superblock md1)
(with-empty-metadata (md2)
(run-ok-rcv (_ stderr) (thin-repair "--transaction-id=5" "--data-block-size=128" "--nr-data-blocks=4096000" "-i" md1 "-o" md2)
(assert-eof stderr))
(run-ok-rcv (repaired-xml stderr) (thin-dump md2)
(assert-eof stderr)
(assert-equal expected-xml repaired-xml))))))
(define-scenario (thin-repair superblock missing-transaction-id)
"--transaction-id is mandatory if the superblock is damaged"
(with-damaged-superblock (md1)
(with-empty-metadata (md2)
(run-fail-rcv (_ stderr) (thin-repair "--data-block-size=128" "--nr-data-blocks=4096000" "-i" md1 "-o" md2)
(assert-matches ".*transaction id.*" stderr)))))
(define-scenario (thin-repair superblock missing-data-block-size)
"--data-block-size is mandatory if the superblock is damaged"
(with-damaged-superblock (md1)
(with-empty-metadata (md2)
(run-fail-rcv (_ stderr) (thin-repair "--transaction-id=5" "--nr-data-blocks=4096000" "-i" md1 "-o" md2)
(assert-matches ".*data block size.*" stderr)))))
(define-scenario (thin-repair superblock missing-nr-data-blocks)
"--nr-data-blocks is mandatory if the superblock is damaged"
(with-damaged-superblock (md1)
(with-empty-metadata (md2)
(run-fail-rcv (_ stderr) (thin-repair "--transaction-id=5" "--data-block-size=128" "-i" md1 "-o" md2)
(assert-matches ".*nr data blocks.*" stderr)))))
;;;-----------------------------------------------------------
;;; thin_metadata_pack scenarios
;;;-----------------------------------------------------------
(define-scenario (thin-metadata-pack version)
"accepts --version"
(run-ok-rcv (stdout _) (thin-metadata-pack "--version")
(assert-equal "thin_metadata_pack 0.9.0-rc2" stdout)))
(define-scenario (thin-metadata-pack h)
"accepts -h"
(run-ok-rcv (stdout _) (thin-metadata-pack "-h")
(assert-equal thin-metadata-pack-help stdout)))
(define-scenario (thin-metadata-pack help)
"accepts --help"
(run-ok-rcv (stdout _) (thin-metadata-pack "--help")
(assert-equal thin-metadata-pack-help stdout)))
(define-scenario (thin-metadata-pack unrecognised-option)
"Unrecognised option should cause failure"
(with-valid-metadata (md)
(run-fail-rcv (stdout stderr) (thin-metadata-pack "--unleash-the-hedgehogs")
(assert-starts-with "error: Found argument '--unleash-the-hedgehogs'" stderr))))
(define-scenario (thin-metadata-pack missing-input-file)
"the input file wasn't specified"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-metadata-pack "-o " md)
(assert-starts-with "error: The following required arguments were not provided:\n -i <DEV>" stderr))))
(define-scenario (thin-metadata-pack no-such-input-file)
"the input file can't be found"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-metadata-pack "-i no-such-file -o" md)
(assert-starts-with "Couldn't find input file" stderr))))
(define-scenario (thin-metadata-pack missing-output-file)
"the output file wasn't specified"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-metadata-pack "-i" md)
(assert-starts-with "error: The following required arguments were not provided:\n -o <FILE>" stderr))))
;;;-----------------------------------------------------------
;;; thin_metadata_unpack scenarios
;;;-----------------------------------------------------------
(define-scenario (thin-metadata-unpack version)
"accepts --version"
(run-ok-rcv (stdout _) (thin-metadata-unpack "--version")
(assert-equal "thin_metadata_unpack 0.9.0-rc2" stdout)))
(define-scenario (thin-metadata-unpack h)
"accepts -h"
(run-ok-rcv (stdout _) (thin-metadata-unpack "-h")
(assert-equal thin-metadata-unpack-help stdout)))
(define-scenario (thin-metadata-unpack help)
"accepts --help"
(run-ok-rcv (stdout _) (thin-metadata-unpack "--help")
(assert-equal thin-metadata-unpack-help stdout)))
(define-scenario (thin-metadata-unpack unrecognised-option)
"Unrecognised option should cause failure"
(with-valid-metadata (md)
(run-fail-rcv (stdout stderr) (thin-metadata-unpack "--unleash-the-hedgehogs")
(assert-starts-with "error: Found argument '--unleash-the-hedgehogs'" stderr))))
(define-scenario (thin-metadata-unpack missing-input-file)
"the input file wasn't specified"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-metadata-unpack "-o " md)
(assert-starts-with "error: The following required arguments were not provided:\n -i <DEV>" stderr))))
(define-scenario (thin-metadata-unpack no-such-input-file)
"the input file can't be found"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-metadata-unpack "-i no-such-file -o" md)
(assert-starts-with "Couldn't find input file" stderr))))
(define-scenario (thin-metadata-unpack missing-output-file)
"the output file wasn't specified"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (thin-metadata-unpack "-i" md)
(assert-starts-with "error: The following required arguments were not provided:\n -o <FILE>" stderr))))
(define-scenario (thin-metadata-unpack garbage-input-file)
"the input file is just zeroes"
(with-empty-metadata (bad-pack)
(run-fail-rcv (_ stderr) (thin-metadata-unpack "-i " bad-pack "-o junk")
(assert-starts-with "Not a pack file." stderr))))
;;;-----------------------------------------------------------
;;; thin_metadata_pack/unpack end to end scenario
;;;-----------------------------------------------------------)
(define-scenario (thin-metadata-pack end-to-end)
"pack -> unpack recovers metadata"
(let ((pack-file "md.pack"))
(with-valid-metadata (md-in)
(with-empty-metadata (md-out)
(run-ok (thin-metadata-pack "-i" md-in "-o" pack-file))
(run-ok (thin-metadata-unpack "-i" pack-file "-o" md-out))
(run-ok-rcv (dump1 _) (thin-dump md-in)
(run-ok-rcv (dump2 _) (thin-dump md-out)
(assert-equal dump1 dump2)))))))
)

src/bin/thin_check.rs (new file, 86 lines)

@@ -0,0 +1,86 @@
extern crate clap;
extern crate thinp;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use thinp::file_utils;
use thinp::thin::check::{check, ThinCheckOptions};
use std::process::exit;
fn main() {
let parser = App::new("thin_check")
.version(thinp::version::TOOLS_VERSION)
.about("Validates thin provisioning metadata on a device or file.")
.arg(
Arg::with_name("QUIET")
.help("Suppress output messages, return only exit code.")
.short("q")
.long("quiet")
.value_name("QUIET"),
)
.arg(
Arg::with_name("SB_ONLY")
.help("Only check the superblock.")
.long("super-block-only")
.value_name("SB_ONLY"),
)
.arg(
Arg::with_name("ignore-non-fatal-errors")
.help("Only return a non-zero exit code if a fatal error is found.")
.long("ignore-non-fatal-errors")
.value_name("IGNORE_NON_FATAL"),
)
.arg(
Arg::with_name("clear-needs-check-flag")
.help("Clears the 'needs_check' flag in the superblock")
.long("clear-needs-check")
.value_name("CLEAR_NEEDS_CHECK"),
)
.arg(
Arg::with_name("OVERRIDE_MAPPING_ROOT")
.help("Specify a mapping root to use")
.long("override-mapping-root")
.value_name("OVERRIDE_MAPPING_ROOT")
.takes_value(true),
)
.arg(
Arg::with_name("METADATA_SNAPSHOT")
.help("Check the metadata snapshot on a live pool")
.short("m")
.long("metadata-snapshot")
.value_name("METADATA_SNAPSHOT"),
)
.arg(
Arg::with_name("INPUT")
.help("Specify the input device to check")
.required(true)
.index(1),
)
.arg(
Arg::with_name("SYNC_IO")
.help("Force use of synchronous io")
.long("sync-io")
.value_name("SYNC_IO")
.takes_value(false),
);
let matches = parser.get_matches();
let input_file = Path::new(matches.value_of("INPUT").unwrap());
if !file_utils::file_exists(input_file) {
eprintln!("Couldn't find input file '{:?}'.", &input_file);
exit(1);
}
let opts = ThinCheckOptions {
dev: &input_file,
async_io: !matches.is_present("SYNC_IO"),
};
if let Err(reason) = check(&opts) {
println!("Application error: {}", reason);
process::exit(1);
}
}
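
Note: the clap definition above keeps the familiar thin_check command line; an invocation along the lines of "thin_check --super-block-only metadata.bin" (file name illustrative) only validates the superblock, and adding --sync-io selects the synchronous read path instead of io_uring. If check() returns an error it is printed and the process exits with status 1.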


@@ -1,51 +0,0 @@
use std::io;
use std::io::{Read, Seek};
use std::fs::OpenOptions;
use std::os::unix::fs::OpenOptionsExt;
use std::fs::File;
pub const BLOCK_SIZE: usize = 4096;
#[repr(align(4096))]
pub struct Block {
pub data: [u8; BLOCK_SIZE as usize],
}
pub struct BlockManager {
pub nr_blocks: u64,
input: File,
}
fn get_nr_blocks(path: &str) -> io::Result<u64> {
let metadata = std::fs::metadata(path)?;
Ok(metadata.len() / (BLOCK_SIZE as u64))
}
impl BlockManager {
pub fn new(path: &str, _cache_size: usize) -> io::Result<BlockManager> {
let input = OpenOptions::new()
.read(true)
.write(false)
.custom_flags(libc::O_DIRECT)
.open(path)?;
Ok(BlockManager {
nr_blocks: get_nr_blocks(path)?,
input,
})
}
pub fn get(&mut self, b: u64) -> io::Result<Block> {
self.read_block(b)
}
fn read_block(&mut self, b: u64) -> io::Result<Block>
{
let mut buf = Block {data: [0; BLOCK_SIZE]};
self.input.seek(io::SeekFrom::Start(b * (BLOCK_SIZE as u64)))?;
self.input.read_exact(&mut buf.data)?;
Ok(buf)
}
}

src/cache/mod.rs (new file, 1 line)

@@ -0,0 +1 @@
pub mod xml;

src/cache/xml.rs (new file, 175 lines)

@@ -0,0 +1,175 @@
use anyhow::Result;
use base64::encode;
use std::{borrow::Cow, fmt::Display, io::Write};
use quick_xml::events::attributes::Attribute;
use quick_xml::events::{BytesEnd, BytesStart, Event};
use quick_xml::Writer;
//---------------------------------------
#[derive(Clone)]
pub struct Superblock {
pub uuid: String,
pub block_size: u64,
pub nr_cache_blocks: u64,
pub policy: String,
pub hint_width: u32,
}
#[derive(Clone)]
pub struct Map {
pub cblock: u64,
pub oblock: u64,
pub dirty: bool,
}
#[derive(Clone)]
pub struct Hint {
pub cblock: u64,
pub data: Vec<u8>,
}
#[derive(Clone)]
pub struct Discard {
pub begin: u64,
pub end: u64,
}
#[derive(Clone)]
pub enum Visit {
Continue,
Stop,
}
pub trait MetadataVisitor {
fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit>;
fn superblock_e(&mut self) -> Result<Visit>;
fn mappings_b(&mut self) -> Result<Visit>;
fn mappings_e(&mut self) -> Result<Visit>;
fn mapping(&mut self, m: &Map) -> Result<Visit>;
fn hints_b(&mut self) -> Result<Visit>;
fn hints_e(&mut self) -> Result<Visit>;
fn hint(&mut self, h: &Hint) -> Result<Visit>;
fn discards_b(&mut self) -> Result<Visit>;
fn discards_e(&mut self) -> Result<Visit>;
fn discard(&mut self, d: &Discard) -> Result<Visit>;
fn eof(&mut self) -> Result<Visit>;
}
pub struct XmlWriter<W: Write> {
w: Writer<W>,
}
impl<W: Write> XmlWriter<W> {
pub fn new(w: W) -> XmlWriter<W> {
XmlWriter {
w: Writer::new_with_indent(w, 0x20, 2),
}
}
}
fn mk_attr_<'a, T: Display>(n: T) -> Cow<'a, [u8]> {
let str = format!("{}", n);
Cow::Owned(str.into_bytes())
}
fn mk_attr<T: Display>(key: &[u8], value: T) -> Attribute {
Attribute {
key,
value: mk_attr_(value),
}
}
impl<W: Write> MetadataVisitor for XmlWriter<W> {
fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit> {
let tag = b"superblock";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"uuid", sb.uuid.clone()));
elem.push_attribute(mk_attr(b"block_size", sb.block_size));
elem.push_attribute(mk_attr(b"nr_cache_blocks", sb.nr_cache_blocks));
elem.push_attribute(mk_attr(b"policy", sb.policy.clone()));
elem.push_attribute(mk_attr(b"hint_width", sb.hint_width));
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn superblock_e(&mut self) -> Result<Visit> {
self.w.write_event(Event::End(BytesEnd::borrowed(b"superblock")))?;
Ok(Visit::Continue)
}
fn mappings_b(&mut self) -> Result<Visit> {
let tag = b"mappings";
let elem = BytesStart::owned(tag.to_vec(), tag.len());
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn mappings_e(&mut self) -> Result<Visit> {
self.w.write_event(Event::End(BytesEnd::borrowed(b"mappings")))?;
Ok(Visit::Continue)
}
fn mapping(&mut self, m: &Map) -> Result<Visit> {
let tag = b"map";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"cache_block", m.cblock));
elem.push_attribute(mk_attr(b"origin_block", m.oblock));
elem.push_attribute(mk_attr(b"dirty", m.dirty));
self.w.write_event(Event::Empty(elem))?;
Ok(Visit::Continue)
}
fn hints_b(&mut self) -> Result<Visit> {
let tag = b"hints";
let elem = BytesStart::owned(tag.to_vec(), tag.len());
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn hints_e(&mut self) -> Result<Visit> {
self.w.write_event(Event::End(BytesEnd::borrowed(b"hints")))?;
Ok(Visit::Continue)
}
fn hint(&mut self, h: &Hint) -> Result<Visit> {
let tag = b"hint";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"cache_block", h.cblock));
elem.push_attribute(mk_attr(b"data", encode(&h.data[0..])));
self.w.write_event(Event::Empty(elem))?;
Ok(Visit::Continue)
}
fn discards_b(&mut self) -> Result<Visit> {
let tag = b"discards";
let elem = BytesStart::owned(tag.to_vec(), tag.len());
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn discards_e(&mut self) -> Result<Visit> {
self.w.write_event(Event::End(BytesEnd::borrowed(b"discards")))?;
Ok(Visit::Continue)
}
fn discard(&mut self, d: &Discard) -> Result<Visit> {
let tag = b"discard";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"dbegin", d.begin));
elem.push_attribute(mk_attr(b"dend", d.end));
self.w.write_event(Event::Empty(elem))?;
Ok(Visit::Continue)
}
fn eof(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
}
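
A rough usage sketch of the visitor API above, writing a minimal cache metadata document to stdout; the thinp::cache::xml path follows this commit's lib.rs, and all field values are invented:

use anyhow::Result;
use thinp::cache::xml::{Map, MetadataVisitor, Superblock, XmlWriter};

fn main() -> Result<()> {
    let mut w = XmlWriter::new(std::io::stdout());

    // Superblock element with a single clean mapping inside <mappings>.
    w.superblock_b(&Superblock {
        uuid: "".to_string(),
        block_size: 128,
        nr_cache_blocks: 1024,
        policy: "smq".to_string(),
        hint_width: 4,
    })?;
    w.mappings_b()?;
    w.mapping(&Map { cblock: 0, oblock: 1234, dirty: false })?;
    w.mappings_e()?;
    w.superblock_e()?;
    w.eof()?;
    Ok(())
}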


@@ -1,13 +0,0 @@
use std::error::Error;
use crate::block_manager::BlockManager;
pub fn check(dev: &str) -> Result<(), Box<dyn Error>> {
let mut bm = BlockManager::new(dev, 1024)?;
for b in 0..100 {
let _block = bm.get(b)?;
}
Ok(())
}

src/checksum.rs (new file, 46 lines)

@ -0,0 +1,46 @@
use byteorder::{LittleEndian, ReadBytesExt};
use crc32c::crc32c;
use std::io::Cursor;
const BLOCK_SIZE: u64 = 4096;
#[allow(dead_code)]
const MAGIC: u64 = 0xa537a0aa6309ef77;
const SUPERBLOCK_CSUM_XOR: u32 = 160774;
const BITMAP_CSUM_XOR: u32 = 240779;
const INDEX_CSUM_XOR: u32 = 160478;
const BTREE_CSUM_XOR: u32 = 121107;
fn checksum(buf: &[u8]) -> u32 {
crc32c(&buf[4..]) ^ 0xffffffff
}
#[derive(Debug, PartialEq)]
pub enum BT {
SUPERBLOCK,
NODE,
INDEX,
BITMAP,
UNKNOWN,
}
pub fn metadata_block_type(buf: &[u8]) -> BT {
if buf.len() != BLOCK_SIZE as usize {
return BT::UNKNOWN;
}
// The checksum is always stored in the first u32 of the buffer.
let mut rdr = Cursor::new(buf);
let sum_on_disk = rdr.read_u32::<LittleEndian>().unwrap();
let csum = checksum(buf);
let btype = csum ^ sum_on_disk;
match btype {
SUPERBLOCK_CSUM_XOR => BT::SUPERBLOCK,
BTREE_CSUM_XOR => BT::NODE,
BITMAP_CSUM_XOR => BT::BITMAP,
INDEX_CSUM_XOR => BT::INDEX,
_ => BT::UNKNOWN,
}
}
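
Each metadata block stores crc32c(buf[4..]) ^ 0xffffffff ^ TYPE_XOR in its first little-endian u32, so xoring the recomputed checksum with the stored value recovers the block-type constant. A minimal sketch of calling it, assuming the thinp crate layout from this commit (the file handling is illustrative only):

use std::fs::File;
use std::io::Read;

use thinp::checksum::{metadata_block_type, BT};

fn main() -> std::io::Result<()> {
    // Read the first 4K block of a metadata file named on the command line.
    let path = std::env::args().nth(1).expect("usage: block-type <metadata file>");
    let mut buf = vec![0u8; 4096];
    File::open(path)?.read_exact(&mut buf)?;

    // Classify the block purely from its checksum; block 0 should be the superblock.
    match metadata_block_type(&buf) {
        BT::SUPERBLOCK => println!("superblock"),
        BT::NODE => println!("btree node"),
        BT::INDEX => println!("space map index"),
        BT::BITMAP => println!("space map bitmap"),
        BT::UNKNOWN => println!("unknown block type"),
    }
    Ok(())
}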

src/io_engine.rs (new file, 263 lines)

@ -0,0 +1,263 @@
use anyhow::Result;
use io_uring::opcode::{self, types};
use io_uring::IoUring;
use std::alloc::{alloc, dealloc, Layout};
use std::fs::File;
use std::fs::OpenOptions;
use std::io::{self, Read, Seek};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;
use std::sync::{Arc, Mutex, Condvar};
//------------------------------------------
pub const BLOCK_SIZE: usize = 4096;
const ALIGN: usize = 4096;
#[derive(Debug)]
pub struct Block {
pub loc: u64,
pub data: *mut u8,
}
impl Block {
pub fn new(loc: u64) -> Block {
let layout = Layout::from_size_align(BLOCK_SIZE, ALIGN).unwrap();
let ptr = unsafe { alloc(layout) };
assert!(!ptr.is_null(), "out of memory");
Block { loc, data: ptr }
}
pub fn get_data<'a>(&self) -> &'a mut [u8] {
unsafe { std::slice::from_raw_parts_mut::<'a>(self.data, BLOCK_SIZE) }
}
}
impl Drop for Block {
fn drop(&mut self) {
let layout = Layout::from_size_align(BLOCK_SIZE, ALIGN).unwrap();
unsafe {
dealloc(self.data, layout);
}
}
}
unsafe impl Send for Block {}
//------------------------------------------
pub trait IoEngine {
fn get_nr_blocks(&self) -> u64;
fn read(&self, block: &mut Block) -> Result<()>;
fn read_many(&self, blocks: &mut Vec<Block>) -> Result<()>;
}
fn get_nr_blocks(path: &Path) -> io::Result<u64> {
let metadata = std::fs::metadata(path)?;
Ok(metadata.len() / (BLOCK_SIZE as u64))
}
//------------------------------------------
pub struct SyncIoEngine {
nr_blocks: u64,
files: Mutex<Vec<File>>,
cvar: Condvar,
}
impl SyncIoEngine {
fn open_file(path: &Path) -> Result<File> {
let file = OpenOptions::new()
.read(true)
.write(false)
.custom_flags(libc::O_DIRECT)
.open(path)?;
Ok(file)
}
pub fn new(path: &Path, nr_files: usize) -> Result<SyncIoEngine> {
let mut files = Vec::new();
for _n in 0..nr_files {
files.push(SyncIoEngine::open_file(path)?);
}
Ok(SyncIoEngine {
nr_blocks: get_nr_blocks(path)?,
files: Mutex::new(files),
cvar: Condvar::new(),
})
}
fn get(&self) -> File {
let mut files = self.files.lock().unwrap();
while files.len() == 0 {
files = self.cvar.wait(files).unwrap();
}
files.pop().unwrap()
}
fn put(&self, f: File) {
let mut files = self.files.lock().unwrap();
files.push(f);
self.cvar.notify_one();
}
}
impl IoEngine for SyncIoEngine {
fn get_nr_blocks(&self) -> u64 {
self.nr_blocks
}
fn read(&self, b: &mut Block) -> Result<()> {
let mut input = self.get();
input.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
input.read_exact(&mut b.get_data())?;
self.put(input);
Ok(())
}
fn read_many(&self, blocks: &mut Vec<Block>) -> Result<()> {
let mut input = self.get();
for b in blocks {
input.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
input.read_exact(&mut b.get_data())?;
}
self.put(input);
Ok(())
}
}
//------------------------------------------
pub struct AsyncIoEngine_ {
queue_len: u32,
ring: IoUring,
nr_blocks: u64,
fd: RawFd,
input: Arc<File>,
}
pub struct AsyncIoEngine {
inner: Mutex<AsyncIoEngine_>,
}
impl AsyncIoEngine {
pub fn new(path: &Path, queue_len: u32) -> Result<AsyncIoEngine> {
let input = OpenOptions::new()
.read(true)
.write(false)
.custom_flags(libc::O_DIRECT)
.open(path)?;
Ok(AsyncIoEngine {
inner: Mutex::new(AsyncIoEngine_ {
queue_len,
ring: IoUring::new(queue_len)?,
nr_blocks: get_nr_blocks(path)?,
fd: input.as_raw_fd(),
input: Arc::new(input),
}),
})
}
fn read_many_(&self, blocks: &mut [Block]) -> Result<()> {
let mut inner = self.inner.lock().unwrap();
let count = blocks.len();
let fd = types::Target::Fd(inner.input.as_raw_fd());
for b in blocks.iter_mut() {
let read_e = opcode::Read::new(fd, b.data, BLOCK_SIZE as u32)
.offset(b.loc as i64 * BLOCK_SIZE as i64);
unsafe {
let mut queue = inner.ring.submission().available();
queue
.push(read_e.build().user_data(1))
.ok()
.expect("queue is full");
}
}
inner.ring.submit_and_wait(count)?;
let cqes = inner.ring.completion().available().collect::<Vec<_>>();
// FIXME: return proper errors
assert_eq!(cqes.len(), count);
for c in &cqes {
assert_eq!(c.result(), BLOCK_SIZE as i32);
}
Ok(())
}
}
impl Clone for AsyncIoEngine {
fn clone(&self) -> AsyncIoEngine {
let inner = self.inner.lock().unwrap();
eprintln!("in clone, queue_len = {}", inner.queue_len);
AsyncIoEngine {
inner: Mutex::new(AsyncIoEngine_ {
queue_len: inner.queue_len,
ring: IoUring::new(inner.queue_len).expect("couldn't create uring"),
nr_blocks: inner.nr_blocks,
fd: inner.fd,
input: inner.input.clone(),
}),
}
}
}
impl IoEngine for AsyncIoEngine {
fn get_nr_blocks(&self) -> u64 {
let inner = self.inner.lock().unwrap();
inner.nr_blocks
}
fn read(&self, b: &mut Block) -> Result<()> {
let mut inner = self.inner.lock().unwrap();
let fd = types::Target::Fd(inner.input.as_raw_fd());
let read_e = opcode::Read::new(fd, b.data, BLOCK_SIZE as u32)
.offset(b.loc as i64 * BLOCK_SIZE as i64);
unsafe {
let mut queue = inner.ring.submission().available();
queue
.push(read_e.build().user_data(1))
.ok()
.expect("queue is full");
}
inner.ring.submit_and_wait(1)?;
let cqes = inner.ring.completion().available().collect::<Vec<_>>();
// FIXME: return proper errors
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 1);
assert_eq!(cqes[0].result(), BLOCK_SIZE as i32);
Ok(())
}
fn read_many(&self, blocks: &mut Vec<Block>) -> Result<()> {
let inner = self.inner.lock().unwrap();
let queue_len = inner.queue_len as usize;
drop(inner);
let mut done = 0;
while done != blocks.len() {
let len = usize::min(blocks.len() - done, queue_len);
self.read_many_(&mut blocks[done..(done + len)])?;
done += len;
}
Ok(())
}
}
//------------------------------------------
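
Both engines implement the same IoEngine trait, so the checker can be written once and switched between io_uring and plain seek/read at runtime (the --sync-io flag in thin_check above). A minimal sketch, assuming the thinp crate layout from this commit and an illustrative metadata.bin path:

use std::path::Path;

use anyhow::Result;
use thinp::checksum::metadata_block_type;
use thinp::io_engine::{Block, IoEngine, SyncIoEngine};

fn main() -> Result<()> {
    // A small pool of O_DIRECT file handles for synchronous reads.
    let engine = SyncIoEngine::new(Path::new("metadata.bin"), 4)?;
    println!("{} metadata blocks", engine.get_nr_blocks());

    // Blocks carry their location; the engine fills in the 4K of data.
    let mut sb = Block::new(0);
    engine.read(&mut sb)?;
    println!("block 0 is {:?}", metadata_block_type(sb.get_data()));
    Ok(())
}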


@@ -15,10 +15,12 @@ extern crate quickcheck;
#[cfg(test)]
extern crate quickcheck_macros;
pub mod block_manager;
pub mod check;
pub mod io_engine;
pub mod cache;
pub mod checksum;
pub mod file_utils;
pub mod pack;
pub mod pdata;
pub mod shrink;
pub mod thin;
pub mod version;


@@ -1,3 +1,4 @@
use thiserror::Error;
use std::{io, io::Write};
use nom::{bytes::complete::*, number::complete::*, IResult};
@@ -6,41 +7,27 @@ use crate::pack::vm::*;
//-------------------------------------------
#[derive(Debug)]
#[derive(Error, Debug)]
pub enum PackError {
#[error("Couldn't parse binary data")]
ParseError,
IOError,
}
impl std::error::Error for PackError {}
#[error("Write error")]
WriteError { source: std::io::Error },
}
pub type PResult<T> = Result<T, PackError>;
fn nom_to_pr<T>(r: IResult<&[u8], T>) -> PResult<(&[u8], T)> {
match r {
Ok(v) => Ok(v),
Err(_) => Err(PackError::ParseError),
}
r.map_err(|_source| PackError::ParseError)
}
fn io_to_pr<T>(r: io::Result<T>) -> PResult<T> {
match r {
Ok(v) => Ok(v),
Err(_) => Err(PackError::IOError),
}
r.map_err(|source| PackError::WriteError {source})
}
//-------------------------------------------
impl std::fmt::Display for PackError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
PackError::ParseError => write!(f, "parse error"),
PackError::IOError => write!(f, "IO error"),
}
}
}
fn run64(i: &[u8], count: usize) -> IResult<&[u8], Vec<u64>> {
let (i, ns) = nom::multi::many_m_n(count, count, le_u64)(i)?;
Ok((i, ns))


@@ -1,4 +1,4 @@
use anyhow::Result;
use anyhow::{anyhow, Context, Result};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use flate2::{read::ZlibDecoder, write::ZlibEncoder, Compression};
@@ -7,12 +7,11 @@ use std::os::unix::fs::OpenOptionsExt;
use std::{
error::Error,
fs::OpenOptions,
path::Path,
io,
io::prelude::*,
io::Cursor,
io::Write,
ops::DerefMut,
path::Path,
sync::{Arc, Mutex},
thread::spawn,
};
@@ -20,16 +19,13 @@ use std::{
use rand::prelude::*;
use std::sync::mpsc::{sync_channel, Receiver};
use crate::checksum::*;
use crate::file_utils;
use crate::pack::node_encode::*;
const BLOCK_SIZE: u64 = 4096;
const MAGIC: u64 = 0xa537a0aa6309ef77;
const PACK_VERSION: u64 = 3;
const SUPERBLOCK_CSUM_XOR: u32 = 160774;
const BITMAP_CSUM_XOR: u32 = 240779;
const INDEX_CSUM_XOR: u32 = 160478;
const BTREE_CSUM_XOR: u32 = 121107;
fn shuffle<T>(v: &mut Vec<T>) {
let mut rng = rand::thread_rng();
@@ -87,7 +83,7 @@ pub fn pack(input_file: &Path, output_file: &Path) -> Result<(), Box<dyn Error>>
.truncate(true)
.open(output_file)?;
write_header(&output, nr_blocks)?;
write_header(&output, nr_blocks).context("unable to write pack file header")?;
let sync_input = Arc::new(Mutex::new(input));
let sync_output = Arc::new(Mutex::new(output));
@@ -126,7 +122,7 @@ where
let kind = metadata_block_type(data);
if kind != BT::UNKNOWN {
z.write_u64::<LittleEndian>(b)?;
pack_block(&mut z, kind, &data);
pack_block(&mut z, kind, &data)?;
written += 1;
if written == 1024 {
@@ -207,55 +203,16 @@ where
Ok(buf)
}
fn checksum(buf: &[u8]) -> u32 {
crc32c::crc32c(&buf[4..]) ^ 0xffffffff
}
#[derive(PartialEq)]
enum BT {
SUPERBLOCK,
NODE,
INDEX,
BITMAP,
UNKNOWN,
}
fn metadata_block_type(buf: &[u8]) -> BT {
if buf.len() != BLOCK_SIZE as usize {
return BT::UNKNOWN;
}
// The checksum is always stored in the first u32 of the buffer.
let mut rdr = Cursor::new(buf);
let sum_on_disk = rdr.read_u32::<LittleEndian>().unwrap();
let csum = checksum(buf);
let btype = csum ^ sum_on_disk;
match btype {
SUPERBLOCK_CSUM_XOR => BT::SUPERBLOCK,
BTREE_CSUM_XOR => BT::NODE,
BITMAP_CSUM_XOR => BT::BITMAP,
INDEX_CSUM_XOR => BT::INDEX,
_ => BT::UNKNOWN,
}
}
fn check<T>(r: &PResult<T>) {
match r {
Ok(_) => {}
Err(PackError::ParseError) => panic!("parse error"),
Err(PackError::IOError) => panic!("io error"),
}
}
fn pack_block<W: Write>(w: &mut W, kind: BT, buf: &[u8]) {
fn pack_block<W: Write>(w: &mut W, kind: BT, buf: &[u8]) -> Result<()> {
match kind {
BT::SUPERBLOCK => check(&pack_superblock(w, buf)),
BT::NODE => check(&pack_btree_node(w, buf)),
BT::INDEX => check(&pack_index(w, buf)),
BT::BITMAP => check(&pack_bitmap(w, buf)),
BT::UNKNOWN => panic!("asked to pack an unknown block type"),
BT::SUPERBLOCK => pack_superblock(w, buf).context("unable to pack superblock")?,
BT::NODE => pack_btree_node(w, buf).context("unable to pack btree node")?,
BT::INDEX => pack_index(w, buf).context("unable to pack space map index")?,
BT::BITMAP => pack_bitmap(w, buf).context("unable to pack space map bitmap")?,
BT::UNKNOWN => return Err(anyhow!("asked to pack an unknown block type")),
}
Ok(())
}
fn write_zero_block<W>(w: &mut W, b: u64) -> io::Result<()>


@@ -416,7 +416,7 @@ mod tests {
}
}
fn check_u64s_match(ns: &Vec<u64>, bytes: &[u8]) -> bool {
fn check_u64s_match(ns: &[u64], bytes: &[u8]) -> bool {
let mut packed = Vec::with_capacity(ns.len() * 8);
let mut w = Cursor::new(&mut packed);
for n in ns {
@@ -425,7 +425,7 @@ mod tests {
packed == bytes
}
fn check_pack_u64s(ns: &Vec<u64>) -> bool {
fn check_pack_u64s(ns: &[u64]) -> bool {
println!("packing {:?}", &ns);
let mut bs = Vec::with_capacity(4096);
@@ -461,7 +461,7 @@ mod tests {
check_pack_u64s(&ns)
}
fn check_pack_shifted_u64s(ns: &Vec<(u64, u64)>) -> bool {
fn check_pack_shifted_u64s(ns: &[(u64, u64)]) -> bool {
let shifted: Vec<u64> = ns
.iter()
.map(|(h, l)| (h << 24) | (l & ((1 << 24) - 1)))

src/pdata/btree.rs (new file, 327 lines)

@@ -0,0 +1,327 @@
use anyhow::{anyhow, Result};
use nom::{number::complete::*, IResult};
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};
use crate::checksum;
use crate::io_engine::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
// FIXME: check that keys are in ascending order between nodes.
//------------------------------------------
const NODE_HEADER_SIZE: usize = 32;
pub struct NodeHeader {
is_leaf: bool,
nr_entries: u32,
max_entries: u32,
value_size: u32,
}
#[allow(dead_code)]
const INTERNAL_NODE: u32 = 1;
const LEAF_NODE: u32 = 2;
pub fn unpack_node_header(data: &[u8]) -> IResult<&[u8], NodeHeader> {
let (i, _csum) = le_u32(data)?;
let (i, flags) = le_u32(i)?;
let (i, _block) = le_u64(i)?;
let (i, nr_entries) = le_u32(i)?;
let (i, max_entries) = le_u32(i)?;
let (i, value_size) = le_u32(i)?;
let (i, _padding) = le_u32(i)?;
Ok((
i,
NodeHeader {
is_leaf: flags == LEAF_NODE,
nr_entries,
max_entries,
value_size,
},
))
}
pub enum Node<V: Unpack> {
Internal {
header: NodeHeader,
keys: Vec<u64>,
values: Vec<u64>,
},
Leaf {
header: NodeHeader,
keys: Vec<u64>,
values: Vec<V>,
},
}
pub fn node_err<V>(msg: String) -> Result<V> {
let msg = format!("btree node error: {}", msg);
Err(anyhow!(msg))
}
pub fn to_any<'a, V>(r: IResult<&'a [u8], V>) -> Result<(&'a [u8], V)> {
if let Ok((i, v)) = r {
Ok((i, v))
} else {
Err(anyhow!("btree node error: parse error"))
}
}
pub fn unpack_node<V: Unpack>(
data: &[u8],
ignore_non_fatal: bool,
is_root: bool,
) -> Result<Node<V>> {
use nom::multi::count;
let (i, header) = to_any(unpack_node_header(data))?;
if header.is_leaf && header.value_size != V::disk_size() {
return node_err(format!(
"value_size mismatch: expected {}, was {}",
V::disk_size(),
header.value_size
));
}
let elt_size = header.value_size + 8;
if elt_size as usize * header.max_entries as usize + NODE_HEADER_SIZE > BLOCK_SIZE {
return node_err(format!("max_entries is too large ({})", header.max_entries));
}
if header.nr_entries > header.max_entries {
return node_err("nr_entries > max_entries".to_string());
}
if !ignore_non_fatal {
if header.max_entries % 3 != 0 {
return node_err("max_entries is not divisible by 3".to_string());
}
if !is_root {
let min = header.max_entries / 3;
if header.nr_entries < min {
return node_err("too few entries".to_string());
}
}
}
let (i, keys) = to_any(count(le_u64, header.nr_entries as usize)(i))?;
let mut last = None;
for k in &keys {
if let Some(l) = last {
if k <= l {
return node_err("keys out of order".to_string());
}
}
last = Some(k);
}
let nr_free = header.max_entries - header.nr_entries;
let (i, _padding) = to_any(count(le_u64, nr_free as usize)(i))?;
if header.is_leaf {
let (_i, values) = to_any(count(V::unpack, header.nr_entries as usize)(i))?;
Ok(Node::Leaf {
header,
keys,
values,
})
} else {
let (_i, values) = to_any(count(le_u64, header.nr_entries as usize)(i))?;
Ok(Node::Internal {
header,
keys,
values,
})
}
}
//------------------------------------------
pub trait NodeVisitor<V: Unpack> {
fn visit(&mut self, w: &BTreeWalker, b: &Block, node: &Node<V>) -> Result<()>;
}
#[derive(Clone)]
pub struct BTreeWalker {
pub engine: Arc<dyn IoEngine + Send + Sync>,
pub sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
ignore_non_fatal: bool,
}
impl BTreeWalker {
pub fn new(engine: Arc<dyn IoEngine + Send + Sync>, ignore_non_fatal: bool) -> BTreeWalker {
let nr_blocks = engine.get_nr_blocks() as usize;
let r: BTreeWalker = BTreeWalker {
engine,
sm: Arc::new(Mutex::new(RestrictedSpaceMap::new(nr_blocks as u64))),
ignore_non_fatal,
};
r
}
pub fn new_with_sm(
engine: Arc<dyn IoEngine + Send + Sync>,
sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
ignore_non_fatal: bool,
) -> Result<BTreeWalker> {
{
let sm = sm.lock().unwrap();
assert_eq!(sm.get_nr_blocks()?, engine.get_nr_blocks());
}
Ok(BTreeWalker {
engine,
sm,
ignore_non_fatal,
})
}
// Atomically increments the ref count, and returns the _old_ count.
fn sm_inc(&self, b: u64) -> Result<u32> {
let mut sm = self.sm.lock().unwrap();
let count = sm.get(b)?;
sm.inc(b, 1)?;
Ok(count)
}
fn walk_nodes<NV, V>(&mut self, visitor: &mut NV, bs: &[u64]) -> Result<()>
where
NV: NodeVisitor<V>,
V: Unpack,
{
let mut blocks = Vec::new();
for b in bs {
if self.sm_inc(*b)? == 0 {
blocks.push(Block::new(*b));
}
}
self.engine.read_many(&mut blocks)?;
for b in blocks {
self.walk_node(visitor, &b, false)?;
}
Ok(())
}
fn walk_node<NV, V>(&mut self, visitor: &mut NV, b: &Block, is_root: bool) -> Result<()>
where
NV: NodeVisitor<V>,
V: Unpack,
{
let bt = checksum::metadata_block_type(b.get_data());
if bt != checksum::BT::NODE {
return Err(anyhow!("checksum failed for node {}, {:?}", b.loc, bt));
}
let node = unpack_node::<V>(&b.get_data(), self.ignore_non_fatal, is_root)?;
visitor.visit(self, &b, &node)?;
if let Node::Internal {
header: _h,
keys: _k,
values,
} = node
{
self.walk_nodes(visitor, &values)?;
}
Ok(())
}
pub fn walk_b<NV, V>(&mut self, visitor: &mut NV, root: &Block) -> Result<()>
where
NV: NodeVisitor<V>,
V: Unpack,
{
if self.sm_inc(root.loc)? > 0 {
Ok(())
} else {
self.walk_node(visitor, &root, true)
}
}
pub fn walk<NV, V>(&mut self, visitor: &mut NV, root: u64) -> Result<()>
where
NV: NodeVisitor<V>,
V: Unpack,
{
if self.sm_inc(root)? > 0 {
Ok(())
} else {
let mut root = Block::new(root);
self.engine.read(&mut root)?;
self.walk_node(visitor, &root, true)
}
}
}
//------------------------------------------
struct ValueCollector<V> {
values: BTreeMap<u64, V>,
}
impl<V> ValueCollector<V> {
fn new() -> ValueCollector<V> {
ValueCollector {
values: BTreeMap::new(),
}
}
}
impl<V: Unpack + Clone> NodeVisitor<V> for ValueCollector<V> {
fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<V>) -> Result<()> {
if let Node::Leaf {
header: _h,
keys,
values,
} = node
{
for n in 0..keys.len() {
let k = keys[n];
let v = values[n].clone();
self.values.insert(k, v);
}
}
Ok(())
}
}
pub fn btree_to_map<V: Unpack + Clone>(
engine: Arc<dyn IoEngine + Send + Sync>,
ignore_non_fatal: bool,
root: u64,
) -> Result<BTreeMap<u64, V>> {
let mut walker = BTreeWalker::new(engine, ignore_non_fatal);
let mut visitor = ValueCollector::<V>::new();
walker.walk(&mut visitor, root)?;
Ok(visitor.values)
}
pub fn btree_to_map_with_sm<V: Unpack + Clone>(
engine: Arc<dyn IoEngine + Send + Sync>,
sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
ignore_non_fatal: bool,
root: u64,
) -> Result<BTreeMap<u64, V>> {
let mut walker = BTreeWalker::new_with_sm(engine, sm, ignore_non_fatal)?;
let mut visitor = ValueCollector::<V>::new();
walker.walk(&mut visitor, root)?;
Ok(visitor.values)
}
//------------------------------------------
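
btree_to_map is the convenience wrapper the new checker leans on: it walks a btree with a ValueCollector and returns the keys and values as a BTreeMap. A minimal sketch of collecting a top-level device tree, whose values are u64 roots of the per-device mapping trees (the file path and root block number are placeholders; in thin_check they come from the superblock):

use std::collections::BTreeMap;
use std::path::Path;
use std::sync::Arc;

use anyhow::Result;
use thinp::io_engine::{IoEngine, SyncIoEngine};
use thinp::pdata::btree::btree_to_map;

fn main() -> Result<()> {
    let engine: Arc<dyn IoEngine + Send + Sync> =
        Arc::new(SyncIoEngine::new(Path::new("metadata.bin"), 4)?);

    // Keys are device ids, values are the roots of the per-device mapping trees.
    let mapping_top_level_root: u64 = 123;
    let roots: BTreeMap<u64, u64> = btree_to_map(engine, false, mapping_top_level_root)?;
    println!("{} thin devices", roots.len());
    Ok(())
}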

src/pdata/mod.rs (new file, 4 lines)

@@ -0,0 +1,4 @@
pub mod btree;
pub mod space_map;
pub mod unpack;

src/pdata/space_map.rs (new file, 293 lines)

@@ -0,0 +1,293 @@
use anyhow::{anyhow, Result};
use fixedbitset::FixedBitSet;
use nom::{multi::count, number::complete::*, IResult};
use std::sync::{Arc, Mutex};
use crate::io_engine::*;
use crate::pdata::unpack::Unpack;
//------------------------------------------
#[derive(Debug)]
pub struct SMRoot {
pub nr_blocks: u64,
pub nr_allocated: u64,
pub bitmap_root: u64,
pub ref_count_root: u64,
}
pub fn unpack_root(data: &[u8]) -> Result<SMRoot> {
match SMRoot::unpack(data) {
Err(_e) => Err(anyhow!("couldn't parse SMRoot")),
Ok((_i, v)) => Ok(v),
}
}
impl Unpack for SMRoot {
fn disk_size() -> u32 {
32
}
fn unpack(data: &[u8]) -> IResult<&[u8], SMRoot> {
let (i, nr_blocks) = le_u64(data)?;
let (i, nr_allocated) = le_u64(i)?;
let (i, bitmap_root) = le_u64(i)?;
let (i, ref_count_root) = le_u64(i)?;
Ok((
i,
SMRoot {
nr_blocks,
nr_allocated,
bitmap_root,
ref_count_root,
},
))
}
}
//------------------------------------------
#[derive(Clone, Debug)]
pub struct IndexEntry {
pub blocknr: u64,
pub nr_free: u32,
pub none_free_before: u32,
}
impl Unpack for IndexEntry {
fn disk_size() -> u32 {
16
}
fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
let (i, blocknr) = le_u64(data)?;
let (i, nr_free) = le_u32(i)?;
let (i, none_free_before) = le_u32(i)?;
Ok((
i,
IndexEntry {
blocknr,
nr_free,
none_free_before,
},
))
}
}
//------------------------------------------
const MAX_METADATA_BITMAPS: usize = 255;
pub struct MetadataIndex {
pub indexes: Vec<IndexEntry>,
}
impl Unpack for MetadataIndex {
fn disk_size() -> u32 {
BLOCK_SIZE as u32
}
fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
let (i, _csum) = le_u32(data)?;
let (i, _padding) = le_u32(i)?;
let (i, _blocknr) = le_u64(i)?;
let (i, indexes) = count(IndexEntry::unpack, MAX_METADATA_BITMAPS)(i)?;
Ok((i, MetadataIndex {indexes}))
}
}
//------------------------------------------
#[derive(Debug)]
pub struct BitmapHeader {
pub csum: u32,
pub not_used: u32,
pub blocknr: u64,
}
impl Unpack for BitmapHeader {
fn disk_size() -> u32 {
16
}
fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
let (i, csum) = le_u32(data)?;
let (i, not_used) = le_u32(i)?;
let (i, blocknr) = le_u64(i)?;
Ok((
i,
BitmapHeader {
csum,
not_used,
blocknr,
},
))
}
}
#[derive(Clone, Debug, PartialEq)]
pub enum BitmapEntry {
Small(u8),
Overflow,
}
#[derive(Debug)]
pub struct Bitmap {
pub header: BitmapHeader,
pub entries: Vec<BitmapEntry>,
}
impl Unpack for Bitmap {
fn disk_size() -> u32 {
BLOCK_SIZE as u32
}
fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
let (mut i, header) = BitmapHeader::unpack(data)?;
let mut entries = Vec::new();
let nr_words = (BLOCK_SIZE - BitmapHeader::disk_size() as usize) / 8;
for _w in 0..nr_words {
let (tmp, mut word) = le_u64(i)?;
for _b in 0..32 {
let val = word & 0x3;
word >>= 2;
// The bits are stored with the high bit at b * 2 + 1,
// and low at b *2. So we have to interpret this val.
entries.push(match val {
0 => BitmapEntry::Small(0),
1 => BitmapEntry::Small(2),
2 => BitmapEntry::Small(1),
_ => BitmapEntry::Overflow,
});
}
i = tmp;
}
Ok((i, Bitmap { header, entries }))
}
}
//------------------------------------------
pub trait SpaceMap {
fn get_nr_blocks(&self) -> Result<u64>;
fn get_nr_allocated(&self) -> Result<u64>;
fn get(&self, b: u64) -> Result<u32>;
fn inc(&mut self, begin: u64, len: u64) -> Result<()>;
}
//------------------------------------------
pub struct CoreSpaceMap<T> {
nr_allocated: u64,
counts: Vec<T>,
}
impl<V> CoreSpaceMap<V>
where
V: Copy + Default + std::ops::AddAssign + From<u8>,
{
pub fn new(nr_entries: u64) -> CoreSpaceMap<V> {
CoreSpaceMap {
nr_allocated: 0,
counts: vec![V::default(); nr_entries as usize],
}
}
}
impl<V> SpaceMap for CoreSpaceMap<V>
where
V: Copy + Default + Eq + std::ops::AddAssign + From<u8> + Into<u32>,
{
fn get_nr_blocks(&self) -> Result<u64> {
Ok(self.counts.len() as u64)
}
fn get_nr_allocated(&self) -> Result<u64> {
Ok(self.nr_allocated)
}
fn get(&self, b: u64) -> Result<u32> {
Ok(self.counts[b as usize].into())
}
fn inc(&mut self, begin: u64, len: u64) -> Result<()> {
for b in begin..(begin + len) {
if self.counts[b as usize] == V::from(0u8) {
// FIXME: can we get a ref to save dereferencing counts twice?
self.nr_allocated += 1;
self.counts[b as usize] = V::from(1u8);
} else {
self.counts[b as usize] += V::from(1u8);
}
}
Ok(())
}
}
pub fn core_sm(nr_entries: u64, max_count: u32) -> Arc<Mutex<dyn SpaceMap + Send + Sync>> {
if max_count <= u8::MAX as u32 {
Arc::new(Mutex::new(CoreSpaceMap::<u8>::new(nr_entries)))
} else if max_count <= u16::MAX as u32 {
Arc::new(Mutex::new(CoreSpaceMap::<u16>::new(nr_entries)))
} else {
Arc::new(Mutex::new(CoreSpaceMap::<u32>::new(nr_entries)))
}
}
//------------------------------------------
// This in-core space map can only count to one.  It's useful when walking
// btrees where we want to avoid visiting a node more than once, but aren't
// interested in how many times we've visited it.
pub struct RestrictedSpaceMap {
nr_allocated: u64,
counts: FixedBitSet,
}
impl RestrictedSpaceMap {
pub fn new(nr_entries: u64) -> RestrictedSpaceMap {
RestrictedSpaceMap {
nr_allocated: 0,
counts: FixedBitSet::with_capacity(nr_entries as usize),
}
}
}
impl SpaceMap for RestrictedSpaceMap {
fn get_nr_blocks(&self) -> Result<u64> {
Ok(self.counts.len() as u64)
}
fn get_nr_allocated(&self) -> Result<u64> {
Ok(self.nr_allocated)
}
fn get(&self, b: u64) -> Result<u32> {
if self.counts.contains(b as usize) {
Ok(1)
} else {
Ok(0)
}
}
fn inc(&mut self, begin: u64, len: u64) -> Result<()> {
for b in begin..(begin + len) {
if !self.counts.contains(b as usize) {
self.nr_allocated += 1;
self.counts.insert(b as usize);
}
}
Ok(())
}
}
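// Not part of the original patch: a sketch showing that the restricted map
// saturates at one, so a block that is incremented twice still reads back as
// 1 and is only counted once in nr_allocated.
#[cfg(test)]
mod restricted_sm_example {
    use super::*;
    use anyhow::Result;

    #[test]
    fn saturates_at_one() -> Result<()> {
        let mut sm = RestrictedSpaceMap::new(8);
        sm.inc(3, 2)?;
        sm.inc(3, 2)?;
        assert_eq!(sm.get(3)?, 1);
        assert_eq!(sm.get_nr_allocated()?, 2);
        Ok(())
    }
}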
//------------------------------------------

src/pdata/unpack.rs Normal file
@ -0,0 +1,43 @@
use anyhow::{anyhow, Result};
use nom::{number::complete::*, IResult};
//------------------------------------------
pub trait Unpack {
// The size of the value when on disk.
fn disk_size() -> u32;
fn unpack(data: &[u8]) -> IResult<&[u8], Self>
where
Self: std::marker::Sized;
}
pub fn unpack<U: Unpack>(data: &[u8]) -> Result<U> {
match U::unpack(data) {
Err(_e) => Err(anyhow!("couldn't unpack {}", std::any::type_name::<U>())),
Ok((_i, v)) => Ok(v),
}
}
//------------------------------------------
impl Unpack for u64 {
fn disk_size() -> u32 {
8
}
fn unpack(i: &[u8]) -> IResult<&[u8], u64> {
le_u64(i)
}
}
impl Unpack for u32 {
fn disk_size() -> u32 {
4
}
fn unpack(i: &[u8]) -> IResult<&[u8], u32> {
le_u32(i)
}
}
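// Not part of the original patch: a sketch of how a caller would implement
// Unpack for its own on-disk type and then use the unpack() helper above.
// The Pair type here is purely illustrative.
#[cfg(test)]
mod unpack_example {
    use super::*;
    use anyhow::Result;

    struct Pair {
        a: u32,
        b: u64,
    }

    impl Unpack for Pair {
        fn disk_size() -> u32 {
            12
        }

        fn unpack(data: &[u8]) -> IResult<&[u8], Pair> {
            let (i, a) = le_u32(data)?;
            let (i, b) = le_u64(i)?;
            Ok((i, Pair { a, b }))
        }
    }

    #[test]
    fn unpacks_little_endian_fields() -> Result<()> {
        let mut bytes = 7u32.to_le_bytes().to_vec();
        bytes.extend_from_slice(&9u64.to_le_bytes());
        let p = unpack::<Pair>(&bytes)?;
        assert_eq!((p.a, p.b), (7, 9));
        Ok(())
    }
}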
//------------------------------------------

@ -133,6 +133,7 @@ impl<W: Write> xml::MetadataVisitor for Pass2<W> {
}
//---------------------------------------
type BlockRange = std::ops::Range<u64>;
fn bits_to_ranges(bits: &FixedBitSet) -> Vec<BlockRange> {

src/thin/check.rs Normal file
@ -0,0 +1,558 @@
use anyhow::{anyhow, Result};
use indicatif::{ProgressBar, ProgressStyle};
use nom::{number::complete::*, IResult};
use std::collections::BTreeMap;
use std::path::Path;
use std::sync::mpsc::{channel, Receiver, Sender, TryRecvError};
use std::sync::{Arc, Mutex};
use std::{thread, time};
use threadpool::ThreadPool;
use crate::checksum;
use crate::io_engine::{AsyncIoEngine, Block, IoEngine, SyncIoEngine};
use crate::pdata::btree::{btree_to_map, btree_to_map_with_sm, BTreeWalker, Node, NodeVisitor};
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::thin::superblock::*;
//------------------------------------------
struct TopLevelVisitor<'a> {
roots: &'a mut BTreeMap<u32, u64>,
}
impl<'a> NodeVisitor<u64> for TopLevelVisitor<'a> {
fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<u64>) -> Result<()> {
if let Node::Leaf {
header: _h,
keys,
values,
} = node
{
for n in 0..keys.len() {
let k = keys[n];
let root = values[n];
self.roots.insert(k as u32, root);
}
}
Ok(())
}
}
//------------------------------------------
#[allow(dead_code)]
struct BlockTime {
block: u64,
time: u32,
}
impl Unpack for BlockTime {
fn disk_size() -> u32 {
8
}
fn unpack(i: &[u8]) -> IResult<&[u8], BlockTime> {
let (i, n) = le_u64(i)?;
let block = n >> 24;
let time = n & ((1 << 24) - 1);
Ok((
i,
BlockTime {
block,
time: time as u32,
},
))
}
}
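// Not part of the original patch: BlockTime packs the data block number into
// the top 40 bits of a u64 and the time into the low 24, so for
// n = (5 << 24) | 3 we expect block 5 at time 3.
#[cfg(test)]
mod block_time_example {
    use super::*;

    #[test]
    fn splits_block_and_time() {
        let n: u64 = (5 << 24) | 3;
        let (_rest, bt) = BlockTime::unpack(&n.to_le_bytes()).unwrap();
        assert_eq!((bt.block, bt.time), (5, 3));
    }
}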
struct BottomLevelVisitor {
data_sm: Arc<Mutex<dyn SpaceMap + Send>>,
}
impl NodeVisitor<BlockTime> for BottomLevelVisitor {
fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<BlockTime>) -> Result<()> {
// FIXME: do other checks
if let Node::Leaf {
header: _h,
keys: _k,
values,
} = node
{
if values.is_empty() {
return Ok(());
}
let mut data_sm = self.data_sm.lock().unwrap();
let mut start = values[0].block;
let mut len = 1;
for n in 1..values.len() {
let block = values[n].block;
if block == start + len {
len += 1;
} else {
data_sm.inc(start, len)?;
start = block;
len = 1;
}
}
data_sm.inc(start, len)?;
}
Ok(())
}
}
//------------------------------------------
#[derive(Clone)]
struct DeviceDetail {
mapped_blocks: u64,
transaction_id: u64,
creation_time: u32,
snapshotted_time: u32,
}
impl Unpack for DeviceDetail {
fn disk_size() -> u32 {
24
}
fn unpack(i: &[u8]) -> IResult<&[u8], DeviceDetail> {
let (i, mapped_blocks) = le_u64(i)?;
let (i, transaction_id) = le_u64(i)?;
let (i, creation_time) = le_u32(i)?;
let (i, snapshotted_time) = le_u32(i)?;
Ok((
i,
DeviceDetail {
mapped_blocks,
transaction_id,
creation_time,
snapshotted_time,
},
))
}
}
//------------------------------------------
struct OverflowChecker<'a> {
data_sm: &'a dyn SpaceMap,
}
impl<'a> OverflowChecker<'a> {
fn new(data_sm: &'a dyn SpaceMap) -> OverflowChecker<'a> {
OverflowChecker { data_sm }
}
}
impl<'a> NodeVisitor<u32> for OverflowChecker<'a> {
fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<u32>) -> Result<()> {
if let Node::Leaf {
header: _h,
keys,
values,
} = node
{
for n in 0..keys.len() {
let k = keys[n];
let v = values[n];
let expected = self.data_sm.get(k)?;
if expected != v {
return Err(anyhow!("Bad reference count for data block {}. Expected {}, but space map contains {}.",
k, expected, v));
}
}
}
Ok(())
}
}
//------------------------------------------
struct ReportOptions {}
#[derive(Clone)]
enum ReportOutcome {
Success,
NonFatal,
Fatal,
}
use ReportOutcome::*;
impl ReportOutcome {
fn combine(lhs: &ReportOutcome, rhs: &ReportOutcome) -> ReportOutcome {
match (lhs, rhs) {
(Success, rhs) => rhs.clone(),
(lhs, Success) => lhs.clone(),
(Fatal, _) => Fatal,
(_, Fatal) => Fatal,
(_, _) => NonFatal,
}
}
}
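// Not part of the original patch: a small sketch illustrating that combine()
// keeps the worst outcome seen so far, i.e. Success < NonFatal < Fatal.
#[cfg(test)]
mod outcome_example {
    use super::*;

    #[test]
    fn keeps_the_worst_outcome() {
        assert!(matches!(
            ReportOutcome::combine(&ReportOutcome::Success, &ReportOutcome::NonFatal),
            ReportOutcome::NonFatal
        ));
        assert!(matches!(
            ReportOutcome::combine(&ReportOutcome::NonFatal, &ReportOutcome::Fatal),
            ReportOutcome::Fatal
        ));
    }
}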
enum ReportCmd {
Log(String),
Complete,
Title(String),
}
struct Report {
opts: ReportOptions,
outcome: ReportOutcome,
tx: Sender<ReportCmd>,
tid: thread::JoinHandle<()>,
}
impl Report {
fn new(
opts: ReportOptions,
sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
total_allocated: u64,
) -> Result<Report> {
let (tx, rx) = channel();
let tid = thread::spawn(move || report_thread(sm, total_allocated, rx));
Ok(Report {
opts,
outcome: ReportOutcome::Success,
tx,
tid,
})
}
fn info<I: Into<String>>(&mut self, txt: I) -> Result<()> {
self.tx.send(ReportCmd::Log(txt.into()))?;
Ok(())
}
fn add_outcome(&mut self, rhs: ReportOutcome) {
self.outcome = ReportOutcome::combine(&self.outcome, &rhs);
}
fn non_fatal<I: Into<String>>(&mut self, txt: I) -> Result<()> {
self.add_outcome(NonFatal);
self.tx.send(ReportCmd::Log(txt.into()))?;
Ok(())
}
fn fatal<I: Into<String>>(&mut self, txt: I) -> Result<()> {
self.add_outcome(Fatal);
self.tx.send(ReportCmd::Log(txt.into()))?;
Ok(())
}
fn complete(self) -> Result<()> {
self.tx.send(ReportCmd::Complete)?;
let _ = self.tid.join();
Ok(())
}
fn set_title(&mut self, txt: &str) -> Result<()> {
self.tx.send(ReportCmd::Title(txt.to_string()))?;
Ok(())
}
}
fn report_thread(
sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
total_allocated: u64,
rx: Receiver<ReportCmd>,
) {
let interval = time::Duration::from_millis(250);
let bar = ProgressBar::new(total_allocated);
loop {
loop {
match rx.try_recv() {
Ok(ReportCmd::Log(txt)) => {
bar.println(txt);
}
Ok(ReportCmd::Complete) => {
bar.finish();
return;
}
Ok(ReportCmd::Title(txt)) => {
let mut fmt = "Checking thin metadata [{bar:40}] Remaining {eta}, ".to_string();
fmt.push_str(&txt);
bar.set_style(
ProgressStyle::default_bar()
.template(&fmt)
.progress_chars("=> "),
);
}
Err(TryRecvError::Disconnected) => {
return;
}
Err(TryRecvError::Empty) => {
break;
}
}
}
let sm = sm.lock().unwrap();
let nr_allocated = sm.get_nr_allocated().unwrap();
drop(sm);
bar.set_position(nr_allocated);
bar.tick();
thread::sleep(interval);
}
}
//------------------------------------------
fn check_space_map(
kind: &str,
engine: Arc<dyn IoEngine + Send + Sync>,
bar: &mut Report,
entries: Vec<IndexEntry>,
metadata_sm: Option<Arc<Mutex<dyn SpaceMap + Send + Sync>>>,
sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
root: SMRoot,
) -> Result<()> {
let sm = sm.lock().unwrap();
// overflow btree
{
let mut v = OverflowChecker::new(&*sm);
let mut w;
if metadata_sm.is_none() {
w = BTreeWalker::new(engine.clone(), false);
} else {
w = BTreeWalker::new_with_sm(engine.clone(), metadata_sm.unwrap().clone(), false)?;
}
w.walk(&mut v, root.ref_count_root)?;
}
let mut blocks = Vec::new();
for i in &entries {
blocks.push(Block::new(i.blocknr));
}
// FIXME: we should do this in batches
engine.read_many(&mut blocks)?;
let mut leaks = 0;
let mut fail = false;
let mut blocknr = 0;
for n in 0..entries.len() {
let b = &blocks[n];
if checksum::metadata_block_type(&b.get_data()) != checksum::BT::BITMAP {
return Err(anyhow!(
"Index entry points to block ({}) that isn't a bitmap",
b.loc
));
}
let bitmap = unpack::<Bitmap>(b.get_data())?;
for e in bitmap.entries {
if blocknr >= root.nr_blocks {
break;
}
match e {
BitmapEntry::Small(actual) => {
let expected = sm.get(blocknr)?;
if actual == 1 && expected == 0 {
leaks += 1;
} else if actual != expected as u8 {
bar.fatal(format!("Bad reference count for {} block {}. Expected {}, but space map contains {}.",
kind, blocknr, expected, actual))?;
fail = true;
}
}
BitmapEntry::Overflow => {
let expected = sm.get(blocknr)?;
if expected < 3 {
bar.fatal(format!("Bad reference count for {} block {}. Expected {}, but space map says it's >= 3.",
kind, blocknr, expected))?;
fail = true;
}
}
}
blocknr += 1;
}
}
if leaks > 0 {
bar.non_fatal(format!(
"{} {} blocks have leaked. Use --auto-repair to fix.",
leaks, kind
))?;
}
if fail {
return Err(anyhow!("Inconsistent data space map"));
}
Ok(())
}
//------------------------------------------
fn inc_entries(sm: &Arc<Mutex<dyn SpaceMap + Sync + Send>>, entries: &[IndexEntry]) -> Result<()> {
let mut sm = sm.lock().unwrap();
for ie in entries {
sm.inc(ie.blocknr, 1)?;
}
Ok(())
}
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
pub struct ThinCheckOptions<'a> {
pub dev: &'a Path,
pub async_io: bool,
}
pub fn check(opts: &ThinCheckOptions) -> Result<()> {
let engine: Arc<dyn IoEngine + Send + Sync>;
let nr_threads;
if opts.async_io {
nr_threads = std::cmp::min(4, num_cpus::get());
engine = Arc::new(AsyncIoEngine::new(opts.dev, MAX_CONCURRENT_IO)?);
} else {
nr_threads = num_cpus::get() * 2;
engine = Arc::new(SyncIoEngine::new(opts.dev, nr_threads)?);
}
// superblock
let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;
let nr_allocated_metadata;
{
let root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
nr_allocated_metadata = root.nr_allocated;
}
// Device details.  We read this once to get the number of thin devices, and hence the
// maximum metadata ref count.  Then we create the metadata space map, and reread to
// increment the ref counts for that metadata.
let devs = btree_to_map::<DeviceDetail>(engine.clone(), false, sb.details_root)?;
let nr_devs = devs.len();
let metadata_sm = core_sm(engine.get_nr_blocks(), nr_devs as u32);
let opts = ReportOptions {};
let mut report = Report::new(opts, metadata_sm.clone(), nr_allocated_metadata)?;
report.set_title("device details tree")?;
let _devs = btree_to_map_with_sm::<DeviceDetail>(
engine.clone(),
metadata_sm.clone(),
false,
sb.details_root,
)?;
// increment superblock
{
let mut sm = metadata_sm.lock().unwrap();
sm.inc(SUPERBLOCK_LOCATION, 1)?;
}
// mapping top level
let roots = btree_to_map::<u64>(engine.clone(), false, sb.mapping_root)?;
// Check the mappings, filling in the data_sm as we go.
report.set_title("mapping tree")?;
let data_sm;
{
// FIXME: with a thread pool we need to return errors another way.
let nr_workers = nr_threads;
let pool = ThreadPool::new(nr_workers);
let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
data_sm = core_sm(root.nr_blocks, nr_devs as u32);
for (_thin_id, root) in roots {
let mut w = BTreeWalker::new_with_sm(engine.clone(), metadata_sm.clone(), false)?;
let data_sm = data_sm.clone();
pool.execute(move || {
let mut v = BottomLevelVisitor { data_sm };
// FIXME: return error
match w.walk(&mut v, root) {
Err(e) => {
eprintln!("walk failed {:?}", e);
std::process::abort();
}
Ok(_result) => {
//eprintln!("checked thin_dev {} -> {:?}", thin_id, result);
}
}
});
}
pool.join();
}
report.set_title("data space map")?;
let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
let entries = btree_to_map_with_sm::<IndexEntry>(
engine.clone(),
metadata_sm.clone(),
false,
root.bitmap_root,
)?;
let entries: Vec<IndexEntry> = entries.values().cloned().collect();
inc_entries(&metadata_sm, &entries[0..])?;
check_space_map(
"data",
engine.clone(),
&mut report,
entries,
Some(metadata_sm.clone()),
data_sm.clone(),
root,
)?;
report.set_title("metadata space map")?;
let root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
let mut b = Block::new(root.bitmap_root);
engine.read(&mut b)?;
let entries = unpack::<MetadataIndex>(b.get_data())?.indexes;
// Unused entries will point to block 0
let entries: Vec<IndexEntry> = entries
.iter()
.take_while(|e| e.blocknr != 0)
.cloned()
.collect();
inc_entries(&metadata_sm, &entries[0..])?;
let _counts = btree_to_map_with_sm::<u32>(
engine.clone(),
metadata_sm.clone(),
false,
root.ref_count_root,
)?;
// Now the counts should be correct, so we can check them.
check_space_map(
"metadata",
engine.clone(),
&mut report,
entries,
None,
metadata_sm.clone(),
root,
)?;
report.complete()?;
Ok(())
}
//------------------------------------------

@ -1 +1,3 @@
mod superblock;
pub mod check;
pub mod xml;

src/thin/superblock.rs Normal file
@ -0,0 +1,99 @@
use crate::io_engine::*;
use anyhow::{anyhow, Result};
use nom::{bytes::complete::*, number::complete::*, IResult};
pub const SUPERBLOCK_LOCATION: u64 = 0;
//const UUID_SIZE: usize = 16;
const SPACE_MAP_ROOT_SIZE: usize = 128;
#[derive(Debug)]
pub struct Superblock {
pub block: u64,
//uuid: [u8; UUID_SIZE],
pub version: u32,
pub time: u32,
pub transaction_id: u64,
pub metadata_snap: u64,
pub data_sm_root: Vec<u8>,
pub metadata_sm_root: Vec<u8>,
pub mapping_root: u64,
pub details_root: u64,
pub data_block_size: u32,
}
/*
pub enum CheckSeverity {
Fatal,
NonFatal,
}
pub trait CheckError {
fn severity(&self) -> CheckSeverity;
fn block(&self) -> u64;
fn sub_errors(&self) -> Vec<Box<dyn CheckError>>;
}
enum ErrorType {
BadChecksum,
BadBlockType(&'static str),
BadBlock(u64),
BadVersion(u32),
MetadataSnapOutOfBounds(u64),
MappingRootOutOfBounds(u64),
DetailsRootOutOfBounds(u64),
}
struct SuperblockError {
severity: CheckSeverity,
kind: ErrorType,
}
*/
fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
let (i, _csum) = le_u32(data)?;
let (i, _flags) = le_u32(i)?;
let (i, block) = le_u64(i)?;
let (i, _uuid) = take(16usize)(i)?;
let (i, _magic) = le_u64(i)?;
let (i, version) = le_u32(i)?;
let (i, time) = le_u32(i)?;
let (i, transaction_id) = le_u64(i)?;
let (i, metadata_snap) = le_u64(i)?;
let (i, data_sm_root) = take(SPACE_MAP_ROOT_SIZE)(i)?;
let (i, metadata_sm_root) = take(SPACE_MAP_ROOT_SIZE)(i)?;
let (i, mapping_root) = le_u64(i)?;
let (i, details_root) = le_u64(i)?;
let (i, data_block_size) = le_u32(i)?;
let (i, _metadata_block_size) = le_u32(i)?;
let (i, _metadata_nr_blocks) = le_u64(i)?;
Ok((
i,
Superblock {
block,
//uuid: uuid[0..UUID_SIZE],
version,
time,
transaction_id,
metadata_snap,
data_sm_root: data_sm_root.to_vec(),
metadata_sm_root: metadata_sm_root.to_vec(),
mapping_root,
details_root,
data_block_size,
},
))
}
pub fn read_superblock(engine: &dyn IoEngine, loc: u64) -> Result<Superblock> {
let mut b = Block::new(loc);
engine.read(&mut b)?;
if let Ok((_, sb)) = unpack(&b.get_data()) {
Ok(sb)
} else {
Err(anyhow!("couldn't unpack superblock"))
}
}
//------------------------------

@ -359,13 +359,7 @@ where
reader.trim_text(true);
let mut buf = Vec::new();
loop {
match handle_event(&mut reader, &mut buf, visitor)? {
Visit::Continue => {}
Visit::Stop => break,
}
}
while let Visit::Continue = handle_event(&mut reader, &mut buf, visitor)? {}
Ok(())
}
@ -380,7 +374,7 @@ impl MetadataVisitor for SBVisitor {
self.superblock = Some(sb.clone());
Ok(Visit::Stop)
}
fn superblock_e(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
@ -405,7 +399,7 @@ pub fn read_superblock<R>(input: R) -> Result<Superblock>
where
R: Read,
{
let mut v = SBVisitor {superblock: None};
let mut v = SBVisitor { superblock: None };
read(input, &mut v)?;
Ok(v.superblock.unwrap())
}

tests/cache_check.rs Normal file
@ -0,0 +1,127 @@
use anyhow::Result;
use thinp::version::TOOLS_VERSION;
use duct::cmd;
mod common;
use common::*;
use common::test_dir::*;
//------------------------------------------
#[test]
fn accepts_v() -> Result<()> {
let stdout = cache_check!("-V").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
#[test]
fn accepts_version() -> Result<()> {
let stdout = cache_check!("--version").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
const USAGE: &str = "Usage: cache_check [options] {device|file}\nOptions:\n {-q|--quiet}\n {-h|--help}\n {-V|--version}\n {--clear-needs-check-flag}\n {--super-block-only}\n {--skip-mappings}\n {--skip-hints}\n {--skip-discards}";
#[test]
fn accepts_h() -> Result<()> {
let stdout = cache_check!("-h").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn accepts_help() -> Result<()> {
let stdout = cache_check!("--help").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn missing_metadata() -> Result<()> {
let stderr = run_fail(cache_check!())?;
assert!(stderr.contains("No input file provided"));
Ok(())
}
#[test]
fn no_such_metadata() -> Result<()> {
let stderr = run_fail(cache_check!("/arbitrary/filename"))?;
assert!(stderr.contains("No such file or directory"));
Ok(())
}
#[test]
fn metadata_cannot_be_a_directory() -> Result<()> {
let stderr = run_fail(cache_check!("/tmp"))?;
assert!(stderr.contains("Not a block device or regular file"));
Ok(())
}
#[test]
fn unreadable_metadata() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
cmd!("chmod", "-r", &md).run()?;
let stderr = run_fail(cache_check!(&md))?;
assert!(stderr.contains("syscall 'open' failed: Permission denied"));
Ok(())
}
#[test]
fn corrupt_metadata() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
run_fail(cache_check!(&md))?;
Ok(())
}
#[test]
fn failing_q() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let output = cache_check!("-q", &md).unchecked().run()?;
assert!(!output.status.success());
assert_eq!(output.stdout.len(), 0);
assert_eq!(output.stderr.len(), 0);
Ok(())
}
#[test]
fn failing_quiet() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let output = cache_check!("--quiet", &md).unchecked().run()?;
assert!(!output.status.success());
assert_eq!(output.stdout.len(), 0);
assert_eq!(output.stderr.len(), 0);
Ok(())
}
// (define-scenario (cache-check valid-metadata-passes)
// "A valid metadata area passes"
// (with-valid-metadata (md)
// (run-ok (cache-check md))))
//
// (define-scenario (cache-check bad-metadata-version)
// "Invalid metadata version fails"
// (with-cache-xml (xml)
// (with-empty-metadata (md)
// (cache-restore "-i" xml "-o" md "--debug-override-metadata-version" "12345")
// (run-fail (cache-check md)))))
//
// (define-scenario (cache-check tiny-metadata)
// "Prints helpful message in case tiny metadata given"
// (with-temp-file-sized ((md "cache.bin" 1024))
// (run-fail-rcv (_ stderr) (cache-check md)
// (assert-starts-with "Metadata device/file too small. Is this binary metadata?" stderr))))
//
// (define-scenario (cache-check spot-accidental-xml-data)
// "Prints helpful message if XML metadata given"
// (with-cache-xml (xml)
// (system (fmt #f "man bash >> " xml))
// (run-fail-rcv (_ stderr) (cache-check xml)
// (assert-matches ".*This looks like XML. cache_check only checks the binary metadata format." stderr))))
//

@ -0,0 +1,94 @@
use anyhow::Result;
use rand::prelude::*;
use std::collections::HashSet;
use std::fs::OpenOptions;
use std::path::Path;
use thinp::cache::xml;
//------------------------------------------
pub trait XmlGen {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()>;
}
pub fn write_xml(path: &Path, g: &mut dyn XmlGen) -> Result<()> {
let xml_out = OpenOptions::new()
.read(false)
.write(true)
.create(true)
.truncate(true)
.open(path)?;
let mut w = xml::XmlWriter::new(xml_out);
g.generate_xml(&mut w)
}
pub struct CacheGen {
block_size: u64,
nr_cache_blocks: u64,
nr_origin_blocks: u64,
percent_resident: u8,
percent_dirty: u8,
}
impl CacheGen {
pub fn new(
block_size: u64,
nr_cache_blocks: u64,
nr_origin_blocks: u64,
percent_resident: u8,
percent_dirty: u8,
) -> Self {
CacheGen {
block_size,
nr_cache_blocks,
nr_origin_blocks,
percent_resident,
percent_dirty,
}
}
}
impl XmlGen for CacheGen {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
v.superblock_b(&xml::Superblock {
uuid: "".to_string(),
block_size: self.block_size,
nr_cache_blocks: self.nr_cache_blocks,
policy: "smq".to_string(),
hint_width: 4,
})?;
let mut cblocks = Vec::new();
for n in 0..self.nr_cache_blocks {
cblocks.push(n);
}
cblocks.shuffle(&mut rand::thread_rng());
v.mappings_b()?;
{
let nr_resident = (self.nr_cache_blocks * (self.percent_resident as u64)) / 100;
let mut used = HashSet::new();
for n in 0..nr_resident {
let mut oblock = 0u64;
while used.contains(&oblock) {
oblock = rand::thread_rng().gen_range(0, self.nr_origin_blocks);
}
used.insert(oblock);
// FIXME: dirty should vary
v.mapping(&xml::Map {
cblock: cblocks[n as usize],
oblock,
dirty: false,
})?;
}
}
v.mappings_e()?;
v.superblock_e()?;
Ok(())
}
}
//------------------------------------------

tests/common/mod.rs Normal file
@ -0,0 +1,182 @@
#![allow(dead_code)]
use anyhow::Result;
use duct::Expression;
use std::fs::OpenOptions;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::str::from_utf8;
use thinp::file_utils;
pub mod thin_xml_generator;
pub mod cache_xml_generator;
pub mod test_dir;
use crate::common::thin_xml_generator::{write_xml, SingleThinS};
use test_dir::TestDir;
//------------------------------------------
// FIXME: write a macro to generate these commands (see the helper sketch after the macros below)
#[macro_export]
macro_rules! thin_check {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/thin_check", args).stdout_capture().stderr_capture()
}
};
}
#[macro_export]
macro_rules! thin_restore {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/thin_restore", args).stdout_capture().stderr_capture()
}
};
}
#[macro_export]
macro_rules! thin_dump {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/thin_dump", args).stdout_capture().stderr_capture()
}
};
}
#[macro_export]
macro_rules! thin_rmap {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/thin_rmap", args).stdout_capture().stderr_capture()
}
};
}
#[macro_export]
macro_rules! thin_repair {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/thin_repair", args).stdout_capture().stderr_capture()
}
};
}
#[macro_export]
macro_rules! thin_delta {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/thin_delta", args).stdout_capture().stderr_capture()
}
};
}
#[macro_export]
macro_rules! thin_metadata_pack {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/thin_metadata_pack", args).stdout_capture().stderr_capture()
}
};
}
#[macro_export]
macro_rules! thin_metadata_unpack {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/thin_metadata_unpack", args).stdout_capture().stderr_capture()
}
};
}
#[macro_export]
macro_rules! cache_check {
( $( $arg: expr ),* ) => {
{
use std::ffi::OsString;
let args: &[OsString] = &[$( Into::<OsString>::into($arg) ),*];
duct::cmd("bin/cache_check", args).stdout_capture().stderr_capture()
}
};
}
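// Not part of the original patch: one way to tackle the FIXME above is to
// funnel every tool macro through a shared helper, so each macro body shrinks
// to a one-liner.  This is only a sketch; the per-tool macro names stay as
// they are, and tool_cmd is a hypothetical helper, not an existing API.
pub fn tool_cmd<I, S>(bin: &str, args: I) -> duct::Expression
where
    I: IntoIterator<Item = S>,
    S: Into<std::ffi::OsString>,
{
    // Collect the arguments into owned OsStrings, then capture both streams,
    // matching what the macros above do by hand.
    let args: Vec<std::ffi::OsString> = args.into_iter().map(Into::into).collect();
    duct::cmd(bin, args).stdout_capture().stderr_capture()
}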
//------------------------------------------
// Returns stderr; asserts that the command exited with a non-zero status.
pub fn run_fail(command: Expression) -> Result<String> {
let output = command.stderr_capture().unchecked().run()?;
assert!(!output.status.success());
Ok(from_utf8(&output.stderr[0..]).unwrap().to_string())
}
pub fn mk_valid_xml(td: &mut TestDir) -> Result<PathBuf> {
let xml = td.mk_path("meta.xml");
let mut gen = SingleThinS::new(0, 1024, 2048, 2048);
write_xml(&xml, &mut gen)?;
Ok(xml)
}
pub fn mk_valid_md(td: &mut TestDir) -> Result<PathBuf> {
let xml = td.mk_path("meta.xml");
let md = td.mk_path("meta.bin");
let mut gen = SingleThinS::new(0, 1024, 20480, 20480);
write_xml(&xml, &mut gen)?;
let _file = file_utils::create_sized_file(&md, 4096 * 4096);
thin_restore!("-i", xml, "-o", &md).run()?;
Ok(md)
}
pub fn mk_zeroed_md(td: &mut TestDir) -> Result<PathBuf> {
let md = td.mk_path("meta.bin");
eprintln!("path = {:?}", md);
let _file = file_utils::create_sized_file(&md, 4096 * 4096);
Ok(md)
}
pub fn accepts_flag(flag: &str) -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
thin_check!(flag, &md).run()?;
Ok(())
}
pub fn superblock_all_zeroes(path: &PathBuf) -> Result<bool> {
let mut input = OpenOptions::new().read(true).write(false).open(path)?;
let mut buf = vec![0; 4096];
input.read_exact(&mut buf[0..])?;
for b in buf {
if b != 0 {
return Ok(false);
}
}
Ok(true)
}
pub fn damage_superblock(path: &PathBuf) -> Result<()> {
let mut output = OpenOptions::new().read(false).write(true).open(path)?;
let buf = [0u8; 512];
output.write_all(&buf)?;
Ok(())
}
//------------------------------------------

tests/common/test_dir.rs Normal file
@ -0,0 +1,27 @@
use anyhow::Result;
use std::path::PathBuf;
use tempfile::{tempdir, TempDir};
//---------------------------------------
pub struct TestDir {
dir: TempDir,
file_count: usize,
}
impl TestDir {
pub fn new() -> Result<TestDir> {
let dir = tempdir()?;
Ok(TestDir { dir, file_count: 0 })
}
pub fn mk_path(&mut self, file: &str) -> PathBuf {
let mut p = PathBuf::new();
p.push(&self.dir);
p.push(PathBuf::from(format!("{:02}_{}", self.file_count, file)));
self.file_count += 1;
p
}
}
//---------------------------------------

@ -0,0 +1,537 @@
use anyhow::{anyhow, Result};
use rand::prelude::*;
use std::collections::VecDeque;
use std::fs::OpenOptions;
use std::ops::Range;
use std::path::Path;
use thinp::thin::xml;
//------------------------------------------
pub trait XmlGen {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()>;
}
pub fn write_xml(path: &Path, g: &mut dyn XmlGen) -> Result<()> {
let xml_out = OpenOptions::new()
.read(false)
.write(true)
.create(true)
.truncate(true)
.open(path)?;
let mut w = xml::XmlWriter::new(xml_out);
g.generate_xml(&mut w)
}
fn common_sb(nr_blocks: u64) -> xml::Superblock {
xml::Superblock {
uuid: "".to_string(),
time: 0,
transaction: 0,
flags: None,
version: None,
data_block_size: 32,
nr_data_blocks: nr_blocks,
metadata_snap: None,
}
}
//------------------------------------------
pub struct EmptyPoolS {}
impl XmlGen for EmptyPoolS {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
v.superblock_b(&common_sb(1024))?;
v.superblock_e()?;
Ok(())
}
}
//------------------------------------------
pub struct SingleThinS {
pub offset: u64,
pub len: u64,
pub old_nr_data_blocks: u64,
pub new_nr_data_blocks: u64,
}
impl SingleThinS {
pub fn new(offset: u64, len: u64, old_nr_data_blocks: u64, new_nr_data_blocks: u64) -> Self {
SingleThinS {
offset,
len,
old_nr_data_blocks,
new_nr_data_blocks,
}
}
}
impl XmlGen for SingleThinS {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
v.superblock_b(&common_sb(self.old_nr_data_blocks))?;
v.device_b(&xml::Device {
dev_id: 0,
mapped_blocks: self.len,
transaction: 0,
creation_time: 0,
snap_time: 0,
})?;
v.map(&xml::Map {
thin_begin: 0,
data_begin: self.offset,
time: 0,
len: self.len,
})?;
v.device_e()?;
v.superblock_e()?;
Ok(())
}
}
//------------------------------------------
pub struct FragmentedS {
pub nr_thins: u32,
pub thin_size: u64,
pub old_nr_data_blocks: u64,
pub new_nr_data_blocks: u64,
}
impl FragmentedS {
pub fn new(nr_thins: u32, thin_size: u64) -> Self {
let old_size = (nr_thins as u64) * thin_size;
FragmentedS {
nr_thins,
thin_size,
old_nr_data_blocks: (nr_thins as u64) * thin_size,
new_nr_data_blocks: old_size * 3 / 4,
}
}
}
#[derive(Clone)]
struct ThinRun {
thin_id: u32,
thin_begin: u64,
len: u64,
}
#[derive(Clone, Debug, Copy)]
struct MappedRun {
thin_id: u32,
thin_begin: u64,
data_begin: u64,
len: u64,
}
fn mk_runs(thin_id: u32, total_len: u64, run_len: std::ops::Range<u64>) -> Vec<ThinRun> {
let mut runs = Vec::new();
let mut b = 0u64;
while b < total_len {
let len = u64::min(
total_len - b,
thread_rng().gen_range(run_len.start, run_len.end),
);
runs.push(ThinRun {
thin_id,
thin_begin: b,
len,
});
b += len;
}
runs
}
impl XmlGen for FragmentedS {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
// Allocate each thin fully, in runs between 1 and 16.
let mut runs = Vec::new();
for thin in 0..self.nr_thins {
runs.append(&mut mk_runs(thin, self.thin_size, 1..17));
}
// Shuffle
runs.shuffle(&mut rand::thread_rng());
// map across the data
let mut maps = Vec::new();
let mut b = 0;
for r in &runs {
maps.push(MappedRun {
thin_id: r.thin_id,
thin_begin: r.thin_begin,
data_begin: b,
len: r.len,
});
b += r.len;
}
// drop half the mappings, which leaves us free runs
let mut dropped = Vec::new();
for (i, m) in maps.iter().enumerate() {
if i % 2 == 0 {
dropped.push(*m);
}
}
// Unshuffle.  This isn't strictly necessary, but makes the xml
// more readable.
use std::cmp::Ordering;
dropped.sort_by(|&l, &r| match l.thin_id.cmp(&r.thin_id) {
Ordering::Equal => l.thin_begin.cmp(&r.thin_begin),
o => o,
});
// write the xml
v.superblock_b(&common_sb(self.old_nr_data_blocks))?;
for thin in 0..self.nr_thins {
v.device_b(&xml::Device {
dev_id: thin,
mapped_blocks: self.thin_size,
transaction: 0,
creation_time: 0,
snap_time: 0,
})?;
for m in &dropped {
if m.thin_id != thin {
continue;
}
v.map(&xml::Map {
thin_begin: m.thin_begin,
data_begin: m.data_begin,
time: 0,
len: m.len,
})?;
}
v.device_e()?;
}
v.superblock_e()?;
Ok(())
}
}
//------------------------------------------
struct Allocator {
runs: VecDeque<Range<u64>>,
}
impl Allocator {
fn new_shuffled(total_len: u64, run_len: Range<u64>) -> Allocator {
let mut runs = Vec::new();
let mut b = 0u64;
while b < total_len {
let len = u64::min(
total_len - b,
thread_rng().gen_range(run_len.start, run_len.end),
);
runs.push(b..(b + len));
b += len;
}
runs.shuffle(&mut thread_rng());
let runs: VecDeque<Range<u64>> = runs.iter().cloned().collect();
Allocator { runs }
}
#[allow(dead_code)]
fn is_empty(&self) -> bool {
self.runs.is_empty()
}
fn alloc(&mut self, len: u64) -> Result<Vec<Range<u64>>> {
let mut len = len;
let mut runs = Vec::new();
while len > 0 {
let r = self.runs.pop_front();
if r.is_none() {
return Err(anyhow!("could not allocate; out of space"));
}
let r = r.unwrap();
let rlen = r.end - r.start;
if len < rlen {
runs.push(r.start..(r.start + len));
// We need to push something back.
self.runs.push_front((r.start + len)..r.end);
len = 0;
} else {
runs.push(r.start..r.end);
len -= rlen;
}
}
Ok(runs)
}
}
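// Not part of the original patch: a small sketch of the allocator above.
// alloc() hands back whole runs until the request is met, splitting the last
// run and pushing the unused tail back onto the front of the queue.
#[cfg(test)]
mod allocator_example {
    use super::*;
    use anyhow::Result;

    #[test]
    fn alloc_splits_runs() -> Result<()> {
        let mut a = Allocator {
            runs: vec![0..10, 10..20].into_iter().collect(),
        };
        assert_eq!(a.alloc(4)?, vec![0..4]);
        assert_eq!(a.alloc(10)?, vec![4..10, 10..14]);
        Ok(())
    }
}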
// Having explicitly unmapped regions makes it easier to
// apply snapshots.
#[derive(Clone)]
enum Run {
Mapped { data_begin: u64, len: u64 },
UnMapped { len: u64 },
}
impl Run {
#[allow(dead_code)]
fn len(&self) -> u64 {
match self {
Run::Mapped {
data_begin: _data_begin,
len,
} => *len,
Run::UnMapped { len } => *len,
}
}
fn split(&self, n: u64) -> (Option<Run>, Option<Run>) {
if n == 0 {
(None, Some(self.clone()))
} else if self.len() <= n {
(Some(self.clone()), None)
} else {
match self {
Run::Mapped { data_begin, len } => (
Some(Run::Mapped {
data_begin: *data_begin,
len: n,
}),
Some(Run::Mapped {
data_begin: data_begin + n,
len: len - n,
}),
),
Run::UnMapped { len } => (
Some(Run::UnMapped { len: n }),
Some(Run::UnMapped { len: len - n }),
),
}
}
}
}
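// Not part of the original patch: split() carves the first n blocks off a
// run, returning the head and whatever remains; split_runs() below uses this
// to divide a device's mappings at an arbitrary boundary.
#[cfg(test)]
mod run_split_example {
    use super::*;

    #[test]
    fn splits_a_mapped_run() {
        let r = Run::Mapped { data_begin: 100, len: 10 };
        let (head, tail) = r.split(4);
        assert!(matches!(head, Some(Run::Mapped { data_begin: 100, len: 4 })));
        assert!(matches!(tail, Some(Run::Mapped { data_begin: 104, len: 6 })));
    }
}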
#[derive(Clone)]
struct ThinDev {
thin_id: u32,
dev_size: u64,
runs: Vec<Run>,
}
impl ThinDev {
fn emit(&self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
v.device_b(&xml::Device {
dev_id: self.thin_id,
mapped_blocks: self.dev_size,
transaction: 0,
creation_time: 0,
snap_time: 0,
})?;
let mut b = 0;
for r in &self.runs {
match r {
Run::Mapped { data_begin, len } => {
v.map(&xml::Map {
thin_begin: b,
data_begin: *data_begin,
time: 0,
len: *len,
})?;
b += len;
}
Run::UnMapped { len } => {
b += len;
}
}
}
v.device_e()?;
Ok(())
}
}
#[derive(Clone)]
enum SnapRunType {
Same,
Diff,
Hole,
}
#[derive(Clone)]
struct SnapRun(SnapRunType, u64);
fn mk_origin(thin_id: u32, total_len: u64, allocator: &mut Allocator) -> Result<ThinDev> {
let mut runs = Vec::new();
let mut b = 0;
while b < total_len {
let len = u64::min(thread_rng().gen_range(16, 64), total_len - b);
match thread_rng().gen_range(0, 2) {
0 => {
for data in allocator.alloc(len)? {
assert!(data.end >= data.start);
runs.push(Run::Mapped {
data_begin: data.start,
len: data.end - data.start,
});
}
}
1 => {
runs.push(Run::UnMapped { len });
}
_ => {
return Err(anyhow!("bad value returned from rng"));
}
};
b += len;
}
Ok(ThinDev {
thin_id,
dev_size: total_len,
runs,
})
}
fn mk_snap_mapping(
total_len: u64,
run_len: Range<u64>,
same_percent: usize,
diff_percent: usize,
) -> Vec<SnapRun> {
let mut runs = Vec::new();
let mut b = 0u64;
while b < total_len {
let len = u64::min(
total_len - b,
thread_rng().gen_range(run_len.start, run_len.end),
);
let n = thread_rng().gen_range(0, 100);
if n < same_percent {
runs.push(SnapRun(SnapRunType::Same, len));
} else if n < diff_percent {
runs.push(SnapRun(SnapRunType::Diff, len));
} else {
runs.push(SnapRun(SnapRunType::Hole, len));
}
b += len;
}
runs
}
fn split_runs(mut n: u64, runs: &[Run]) -> (Vec<Run>, Vec<Run>) {
let mut before = Vec::new();
let mut after = Vec::new();
for r in runs {
match r.split(n) {
(Some(lhs), None) => {
before.push(lhs);
}
(Some(lhs), Some(rhs)) => {
before.push(lhs);
after.push(rhs);
}
(None, Some(rhs)) => {
after.push(rhs);
}
(None, None) => {}
}
// Saturate so this doesn't underflow once the 'before' blocks are consumed.
n = n.saturating_sub(r.len());
}
(before, after)
}
fn apply_snap_runs(
origin: &[Run],
snap: &[SnapRun],
allocator: &mut Allocator,
) -> Result<Vec<Run>> {
let mut origin = origin.to_owned();
let mut runs = Vec::new();
for SnapRun(st, slen) in snap {
let (os, rest) = split_runs(*slen, &origin);
match st {
SnapRunType::Same => {
for o in os {
runs.push(o);
}
}
SnapRunType::Diff => {
for data in allocator.alloc(*slen)? {
runs.push(Run::Mapped {
data_begin: data.start,
len: data.end - data.start,
});
}
}
SnapRunType::Hole => {
runs.push(Run::UnMapped { len: *slen });
}
}
origin = rest;
}
Ok(runs)
}
// Snapshots share mappings, though not necessarily the entire ranges.
pub struct SnapS {
pub len: u64,
pub nr_snaps: u32,
// Snaps will differ from the origin by this percentage
pub percent_change: usize,
pub old_nr_data_blocks: u64,
pub new_nr_data_blocks: u64,
}
impl SnapS {
pub fn new(len: u64, nr_snaps: u32, percent_change: usize) -> Self {
let delta = len * (nr_snaps as u64) * (percent_change as u64) / 100;
let old_nr_data_blocks = len + 3 * delta;
let new_nr_data_blocks = len + 2 * delta;
SnapS {
len,
nr_snaps,
percent_change,
old_nr_data_blocks,
new_nr_data_blocks,
}
}
}
impl XmlGen for SnapS {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
let mut allocator = Allocator::new_shuffled(self.old_nr_data_blocks, 64..512);
let origin = mk_origin(0, self.len, &mut allocator)?;
v.superblock_b(&common_sb(self.old_nr_data_blocks))?;
origin.emit(v)?;
v.superblock_e()?;
Ok(())
}
}
//------------------------------------------

tests/thin_check.rs Normal file
@ -0,0 +1,127 @@
use anyhow::Result;
use thinp::file_utils;
use thinp::version::TOOLS_VERSION;
mod common;
use common::*;
use common::test_dir::*;
use common::thin_xml_generator::{write_xml, FragmentedS};
//------------------------------------------
#[test]
fn accepts_v() -> Result<()> {
let stdout = thin_check!("-V").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
#[test]
fn accepts_version() -> Result<()> {
let stdout = thin_check!("--version").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
const USAGE: &str = "Usage: thin_check [options] {device|file}\nOptions:\n {-q|--quiet}\n {-h|--help}\n {-V|--version}\n {-m|--metadata-snap}\n {--override-mapping-root}\n {--clear-needs-check-flag}\n {--ignore-non-fatal-errors}\n {--skip-mappings}\n {--super-block-only}";
#[test]
fn accepts_h() -> Result<()> {
let stdout = thin_check!("-h").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn accepts_help() -> Result<()> {
let stdout = thin_check!("--help").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn rejects_bad_option() -> Result<()> {
let stderr = run_fail(thin_check!("--hedgehogs-only"))?;
assert!(stderr.contains("unrecognized option \'--hedgehogs-only\'"));
Ok(())
}
#[test]
fn accepts_superblock_only() -> Result<()> {
accepts_flag("--super-block-only")
}
#[test]
fn accepts_skip_mappings() -> Result<()> {
accepts_flag("--skip-mappings")
}
#[test]
fn accepts_ignore_non_fatal_errors() -> Result<()> {
accepts_flag("--ignore-non-fatal-errors")
}
#[test]
fn accepts_clear_needs_check_flag() -> Result<()> {
accepts_flag("--clear-needs-check-flag")
}
#[test]
fn accepts_quiet() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let output = thin_check!("--quiet", &md).run()?;
assert!(output.status.success());
assert_eq!(output.stdout.len(), 0);
assert_eq!(output.stderr.len(), 0);
Ok(())
}
#[test]
fn detects_corrupt_superblock_with_superblock_only() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let output = thin_check!("--super-block-only", &md).unchecked().run()?;
assert!(!output.status.success());
Ok(())
}
#[test]
fn prints_help_message_for_tiny_metadata() -> Result<()> {
let mut td = TestDir::new()?;
let md = td.mk_path("meta.bin");
let _file = file_utils::create_sized_file(&md, 1024);
let stderr = run_fail(thin_check!(&md))?;
assert!(stderr.contains("Metadata device/file too small. Is this binary metadata?"));
Ok(())
}
#[test]
fn spot_xml_data() -> Result<()> {
let mut td = TestDir::new()?;
let xml = td.mk_path("meta.xml");
let mut gen = FragmentedS::new(4, 10240);
write_xml(&xml, &mut gen)?;
let stderr = run_fail(thin_check!(&xml))?;
eprintln!("{}", stderr);
assert!(
stderr.contains("This looks like XML. thin_check only checks the binary metadata format.")
);
Ok(())
}
#[test]
fn prints_info_fields() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let stdout = thin_check!(&md).read()?;
assert!(stdout.contains("TRANSACTION_ID="));
assert!(stdout.contains("METADATA_FREE_BLOCKS="));
Ok(())
}
//------------------------------------------

tests/thin_delta.rs Normal file
@ -0,0 +1,71 @@
use anyhow::Result;
use thinp::version::TOOLS_VERSION;
mod common;
use common::*;
use common::test_dir::*;
//------------------------------------------
#[test]
fn accepts_v() -> Result<()> {
let stdout = thin_delta!("-V").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
#[test]
fn accepts_version() -> Result<()> {
let stdout = thin_delta!("--version").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
const USAGE: &str = "Usage: thin_delta [options] <device or file>\nOptions:\n {--thin1, --snap1}\n {--thin2, --snap2}\n {-m, --metadata-snap} [block#]\n {--verbose}\n {-h|--help}\n {-V|--version}";
#[test]
fn accepts_h() -> Result<()> {
let stdout = thin_delta!("-h").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn accepts_help() -> Result<()> {
let stdout = thin_delta!("--help").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn rejects_bad_option() -> Result<()> {
let stderr = run_fail(thin_delta!("--hedgehogs-only"))?;
assert!(stderr.contains("unrecognized option \'--hedgehogs-only\'"));
Ok(())
}
#[test]
fn snap1_unspecified() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let stderr = run_fail(thin_delta!("--snap2", "45", &md))?;
assert!(stderr.contains("--snap1 not specified"));
Ok(())
}
#[test]
fn snap2_unspecified() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let stderr = run_fail(thin_delta!("--snap1", "45", &md))?;
assert!(stderr.contains("--snap2 not specified"));
Ok(())
}
#[test]
fn dev_unspecified() -> Result<()> {
let stderr = run_fail(thin_delta!("--snap1", "45", "--snap2", "46"))?;
assert!(stderr.contains("No input device provided"));
Ok(())
}

tests/thin_dump.rs Normal file
@ -0,0 +1,121 @@
use anyhow::Result;
use thinp::file_utils;
use std::fs::OpenOptions;
use std::io::Write;
use std::str::from_utf8;
mod common;
use common::*;
use common::test_dir::*;
//------------------------------------------
#[test]
fn small_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = td.mk_path("meta.bin");
file_utils::create_sized_file(&md, 512)?;
let _stderr = run_fail(thin_dump!(&md))?;
Ok(())
}
#[test]
fn dump_restore_cycle() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let output = thin_dump!(&md).run()?;
let xml = td.mk_path("meta.xml");
let mut file = OpenOptions::new().read(false).write(true).create(true).open(&xml)?;
file.write_all(&output.stdout[0..])?;
drop(file);
let md2 = mk_zeroed_md(&mut td)?;
thin_restore!("-i", &xml, "-o", &md2).run()?;
let output2 = thin_dump!(&md2).run()?;
assert_eq!(output.stdout, output2.stdout);
Ok(())
}
#[test]
fn no_stderr() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let output = thin_dump!(&md).run()?;
assert_eq!(output.stderr.len(), 0);
Ok(())
}
fn override_something(flag: &str, value: &str, pattern: &str) -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let output = thin_dump!(&md, flag, value).run()?;
assert_eq!(output.stderr.len(), 0);
assert!(from_utf8(&output.stdout[0..])?.contains(pattern));
Ok(())
}
#[test]
fn override_transaction_id() -> Result<()> {
override_something("--transaction-id", "2345", "transaction=\"2345\"")
}
#[test]
fn override_data_block_size() -> Result<()> {
override_something("--data-block-size", "8192", "data_block_size=\"8192\"")
}
#[test]
fn override_nr_data_blocks() -> Result<()> {
override_something("--nr-data-blocks", "234500", "nr_data_blocks=\"234500\"")
}
#[test]
fn repair_superblock() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let before = thin_dump!("--transaction-id=5", "--data-block-size=128", "--nr-data-blocks=4096000", &md).run()?;
damage_superblock(&md)?;
let after = thin_dump!("--repair", "--transaction-id=5", "--data-block-size=128", "--nr-data-blocks=4096000", &md).run()?;
assert_eq!(after.stderr.len(), 0);
assert_eq!(before.stdout, after.stdout);
Ok(())
}
#[test]
fn missing_transaction_id() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
damage_superblock(&md)?;
let stderr = run_fail(thin_dump!("--repair", "--data-block-size=128", "--nr-data-blocks=4096000", &md))?;
assert!(stderr.contains("transaction id"));
Ok(())
}
#[test]
fn missing_data_block_size() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
damage_superblock(&md)?;
let stderr = run_fail(thin_dump!("--repair", "--transaction-id=5", "--nr-data-blocks=4096000", &md))?;
assert!(stderr.contains("data block size"));
Ok(())
}
#[test]
fn missing_nr_data_blocks() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
damage_superblock(&md)?;
let stderr = run_fail(thin_dump!("--repair", "--transaction-id=5", "--data-block-size=128", &md))?;
assert!(stderr.contains("nr data blocks"));
Ok(())
}

@ -0,0 +1,77 @@
use anyhow::Result;
use thinp::version::TOOLS_VERSION;
mod common;
use common::*;
use common::test_dir::*;
//------------------------------------------
#[test]
fn accepts_v() -> Result<()> {
let stdout = thin_metadata_pack!("-V").read()?;
assert!(stdout.contains(TOOLS_VERSION));
Ok(())
}
#[test]
fn accepts_version() -> Result<()> {
let stdout = thin_metadata_pack!("--version").read()?;
assert!(stdout.contains(TOOLS_VERSION));
Ok(())
}
const USAGE: &str = "thin_metadata_pack 0.9.0-rc2\nProduces a compressed file of thin metadata. Only packs metadata blocks that are actually used.\n\nUSAGE:\n thin_metadata_pack -i <DEV> -o <FILE>\n\nFLAGS:\n -h, --help Prints help information\n -V, --version Prints version information\n\nOPTIONS:\n -i <DEV> Specify thinp metadata binary device/file\n -o <FILE> Specify packed output file";
#[test]
fn accepts_h() -> Result<()> {
let stdout = thin_metadata_pack!("-h").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn accepts_help() -> Result<()> {
let stdout = thin_metadata_pack!("--help").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn rejects_bad_option() -> Result<()> {
let stderr = run_fail(thin_metadata_pack!("--hedgehogs-only"))?;
assert!(stderr.contains("Found argument \'--hedgehogs-only\'"));
Ok(())
}
#[test]
fn missing_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_metadata_pack!("-o", &md))?;
assert!(
stderr.contains("error: The following required arguments were not provided:\n -i <DEV>")
);
Ok(())
}
#[test]
fn no_such_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_metadata_pack!("-i", "no-such-file", "-o", &md))?;
assert!(stderr.contains("Couldn't find input file"));
Ok(())
}
#[test]
fn missing_output_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_metadata_pack!("-i", &md))?;
assert!(stderr
.contains("error: The following required arguments were not provided:\n -o <FILE>"));
Ok(())
}
//------------------------------------------

@ -0,0 +1,100 @@
use anyhow::Result;
use thinp::version::TOOLS_VERSION;
mod common;
use common::*;
use common::test_dir::*;
//------------------------------------------
#[test]
fn accepts_v() -> Result<()> {
let stdout = thin_metadata_unpack!("-V").read()?;
assert!(stdout.contains(TOOLS_VERSION));
Ok(())
}
#[test]
fn accepts_version() -> Result<()> {
let stdout = thin_metadata_unpack!("--version").read()?;
assert!(stdout.contains(TOOLS_VERSION));
Ok(())
}
const USAGE: &str = "thin_metadata_unpack 0.9.0-rc2\nUnpack a compressed file of thin metadata.\n\nUSAGE:\n thin_metadata_unpack -i <DEV> -o <FILE>\n\nFLAGS:\n -h, --help Prints help information\n -V, --version Prints version information\n\nOPTIONS:\n -i <DEV> Specify thinp metadata binary device/file\n -o <FILE> Specify packed output file";
#[test]
fn accepts_h() -> Result<()> {
let stdout = thin_metadata_unpack!("-h").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn accepts_help() -> Result<()> {
let stdout = thin_metadata_unpack!("--help").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn rejects_bad_option() -> Result<()> {
let stderr = run_fail(thin_metadata_unpack!("--hedgehogs-only"))?;
assert!(stderr.contains("Found argument \'--hedgehogs-only\'"));
Ok(())
}
#[test]
fn missing_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_metadata_unpack!("-o", &md))?;
assert!(
stderr.contains("error: The following required arguments were not provided:\n -i <DEV>")
);
Ok(())
}
#[test]
fn no_such_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_metadata_unpack!("-i", "no-such-file", "-o", &md))?;
assert!(stderr.contains("Couldn't find input file"));
Ok(())
}
#[test]
fn missing_output_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_metadata_unpack!("-i", &md))?;
assert!(stderr
.contains("error: The following required arguments were not provided:\n -o <FILE>"));
Ok(())
}
#[test]
fn garbage_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_metadata_unpack!("-i", &md, "-o", "junk"))?;
assert!(stderr.contains("Not a pack file."));
Ok(())
}
#[test]
fn end_to_end() -> Result<()> {
let mut td = TestDir::new()?;
let md_in = mk_valid_md(&mut td)?;
let md_out = mk_zeroed_md(&mut td)?;
thin_metadata_pack!("-i", &md_in, "-o", "meta.pack").run()?;
thin_metadata_unpack!("-i", "meta.pack", "-o", &md_out).run()?;
let dump1 = thin_dump!(&md_in).read()?;
let dump2 = thin_dump!(&md_out).read()?;
assert_eq!(dump1, dump2);
Ok(())
}
//------------------------------------------

tests/thin_repair.rs Normal file
@ -0,0 +1,159 @@
use anyhow::Result;
use std::str::from_utf8;
use thinp::version::TOOLS_VERSION;
mod common;
use common::*;
use common::test_dir::*;
//------------------------------------------
#[test]
fn accepts_v() -> Result<()> {
let stdout = thin_repair!("-V").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
#[test]
fn accepts_version() -> Result<()> {
let stdout = thin_repair!("--version").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
const USAGE: &str = "Usage: thin_repair [options] {device|file}\nOptions:\n {-h|--help}\n {-i|--input} <input metadata (binary format)>\n {-o|--output} <output metadata (binary format)>\n {--transaction-id} <natural>\n {--data-block-size} <natural>\n {--nr-data-blocks} <natural>\n {-V|--version}";
#[test]
fn accepts_h() -> Result<()> {
let stdout = thin_repair!("-h").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn accepts_help() -> Result<()> {
let stdout = thin_repair!("--help").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn dont_repair_xml() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let xml = mk_valid_xml(&mut td)?;
run_fail(thin_repair!("-i", &xml, "-o", &md))?;
Ok(())
}
#[test]
fn missing_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_repair!("-i", "no-such-file", "-o", &md))?;
assert!(superblock_all_zeroes(&md)?);
assert!(stderr.contains("Couldn't stat file"));
Ok(())
}
#[test]
fn garbage_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let md2 = mk_zeroed_md(&mut td)?;
run_fail(thin_repair!("-i", &md, "-o", &md2))?;
assert!(superblock_all_zeroes(&md2)?);
Ok(())
}
#[test]
fn missing_output_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
let stderr = run_fail(thin_repair!("-i", &md))?;
assert!(stderr.contains("No output file provided."));
Ok(())
}
fn override_thing(flag: &str, val: &str, pattern: &str) -> Result<()> {
let mut td = TestDir::new()?;
let md1 = mk_valid_md(&mut td)?;
let md2 = mk_zeroed_md(&mut td)?;
let output = thin_repair!(flag, val, "-i", &md1, "-o", &md2).run()?;
assert_eq!(output.stderr.len(), 0);
let output = thin_dump!(&md2).run()?;
assert!(from_utf8(&output.stdout[0..])?.contains(pattern));
Ok(())
}
#[test]
fn override_transaction_id() -> Result<()> {
override_thing("--transaction-id", "2345", "transaction=\"2345\"")
}
#[test]
fn override_data_block_size() -> Result<()> {
override_thing("--data-block-size", "8192", "data_block_size=\"8192\"")
}
#[test]
fn override_nr_data_blocks() -> Result<()> {
override_thing("--nr-data-blocks", "234500", "nr_data_blocks=\"234500\"")
}
#[test]
fn superblock_succeeds() -> Result<()> {
let mut td = TestDir::new()?;
let md1 = mk_valid_md(&mut td)?;
let original = thin_dump!(
"--transaction-id=5",
"--data-block-size=128",
"--nr-data-blocks=4096000",
&md1
)
.run()?;
assert_eq!(original.stderr.len(), 0);
damage_superblock(&md1)?;
let md2 = mk_zeroed_md(&mut td)?;
thin_repair!(
"--transaction-id=5",
"--data-block-size=128",
"--nr-data-blocks=4096000",
"-i",
&md1,
"-o",
&md2
)
.run()?;
let repaired = thin_dump!(&md2).run()?;
assert_eq!(repaired.stderr.len(), 0);
assert_eq!(original.stdout, repaired.stdout);
Ok(())
}
fn missing_thing(flag1: &str, flag2: &str, pattern: &str) -> Result<()> {
let mut td = TestDir::new()?;
let md1 = mk_valid_md(&mut td)?;
damage_superblock(&md1)?;
let md2 = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_repair!(flag1, flag2, "-i", &md1, "-o", &md2))?;
assert!(stderr.contains(pattern));
Ok(())
}
#[test]
fn missing_transaction_id() -> Result<()> {
missing_thing("--data-block-size=128", "--nr-data-blocks=4096000", "transaction id")
}
#[test]
fn missing_data_block_size() -> Result<()> {
missing_thing("--transaction-id=5", "--nr-data-blocks=4096000", "data block size")
}
#[test]
fn missing_nr_data_blocks() -> Result<()> {
missing_thing("--transaction-id=5", "--data-block-size=128", "nr data blocks")
}

tests/thin_restore.rs Normal file
@ -0,0 +1,139 @@
use anyhow::Result;
use std::str::from_utf8;
use thinp::file_utils;
use thinp::version::TOOLS_VERSION;
mod common;
use common::*;
use common::test_dir::*;
//------------------------------------------
#[test]
fn accepts_v() -> Result<()> {
let stdout = thin_restore!("-V").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
#[test]
fn accepts_version() -> Result<()> {
let stdout = thin_restore!("--version").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
const USAGE: &str = "Usage: thin_restore [options]\nOptions:\n {-h|--help}\n {-i|--input} <input xml file>\n {-o|--output} <output device or file>\n {--transaction-id} <natural>\n {--data-block-size} <natural>\n {--nr-data-blocks} <natural>\n {-q|--quiet}\n {-V|--version}";
#[test]
fn accepts_h() -> Result<()> {
let stdout = thin_restore!("-h").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn accepts_help() -> Result<()> {
let stdout = thin_restore!("--help").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn no_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_restore!("-o", &md))?;
assert!(stderr.contains("No input file provided."));
Ok(())
}
#[test]
fn missing_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let stderr = run_fail(thin_restore!("-i", "no-such-file", "-o", &md))?;
assert!(superblock_all_zeroes(&md)?);
assert!(stderr.contains("Couldn't stat file"));
Ok(())
}
#[test]
fn garbage_input_file() -> Result<()> {
let mut td = TestDir::new()?;
let xml = mk_zeroed_md(&mut td)?;
let md = mk_zeroed_md(&mut td)?;
let _stderr = run_fail(thin_restore!("-i", &xml, "-o", &md))?;
assert!(superblock_all_zeroes(&md)?);
Ok(())
}
#[test]
fn no_output_file() -> Result<()> {
let mut td = TestDir::new()?;
let xml = mk_valid_xml(&mut td)?;
let stderr = run_fail(thin_restore!("-i", &xml))?;
assert!(stderr.contains("No output file provided."));
Ok(())
}
#[test]
fn tiny_output_file() -> Result<()> {
let mut td = TestDir::new()?;
let xml = mk_valid_xml(&mut td)?;
let md = td.mk_path("meta.bin");
let _file = file_utils::create_sized_file(&md, 4096);
let stderr = run_fail(thin_restore!("-i", &xml, "-o", &md))?;
assert!(stderr.contains("Output file too small"));
Ok(())
}
fn quiet_flag(flag: &str) -> Result<()> {
let mut td = TestDir::new()?;
let xml = mk_valid_xml(&mut td)?;
let md = mk_zeroed_md(&mut td)?;
let output = thin_restore!("-i", &xml, "-o", &md, flag).run()?;
assert!(output.status.success());
assert_eq!(output.stdout.len(), 0);
assert_eq!(output.stderr.len(), 0);
Ok(())
}
#[test]
fn accepts_q() -> Result<()> {
quiet_flag("-q")
}
#[test]
fn accepts_quiet() -> Result<()> {
quiet_flag("--quiet")
}
fn override_something(flag: &str, value: &str, pattern: &str) -> Result<()> {
let mut td = TestDir::new()?;
let xml = mk_valid_xml(&mut td)?;
let md = mk_zeroed_md(&mut td)?;
thin_restore!("-i", &xml, "-o", &md, flag, value).run()?;
let output = thin_dump!(&md).run()?;
assert!(from_utf8(&output.stdout)?.contains(pattern));
Ok(())
}
#[test]
fn override_transaction_id() -> Result<()> {
override_something("--transaction-id", "2345", "transaction=\"2345\"")
}
#[test]
fn override_data_block_size() -> Result<()> {
override_something("--data-block-size", "8192", "data_block_size=\"8192\"")
}
#[test]
fn override_nr_data_blocks() -> Result<()> {
override_something("--nr-data-blocks", "234500", "nr_data_blocks=\"234500\"")
}

tests/thin_rmap.rs Normal file
@ -0,0 +1,82 @@
use anyhow::Result;
use thinp::version::TOOLS_VERSION;
mod common;
use common::*;
use common::test_dir::*;
//------------------------------------------
#[test]
fn accepts_v() -> Result<()> {
let stdout = thin_rmap!("-V").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
#[test]
fn accepts_version() -> Result<()> {
let stdout = thin_rmap!("--version").read()?;
assert_eq!(stdout, TOOLS_VERSION);
Ok(())
}
const USAGE: &str = "Usage: thin_rmap [options] {device|file}\nOptions:\n {-h|--help}\n {-V|--version}\n {--region <block range>}*\nWhere:\n <block range> is of the form <begin>..<one-past-the-end>\n for example 5..45 denotes blocks 5 to 44 inclusive, but not block 45";
#[test]
fn accepts_h() -> Result<()> {
let stdout = thin_rmap!("-h").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn accepts_help() -> Result<()> {
let stdout = thin_rmap!("--help").read()?;
assert_eq!(stdout, USAGE);
Ok(())
}
#[test]
fn rejects_bad_option() -> Result<()> {
let stderr = run_fail(thin_rmap!("--hedgehogs-only"))?;
assert!(stderr.contains("unrecognized option \'--hedgehogs-only\'"));
Ok(())
}
#[test]
fn valid_region_format_should_pass() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
thin_rmap!("--region", "23..7890", &md).run()?;
Ok(())
}
#[test]
fn invalid_regions_should_fail() -> Result<()> {
    let invalid_regions = [
        "23,7890", "23..six", "found..7890", "89..88", "89..89", "89..", "", "89...99",
    ];
for r in &invalid_regions {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
run_fail(thin_rmap!(r, &md))?;
}
Ok(())
}
#[test]
fn multiple_regions_should_pass() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
thin_rmap!("--region", "1..23", "--region", "45..78", &md).run()?;
Ok(())
}
#[test]
fn junk_input() -> Result<()> {
let mut td = TestDir::new()?;
let xml = mk_valid_xml(&mut td)?;
run_fail(thin_rmap!("--region", "0..-1", &xml))?;
Ok(())
}
//------------------------------------------

View File

@ -1,16 +1,19 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use rand::prelude::*;
use std::collections::VecDeque;
use std::fs::OpenOptions;
use std::io::{Cursor, Read, Seek, SeekFrom, Write};
use std::ops::Range;
use std::path::{Path, PathBuf};
use tempfile::tempdir;
use std::path::{Path};
use thinp::file_utils;
use thinp::thin::xml::{self, Visit};
mod common;
use common::test_dir::*;
use common::thin_xml_generator::{
write_xml, EmptyPoolS, FragmentedS, SingleThinS, SnapS, XmlGen
};
//------------------------------------
#[derive(Debug)]
@ -251,25 +254,6 @@ impl<'a, R: Read + Seek> ThinVisitor for Verifier<'a, R> {
//------------------------------------
fn mk_path(dir: &Path, file: &str) -> PathBuf {
let mut p = PathBuf::new();
p.push(dir);
p.push(PathBuf::from(file));
p
}
fn generate_xml(path: &Path, g: &mut dyn Scenario) -> Result<()> {
let xml_out = OpenOptions::new()
.read(false)
.write(true)
.create(true)
.truncate(true)
.open(path)?;
let mut w = xml::XmlWriter::new(xml_out);
g.generate_xml(&mut w)
}
fn create_data_file(data_path: &Path, xml_path: &Path) -> Result<()> {
let input = OpenOptions::new().read(true).write(false).open(xml_path)?;
@ -304,17 +288,19 @@ fn verify(xml_path: &Path, data_path: &Path, seed: u64) -> Result<()> {
}
trait Scenario {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()>;
fn get_new_nr_blocks(&self) -> u64;
}
fn test_shrink(scenario: &mut dyn Scenario) -> Result<()> {
let dir = tempdir()?;
let xml_before = mk_path(dir.path(), "before.xml");
let xml_after = mk_path(dir.path(), "after.xml");
let data_path = mk_path(dir.path(), "metadata.bin");
fn test_shrink<S>(scenario: &mut S) -> Result<()>
where
S: Scenario + XmlGen,
{
let mut td = TestDir::new()?;
let xml_before = td.mk_path("before.xml");
let xml_after = td.mk_path("after.xml");
let data_path = td.mk_path("metadata.bin");
generate_xml(&xml_before, scenario)?;
write_xml(&xml_before, scenario)?;
create_data_file(&data_path, &xml_before)?;
let mut rng = rand::thread_rng();
@ -332,28 +318,7 @@ fn test_shrink(scenario: &mut dyn Scenario) -> Result<()> {
//------------------------------------
fn common_sb(nr_blocks: u64) -> xml::Superblock {
xml::Superblock {
uuid: "".to_string(),
time: 0,
transaction: 0,
flags: None,
version: None,
data_block_size: 32,
nr_data_blocks: nr_blocks,
metadata_snap: None,
}
}
struct EmptyPoolS {}
impl Scenario for EmptyPoolS {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
v.superblock_b(&common_sb(1024))?;
v.superblock_e()?;
Ok(())
}
fn get_new_nr_blocks(&self) -> u64 {
512
}
@ -367,45 +332,7 @@ fn shrink_empty_pool() -> Result<()> {
//------------------------------------
struct SingleThinS {
offset: u64,
len: u64,
old_nr_data_blocks: u64,
new_nr_data_blocks: u64,
}
impl SingleThinS {
fn new(offset: u64, len: u64, old_nr_data_blocks: u64, new_nr_data_blocks: u64) -> Self {
SingleThinS {
offset,
len,
old_nr_data_blocks,
new_nr_data_blocks,
}
}
}
impl Scenario for SingleThinS {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
v.superblock_b(&common_sb(self.old_nr_data_blocks))?;
v.device_b(&xml::Device {
dev_id: 0,
mapped_blocks: self.len,
transaction: 0,
creation_time: 0,
snap_time: 0,
})?;
v.map(&xml::Map {
thin_begin: 0,
data_begin: self.offset,
time: 0,
len: self.len,
})?;
v.device_e()?;
v.superblock_e()?;
Ok(())
}
fn get_new_nr_blocks(&self) -> u64 {
self.new_nr_data_blocks
}
@ -452,128 +379,7 @@ fn shrink_insufficient_space() -> Result<()> {
//------------------------------------
struct FragmentedS {
nr_thins: u32,
thin_size: u64,
old_nr_data_blocks: u64,
new_nr_data_blocks: u64,
}
impl FragmentedS {
fn new(nr_thins: u32, thin_size: u64) -> Self {
let old_size = (nr_thins as u64) * thin_size;
FragmentedS {
nr_thins,
thin_size,
old_nr_data_blocks: (nr_thins as u64) * thin_size,
new_nr_data_blocks: old_size * 3 / 4,
}
}
}
#[derive(Clone)]
struct ThinRun {
thin_id: u32,
thin_begin: u64,
len: u64,
}
#[derive(Clone, Debug, Copy)]
struct MappedRun {
thin_id: u32,
thin_begin: u64,
data_begin: u64,
len: u64,
}
fn mk_runs(thin_id: u32, total_len: u64, run_len: std::ops::Range<u64>) -> Vec<ThinRun> {
let mut runs = Vec::new();
let mut b = 0u64;
while b < total_len {
let len = u64::min(
total_len - b,
thread_rng().gen_range(run_len.start, run_len.end),
);
runs.push(ThinRun {
            thin_id,
thin_begin: b,
len,
});
b += len;
}
runs
}
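// Illustrative check (hypothetical, not part of the original suite): the runs
// produced by mk_runs() tile [0, total_len) contiguously, so their lengths
// always sum back to total_len.
#[test]
fn mk_runs_tiles_the_device_sketch() {
    let total_len = 1000u64;
    let runs = mk_runs(0, total_len, 1..17);
    assert_eq!(runs[0].thin_begin, 0);
    assert_eq!(runs.iter().map(|r| r.len).sum::<u64>(), total_len);
}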
impl Scenario for FragmentedS {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
        // Allocate each thin fully, in runs of between 1 and 16 blocks.
let mut runs = Vec::new();
for thin in 0..self.nr_thins {
runs.append(&mut mk_runs(thin, self.thin_size, 1..17));
}
// Shuffle
runs.shuffle(&mut rand::thread_rng());
// map across the data
let mut maps = Vec::new();
let mut b = 0;
for r in &runs {
maps.push(MappedRun {
thin_id: r.thin_id,
thin_begin: r.thin_begin,
data_begin: b,
len: r.len,
});
b += r.len;
}
        // Drop every other mapping; the data extents behind the dropped half
        // are left as free runs.
let mut dropped = Vec::new();
for i in 0..maps.len() {
if i % 2 == 0 {
dropped.push(maps[i].clone());
}
}
        // Unshuffle the half we're emitting. This isn't strictly necessary,
        // but it makes the generated xml easier to read.
        use std::cmp::Ordering;
        dropped.sort_by(|&l, &r| match l.thin_id.cmp(&r.thin_id) {
            Ordering::Equal => l.thin_begin.cmp(&r.thin_begin),
            o => o,
        });
// write the xml
v.superblock_b(&common_sb(self.old_nr_data_blocks))?;
for thin in 0..self.nr_thins {
v.device_b(&xml::Device {
dev_id: thin,
mapped_blocks: self.thin_size,
transaction: 0,
creation_time: 0,
snap_time: 0,
})?;
for m in &dropped {
if m.thin_id != thin {
continue;
}
v.map(&xml::Map {
thin_begin: m.thin_begin,
data_begin: m.data_begin,
time: 0,
len: m.len,
})?;
}
v.device_e()?;
}
v.superblock_e()?;
Ok(())
}
fn get_new_nr_blocks(&self) -> u64 {
self.new_nr_data_blocks
}
@ -605,321 +411,7 @@ fn shrink_fragmented_thin_64() -> Result<()> {
//------------------------------------
struct Allocator {
runs: VecDeque<Range<u64>>,
}
impl Allocator {
fn new_shuffled(total_len: u64, run_len: Range<u64>) -> Allocator {
let mut runs = Vec::new();
let mut b = 0u64;
while b < total_len {
let len = u64::min(
total_len - b,
thread_rng().gen_range(run_len.start, run_len.end),
);
runs.push(b..(b + len));
b += len;
}
runs.shuffle(&mut thread_rng());
        let runs: VecDeque<Range<u64>> = runs.into_iter().collect();
Allocator { runs }
}
fn is_empty(&self) -> bool {
self.runs.is_empty()
}
fn alloc(&mut self, len: u64) -> Result<Vec<Range<u64>>> {
let mut len = len;
let mut runs = Vec::new();
while len > 0 {
let r = self.runs.pop_front();
if r.is_none() {
return Err(anyhow!("could not allocate; out of space"));
}
let mut r = r.unwrap();
let rlen = r.end - r.start;
if len < rlen {
runs.push(r.start..(r.start + len));
                // Put the unused tail of this run back at the front of the free list.
self.runs.push_front((r.start + len)..r.end);
len = 0;
} else {
runs.push(r.start..r.end);
len -= rlen;
}
}
Ok(runs)
}
}
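// Illustrative usage (hypothetical, not part of the original suite): alloc()
// hands back ranges whose lengths sum to the requested amount, possibly split
// across several of the shuffled free runs.
#[test]
fn allocator_alloc_sketch() -> Result<()> {
    let mut allocator = Allocator::new_shuffled(1024, 16..64);
    let runs = allocator.alloc(100)?;
    let allocated: u64 = runs.iter().map(|r| r.end - r.start).sum();
    assert_eq!(allocated, 100);
    assert!(!allocator.is_empty());
    Ok(())
}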
// Having explicitly unmapped regions makes it easier to
// apply snapshots.
#[derive(Clone)]
enum Run {
Mapped { data_begin: u64, len: u64 },
UnMapped { len: u64 },
}
impl Run {
fn len(&self) -> u64 {
match self {
Run::Mapped {
data_begin: _data_begin,
len,
} => *len,
Run::UnMapped { len } => *len,
}
}
    fn split(&self, n: u64) -> (Option<Run>, Option<Run>) {
        if n == 0 {
            (None, Some(self.clone()))
        } else if self.len() <= n {
            (Some(self.clone()), None)
        } else {
            match self {
                Run::Mapped { data_begin, len } => (
                    Some(Run::Mapped {
                        data_begin: *data_begin,
                        len: n,
                    }),
                    Some(Run::Mapped {
                        data_begin: data_begin + n,
                        len: len - n,
                    }),
                ),
                Run::UnMapped { len } => (
                    Some(Run::UnMapped { len: n }),
                    Some(Run::UnMapped { len: len - n }),
                ),
            }
        }
    }
}
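// Illustrative check (hypothetical): splitting a mapped run at n yields a
// prefix of length n and a suffix whose data_begin is advanced by n.
#[test]
fn run_split_sketch() {
    let r = Run::Mapped { data_begin: 100, len: 8 };
    match r.split(3) {
        (
            Some(Run::Mapped { data_begin: 100, len: 3 }),
            Some(Run::Mapped { data_begin: 103, len: 5 }),
        ) => {}
        _ => panic!("unexpected split result"),
    }
}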
#[derive(Clone)]
struct ThinDev {
thin_id: u32,
dev_size: u64,
runs: Vec<Run>,
}
impl ThinDev {
fn emit(&self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
v.device_b(&xml::Device {
dev_id: self.thin_id,
mapped_blocks: self.dev_size,
transaction: 0,
creation_time: 0,
snap_time: 0,
})?;
let mut b = 0;
for r in &self.runs {
match r {
Run::Mapped { data_begin, len } => {
v.map(&xml::Map {
thin_begin: b,
data_begin: *data_begin,
time: 0,
len: *len,
})?;
b += len;
}
Run::UnMapped { len } => {
b += len;
}
}
}
v.device_e()?;
Ok(())
}
}
#[derive(Clone)]
enum SnapRunType {
Same,
Diff,
Hole,
}
#[derive(Clone)]
struct SnapRun(SnapRunType, u64);
fn mk_origin(thin_id: u32, total_len: u64, allocator: &mut Allocator) -> Result<ThinDev> {
let mut runs = Vec::new();
let mut b = 0;
while b < total_len {
let len = u64::min(thread_rng().gen_range(16, 64), total_len - b);
match thread_rng().gen_range(0, 2) {
0 => {
for data in allocator.alloc(len)? {
assert!(data.end >= data.start);
runs.push(Run::Mapped {
data_begin: data.start,
len: data.end - data.start,
});
}
}
1 => {
runs.push(Run::UnMapped { len });
}
_ => {
return Err(anyhow!("bad value returned from rng"));
}
};
b += len;
}
Ok(ThinDev {
thin_id,
dev_size: total_len,
runs,
})
}
fn mk_snap_mapping(
total_len: u64,
run_len: Range<u64>,
same_percent: usize,
diff_percent: usize,
) -> Vec<SnapRun> {
let mut runs = Vec::new();
let mut b = 0u64;
while b < total_len {
let len = u64::min(
total_len - b,
thread_rng().gen_range(run_len.start, run_len.end),
);
let n = thread_rng().gen_range(0, 100);
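        // Note (assuming diff_percent >= same_percent): the two percentages act
        // as cumulative thresholds on n, so the effective chance of a Diff run
        // is (diff_percent - same_percent) percent.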
if n < same_percent {
runs.push(SnapRun(SnapRunType::Same, len));
} else if n < diff_percent {
runs.push(SnapRun(SnapRunType::Diff, len));
} else {
runs.push(SnapRun(SnapRunType::Hole, len));
}
b += len;
}
runs
}
fn split_runs(mut n: u64, runs: &Vec<Run>) -> (Vec<Run>, Vec<Run>) {
let mut before = Vec::new();
let mut after = Vec::new();
for r in runs {
match r.split(n) {
(Some(lhs), None) => {
before.push(lhs);
}
(Some(lhs), Some(rhs)) => {
before.push(lhs);
after.push(rhs);
}
(None, Some(rhs)) => {
after.push(rhs);
}
(None, None) => {}
}
        // Guard against u64 underflow once the split point has been passed.
        n = n.saturating_sub(r.len());
}
(before, after)
}
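// Illustrative check (hypothetical, assumes the saturating subtraction above so
// n never wraps): cutting a run list at n leaves blocks [0, n) in the first
// half and everything else in the second.
#[test]
fn split_runs_sketch() {
    let runs = vec![
        Run::Mapped { data_begin: 0, len: 4 },
        Run::UnMapped { len: 6 },
    ];
    let (before, after) = split_runs(4, &runs);
    assert_eq!(before.iter().map(|r| r.len()).sum::<u64>(), 4);
    assert_eq!(after.iter().map(|r| r.len()).sum::<u64>(), 6);
}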
fn apply_snap_runs(
origin: &Vec<Run>,
snap: &Vec<SnapRun>,
allocator: &mut Allocator,
) -> Result<Vec<Run>> {
let mut origin = origin.clone();
let mut runs = Vec::new();
for SnapRun(st, slen) in snap {
let (os, rest) = split_runs(*slen, &origin);
match st {
SnapRunType::Same => {
for o in os {
runs.push(o);
}
}
SnapRunType::Diff => {
for data in allocator.alloc(*slen)? {
runs.push(Run::Mapped {
data_begin: data.start,
len: data.end - data.start,
});
}
}
SnapRunType::Hole => {
runs.push(Run::UnMapped { len: *slen });
}
}
origin = rest;
}
Ok(runs)
}
// Snapshots share mappings, though not necessarily over the entire ranges.
struct SnapS {
len: u64,
nr_snaps: u32,
// Snaps will differ from the origin by this percentage
percent_change: usize,
old_nr_data_blocks: u64,
new_nr_data_blocks: u64,
}
impl SnapS {
fn new(len: u64, nr_snaps: u32, percent_change: usize) -> Self {
let delta = len * (nr_snaps as u64) * (percent_change as u64) / 100;
let old_nr_data_blocks = len + 3 * delta;
let new_nr_data_blocks = len + 2 * delta;
SnapS {
len,
nr_snaps,
percent_change,
old_nr_data_blocks,
new_nr_data_blocks,
}
}
}
impl Scenario for SnapS {
fn generate_xml(&mut self, v: &mut dyn xml::MetadataVisitor) -> Result<()> {
let mut allocator = Allocator::new_shuffled(self.old_nr_data_blocks, 64..512);
let origin = mk_origin(0, self.len, &mut allocator)?;
v.superblock_b(&common_sb(self.old_nr_data_blocks))?;
origin.emit(v)?;
v.superblock_e()?;
Ok(())
}
fn get_new_nr_blocks(&self) -> u64 {
self.new_nr_data_blocks
}