Merge pull request #191 from mingnus/2021-10-20-era-tools-rebase

era-tools in Rust
This commit is contained in:
Joe Thornber 2021-11-01 09:51:59 +00:00 committed by GitHub
commit cab57534c6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
80 changed files with 3609 additions and 407 deletions

View File

@ -37,7 +37,7 @@ namespace {
bool metadata_touched = false;
try {
block_manager::ptr bm = open_bm(*fs.output, block_manager::READ_WRITE);
file_utils::check_file_exists(*fs.output, false);
file_utils::check_file_exists(*fs.input);
metadata_touched = true;
metadata::ptr md(new metadata(bm, metadata::CREATE));
emitter::ptr restorer = create_restore_emitter(*md);

View File

@ -58,76 +58,4 @@
(with-temp-file-sized ((md "cache.bin" 512))
(run-fail
(cache-dump md))))
;;;-----------------------------------------------------------
;;; cache_metadata_size scenarios
;;;-----------------------------------------------------------
(define-scenario (cache-metadata-size v)
"cache_metadata_size -V"
(run-ok-rcv (stdout _) (cache-metadata-size "-V")
(assert-equal tools-version stdout)))
(define-scenario (cache-metadata-size version)
"cache_metadata_size --version"
(run-ok-rcv (stdout _) (cache-metadata-size "--version")
(assert-equal tools-version stdout)))
(define-scenario (cache-metadata-size h)
"cache_metadata_size -h"
(run-ok-rcv (stdout _) (cache-metadata-size "-h")
(assert-equal cache-metadata-size-help stdout)))
(define-scenario (cache-metadata-size help)
"cache_metadata_size --help"
(run-ok-rcv (stdout _) (cache-metadata-size "--help")
(assert-equal cache-metadata-size-help stdout)))
(define-scenario (cache-metadata-size no-args)
"No arguments specified causes fail"
(run-fail-rcv (_ stderr) (cache-metadata-size)
(assert-equal "Please specify either --device-size and --block-size, or --nr-blocks."
stderr)))
(define-scenario (cache-metadata-size device-size-only)
"Just --device-size causes fail"
(run-fail-rcv (_ stderr) (cache-metadata-size "--device-size" (to-bytes (meg 100)))
(assert-equal "If you specify --device-size you must also give --block-size."
stderr)))
(define-scenario (cache-metadata-size block-size-only)
"Just --block-size causes fail"
(run-fail-rcv (_ stderr) (cache-metadata-size "--block-size" 128)
(assert-equal "If you specify --block-size you must also give --device-size."
stderr)))
;; Scenario id had a typo: "conradictory" -> "contradictory".
(define-scenario (cache-metadata-size contradictory-info-fails)
  "Contradictory info causes fail"
  (run-fail-rcv (_ stderr) (cache-metadata-size "--device-size 102400 --block-size 1000 --nr-blocks 6")
    (assert-equal "Contradictory arguments given, --nr-blocks doesn't match the --device-size and --block-size."
                  stderr)))
(define-scenario (cache-metadata-size all-args-agree)
"All args agreeing succeeds"
(run-ok-rcv (stdout stderr) (cache-metadata-size "--device-size" 102400 "--block-size" 100 "--nr-blocks" 1024)
(assert-equal "8248 sectors" stdout)
(assert-eof stderr)))
(define-scenario (cache-metadata-size nr-blocks-alone)
"Just --nr-blocks succeeds"
(run-ok-rcv (stdout stderr) (cache-metadata-size "--nr-blocks" 1024)
(assert-equal "8248 sectors" stdout)
(assert-eof stderr)))
(define-scenario (cache-metadata-size dev-size-and-block-size-succeeds)
"Specifying --device-size with --block-size succeeds"
(run-ok-rcv (stdout stderr) (cache-metadata-size "--device-size" 102400 "--block-size" 100)
(assert-equal "8248 sectors" stdout)
(assert-eof stderr)))
(define-scenario (cache-metadata-size big-config)
"A big configuration succeeds"
(run-ok-rcv (stdout stderr) (cache-metadata-size "--nr-blocks 67108864")
(assert-equal "3678208 sectors" stdout)
(assert-eof stderr)))
)

View File

@ -43,156 +43,9 @@
(define (register-era-tests) #t)
;;;-----------------------------------------------------------
;;; era_check scenarios
;;;-----------------------------------------------------------
(define-scenario (era-check v)
"era_check -V"
(run-ok-rcv (stdout _) (era-check "-V")
(assert-equal tools-version stdout)))
(define-scenario (era-check version)
"era_check --version"
(run-ok-rcv (stdout _) (era-check "--version")
(assert-equal tools-version stdout)))
(define-scenario (era-check h)
"era_check -h"
(run-ok-rcv (stdout _) (era-check "-h")
(assert-equal era-check-help stdout)))
(define-scenario (era-check help)
"era_check --help"
(run-ok-rcv (stdout _) (era-check "--help")
(assert-equal era-check-help stdout)))
(define-scenario (era-check no-device-specified)
"Fail if no device specified"
(run-fail-rcv (_ stderr) (era-check)
(assert-starts-with "No input file provided." stderr)))
(define-scenario (era-check dev-not-exist)
"Fail if specified device doesn't exist"
(run-fail-rcv (_ stderr) (era-check "/dev/unlikely")
(assert-starts-with "/dev/unlikely: No such file or directory" stderr)))
(define-scenario (era-check dev-is-a-directory)
"Fail if given a directory instead of a file or device"
(run-fail-rcv (_ stderr) (era-check "/tmp")
(assert-starts-with "/tmp: Not a block device or regular file" stderr)))
(define-scenario (era-check bad-permissions)
"Fail if given a device with inadequate access permissions"
(with-temp-file-sized ((md "era.bin" (meg 4)))
(run-ok "chmod -r" md)
(run-fail-rcv (_ stderr) (era-check md)
(assert-starts-with "syscall 'open' failed: Permission denied" stderr))))
(define-scenario (era-check empty-dev)
"Fail if given a file of zeroes"
(with-empty-metadata (md)
(run-fail (era-check md))))
(define-scenario (era-check quiet)
"Fail should give no output if --quiet"
(with-empty-metadata (md)
(run-fail-rcv (stdout stderr) (era-check "--quiet" md)
(assert-eof stdout)
(assert-eof stderr))))
(define-scenario (era-check q)
"Fail should give no output if -q"
(with-empty-metadata (md)
(run-fail-rcv (stdout stderr) (era-check "-q" md)
(assert-eof stdout)
(assert-eof stderr))))
(define-scenario (era-check tiny-metadata)
"Prints helpful message in case tiny metadata given"
(with-temp-file-sized ((md "era.bin" 1024))
(run-fail-rcv (_ stderr) (era-check md)
(assert-starts-with "Metadata device/file too small. Is this binary metadata?" stderr))))
(define-scenario (era-check spot-accidental-xml-data)
"Prints helpful message if XML metadata given"
(with-era-xml (xml)
(system (fmt #f "man bash >> " xml))
(run-fail-rcv (_ stderr) (era-check xml)
(assert-matches ".*This looks like XML. era_check only checks the binary metadata format." stderr))))
;;;-----------------------------------------------------------
;;; era_restore scenarios
;;;-----------------------------------------------------------
(define-scenario (era-restore v)
"era_restore -V"
(run-ok-rcv (stdout _) (era-restore "-V")
(assert-equal tools-version stdout)))
(define-scenario (era-restore version)
"era_restore --version"
(run-ok-rcv (stdout _) (era-restore "--version")
(assert-equal tools-version stdout)))
(define-scenario (era-restore h)
"era_restore -h"
(run-ok-rcv (stdout _) (era-restore "-h")
(assert-equal era-restore-help stdout)))
(define-scenario (era-restore help)
"era_restore --help"
(run-ok-rcv (stdout _) (era-restore "--help")
(assert-equal era-restore-help stdout)))
(define-scenario (era-restore input-unspecified)
"Fails if no xml specified"
(with-empty-metadata (md)
(run-fail-rcv (_ stderr) (era-restore "-o" md)
(assert-starts-with "No input file provided." stderr))))
(define-scenario (era-restore missing-input-file)
"the input file can't be found"
(with-empty-metadata (md)
(let ((bad-path "no-such-file"))
(run-fail-rcv (_ stderr) (era-restore "-i no-such-file -o" md)
(assert-superblock-all-zeroes md)
(assert-starts-with
(string-append bad-path ": No such file or directory")
stderr)))))
(define-scenario (era-restore garbage-input-file)
"the input file is just zeroes"
(with-empty-metadata (md)
(with-temp-file-sized ((xml "era.xml" 4096))
(run-fail-rcv (_ stderr) (era-restore "-i " xml "-o" md)
(assert-superblock-all-zeroes md)))))
(define-scenario (era-restore output-unspecified)
"Fails if no metadata dev specified"
(with-era-xml (xml)
(run-fail-rcv (_ stderr) (era-restore "-i" xml)
(assert-starts-with "No output file provided." stderr))))
(define-scenario (era-restore success)
"Succeeds with xml and metadata"
(with-era-xml (xml)
(with-empty-metadata (md)
(run-ok (era-restore "-i" xml "-o" md)))))
(define-scenario (era-restore quiet)
"No output with --quiet (succeeding)"
(with-era-xml (xml)
(with-empty-metadata (md)
(run-ok-rcv (stdout stderr) (era-restore "--quiet" "-i" xml "-o" md)
(assert-eof stdout)
(assert-eof stderr)))))
(define-scenario (era-restore q)
"No output with -q (succeeding)"
(with-era-xml (xml)
(with-empty-metadata (md)
(run-ok-rcv (stdout stderr) (era-restore "-q" "-i" xml "-o" md)
(assert-eof stdout)
(assert-eof stderr)))))
(define-scenario (era-restore quiet-fail)
"No output with --quiet (failing)"
@ -213,21 +66,4 @@
(assert-starts-with
(string-append bad-xml ": No such file or directory")
stderr)))))
;;;-----------------------------------------------------------
;;; era_dump scenarios
;;;-----------------------------------------------------------
(define-scenario (era-dump small-input-file)
"Fails with small input file"
(with-temp-file-sized ((md "era.bin" 512))
(run-fail (era-dump md))))
(define-scenario (era-dump restore-is-noop)
"era_dump followed by era_restore is a noop."
(with-valid-metadata (md)
(run-ok-rcv (d1-stdout _) (era-dump md)
(with-temp-file-containing ((xml "era.xml" d1-stdout))
(run-ok (era-restore "-i" xml "-o" md))
(run-ok-rcv (d2-stdout _) (era-dump md)
(assert-equal d1-stdout d2-stdout))))))
)

View File

@ -23,23 +23,33 @@ fn main_() -> Result<()> {
let mut new_args = vec![OsString::from(&name)];
for a in args.into_iter() {
new_args.push(OsString::from(a));
new_args.push(a);
}
if name_eq(name, "cache_check") {
cache_check::run(&new_args);
} else if name_eq(name, "cache_dump") {
cache_dump::run(&new_args);
} else if name_eq(name, "cache_metadata_size") {
cache_metadata_size::run(&new_args);
} else if name_eq(name, "cache_repair") {
cache_repair::run(&new_args);
} else if name_eq(name, "cache_restore") {
cache_restore::run(&new_args);
} else if name_eq(name, "era_check") {
era_check::run(&new_args);
} else if name_eq(name, "era_dump") {
era_dump::run(&new_args);
} else if name_eq(name, "era_restore") {
era_restore::run(&new_args);
} else if name_eq(name, "thin_check") {
thin_check::run(&new_args);
} else if name_eq(name, "thin_dump") {
thin_dump::run(&new_args);
} else if name_eq(name, "thin_metadata_pack") {
thin_metadata_pack::run(&new_args);
} else if name_eq(name, "thin_metadata_size") {
thin_metadata_size::run(&new_args);
} else if name_eq(name, "thin_metadata_unpack") {
thin_metadata_unpack::run(&new_args);
} else if name_eq(name, "thin_repair") {

View File

@ -266,7 +266,7 @@ impl<'a> Widget for HeaderWidget<'a> {
fn read_node<V: Unpack>(engine: &dyn IoEngine, loc: u64) -> Result<btree::Node<V>> {
let b = engine.read(loc)?;
let path = Vec::new();
btree::unpack_node(&path, &b.get_data(), true, false)
btree::unpack_node(&path, b.get_data(), true, false)
.map_err(|_| anyhow!("couldn't unpack btree node"))
}
@ -765,7 +765,7 @@ fn perform_action(
}
fn explore(path: &Path, node_path: Option<Vec<u64>>) -> Result<()> {
let engine = SyncIoEngine::new(&path, 1, false)?;
let engine = SyncIoEngine::new(path, 1, false)?;
let mut panels: Vec<Box<dyn Panel>> = Vec::new();
@ -861,7 +861,7 @@ fn main() -> Result<()> {
.map(|text| btree::decode_node_path(text).unwrap());
let input_file = Path::new(matches.value_of("INPUT").unwrap());
explore(&input_file, node_path)
explore(input_file, node_path)
}
//------------------------------------

10
src/cache/check.rs vendored
View File

@ -92,10 +92,10 @@ mod format1 {
let mut errs: Vec<ArrayError> = Vec::new();
for m in b.values.iter() {
if let Err(e) = self.check_flags(&m) {
if let Err(e) = self.check_flags(m) {
errs.push(e);
}
if let Err(e) = self.check_oblock(&m) {
if let Err(e) = self.check_oblock(m) {
errs.push(e);
}
}
@ -182,10 +182,10 @@ mod format2 {
let cbegin = index as u32 * b.header.max_entries;
let cend = cbegin + b.header.nr_entries;
for (m, cblock) in b.values.iter().zip(cbegin..cend) {
if let Err(e) = self.check_flags(&m, inner.dirty_bits.contains(cblock as usize)) {
if let Err(e) = self.check_flags(m, inner.dirty_bits.contains(cblock as usize)) {
errs.push(e);
}
if let Err(e) = self.check_oblock(&m, &mut inner.seen_oblocks) {
if let Err(e) = self.check_oblock(m, &mut inner.seen_oblocks) {
errs.push(e);
}
}
@ -271,7 +271,7 @@ pub fn check(opts: CacheCheckOptions) -> anyhow::Result<()> {
let sb = match read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION) {
Ok(sb) => sb,
Err(e) => {
check_not_xml(&opts.dev, &opts.report);
check_not_xml(opts.dev, &opts.report);
return Err(e);
}
};

15
src/cache/dump.rs vendored
View File

@ -222,7 +222,7 @@ pub fn dump_metadata(
engine: Arc<dyn IoEngine + Send + Sync>,
out: &mut dyn MetadataVisitor,
sb: &Superblock,
_repair: bool,
repair: bool,
) -> anyhow::Result<()> {
let xml_sb = ir::Superblock {
uuid: "".to_string(),
@ -236,7 +236,7 @@ pub fn dump_metadata(
out.mappings_b()?;
let valid_mappings = match sb.version {
1 => {
let w = ArrayWalker::new(engine.clone(), false);
let w = ArrayWalker::new(engine.clone(), repair);
let mut emitter = format1::MappingEmitter::new(sb.cache_blocks as usize, out);
w.walk(&mut emitter, sb.mapping_root)?;
emitter.get_valid()
@ -246,9 +246,8 @@ pub fn dump_metadata(
let dirty_bits;
if let Some(root) = sb.dirty_root {
let (bits, errs) =
read_bitset(engine.clone(), root, sb.cache_blocks as usize, false);
// TODO: allow errors in repair mode
if errs.is_some() {
read_bitset(engine.clone(), root, sb.cache_blocks as usize, repair);
if errs.is_some() && !repair {
return Err(anyhow!("errors in bitset {}", errs.unwrap()));
}
dirty_bits = bits;
@ -258,7 +257,7 @@ pub fn dump_metadata(
return Err(anyhow!("format 2 selected, but no dirty bitset present"));
}
let w = ArrayWalker::new(engine.clone(), false);
let w = ArrayWalker::new(engine.clone(), repair);
let mut emitter =
format2::MappingEmitter::new(sb.cache_blocks as usize, dirty_bits, out);
w.walk(&mut emitter, sb.mapping_root)?;
@ -272,7 +271,7 @@ pub fn dump_metadata(
out.hints_b()?;
{
let w = ArrayWalker::new(engine.clone(), false);
let w = ArrayWalker::new(engine.clone(), repair);
let mut emitter = HintEmitter::new(out, valid_mappings);
w.walk(&mut emitter, sb.hint_root)?;
}
@ -296,7 +295,7 @@ pub fn dump(opts: CacheDumpOptions) -> anyhow::Result<()> {
}
let mut out = xml::XmlWriter::new(writer);
dump_metadata(ctx.engine.clone(), &mut out, &sb, opts.repair)
dump_metadata(ctx.engine, &mut out, &sb, opts.repair)
}
//------------------------------------------

19
src/cache/metadata_size.rs vendored Normal file
View File

@ -0,0 +1,19 @@
use anyhow::Result;
/// Configuration for a cache metadata-size estimate.
pub struct CacheMetadataSizeOptions {
    pub nr_blocks: u64,
    pub max_hint_width: u32, // bytes
}

/// Estimate the number of 512-byte sectors needed to hold the cache
/// metadata for the given configuration.
///
/// The estimate is a fixed transaction overhead plus the mapping array
/// (16 bytes per cache block) and the hint array (hint width plus an
/// 8-byte key per cache block), both rounded down to whole sectors.
pub fn metadata_size(opts: &CacheMetadataSizeOptions) -> Result<u64> {
    const SECTOR_SHIFT: u64 = 9; // 512 bytes per sector
    const BYTES_PER_BLOCK_SHIFT: u64 = 4; // 16 bytes for key and value
    const TRANSACTION_OVERHEAD: u64 = 8192; // in sectors; 4 MB
    const HINT_OVERHEAD_PER_BLOCK: u64 = 8; // 8 bytes for the key

    let blocks = opts.nr_blocks;
    let mapping_sectors = (blocks << BYTES_PER_BLOCK_SHIFT) >> SECTOR_SHIFT;
    let bytes_per_hint = opts.max_hint_width as u64 + HINT_OVERHEAD_PER_BLOCK;
    let hint_sectors = (blocks * bytes_per_hint) >> SECTOR_SHIFT;

    Ok(TRANSACTION_OVERHEAD + mapping_sectors + hint_sectors)
}

1
src/cache/mod.rs vendored
View File

@ -3,6 +3,7 @@ pub mod dump;
pub mod hint;
pub mod ir;
pub mod mapping;
pub mod metadata_size;
pub mod repair;
pub mod restore;
pub mod superblock;

View File

@ -218,9 +218,9 @@ impl<'a> MetadataVisitor for Restorer<'a> {
if m.dirty {
let index = m.cblock >> 6;
let bi = m.cblock & 63;
let mask = 1 << (m.cblock & 63);
if index == self.dirty_bits.0 {
self.dirty_bits.1 |= 1 << bi;
self.dirty_bits.1 |= mask;
} else {
let dirty_builder = self.dirty_builder.as_mut().unwrap();
dirty_builder.push_value(

View File

@ -96,7 +96,7 @@ fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
if version >= 2 {
let (m, root) = le_u64(i)?;
dirty_root = Some(root);
i = &m;
i = m;
}
Ok((

View File

@ -7,8 +7,8 @@ use std::process;
use std::sync::Arc;
use crate::cache::check::{check, CacheCheckOptions};
use crate::report::*;
use crate::commands::utils::*;
use crate::report::*;
//------------------------------------------
@ -76,7 +76,7 @@ pub fn run(args: &[std::ffi::OsString]) {
check_file_not_tiny(input_file, &report);
let opts = CacheCheckOptions {
dev: &input_file,
dev: input_file,
async_io: matches.is_present("ASYNC_IO"),
sb_only: matches.is_present("SB_ONLY"),
skip_mappings: matches.is_present("SKIP_MAPPINGS"),

View File

@ -0,0 +1,88 @@
extern crate clap;
use clap::{value_t_or_exit, App, Arg, ArgGroup};
use std::ffi::OsString;
use std::process;
use crate::cache::metadata_size::{metadata_size, CacheMetadataSizeOptions};
use crate::math::div_up;
//------------------------------------------
/// Parse command-line arguments for `cache_metadata_size`.
///
/// The user must supply either `--nr-blocks`, or both `--device-size`
/// and `--block-size`; the `ArgGroup` makes the two forms mutually
/// exclusive and requires one of them. Exits the process on invalid
/// input (via clap / `value_t_or_exit!`).
fn parse_args<I, T>(args: I) -> CacheMetadataSizeOptions
where
    I: IntoIterator<Item = T>,
    T: Into<OsString> + Clone,
{
    let parser = App::new("cache_metadata_size")
        .version(crate::version::tools_version())
        .about("Estimate the size of the metadata device needed for a given configuration.")
        .usage("cache_metadata_size [OPTIONS] <--device-size <SECTORS> --block-size <SECTORS> | --nr-blocks <NUM>>")
        // options
        .arg(
            Arg::with_name("BLOCK_SIZE")
                .help("Specify the size of each cache block")
                .long("block-size")
                .requires("DEVICE_SIZE")
                .value_name("SECTORS"),
        )
        .arg(
            Arg::with_name("DEVICE_SIZE")
                .help("Specify total size of the fast device used in the cache")
                .long("device-size")
                .requires("BLOCK_SIZE")
                .value_name("SECTORS"),
        )
        .arg(
            Arg::with_name("NR_BLOCKS")
                .help("Specify the number of cache blocks")
                .long("nr-blocks")
                .value_name("NUM"),
        )
        .arg(
            // typo fix: help text read "Specity"
            Arg::with_name("MAX_HINT_WIDTH")
                .help("Specify the per-block hint width")
                .long("max-hint-width")
                .value_name("BYTES")
                .default_value("4"),
        )
        .group(
            ArgGroup::with_name("selection")
                .args(&["DEVICE_SIZE", "NR_BLOCKS"])
                .required(true),
        );

    let matches = parser.get_matches_from(args);

    // Prefer an explicit block count; otherwise derive it from the
    // device size, rounding up so the whole device is covered.
    let nr_blocks = matches.value_of("NR_BLOCKS").map_or_else(
        || {
            let device_size = value_t_or_exit!(matches.value_of("DEVICE_SIZE"), u64);
            let block_size = value_t_or_exit!(matches.value_of("BLOCK_SIZE"), u32);
            div_up(device_size, block_size as u64)
        },
        |_| value_t_or_exit!(matches.value_of("NR_BLOCKS"), u64),
    );

    let max_hint_width = value_t_or_exit!(matches.value_of("MAX_HINT_WIDTH"), u32);

    CacheMetadataSizeOptions {
        nr_blocks,
        max_hint_width,
    }
}
/// Entry point for `cache_metadata_size`: parse the arguments, compute
/// the estimate and print it in sectors; exit non-zero on error.
pub fn run(args: &[std::ffi::OsString]) {
    let opts = parse_args(args);
    match metadata_size(&opts) {
        Ok(size) => println!("{} sectors", size),
        Err(reason) => {
            eprintln!("{}", reason);
            process::exit(1);
        }
    }
}
//------------------------------------------

View File

@ -60,8 +60,8 @@ pub fn run(args: &[std::ffi::OsString]) {
check_input_file(input_file, &report);
let opts = CacheRepairOptions {
input: &input_file,
output: &output_file,
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
};

View File

@ -51,8 +51,8 @@ pub fn run(args: &[std::ffi::OsString]) {
check_output_file(output_file, &report);
let opts = CacheRestoreOptions {
input: &input_file,
output: &output_file,
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
};

78
src/commands/era_check.rs Normal file
View File

@ -0,0 +1,78 @@
extern crate clap;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::commands::utils::*;
use crate::era::check::{check, EraCheckOptions};
use crate::report::*;
//------------------------------------------
/// Entry point for `era_check`: validate the input device and run the
/// metadata checks, reporting fatally (exit 1) on failure.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("era_check")
        .version(crate::version::tools_version())
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            Arg::with_name("IGNORE_NON_FATAL")
                .help("Only return a non-zero exit code if a fatal error is found.")
                .long("ignore-non-fatal-errors"),
        )
        .arg(
            Arg::with_name("QUIET")
                .help("Suppress output messages, return only exit code.")
                .short("q")
                .long("quiet"),
        )
        .arg(
            Arg::with_name("SB_ONLY")
                .help("Only check the superblock.")
                .long("super-block-only"),
        )
        // arguments
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to check")
                .required(true)
                .index(1),
        );
    let matches = parser.get_matches_from(args);
    let input_file = Path::new(matches.value_of("INPUT").unwrap());

    // Quiet mode suppresses everything; otherwise use a progress bar
    // when stdout is a terminal, and plain output when redirected.
    let report = if matches.is_present("QUIET") {
        Arc::new(mk_quiet_report())
    } else if atty::is(Stream::Stdout) {
        Arc::new(mk_progress_bar_report())
    } else {
        Arc::new(mk_simple_report())
    };

    // Cheap sanity checks before touching the metadata proper.
    check_input_file(input_file, &report);
    check_file_not_tiny(input_file, &report);
    check_not_xml(input_file, &report);

    let opts = EraCheckOptions {
        dev: input_file,
        async_io: matches.is_present("ASYNC_IO"),
        sb_only: matches.is_present("SB_ONLY"),
        ignore_non_fatal: matches.is_present("IGNORE_NON_FATAL"),
        report: report.clone(),
    };

    if let Err(reason) = check(&opts) {
        report.fatal(&format!("{}", reason));
        process::exit(1);
    }
}
//------------------------------------------

79
src/commands/era_dump.rs Normal file
View File

@ -0,0 +1,79 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::commands::utils::*;
use crate::era::dump::{dump, EraDumpOptions};
//------------------------------------------
pub fn run(args: &[std::ffi::OsString]) {
let parser = App::new("era_dump")
.version(crate::version::tools_version())
.about("Dump the era metadata to stdout in XML format")
// flags
.arg(
Arg::with_name("ASYNC_IO")
.help("Force use of io_uring for synchronous io")
.long("async-io")
.hidden(true),
)
.arg(
Arg::with_name("LOGICAL")
.help("Fold any unprocessed write sets into the final era array")
.long("logical"),
)
.arg(
Arg::with_name("REPAIR")
.help("Repair the metadata whilst dumping it")
.short("r")
.long("repair"),
)
// options
.arg(
Arg::with_name("OUTPUT")
.help("Specify the output file rather than stdout")
.short("o")
.long("output")
.value_name("FILE"),
)
// arguments
.arg(
Arg::with_name("INPUT")
.help("Specify the input device to dump")
.required(true)
.index(1),
);
let matches = parser.get_matches_from(args);
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let output_file = if matches.is_present("OUTPUT") {
Some(Path::new(matches.value_of("OUTPUT").unwrap()))
} else {
None
};
// Create a temporary report just in case these checks
// need to report anything.
let report = std::sync::Arc::new(crate::report::mk_simple_report());
check_input_file(input_file, &report);
check_file_not_tiny(input_file, &report);
drop(report);
let opts = EraDumpOptions {
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
logical: matches.is_present("LOGICAL"),
repair: matches.is_present("REPAIR"),
};
if let Err(reason) = dump(opts) {
eprintln!("{}", reason);
process::exit(1);
}
}
//------------------------------------------

View File

@ -0,0 +1,85 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::commands::utils::*;
use crate::era::invalidate::{invalidate, EraInvalidateOptions};
//------------------------------------------
/// Entry point for `era_invalidate`: list blocks that may have changed
/// since the era given by `--written-since`, writing the result to
/// stdout or to `--output`.
///
/// Fix: the code below reads `matches.is_present("METADATA_SNAP")`, but
/// no such argument was ever defined, so `--metadata-snap` was rejected
/// by clap as an unknown flag and the option could never be enabled.
/// The flag is now declared.
pub fn run(args: &[std::ffi::OsString]) {
    let parser = App::new("era_invalidate")
        .version(crate::version::tools_version())
        .about("List blocks that may have changed since a given era")
        // flags
        .arg(
            Arg::with_name("ASYNC_IO")
                .help("Force use of io_uring for synchronous io")
                .long("async-io")
                .hidden(true),
        )
        .arg(
            // previously referenced below but never defined
            Arg::with_name("METADATA_SNAP")
                .help("Use the metadata snapshot rather than the current superblock")
                .long("metadata-snap"),
        )
        // options
        .arg(
            Arg::with_name("OUTPUT")
                .help("Specify the output file rather than stdout")
                .short("o")
                .long("output")
                .value_name("FILE"),
        )
        // arguments
        .arg(
            Arg::with_name("INPUT")
                .help("Specify the input device to dump")
                .required(true)
                .index(1),
        )
        .arg(
            Arg::with_name("WRITTEN_SINCE")
                .help("Blocks written since the given era will be listed")
                .long("written-since")
                .required(true)
                .value_name("ERA"),
        );
    let matches = parser.get_matches_from(args);
    let input_file = Path::new(matches.value_of("INPUT").unwrap());
    let output_file = if matches.is_present("OUTPUT") {
        Some(Path::new(matches.value_of("OUTPUT").unwrap()))
    } else {
        None
    };

    // Create a temporary report just in case these checks
    // need to report anything.
    let report = std::sync::Arc::new(crate::report::mk_simple_report());
    check_input_file(input_file, &report);
    check_file_not_tiny(input_file, &report);
    drop(report);

    // WRITTEN_SINCE is required by clap, so the unwrap_or(0) is only a
    // defensive default; a non-numeric value exits with a message.
    let threshold = matches
        .value_of("WRITTEN_SINCE")
        .map(|s| {
            s.parse::<u32>().unwrap_or_else(|_| {
                eprintln!("Couldn't parse written_since");
                process::exit(1);
            })
        })
        .unwrap_or(0);

    let opts = EraInvalidateOptions {
        input: input_file,
        output: output_file,
        async_io: matches.is_present("ASYNC_IO"),
        threshold,
        metadata_snap: matches.is_present("METADATA_SNAP"),
    };

    if let Err(reason) = invalidate(&opts) {
        eprintln!("{}", reason);
        process::exit(1);
    }
}
//------------------------------------------

View File

@ -0,0 +1,73 @@
extern crate clap;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::commands::utils::*;
use crate::era::repair::{repair, EraRepairOptions};
use crate::report::*;
pub fn run(args: &[std::ffi::OsString]) {
let parser = App::new("era_repair")
.version(crate::version::tools_version())
.about("Repair binary era metadata, and write it to a different device or file")
// flags
.arg(
Arg::with_name("ASYNC_IO")
.help("Force use of io_uring for synchronous io")
.long("async-io")
.hidden(true),
)
.arg(
Arg::with_name("QUIET")
.help("Suppress output messages, return only exit code.")
.short("q")
.long("quiet"),
)
// options
.arg(
Arg::with_name("INPUT")
.help("Specify the input device")
.short("i")
.long("input")
.value_name("FILE")
.required(true),
)
.arg(
Arg::with_name("OUTPUT")
.help("Specify the output device")
.short("o")
.long("output")
.value_name("FILE")
.required(true),
);
let matches = parser.get_matches_from(args);
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let output_file = Path::new(matches.value_of("OUTPUT").unwrap());
let report = if matches.is_present("QUIET") {
std::sync::Arc::new(mk_quiet_report())
} else if atty::is(Stream::Stdout) {
std::sync::Arc::new(mk_progress_bar_report())
} else {
Arc::new(mk_simple_report())
};
check_input_file(input_file, &report);
let opts = EraRepairOptions {
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
};
if let Err(reason) = repair(opts) {
report.fatal(&format!("{}", reason));
process::exit(1);
}
}

View File

@ -0,0 +1,64 @@
extern crate clap;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::commands::utils::*;
use crate::era::restore::{restore, EraRestoreOptions};
pub fn run(args: &[std::ffi::OsString]) {
let parser = App::new("era_restore")
.version(crate::version::tools_version())
.about("Convert XML format metadata to binary.")
// flags
.arg(
Arg::with_name("ASYNC_IO")
.help("Force use of io_uring for synchronous io")
.long("async-io")
.hidden(true),
)
.arg(
Arg::with_name("QUIET")
.help("Suppress output messages, return only exit code.")
.short("q")
.long("quiet"),
)
// options
.arg(
Arg::with_name("INPUT")
.help("Specify the input xml")
.short("i")
.long("input")
.value_name("FILE")
.required(true),
)
.arg(
Arg::with_name("OUTPUT")
.help("Specify the output device to check")
.short("o")
.long("output")
.value_name("FILE")
.required(true),
);
let matches = parser.get_matches_from(args);
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let output_file = Path::new(matches.value_of("OUTPUT").unwrap());
let report = mk_report(matches.is_present("QUIET"));
check_input_file(input_file, &report);
check_output_file(output_file, &report);
let opts = EraRestoreOptions {
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
};
if let Err(reason) = restore(opts) {
report.fatal(&format!("{}", reason));
process::exit(1);
}
}

View File

@ -1,10 +1,17 @@
pub mod cache_check;
pub mod cache_dump;
pub mod cache_metadata_size;
pub mod cache_repair;
pub mod cache_restore;
pub mod era_check;
pub mod era_dump;
pub mod era_invalidate;
pub mod era_repair;
pub mod era_restore;
pub mod thin_check;
pub mod thin_dump;
pub mod thin_metadata_pack;
pub mod thin_metadata_size;
pub mod thin_metadata_unpack;
pub mod thin_repair;
pub mod thin_restore;

View File

@ -5,9 +5,9 @@ use std::path::Path;
use std::process;
use std::sync::Arc;
use crate::commands::utils::*;
use crate::io_engine::*;
use crate::thin::check::{check, ThinCheckOptions, MAX_CONCURRENT_IO};
use crate::commands::utils::*;
pub fn run(args: &[std::ffi::OsString]) {
let parser = App::new("thin_check")
@ -88,7 +88,7 @@ pub fn run(args: &[std::ffi::OsString]) {
.index(1),
);
let matches = parser.get_matches_from(args.into_iter());
let matches = parser.get_matches_from(args.iter());
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let report = mk_report(matches.is_present("QUIET"));
@ -101,14 +101,13 @@ pub fn run(args: &[std::ffi::OsString]) {
if matches.is_present("ASYNC_IO") {
engine = Arc::new(
AsyncIoEngine::new(&input_file, MAX_CONCURRENT_IO, writable)
AsyncIoEngine::new(input_file, MAX_CONCURRENT_IO, writable)
.expect("unable to open input file"),
);
} else {
let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
engine = Arc::new(
SyncIoEngine::new(&input_file, nr_threads, writable)
.expect("unable to open input file"),
SyncIoEngine::new(input_file, nr_threads, writable).expect("unable to open input file"),
);
}

View File

@ -31,7 +31,7 @@ pub fn run(args: &[std::ffi::OsString]) {
let report = std::sync::Arc::new(mk_simple_report());
check_input_file(input_file, &report);
if let Err(reason) = crate::pack::toplevel::pack(&input_file, &output_file) {
if let Err(reason) = crate::pack::toplevel::pack(input_file, output_file) {
report.fatal(&format!("Application error: {}\n", reason));
exit(1);
}

View File

@ -0,0 +1,100 @@
extern crate clap;
use clap::{value_t_or_exit, App, Arg};
use std::ffi::OsString;
use std::process;
use crate::thin::metadata_size::{metadata_size, ThinMetadataSizeOptions};
use crate::units::*;
//------------------------------------------
fn parse_args<I, T>(args: I) -> (ThinMetadataSizeOptions, Units, bool)
where
I: IntoIterator<Item = T>,
T: Into<OsString> + Clone,
{
let parser = App::new("thin_metadata_size")
.version(crate::version::tools_version())
.about("Estimate the size of the metadata device needed for a given configuration.")
// options
.arg(
Arg::with_name("BLOCK_SIZE")
.help("Specify the data block size")
.short("b")
.long("block-size")
.required(true)
.value_name("SECTORS"),
)
.arg(
Arg::with_name("POOL_SIZE")
.help("Specify the size of pool device")
.short("s")
.long("pool-size")
.required(true)
.value_name("SECTORS"),
)
.arg(
Arg::with_name("MAX_THINS")
.help("Maximum number of thin devices and snapshots")
.short("m")
.long("max-thins")
.required(true)
.value_name("NUM"),
)
.arg(
Arg::with_name("UNIT")
.help("Specify the output unit")
.short("u")
.long("unit")
.value_name("UNIT")
.default_value("sector"),
)
.arg(
Arg::with_name("NUMERIC_ONLY")
.help("Output numeric value only")
.short("n")
.long("numeric-only"),
);
let matches = parser.get_matches_from(args);
// TODO: handle unit suffix
let pool_size = value_t_or_exit!(matches.value_of("POOL_SIZE"), u64);
let block_size = value_t_or_exit!(matches.value_of("BLOCK_SIZE"), u32);
let max_thins = value_t_or_exit!(matches.value_of("MAX_THINS"), u64);
let unit = value_t_or_exit!(matches.value_of("UNIT"), Units);
let numeric_only = matches.is_present("NUMERIC_ONLY");
(
ThinMetadataSizeOptions {
nr_blocks: pool_size / block_size as u64,
max_thins,
},
unit,
numeric_only,
)
}
/// CLI entry point: compute the estimated metadata size and print it in
/// the requested unit (value only when --numeric-only was given).
pub fn run(args: &[std::ffi::OsString]) {
    let (opts, unit, numeric_only) = parse_args(args);

    match metadata_size(&opts) {
        Err(reason) => {
            eprintln!("{}", reason);
            process::exit(1);
        }
        Ok(size) => {
            // metadata_size() works in 512-byte sectors; convert to bytes
            // before the unit conversion.
            let converted = to_units(size * 512, unit.clone());
            if numeric_only {
                println!("{}", converted);
            } else {
                // pluralise the unit name, e.g. "sector" -> "sectors"
                println!("{} {}s", converted, unit);
            }
        }
    }
}
//------------------------------------------

View File

@ -1,9 +1,9 @@
extern crate clap;
use crate::file_utils;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use crate::file_utils;
use std::process::exit;
@ -37,7 +37,7 @@ pub fn run(args: &[std::ffi::OsString]) {
exit(1);
}
if let Err(reason) = crate::pack::toplevel::unpack(&input_file, &output_file) {
if let Err(reason) = crate::pack::toplevel::unpack(input_file, output_file) {
eprintln!("Application error: {}", reason);
process::exit(1);
}

View File

@ -91,8 +91,8 @@ pub fn run(args: &[std::ffi::OsString]) {
});
let opts = ThinRepairOptions {
input: &input_file,
output: &output_file,
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
overrides: SuperblockOverrides {

View File

@ -51,8 +51,8 @@ pub fn run(args: &[std::ffi::OsString]) {
check_output_file(output_file, &report);
let opts = ThinRestoreOptions {
input: &input_file,
output: &output_file,
input: input_file,
output: output_file,
async_io: matches.is_present("ASYNC_IO"),
report: report.clone(),
};

View File

@ -70,7 +70,7 @@ pub fn run(args: &[std::ffi::OsString]) {
check_input_file(input_file, &report);
if let Err(reason) =
crate::shrink::toplevel::shrink(&input_file, &output_file, &data_file, size, do_copy)
crate::shrink::toplevel::shrink(input_file, output_file, data_file, size, do_copy)
{
eprintln!("Application error: {}\n", reason);
exit(1);

View File

@ -10,10 +10,7 @@ use crate::report::*;
pub fn check_input_file(input_file: &Path, report: &Report) {
if !file_utils::file_exists(input_file) {
report.fatal(&format!(
"Couldn't find input file '{:?}'.",
&input_file
));
report.fatal(&format!("Couldn't find input file '{:?}'.", &input_file));
exit(1);
}
@ -62,15 +59,11 @@ pub fn mk_report(quiet: bool) -> std::sync::Arc<Report> {
}
fn is_xml(line: &[u8]) -> bool {
line.starts_with(b"<superblock") ||
line.starts_with(b"?xml") ||
line.starts_with(b"<!DOCTYPE")
line.starts_with(b"<superblock") || line.starts_with(b"?xml") || line.starts_with(b"<!DOCTYPE")
}
pub fn check_not_xml_(input_file: &Path, report: &Report) -> Result<()> {
let mut file = OpenOptions::new()
.read(true)
.open(input_file)?;
let mut file = OpenOptions::new().read(true).open(input_file)?;
let mut data = vec![0; 16];
file.read_exact(&mut data)?;

152
src/era/check.rs Normal file
View File

@ -0,0 +1,152 @@
use anyhow::{anyhow, Result};
use std::path::Path;
use std::sync::Arc;
use crate::era::superblock::*;
use crate::era::writeset::*;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::array::{self, ArrayBlock, ArrayError};
use crate::pdata::array_walker::*;
use crate::pdata::bitset::*;
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::report::*;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
// Bump the reference count of the superblock location in the core space map.
fn inc_superblock(sm: &ASpaceMap) -> anyhow::Result<()> {
    sm.lock().unwrap().inc(SUPERBLOCK_LOCATION, 1)?;
    Ok(())
}
//------------------------------------------
// Array visitor that validates era values against the superblock's
// current era (no stored era may be newer than it).
struct EraChecker {
    current_era: u32,
}

impl EraChecker {
    pub fn new(current_era: u32) -> EraChecker {
        EraChecker { current_era }
    }
}
impl ArrayVisitor<u32> for EraChecker {
    // Collect an error for every era value newer than the current era.
    fn visit(&self, index: u64, b: ArrayBlock<u32>) -> array::Result<()> {
        let dbegin = index as u32 * b.header.max_entries;
        let dend = dbegin + b.header.max_entries;

        let errs: Vec<ArrayError> = b
            .values
            .iter()
            .zip(dbegin..dend)
            .filter(|(era, _)| **era > self.current_era)
            .map(|(era, dblock)| {
                array::value_err(format!(
                    "invalid era value at data block {}: {}",
                    dblock, era
                ))
            })
            .collect();

        if errs.is_empty() {
            Ok(())
        } else if errs.len() == 1 {
            Err(errs[0].clone())
        } else {
            Err(array::aggregate_error(errs))
        }
    }
}
//------------------------------------------
// Options controlling era_check.
pub struct EraCheckOptions<'a> {
    pub dev: &'a Path,          // metadata device/file to check
    pub async_io: bool,         // use the async (io_uring) engine
    pub sb_only: bool,          // stop after validating the superblock
    pub ignore_non_fatal: bool, // keep going on non-fatal btree/array errors
    pub report: Arc<Report>,
}

// Bundle of runtime objects built from the options.
struct Context {
    report: Arc<Report>,
    engine: Arc<dyn IoEngine + Send + Sync>,
}
// Build the io engine (async or sync) requested by the options.
fn mk_context(opts: &EraCheckOptions) -> anyhow::Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.dev, MAX_CONCURRENT_IO, false)?)
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.dev, nr_threads, false)?)
    };

    Ok(Context {
        report: opts.report.clone(),
        engine,
    })
}
// Reject superblocks with an on-disk format version we don't understand.
fn check_superblock(sb: &Superblock) -> anyhow::Result<()> {
    match sb.version {
        0..=1 => Ok(()),
        _ => Err(anyhow!("unknown superblock version")),
    }
}
/// Check era metadata: superblock, writeset bitsets and the era array.
/// Returns Err if any fatal inconsistency was found.
pub fn check(opts: &EraCheckOptions) -> Result<()> {
    let ctx = mk_context(opts)?;
    let engine = &ctx.engine;
    let report = &ctx.report;
    let mut fatal = false;

    report.set_title("Checking era metadata");

    // In-core space map used to track how often each metadata block is
    // referenced while walking (u8::MAX = max ref count per block).
    let metadata_sm = core_sm(engine.get_nr_blocks(), u8::MAX as u32);
    inc_superblock(&metadata_sm)?;

    let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;
    check_superblock(&sb)?;

    if opts.sb_only {
        return Ok(());
    }

    // Walk the writeset tree, then verify each writeset's bitset.
    let mut path = vec![0];
    let writesets = btree_to_map::<Writeset>(
        &mut path,
        engine.clone(),
        opts.ignore_non_fatal,
        sb.writeset_tree_root,
    )?;

    for ws in writesets.values() {
        let (_bs, err) = read_bitset_with_sm(
            engine.clone(),
            ws.root,
            ws.nr_bits as usize,
            metadata_sm.clone(),
            opts.ignore_non_fatal,
        )?;
        if err.is_some() {
            ctx.report.fatal(&format!("{}", err.unwrap()));
            fatal = true;
        }
    }

    // Validate every stored era value against the current era.
    let w = ArrayWalker::new_with_sm(engine.clone(), metadata_sm.clone(), opts.ignore_non_fatal)?;
    let mut c = EraChecker::new(sb.current_era);
    if let Err(e) = w.walk(&mut c, sb.era_array_root) {
        ctx.report.fatal(&format!("{}", e));
        fatal = true;
    }

    if fatal {
        Err(anyhow!("fatal errors in metadata"))
    } else {
        Ok(())
    }
}

420
src/era/dump.rs Normal file
View File

@ -0,0 +1,420 @@
use anyhow::{anyhow, Result};
use fixedbitset::FixedBitSet;
use std::convert::TryFrom;
use std::fs::File;
use std::io::BufWriter;
use std::io::Write;
use std::ops::Deref;
use std::path::Path;
use std::sync::{Arc, Mutex};
use crate::era::ir::{self, MetadataVisitor};
use crate::era::superblock::*;
use crate::era::writeset::Writeset;
use crate::era::xml;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::array::{self, ArrayBlock};
use crate::pdata::array_walker::*;
use crate::pdata::bitset::read_bitset_no_err;
use crate::pdata::btree_walker::btree_to_map;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//-----------------------------------------
// Array visitor that forwards era array entries to a MetadataVisitor.
// The Mutex makes the shared &mut visitor usable from the (potentially
// concurrent) array walker, which only gives us &self.
struct EraEmitter<'a> {
    emitter: Mutex<&'a mut dyn MetadataVisitor>,
}

impl<'a> EraEmitter<'a> {
    pub fn new(emitter: &'a mut dyn MetadataVisitor) -> EraEmitter {
        EraEmitter {
            emitter: Mutex::new(emitter),
        }
    }
}
impl<'a> ArrayVisitor<u32> for EraEmitter<'a> {
    // Emit one era entry per populated element of this array block.
    fn visit(&self, index: u64, b: ArrayBlock<u32>) -> array::Result<()> {
        let begin = index as u32 * b.header.max_entries;
        let end = begin + b.header.nr_entries;

        // take the lock once for the whole block
        let mut emitter = self.emitter.lock().unwrap();
        for (block, v) in (begin..end).zip(b.values.iter()) {
            let era = ir::Era { block, era: *v };
            emitter
                .era(&era)
                .map_err(|e| array::value_err(format!("{}", e)))?;
        }

        Ok(())
    }
}
//------------------------------------------
// Abstract store mapping a data block to its archived era (if any).
trait Archive {
    fn set(&mut self, key: u32, value: u32) -> Result<()>;
    fn get(&self, key: u32) -> Option<u32>;
}

// In-core archive of writeset eras.
// The actual era for a given block is `digested_era + deltas[b]` if `deltas[b]` is non-zero.
// T is the narrowest unsigned type wide enough for the largest delta.
struct EraArchive<T> {
    digested_era: u32, // maximum possible era in the era array
    deltas: Vec<T>,
}
// Choose the narrowest delta type that can hold nr_writesets + 1 so the
// per-block table stays as small as possible.
fn new_era_archive(nr_blocks: u32, archived_begin: u32, nr_writesets: u32) -> Box<dyn Archive> {
    let digested_era = archived_begin.wrapping_sub(1);
    let len = nr_blocks as usize;

    match nr_writesets + 1 {
        0..=255 => Box::new(EraArchive {
            digested_era,
            deltas: vec![0u8; len],
        }),
        256..=65535 => Box::new(EraArchive {
            digested_era,
            deltas: vec![0u16; len],
        }),
        _ => Box::new(EraArchive {
            digested_era,
            deltas: vec![0u32; len],
        }),
    }
}
impl<T> Archive for EraArchive<T>
where
    T: Copy + Into<u32> + TryFrom<u32>,
    <T as TryFrom<u32>>::Error: std::fmt::Debug,
{
    // Store the delta for `block`; the constructor guarantees it fits in T.
    fn set(&mut self, block: u32, delta: u32) -> Result<()> {
        self.deltas[block as usize] = T::try_from(delta).unwrap();
        Ok(())
    }

    // A zero delta (or out-of-range block) means "not archived".
    fn get(&self, block: u32) -> Option<u32> {
        let delta: u32 = (*self.deltas.get(block as usize)?).into();
        match delta {
            0 => None,
            d => Some(self.digested_era.wrapping_add(d)),
        }
    }
}
//------------------------------------------
// State shared under one lock: the output visitor plus the writeset
// archive consulted for each block.
struct Inner<'a> {
    emitter: &'a mut dyn MetadataVisitor,
    era_archive: &'a dyn Archive,
}

// Array visitor used by the logical dump: emits the archived era for a
// block when present, otherwise the value from the era array.
struct LogicalEraEmitter<'a> {
    inner: Mutex<Inner<'a>>,
}

impl<'a> LogicalEraEmitter<'a> {
    pub fn new(
        emitter: &'a mut dyn MetadataVisitor,
        era_archive: &'a dyn Archive,
    ) -> LogicalEraEmitter<'a> {
        LogicalEraEmitter {
            inner: Mutex::new(Inner {
                emitter,
                era_archive,
            }),
        }
    }
}
impl<'a> ArrayVisitor<u32> for LogicalEraEmitter<'a> {
    // Emit one entry per block, preferring the era recorded in the
    // writeset archive over the value stored in the era array.
    fn visit(&self, index: u64, b: ArrayBlock<u32>) -> array::Result<()> {
        let mut inner = self.inner.lock().unwrap();
        let begin = index as u32 * b.header.max_entries;
        let end = begin + b.header.nr_entries;

        for (v, block) in b.values.iter().zip(begin..end) {
            let era = match inner.era_archive.get(block) {
                Some(archived) => ir::Era {
                    block,
                    era: archived,
                },
                None => ir::Era { block, era: *v },
            };

            inner
                .emitter
                .era(&era)
                .map_err(|e| array::value_err(format!("{}", e)))?;
        }

        Ok(())
    }
}
//------------------------------------------
// Options controlling era_dump.
pub struct EraDumpOptions<'a> {
    pub input: &'a Path,          // metadata device/file to dump
    pub output: Option<&'a Path>, // None = write XML to stdout
    pub async_io: bool,
    pub logical: bool, // fold writesets into per-block logical eras
    pub repair: bool,  // tolerate non-fatal errors while walking
}

struct Context {
    engine: Arc<dyn IoEngine + Send + Sync>,
}
// Build the io engine (async or sync) requested by the options.
fn mk_context(opts: &EraDumpOptions) -> anyhow::Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.input, MAX_CONCURRENT_IO, false)?)
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.input, nr_threads, false)?)
    };

    Ok(Context { engine })
}
// notify the visitor about the marked blocks only
//
// Scans the writeset bitset 32 bits at a time, coalescing consecutive set
// bits into half-open [begin, end) runs and emitting each run as one
// MarkedBlocks record.
fn dump_writeset(
    engine: Arc<dyn IoEngine + Send + Sync>,
    out: &mut dyn MetadataVisitor,
    era: u32,
    ws: &Writeset,
    repair: bool,
) -> anyhow::Result<()> {
    // TODO: deal with broken writeset
    let bits = read_bitset_no_err(engine.clone(), ws.root, ws.nr_bits as usize, repair)?;

    out.writeset_b(&ir::Writeset {
        era,
        nr_bits: ws.nr_bits,
    })?;

    // [begin, end) denotes the range of set bits.
    let mut begin: u32 = 0;
    let mut end: u32 = 0;
    for (index, entry) in bits.as_slice().iter().enumerate() {
        let mut n = *entry;

        if n == u32::MAX {
            // all 32 bits set: extend the current run, clamped to nr_bits
            end = std::cmp::min(end + 32, ws.nr_bits);
            continue;
        }

        while n > 0 {
            let zeros = n.trailing_zeros();
            if zeros > 0 {
                // hit a gap: flush the run accumulated so far
                if end > begin {
                    let m = ir::MarkedBlocks {
                        begin,
                        len: end - begin,
                    };
                    out.writeset_blocks(&m)?;
                }

                n >>= zeros;
                end += zeros;
                begin = end;
            }

            // consume the next run of set bits
            let ones = n.trailing_ones();
            n >>= ones;
            end = std::cmp::min(end + ones, ws.nr_bits);
        }

        // emit the range if it ends before the entry boundary
        let endpos = ((index as u32) << 5) + 32;
        if end < endpos {
            if end > begin {
                let m = ir::MarkedBlocks {
                    begin,
                    len: end - begin,
                };
                out.writeset_blocks(&m)?;
            }

            begin = endpos;
            end = begin;
        }
    }

    // flush the final run, if any
    if end > begin {
        let m = ir::MarkedBlocks {
            begin,
            len: end - begin,
        };
        out.writeset_blocks(&m)?;
    }

    out.writeset_e()?;

    Ok(())
}
/// Emit the metadata verbatim: superblock, each writeset (oldest first),
/// then the raw era array.
pub fn dump_metadata(
    engine: Arc<dyn IoEngine + Send + Sync>,
    out: &mut dyn MetadataVisitor,
    sb: &Superblock,
    repair: bool,
) -> anyhow::Result<()> {
    let xml_sb = ir::Superblock {
        uuid: "".to_string(),
        block_size: sb.data_block_size,
        nr_blocks: sb.nr_blocks,
        current_era: sb.current_era,
    };
    out.superblock_b(&xml_sb)?;

    let writesets = get_writesets_ordered(engine.clone(), sb, repair)?;
    for (era, ws) in writesets.iter() {
        dump_writeset(engine.clone(), out, *era as u32, ws, repair)?;
    }

    out.era_b()?;
    let w = ArrayWalker::new(engine.clone(), repair);
    let mut emitter = EraEmitter::new(out);
    w.walk(&mut emitter, sb.era_array_root)?;
    out.era_e()?;

    out.superblock_e()?;
    out.eof()?;

    Ok(())
}
//-----------------------------------------
// Return all writesets ordered by era (oldest first).  The unarchived
// current writeset lives in the superblock rather than the tree, so it is
// merged in when present (root != 0).  The eras must form a contiguous
// run ending at current_era.
fn get_writesets_ordered(
    engine: Arc<dyn IoEngine + Send + Sync>,
    sb: &Superblock,
    repair: bool,
) -> Result<Vec<(u32, Writeset)>> {
    let mut path = vec![0];
    let mut writesets =
        btree_to_map::<Writeset>(&mut path, engine.clone(), repair, sb.writeset_tree_root)?;

    if sb.current_writeset.root != 0 {
        if writesets.contains_key(&(sb.current_era as u64)) {
            return Err(anyhow!(
                "Duplicated era found in current_writeset and the writeset tree"
            ));
        }
        writesets.insert(sb.current_era as u64, sb.current_writeset);
    }

    if writesets.is_empty() {
        return Ok(Vec::new());
    }

    let era_begin = sb.current_era.wrapping_sub((writesets.len() - 1) as u32);
    (era_begin..=sb.current_era)
        .map(|era| {
            writesets
                .get(&(era as u64))
                .map(|ws| (era, *ws))
                .ok_or_else(|| anyhow!("Writeset of era {} is not present", era))
        })
        .collect()
}
// Record every set bit of a writeset in the archive with delta index + 1
// (0 is reserved for "not archived").
fn collate_writeset(index: u32, bitset: &FixedBitSet, archive: &mut dyn Archive) -> Result<()> {
    let era_delta = index + 1;

    for (i, entry) in bitset.as_slice().iter().enumerate() {
        let base = (i << 5) as u32;
        let mut n = *entry;
        // visit set bits in ascending order, clearing the lowest each time
        while n != 0 {
            let bit = n.trailing_zeros();
            archive.set(base + bit, era_delta)?;
            n &= n - 1;
        }
    }

    Ok(())
}
// Fold every writeset into a single era archive.  Writesets are visited
// oldest first, so a block touched in several eras ends up recording the
// most recent one.
fn collate_writesets(
    engine: Arc<dyn IoEngine + Send + Sync>,
    sb: &Superblock,
    repair: bool,
) -> Result<Box<dyn Archive>> {
    let writesets = get_writesets_ordered(engine.clone(), sb, repair)?;

    let archived_begin = writesets.get(0).map_or(0u32, |(era, _ws)| *era);
    let mut archive = new_era_archive(sb.nr_blocks, archived_begin, writesets.len() as u32);

    for (index, (_era, ws)) in writesets.iter().enumerate() {
        let bitset = read_bitset_no_err(engine.clone(), ws.root, ws.nr_bits as usize, repair)?;
        collate_writeset(index as u32, &bitset, archive.as_mut())?;
    }

    Ok(archive)
}
/// Emit the metadata with the writesets folded into the era array, so each
/// block shows its effective (logical) era; no <writeset> sections appear.
pub fn dump_metadata_logical(
    engine: Arc<dyn IoEngine + Send + Sync>,
    out: &mut dyn MetadataVisitor,
    sb: &Superblock,
    repair: bool,
) -> anyhow::Result<()> {
    let era_archive = collate_writesets(engine.clone(), sb, repair)?;

    let xml_sb = ir::Superblock {
        uuid: "".to_string(),
        block_size: sb.data_block_size,
        nr_blocks: sb.nr_blocks,
        current_era: sb.current_era,
    };
    out.superblock_b(&xml_sb)?;

    out.era_b()?;
    let w = ArrayWalker::new(engine, repair);
    let mut emitter = LogicalEraEmitter::new(out, era_archive.deref());
    w.walk(&mut emitter, sb.era_array_root)?;
    out.era_e()?;

    out.superblock_e()?;
    out.eof()?;

    Ok(())
}
//-----------------------------------------
/// Top-level era_dump entry point.
pub fn dump(opts: EraDumpOptions) -> anyhow::Result<()> {
    let ctx = mk_context(&opts)?;
    let sb = read_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION)?;

    // dump to the requested file, or stdout when no output path was given
    let writer: Box<dyn Write> = match opts.output {
        Some(path) => Box::new(BufWriter::new(File::create(path)?)),
        None => Box::new(BufWriter::new(std::io::stdout())),
    };
    let mut out = xml::XmlWriter::new(writer, false);

    let writesets = get_writesets_ordered(ctx.engine.clone(), &sb, opts.repair)?;
    if opts.logical && !writesets.is_empty() {
        dump_metadata_logical(ctx.engine, &mut out, &sb, opts.repair)
    } else {
        dump_metadata(ctx.engine, &mut out, &sb, opts.repair)
    }
}
//------------------------------------------

285
src/era/invalidate.rs Normal file
View File

@ -0,0 +1,285 @@
use anyhow::Result;
use quick_xml::events::{BytesEnd, BytesStart, Event};
use quick_xml::Writer;
use std::fs::File;
use std::io::{BufWriter, Write};
use std::path::Path;
use std::sync::{Arc, Mutex};
use crate::era::superblock::*;
use crate::era::writeset::*;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::math::div_up;
use crate::pdata::array::{self, value_err, ArrayBlock};
use crate::pdata::array_walker::*;
use crate::pdata::btree_walker::*;
use crate::xml::mk_attr;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
// Array visitor that ORs writeset bitset blocks into one composed bitset.
struct BitsetCollator<'a> {
    composed_bits: Box<Mutex<&'a mut [u64]>>,
}

impl<'a> BitsetCollator<'a> {
    fn new(bitset: &mut [u64]) -> BitsetCollator {
        BitsetCollator {
            composed_bits: Box::new(Mutex::new(bitset)),
        }
    }
}
impl<'a> ArrayVisitor<u64> for BitsetCollator<'a> {
    // OR this array block's words into the composed bitset.
    fn visit(&self, index: u64, b: ArrayBlock<u64>) -> array::Result<()> {
        let mut bitset = self.composed_bits.lock().unwrap();
        // index of the first u64 of this block within the bitset array
        let offset = index as usize * b.header.max_entries as usize;
        for (dest, entry) in bitset.iter_mut().skip(offset).zip(b.values.iter()) {
            *dest |= entry;
        }
        Ok(())
    }
}
//------------------------------------------
// Array visitor that marks, in the composed bitset, every data block whose
// stored era is at or above the threshold.
struct EraArrayCollator<'a> {
    composed_bits: Box<Mutex<&'a mut [u64]>>,
    threshold: u32, // minimum era (inclusive) for a block to be marked
}

impl<'a> EraArrayCollator<'a> {
    fn new(bitset: &mut [u64], threshold: u32) -> EraArrayCollator {
        EraArrayCollator {
            composed_bits: Box::new(Mutex::new(bitset)),
            threshold,
        }
    }
}
impl<'a> ArrayVisitor<u32> for EraArrayCollator<'a> {
    // Set a bit for every data block whose era is >= threshold.  The u64
    // word being updated is cached in `buf` and written back whenever the
    // scan advances to a later word, so each word is read/written once.
    fn visit(&self, index: u64, b: ArrayBlock<u32>) -> array::Result<()> {
        let blk_begin = index as usize * b.header.max_entries as usize; // range of data blocks
        let blk_end = blk_begin + b.header.max_entries as usize;

        let mut bitset = self.composed_bits.lock().unwrap();
        let mut bitset_iter = bitset.iter_mut();
        let mut idx = blk_begin >> 6; // index of u64 in bitset array
        let mut dest = bitset_iter
            .nth(idx)
            .ok_or_else(|| value_err("array index out of bounds".to_string()))?;
        let mut buf = *dest;

        for (era, blk) in b.values.iter().zip(blk_begin..blk_end) {
            if *era < self.threshold {
                continue;
            }

            // advancing to a later word: flush the cached one, then step
            // the iterator forward (nth(steps - 1) advances exactly steps)
            let steps = (blk >> 6) - idx;
            if steps > 0 {
                *dest = buf;
                idx += steps;
                dest = bitset_iter
                    .nth(steps - 1)
                    .ok_or_else(|| value_err("array index out of bounds".to_string()))?;
                buf = *dest;
            }

            buf |= 1 << (blk & 0x3F);
        }
        *dest = buf;

        Ok(())
    }
}
//------------------------------------------
// OR one writeset's bitset into marked_bits.
fn collate_writeset(
    engine: Arc<dyn IoEngine + Send + Sync>,
    writeset_root: u64,
    marked_bits: &mut [u64],
) -> Result<()> {
    let mut collator = BitsetCollator::new(marked_bits);
    ArrayWalker::new(engine, false).walk(&mut collator, writeset_root)?;
    Ok(())
}
// Mark, in marked_bits, every block whose era array value is >= threshold.
fn collate_era_array(
    engine: Arc<dyn IoEngine + Send + Sync>,
    era_array_root: u64,
    marked_bits: &mut [u64],
    threshold: u32,
) -> Result<()> {
    let mut collator = EraArrayCollator::new(marked_bits, threshold);
    ArrayWalker::new(engine, false).walk(&mut collator, era_array_root)?;
    Ok(())
}
// Build a bitset of the blocks written since `threshold`, by folding in
// every writeset at or above that era, plus (when the writeset archive
// starts after the threshold) the era array itself.
fn mark_blocks_since(
    engine: Arc<dyn IoEngine + Send + Sync>,
    sb: &Superblock,
    threshold: u32,
) -> Result<Vec<u64>> {
    let mut marked_bits = vec![0u64; div_up(sb.nr_blocks as usize, 64)];

    let mut path = vec![0];
    let wsets = btree_to_map::<Writeset>(&mut path, engine.clone(), false, sb.writeset_tree_root)?;

    for (_era, ws) in wsets.iter().filter(|(era, _)| **era as u32 >= threshold) {
        collate_writeset(engine.clone(), ws.root, &mut marked_bits)?;
    }

    if let Some(archived_begin) = wsets.keys().next() {
        if *archived_begin as u32 > threshold {
            collate_era_array(
                engine.clone(),
                sb.era_array_root,
                &mut marked_bits,
                threshold,
            )?;
        }
    }

    Ok(marked_bits)
}
// Write the opening <blocks> tag.
fn emit_start<W: Write>(w: &mut Writer<W>) -> Result<()> {
    w.write_event(Event::Start(BytesStart::owned_name(b"blocks".to_vec())))?;
    Ok(())
}

// Write the closing </blocks> tag.
fn emit_end<W: Write>(w: &mut Writer<W>) -> Result<()> {
    w.write_event(Event::End(BytesEnd::borrowed(b"blocks")))?;
    Ok(())
}
// Emit the half-open range [begin, end): a <range> element for multi-block
// runs, a <block> element for a single block, nothing for an empty range.
fn emit_range<W: Write>(w: &mut Writer<W>, begin: u32, end: u32) -> Result<()> {
    if end > begin + 1 {
        let mut elem = BytesStart::owned_name(b"range".to_vec());
        elem.push_attribute(mk_attr(b"begin", begin));
        elem.push_attribute(mk_attr(b"end", end));
        w.write_event(Event::Empty(elem))?;
        return Ok(());
    }

    if end > begin {
        let mut elem = BytesStart::owned_name(b"block".to_vec());
        elem.push_attribute(mk_attr(b"block", begin));
        w.write_event(Event::Empty(elem))?;
    }

    Ok(())
}
// Walk the composed bitset 64 bits at a time, coalescing consecutive set
// bits into [begin, end) runs and emitting each run via emit_range.
// Same run-detection scheme as dump_writeset, but over u64 words.
fn emit_blocks<W: Write>(marked_bits: &[u64], nr_blocks: u32, w: &mut Writer<W>) -> Result<()> {
    let mut begin: u32 = 0;
    let mut end: u32 = 0;

    emit_start(w)?;

    for (index, entry) in marked_bits.iter().enumerate() {
        let mut n = *entry;

        if n == u64::max_value() {
            // all 64 bits set: extend the current run, clamped to nr_blocks
            end = std::cmp::min(end + 64, nr_blocks);
            continue;
        }

        while n > 0 {
            let zeros = n.trailing_zeros();
            if zeros > 0 {
                // hit a gap: flush the run accumulated so far
                if end > begin {
                    emit_range(w, begin, end)?;
                }
                n >>= zeros;
                end += zeros;
                begin = end;
            }

            // consume the next run of set bits
            let ones = n.trailing_ones();
            n >>= ones;
            end = std::cmp::min(end + ones, nr_blocks);
        }

        // flush a run that ends before this word's boundary
        let endpos = (index << 6) as u32 + 64;
        if end < endpos {
            if end > begin {
                emit_range(w, begin, end)?;
            }
            begin = endpos;
            end = begin;
        }
    }

    // flush the final run, if any
    if end > begin {
        emit_range(w, begin, end)?;
    }

    emit_end(w)?;
    Ok(())
}
//------------------------------------------
// Options controlling era_invalidate.
pub struct EraInvalidateOptions<'a> {
    pub input: &'a Path,          // metadata device/file to read
    pub output: Option<&'a Path>, // None = write XML to stdout
    pub async_io: bool,
    pub threshold: u32,      // report blocks written since this era
    pub metadata_snap: bool, // read from the metadata snapshot instead of live metadata
}

struct Context {
    engine: Arc<dyn IoEngine + Send + Sync>,
}
// Build the io engine.  The final flag passed to new_with is tied to
// whether we read the metadata snapshot — presumably exclusive-access
// behaviour; confirm against IoEngine::new_with.
fn mk_context(opts: &EraInvalidateOptions) -> anyhow::Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new_with(
            opts.input,
            MAX_CONCURRENT_IO,
            false,
            !opts.metadata_snap,
        )?)
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new_with(
            opts.input,
            nr_threads,
            false,
            !opts.metadata_snap,
        )?)
    };

    Ok(Context { engine })
}
/// Top-level era_invalidate entry point: list the blocks written since the
/// threshold era as XML.
pub fn invalidate(opts: &EraInvalidateOptions) -> Result<()> {
    let ctx = mk_context(opts)?;

    let mut sb = read_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION)?;
    if opts.metadata_snap {
        // follow the pointer to the metadata snapshot's superblock
        sb = read_superblock(ctx.engine.as_ref(), sb.metadata_snap)?;
    }

    let out: Box<dyn Write> = match opts.output {
        Some(path) => Box::new(BufWriter::new(File::create(path)?)),
        None => Box::new(BufWriter::new(std::io::stdout())),
    };
    let mut writer = Writer::new_with_indent(out, 0x20, 2);

    let marked_bits = mark_blocks_since(ctx.engine, &sb, opts.threshold)?;
    emit_blocks(&marked_bits, sb.nr_blocks, &mut writer)
}
//------------------------------------------

54
src/era/ir.rs Normal file
View File

@ -0,0 +1,54 @@
use anyhow::Result;
//------------------------------------------
// In-memory intermediate representation shared by the era XML reader,
// writer and restorer.

// Superblock fields carried through the XML round trip.
#[derive(Clone)]
pub struct Superblock {
    pub uuid: String,
    pub block_size: u32, // data block size
    pub nr_blocks: u32,
    pub current_era: u32,
}

// Header of one writeset section.
#[derive(Clone)]
pub struct Writeset {
    pub era: u32,
    pub nr_bits: u32,
}

// A run of consecutive marked blocks within a writeset.
#[derive(Clone)]
pub struct MarkedBlocks {
    pub begin: u32,
    pub len: u32,
}

// One era array entry: the era of a single data block.
#[derive(Clone)]
pub struct Era {
    pub block: u32,
    pub era: u32,
}
//------------------------------------------
// Return value from visitor callbacks: carry on or abort the walk.
#[derive(Clone)]
pub enum Visit {
    Continue,
    Stop,
}

// Callbacks invoked while traversing era metadata (or parsing its XML
// form): superblock, each writeset with its marked-block runs, then the
// era array entries, terminated by eof.
pub trait MetadataVisitor {
    fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit>;
    fn superblock_e(&mut self) -> Result<Visit>;

    fn writeset_b(&mut self, ws: &Writeset) -> Result<Visit>;
    fn writeset_e(&mut self) -> Result<Visit>;
    fn writeset_blocks(&mut self, blocks: &MarkedBlocks) -> Result<Visit>;

    fn era_b(&mut self) -> Result<Visit>;
    fn era_e(&mut self) -> Result<Visit>;
    fn era(&mut self, era: &Era) -> Result<Visit>;

    fn eof(&mut self) -> Result<Visit>;
}
//------------------------------------------

9
src/era/mod.rs Normal file
View File

@ -0,0 +1,9 @@
pub mod check;
pub mod dump;
pub mod invalidate;
pub mod ir;
pub mod repair;
pub mod restore;
pub mod superblock;
pub mod writeset;
pub mod xml;

68
src/era/repair.rs Normal file
View File

@ -0,0 +1,68 @@
use anyhow::Result;
use std::path::Path;
use std::sync::Arc;
use crate::era::dump::*;
use crate::era::restore::*;
use crate::era::superblock::*;
use crate::io_engine::*;
use crate::pdata::space_map_metadata::*;
use crate::report::*;
use crate::write_batcher::*;
//------------------------------------------
// Options controlling era_repair.
pub struct EraRepairOptions<'a> {
    pub input: &'a Path,  // broken metadata to read
    pub output: &'a Path, // device/file receiving the repaired metadata
    pub async_io: bool,
    pub report: Arc<Report>,
}

struct Context {
    _report: Arc<Report>,
    engine_in: Arc<dyn IoEngine + Send + Sync>,
    engine_out: Arc<dyn IoEngine + Send + Sync>,
}
const MAX_CONCURRENT_IO: u32 = 1024;
// Build a read engine for the input and a write engine for the output.
fn new_context(opts: &EraRepairOptions) -> Result<Context> {
    type Engine = Arc<dyn IoEngine + Send + Sync>;

    let (engine_in, engine_out): (Engine, Engine) = if opts.async_io {
        (
            Arc::new(AsyncIoEngine::new(opts.input, MAX_CONCURRENT_IO, false)?),
            Arc::new(AsyncIoEngine::new(opts.output, MAX_CONCURRENT_IO, true)?),
        )
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        (
            Arc::new(SyncIoEngine::new(opts.input, nr_threads, false)?),
            Arc::new(SyncIoEngine::new(opts.output, nr_threads, true)?),
        )
    };

    Ok(Context {
        _report: opts.report.clone(),
        engine_in,
        engine_out,
    })
}
//------------------------------------------
/// Repair era metadata by dumping whatever is readable from the input
/// straight into a Restorer that rebuilds it on the output device.
pub fn repair(opts: EraRepairOptions) -> Result<()> {
    let ctx = new_context(&opts)?;

    let sb = read_superblock(ctx.engine_in.as_ref(), SUPERBLOCK_LOCATION)?;

    let sm = core_metadata_sm(ctx.engine_out.get_nr_blocks(), u32::MAX);
    let mut batcher = WriteBatcher::new(
        ctx.engine_out.clone(),
        sm.clone(),
        ctx.engine_out.get_batch_size(),
    );
    let mut restorer = Restorer::new(&mut batcher);

    // repair == true: tolerate non-fatal errors while walking the input
    dump_metadata(ctx.engine_in, &mut restorer, &sb, true)
}
//------------------------------------------

313
src/era/restore.rs Normal file
View File

@ -0,0 +1,313 @@
use anyhow::{anyhow, Result};
use std::collections::BTreeMap;
use std::fs::OpenOptions;
use std::path::Path;
use std::sync::Arc;
use crate::era::ir::{self, MetadataVisitor, Visit};
use crate::era::superblock::*;
use crate::era::writeset::Writeset;
use crate::era::xml;
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::array_builder::*;
use crate::pdata::btree_builder::*;
use crate::pdata::space_map_common::pack_root;
use crate::pdata::space_map_metadata::*;
use crate::report::*;
use crate::write_batcher::*;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
// Options controlling era_restore.
pub struct EraRestoreOptions<'a> {
    pub input: &'a Path,  // XML file to read
    pub output: &'a Path, // metadata device/file to write
    pub async_io: bool,
    pub report: Arc<Report>,
}

struct Context {
    _report: Arc<Report>,
    engine: Arc<dyn IoEngine + Send + Sync>,
}
// Build a writable io engine for the output device.
fn mk_context(opts: &EraRestoreOptions) -> anyhow::Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.output, MAX_CONCURRENT_IO, true)?)
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.output, nr_threads, true)?)
    };

    Ok(Context {
        _report: opts.report.clone(),
        engine,
    })
}
//------------------------------------------
// Tracks which part of the input document we are inside, enforcing
// correct nesting of the visitor callbacks.
#[derive(PartialEq)]
enum Section {
    None,
    Superblock,
    Writeset,
    EraArray,
    Finalized,
}

// MetadataVisitor implementation that rebuilds on-disk era metadata.
pub struct Restorer<'a> {
    w: &'a mut WriteBatcher,            // sink for all metadata writes
    sb: Option<ir::Superblock>,         // source superblock, kept until finalize()
    writesets: BTreeMap<u32, Writeset>, // completed writesets keyed by era
    writeset_builder: Option<ArrayBuilder<u64>>, // bitset
    current_writeset: Option<ir::Writeset>,
    era_array_builder: Option<ArrayBuilder<u32>>,
    writeset_entry: u64, // 64-bit bitset entry currently being accumulated
    entry_index: u32,    // index of writeset_entry within the bitset
    in_section: Section,
}
impl<'a> Restorer<'a> {
    pub fn new(w: &'a mut WriteBatcher) -> Restorer<'a> {
        Restorer {
            w,
            sb: None,
            writesets: BTreeMap::new(),
            writeset_builder: None,
            current_writeset: None,
            era_array_builder: None,
            writeset_entry: 0,
            entry_index: 0,
            in_section: Section::None,
        }
    }

    // Assemble the on-disk metadata once all input has been visited:
    // build the writeset tree and era array, write the metadata space map,
    // then write the superblock at block 0.
    fn finalize(&mut self) -> Result<()> {
        let src_sb;
        if let Some(sb) = self.sb.take() {
            src_sb = sb;
        } else {
            return Err(anyhow!("not in superblock"));
        }

        // build the writeset tree
        let mut tree_builder = BTreeBuilder::<Writeset>::new(Box::new(NoopRC {}));
        let mut writesets = BTreeMap::<u32, Writeset>::new();
        std::mem::swap(&mut self.writesets, &mut writesets);
        for (era, ws) in writesets {
            tree_builder.push_value(self.w, era as u64, ws)?;
        }
        let writeset_tree_root = tree_builder.complete(self.w)?;

        // complete the era array
        let era_array_root;
        if let Some(builder) = self.era_array_builder.take() {
            era_array_root = builder.complete(self.w)?;
        } else {
            return Err(anyhow!("internal error. couldn't find era array"));
        }

        // build metadata space map
        let metadata_sm_root = build_metadata_sm(self.w)?;

        let sb = Superblock {
            flags: SuperblockFlags {
                clean_shutdown: true,
            },
            block: SUPERBLOCK_LOCATION,
            version: 1,
            metadata_sm_root,
            data_block_size: src_sb.block_size,
            nr_blocks: src_sb.nr_blocks,
            current_era: src_sb.current_era,
            // restored writesets all live in the tree; root == 0 marks the
            // superblock's own writeset slot as unused
            current_writeset: Writeset {
                nr_bits: src_sb.nr_blocks,
                root: 0,
            },
            writeset_tree_root,
            era_array_root,
            metadata_snap: 0,
        };
        write_superblock(self.w.engine.as_ref(), SUPERBLOCK_LOCATION, &sb)?;

        self.in_section = Section::Finalized;
        Ok(())
    }
}
impl<'a> MetadataVisitor for Restorer<'a> {
    /// Start of the superblock section: record the source superblock and
    /// reserve block 0 of the output for the superblock written later by
    /// finalize().
    fn superblock_b(&mut self, sb: &ir::Superblock) -> Result<Visit> {
        if self.in_section != Section::None {
            return Err(anyhow!("duplicated superblock"));
        }

        self.sb = Some(sb.clone());
        let b = self.w.alloc()?;
        if b.loc != SUPERBLOCK_LOCATION {
            return Err(anyhow!("superblock was occupied"));
        }
        self.writeset_builder = None;
        self.era_array_builder = Some(ArrayBuilder::new(sb.nr_blocks as u64));
        self.in_section = Section::Superblock;
        Ok(Visit::Continue)
    }

    /// End of the superblock section: write everything out.
    fn superblock_e(&mut self) -> Result<Visit> {
        self.finalize()?;
        Ok(Visit::Continue)
    }

    /// Start of a writeset: begin a fresh bitset (one u64 array entry per
    /// 64 blocks) and reset the entry buffer.
    fn writeset_b(&mut self, ws: &ir::Writeset) -> Result<Visit> {
        if self.in_section != Section::Superblock {
            return Err(anyhow!("not in superblock"));
        }
        self.writeset_builder = Some(ArrayBuilder::new(div_up(ws.nr_bits as u64, 64)));
        self.entry_index = 0;
        self.writeset_entry = 0;
        self.current_writeset = Some(ws.clone());
        self.in_section = Section::Writeset;
        Ok(Visit::Continue)
    }

    /// End of a writeset: flush the trailing buffered entry and record the
    /// completed bitset root under its era.
    fn writeset_e(&mut self) -> Result<Visit> {
        if self.in_section != Section::Writeset {
            return Err(anyhow!("not in writeset"));
        }

        if let Some(mut builder) = self.writeset_builder.take() {
            if let Some(ws) = self.current_writeset.take() {
                // push the trailing bits
                builder.push_value(self.w, self.entry_index as u64, self.writeset_entry)?;

                let root = builder.complete(self.w)?;
                self.writesets.insert(
                    ws.era,
                    Writeset {
                        root,
                        nr_bits: ws.nr_bits,
                    },
                );
                self.in_section = Section::Superblock;
            } else {
                return Err(anyhow!("internal error. couldn't find writeset"));
            }
        } else {
            return Err(anyhow!("internal error. couldn't find writeset"));
        }

        Ok(Visit::Continue)
    }

    /// A run of marked blocks within the current writeset.  The bitset is
    /// built 64 bits at a time: `writeset_entry` buffers the (possibly
    /// partial) entry at `entry_index` until a later run, or writeset_e(),
    /// pushes it.  Runs arrive in ascending, non-overlapping order.
    fn writeset_blocks(&mut self, blocks: &ir::MarkedBlocks) -> Result<Visit> {
        let first = blocks.begin;
        let last = first + blocks.len - 1; // inclusive
        let mut idx = first >> 6;
        let last_idx = last >> 6; // inclusive
        let builder = self.writeset_builder.as_mut().unwrap();

        // emit the buffered bits if this run starts in a later entry
        if idx > self.entry_index {
            builder.push_value(self.w, self.entry_index as u64, self.writeset_entry)?;
            self.entry_index = idx;
            self.writeset_entry = 0;
        }

        // buffer the bits of the first entry
        let bi_first = first & 63;
        if idx == last_idx {
            // the whole run lives inside a single 64-bit entry
            let bi_last = last & 63;
            let mask = 1u64 << bi_last;
            self.writeset_entry |= (mask ^ mask.wrapping_sub(1)) & (u64::MAX << bi_first);
            return Ok(Visit::Continue);
        }

        self.writeset_entry |= u64::MAX << bi_first;

        // emit the first entry, then the all-1 entries in the middle;
        // entries entry_index .. last_idx - 1 are pushed here
        while idx < last_idx {
            builder.push_value(self.w, self.entry_index as u64, self.writeset_entry)?;
            self.entry_index += 1;
            self.writeset_entry = u64::MAX;
            idx += 1;
        }

        // Buffer (don't push) the partial last entry: bits 0..=bi_last.
        // The loop above left a stale all-ones value in writeset_entry, so
        // overwrite it.  The previous code pushed that all-ones value at
        // last_idx and advanced entry_index, which marked up to 64 blocks
        // beyond the run and left an all-ones buffer to be flushed later.
        let bi_last = last & 63;
        let mask = 1u64 << bi_last;
        self.writeset_entry = mask ^ mask.wrapping_sub(1);

        Ok(Visit::Continue)
    }

    /// Start of the era array section.
    fn era_b(&mut self) -> Result<Visit> {
        if self.in_section != Section::Superblock {
            return Err(anyhow!("not in superblock"));
        }
        self.in_section = Section::EraArray;
        Ok(Visit::Continue)
    }

    /// End of the era array section.
    fn era_e(&mut self) -> Result<Visit> {
        if self.in_section != Section::EraArray {
            return Err(anyhow!("not in era array"));
        }
        self.in_section = Section::Superblock;
        Ok(Visit::Continue)
    }

    /// One era array entry.
    fn era(&mut self, era: &ir::Era) -> Result<Visit> {
        let builder = self.era_array_builder.as_mut().unwrap();
        builder.push_value(self.w, era.block as u64, era.era)?;
        Ok(Visit::Continue)
    }

    /// End of input: only valid once superblock_e() has finalized the output.
    fn eof(&mut self) -> Result<Visit> {
        if self.in_section != Section::Finalized {
            return Err(anyhow!("incompleted source metadata"));
        }
        Ok(Visit::Continue)
    }
}
//------------------------------------------
/// Serialise the metadata space map, then pack its root ready for
/// embedding in the superblock.
fn build_metadata_sm(w: &mut WriteBatcher) -> Result<Vec<u8>> {
    let root = write_metadata_sm(w)?;
    Ok(pack_root(&root, SPACE_MAP_ROOT_SIZE)?)
}
//------------------------------------------
/// Rebuild era metadata from an XML dump.
///
/// Opens the XML input read-only, then streams it through a `Restorer`
/// bound to a `WriteBatcher` over the output metadata device.
pub fn restore(opts: EraRestoreOptions) -> Result<()> {
    let input = OpenOptions::new()
        .read(true)
        .write(false)
        .open(opts.input)?;

    let ctx = mk_context(&opts)?;
    let sm = core_metadata_sm(ctx.engine.get_nr_blocks(), u32::MAX);
    // `sm` is not used again below, so move it instead of cloning
    let mut w = WriteBatcher::new(ctx.engine.clone(), sm, ctx.engine.get_batch_size());
    let mut restorer = Restorer::new(&mut w);
    xml::read(input, &mut restorer)?;

    Ok(())
}
//------------------------------------------

153
src/era/superblock.rs Normal file
View File

@ -0,0 +1,153 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{bytes::complete::*, number::complete::*, IResult};
use std::io::Cursor;
use crate::checksum::*;
use crate::era::writeset::Writeset;
use crate::io_engine::*;
//------------------------------------------
pub const SPACE_MAP_ROOT_SIZE: usize = 128;
pub const SUPERBLOCK_LOCATION: u64 = 0;
const MAGIC: u64 = 0o17660203573; // 0x7EC1077B in hex
const UUID_SIZE: usize = 16;
//------------------------------------------
/// Flag bits from the on-disk superblock flags word.
#[derive(Debug, Clone)]
pub struct SuperblockFlags {
    /// True when bit 0 of the flags word is set (clean shutdown).
    pub clean_shutdown: bool,
}
/// In-core representation of the era superblock (stored at
/// SUPERBLOCK_LOCATION, block 0 of the metadata device).
#[derive(Debug, Clone)]
pub struct Superblock {
    pub flags: SuperblockFlags,
    /// Location of this superblock on the metadata device.
    pub block: u64,
    pub version: u32,
    /// Packed metadata space map root (SPACE_MAP_ROOT_SIZE bytes).
    pub metadata_sm_root: Vec<u8>,
    /// Size of a data block, in sectors.
    pub data_block_size: u32,
    pub nr_blocks: u32,
    pub current_era: u32,
    /// Writeset being filled during the current era.
    pub current_writeset: Writeset,
    pub writeset_tree_root: u64,
    pub era_array_root: u64,
    pub metadata_snap: u64,
}
fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
let (i, _csum) = le_u32(data)?;
let (i, flags) = le_u32(i)?;
let (i, block) = le_u64(i)?;
let (i, _uuid) = take(16usize)(i)?;
let (i, _magic) = le_u64(i)?;
let (i, version) = le_u32(i)?;
let (i, metadata_sm_root) = take(SPACE_MAP_ROOT_SIZE)(i)?;
let (i, data_block_size) = le_u32(i)?;
let (i, _metadata_block_size) = le_u32(i)?;
let (i, nr_blocks) = le_u32(i)?;
let (i, current_era) = le_u32(i)?;
let (i, nr_bits) = le_u32(i)?;
let (i, root) = le_u64(i)?;
let (i, writeset_tree_root) = le_u64(i)?;
let (i, era_array_root) = le_u64(i)?;
let (i, metadata_snap) = le_u64(i)?;
Ok((
i,
Superblock {
flags: SuperblockFlags {
clean_shutdown: (flags & 0x1) != 0,
},
block,
version,
metadata_sm_root: metadata_sm_root.to_vec(),
data_block_size,
nr_blocks,
current_era,
current_writeset: Writeset { nr_bits, root },
writeset_tree_root,
era_array_root,
metadata_snap,
},
))
}
/// Read and parse the superblock at `loc`, verifying its checksum and
/// block type first.
pub fn read_superblock(engine: &dyn IoEngine, loc: u64) -> Result<Superblock> {
    let b = engine.read(loc)?;

    if metadata_block_type(b.get_data()) != BT::ERA_SUPERBLOCK {
        return Err(anyhow!("bad checksum in superblock"));
    }

    unpack(b.get_data())
        .map(|(_, sb)| sb)
        .map_err(|_| anyhow!("couldn't unpack superblock"))
}
//------------------------------------------
/// Serialise `sb` into the on-disk superblock layout (little-endian).
/// Field order mirrors `unpack` above.
///
/// The checksum field is written as zero here; it is filled in later
/// by `write_checksum` in `write_superblock`.
fn pack_superblock<W: WriteBytesExt>(sb: &Superblock, w: &mut W) -> Result<()> {
    // checksum, which we don't know yet
    w.write_u32::<LittleEndian>(0)?;

    // flags
    let mut flags: u32 = 0;
    if sb.flags.clean_shutdown {
        flags |= 0x1;
    }
    w.write_u32::<LittleEndian>(flags)?;
    w.write_u64::<LittleEndian>(sb.block)?;
    // the uuid is not preserved; a zeroed placeholder is written
    w.write_all(&[0; UUID_SIZE])?;
    w.write_u64::<LittleEndian>(MAGIC)?;
    w.write_u32::<LittleEndian>(sb.version)?;

    w.write_all(&sb.metadata_sm_root)?;

    w.write_u32::<LittleEndian>(sb.data_block_size)?;
    // metadata block size, in sectors
    w.write_u32::<LittleEndian>((BLOCK_SIZE >> SECTOR_SHIFT) as u32)?;
    w.write_u32::<LittleEndian>(sb.nr_blocks)?;

    w.write_u32::<LittleEndian>(sb.current_era)?;
    w.write_u32::<LittleEndian>(sb.current_writeset.nr_bits)?;
    w.write_u64::<LittleEndian>(sb.current_writeset.root)?;
    w.write_u64::<LittleEndian>(sb.writeset_tree_root)?;
    w.write_u64::<LittleEndian>(sb.era_array_root)?;
    w.write_u64::<LittleEndian>(sb.metadata_snap)?;

    Ok(())
}
/// Pack `sb`, checksum it, and write it out as an era superblock.
///
/// NOTE(review): the `_loc` parameter is ignored — the block is always
/// written at SUPERBLOCK_LOCATION (block 0). Confirm no caller passes a
/// different location expecting it to be honoured.
pub fn write_superblock(engine: &dyn IoEngine, _loc: u64, sb: &Superblock) -> Result<()> {
    let b = Block::zeroed(SUPERBLOCK_LOCATION);

    // pack the superblock
    {
        let mut cursor = Cursor::new(b.get_data());
        pack_superblock(sb, &mut cursor)?;
    }

    // calculate the checksum
    write_checksum(b.get_data(), BT::ERA_SUPERBLOCK)?;

    // write
    engine.write(&b)?;

    Ok(())
}
//------------------------------------------

35
src/era/writeset.rs Normal file
View File

@ -0,0 +1,35 @@
use anyhow::Result;
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use crate::pdata::unpack::*;
//------------------------------------------
/// On-disk writeset descriptor: a bitset covering `nr_bits` blocks.
#[derive(Clone, Copy, Debug)]
pub struct Writeset {
    /// Number of bits (blocks) covered by this writeset.
    pub nr_bits: u32,
    /// Metadata block holding the root of the bitset array.
    pub root: u64,
}
impl Unpack for Writeset {
    /// On-disk footprint: a u32 `nr_bits` followed by a u64 `root`.
    fn disk_size() -> u32 {
        12
    }

    fn unpack(input: &[u8]) -> IResult<&[u8], Writeset> {
        let (rest, nr_bits) = le_u32(input)?;
        let (rest, root) = le_u64(rest)?;

        Ok((rest, Writeset { nr_bits, root }))
    }
}
impl Pack for Writeset {
    /// Serialise in the same little-endian layout `unpack` reads.
    fn pack<W: WriteBytesExt>(&self, w: &mut W) -> Result<()> {
        w.write_u32::<LittleEndian>(self.nr_bits)?;
        w.write_u64::<LittleEndian>(self.root)?;
        Ok(())
    }
}
//------------------------------------------

313
src/era/xml.rs Normal file
View File

@ -0,0 +1,313 @@
use anyhow::{anyhow, Result};
use quick_xml::events::{BytesEnd, BytesStart, Event};
use quick_xml::{Reader, Writer};
use std::io::{BufRead, BufReader};
use std::io::{Read, Write};
use crate::era::ir::*;
use crate::xml::*;
//---------------------------------------
/// Emits era metadata as XML.
///
/// In compact mode, runs of marked blocks are written as single
/// `<marked>` elements; otherwise every block gets an explicit
/// `<bit>` element.
pub struct XmlWriter<W: Write> {
    w: Writer<W>,
    compact: bool,
    // device size, used to pad trailing unmarked bits in non-compact mode
    nr_blocks: u32,
    // number of <bit> elements written so far for the current writeset
    emitted_blocks: u32,
}
impl<W: Write> XmlWriter<W> {
    /// Wrap `w` in an XML writer; `compact` selects <marked> runs over
    /// per-block <bit> elements.
    pub fn new(w: W, compact: bool) -> XmlWriter<W> {
        XmlWriter {
            // indent with two spaces (0x20)
            w: Writer::new_with_indent(w, 0x20, 2),
            compact,
            nr_blocks: 0,
            emitted_blocks: 0,
        }
    }
}
impl<W: Write> MetadataVisitor for XmlWriter<W> {
fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit> {
let tag = b"superblock";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"uuid", sb.uuid.clone()));
elem.push_attribute(mk_attr(b"block_size", sb.block_size));
elem.push_attribute(mk_attr(b"nr_blocks", sb.nr_blocks));
elem.push_attribute(mk_attr(b"current_era", sb.current_era));
self.w.write_event(Event::Start(elem))?;
self.nr_blocks = sb.nr_blocks;
Ok(Visit::Continue)
}
fn superblock_e(&mut self) -> Result<Visit> {
self.w
.write_event(Event::End(BytesEnd::borrowed(b"superblock")))?;
Ok(Visit::Continue)
}
fn writeset_b(&mut self, ws: &Writeset) -> Result<Visit> {
let tag = b"writeset";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"era", ws.era));
elem.push_attribute(mk_attr(b"nr_bits", ws.nr_bits));
self.w.write_event(Event::Start(elem))?;
self.emitted_blocks = 0;
Ok(Visit::Continue)
}
fn writeset_e(&mut self) -> Result<Visit> {
if !self.compact {
for b in self.emitted_blocks..self.nr_blocks {
let tag = b"bit";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"block", b));
elem.push_attribute(mk_attr(b"value", "false"));
self.w.write_event(Event::Empty(elem))?;
}
}
self.w
.write_event(Event::End(BytesEnd::borrowed(b"writeset")))?;
Ok(Visit::Continue)
}
fn writeset_blocks(&mut self, blocks: &MarkedBlocks) -> Result<Visit> {
if self.compact {
let tag = b"marked";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"block_begin", blocks.begin));
elem.push_attribute(mk_attr(b"len", blocks.len));
self.w.write_event(Event::Empty(elem))?;
} else {
for b in self.emitted_blocks..blocks.begin {
let tag = b"bit";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"block", b));
elem.push_attribute(mk_attr(b"value", "false"));
self.w.write_event(Event::Empty(elem))?;
}
let end = blocks.begin + blocks.len;
for b in blocks.begin..end {
let tag = b"bit";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"block", b));
elem.push_attribute(mk_attr(b"value", "true"));
self.w.write_event(Event::Empty(elem))?;
}
self.emitted_blocks = end;
}
Ok(Visit::Continue)
}
fn era_b(&mut self) -> Result<Visit> {
let tag = b"era_array";
let elem = BytesStart::owned(tag.to_vec(), tag.len());
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn era_e(&mut self) -> Result<Visit> {
self.w
.write_event(Event::End(BytesEnd::borrowed(b"era_array")))?;
Ok(Visit::Continue)
}
fn era(&mut self, era: &Era) -> Result<Visit> {
let tag = b"era";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"block", era.block));
elem.push_attribute(mk_attr(b"era", era.era));
self.w.write_event(Event::Empty(elem))?;
Ok(Visit::Continue)
}
fn eof(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
}
//------------------------------------------
/// Parse the attributes of a `<superblock>` element.
fn parse_superblock(e: &BytesStart) -> Result<Superblock> {
    let tag = "superblock";
    let mut uuid: Option<String> = None;
    let mut block_size: Option<u32> = None;
    let mut nr_blocks: Option<u32> = None;
    let mut current_era: Option<u32> = None;

    for a in e.attributes() {
        let kv = a.unwrap();
        match kv.key {
            b"uuid" => uuid = Some(string_val(&kv)),
            b"block_size" => block_size = Some(u32_val(&kv)?),
            b"nr_blocks" => nr_blocks = Some(u32_val(&kv)?),
            b"current_era" => current_era = Some(u32_val(&kv)?),
            _ => return bad_attr(tag, kv.key),
        }
    }

    Ok(Superblock {
        uuid: check_attr(tag, "uuid", uuid)?,
        block_size: check_attr(tag, "block_size", block_size)?,
        // was "nr_cache_blocks" — a copy-paste from the cache tools;
        // the era attribute (matched above) is "nr_blocks"
        nr_blocks: check_attr(tag, "nr_blocks", nr_blocks)?,
        current_era: check_attr(tag, "current_era", current_era)?,
    })
}
/// Parse the attributes of a `<writeset>` element.
fn parse_writeset(e: &BytesStart) -> Result<Writeset> {
    let tag = "writeset";
    let mut era: Option<u32> = None;
    let mut nr_bits: Option<u32> = None;

    for attr in e.attributes() {
        let kv = attr.unwrap();
        if kv.key == b"era" {
            era = Some(u32_val(&kv)?);
        } else if kv.key == b"nr_bits" {
            nr_bits = Some(u32_val(&kv)?);
        } else {
            return bad_attr(tag, kv.key);
        }
    }

    Ok(Writeset {
        era: check_attr(tag, "era", era)?,
        nr_bits: check_attr(tag, "nr_bits", nr_bits)?,
    })
}
/// Parse a `<bit>` element. Only marked bits carry information, so an
/// unmarked bit yields `None`.
fn parse_writeset_bit(e: &BytesStart) -> Result<Option<MarkedBlocks>> {
    let tag = "bit";
    let mut block: Option<u32> = None;
    let mut value: Option<bool> = None;

    for a in e.attributes() {
        let kv = a.unwrap();
        match kv.key {
            b"block" => block = Some(u32_val(&kv)?),
            b"value" => value = Some(bool_val(&kv)?),
            _ => return bad_attr(tag, kv.key),
        }
    }

    // use the checked values directly rather than unwrapping afterwards
    let begin = check_attr(tag, "block", block)?;
    let marked = check_attr(tag, "value", value)?;

    Ok(if marked {
        Some(MarkedBlocks { begin, len: 1 })
    } else {
        None
    })
}
/// Parse a compact `<marked>` element describing a run of set bits.
fn parse_writeset_blocks(e: &BytesStart) -> Result<MarkedBlocks> {
    let tag = "marked";
    let mut begin: Option<u32> = None;
    let mut len: Option<u32> = None;

    for attr in e.attributes() {
        let kv = attr.unwrap();
        if kv.key == b"block_begin" {
            begin = Some(u32_val(&kv)?);
        } else if kv.key == b"len" {
            len = Some(u32_val(&kv)?);
        } else {
            return bad_attr(tag, kv.key);
        }
    }

    Ok(MarkedBlocks {
        begin: check_attr(tag, "block_begin", begin)?,
        len: check_attr(tag, "len", len)?,
    })
}
/// Parse an `<era>` element: one era-array entry (block -> era).
fn parse_era(e: &BytesStart) -> Result<Era> {
    let tag = "era";
    let mut block: Option<u32> = None;
    let mut era: Option<u32> = None;

    for attr in e.attributes() {
        let kv = attr.unwrap();
        if kv.key == b"block" {
            block = Some(u32_val(&kv)?);
        } else if kv.key == b"era" {
            era = Some(u32_val(&kv)?);
        } else {
            return bad_attr(tag, kv.key);
        }
    }

    Ok(Era {
        block: check_attr(tag, "block", block)?,
        era: check_attr(tag, "era", era)?,
    })
}
/// Dispatch a single XML event from `reader` to `visitor`.
///
/// Returns `Visit::Stop` at end of input (after notifying `visitor.eof`),
/// `Visit::Continue` otherwise. Unrecognised tags, unexpected event
/// kinds, or malformed XML are reported as parse errors carrying the
/// current byte offset.
fn handle_event<R, M>(reader: &mut Reader<R>, buf: &mut Vec<u8>, visitor: &mut M) -> Result<Visit>
where
    R: Read + BufRead,
    M: MetadataVisitor,
{
    match reader.read_event(buf) {
        // opening tags
        Ok(Event::Start(ref e)) => match e.name() {
            b"superblock" => visitor.superblock_b(&parse_superblock(e)?),
            b"writeset" => visitor.writeset_b(&parse_writeset(e)?),
            b"era_array" => visitor.era_b(),
            _ => return Err(anyhow!("Parse error at byte {}", reader.buffer_position())),
        },
        // closing tags
        Ok(Event::End(ref e)) => match e.name() {
            b"superblock" => visitor.superblock_e(),
            b"writeset" => visitor.writeset_e(),
            b"era_array" => visitor.era_e(),
            _ => return Err(anyhow!("Parse error at byte {}", reader.buffer_position())),
        },
        // self-closing tags
        Ok(Event::Empty(ref e)) => match e.name() {
            // a <bit> only produces a visit when the bit is marked
            b"bit" => {
                if let Some(b) = parse_writeset_bit(e)? {
                    visitor.writeset_blocks(&b)
                } else {
                    Ok(Visit::Continue)
                }
            }
            b"marked" => visitor.writeset_blocks(&parse_writeset_blocks(e)?),
            b"era" => visitor.era(&parse_era(e)?),
            _ => return Err(anyhow!("Parse error at byte {}", reader.buffer_position())),
        },
        // whitespace/text and comments are ignored
        Ok(Event::Text(_)) => Ok(Visit::Continue),
        Ok(Event::Comment(_)) => Ok(Visit::Continue),
        Ok(Event::Eof) => {
            visitor.eof()?;
            Ok(Visit::Stop)
        }
        Ok(_) => return Err(anyhow!("Parse error at byte {}", reader.buffer_position())),
        Err(e) => {
            return Err(anyhow!(
                "Parse error at byte {}: {:?}",
                reader.buffer_position(),
                e
            ))
        }
    }
}
/// Stream era XML from `input` into `visitor`, one event at a time,
/// until `handle_event` signals a stop or reports an error.
pub fn read<R, M>(input: R, visitor: &mut M) -> Result<()>
where
    R: Read,
    M: MetadataVisitor,
{
    let mut reader = Reader::from_reader(BufReader::new(input));
    reader.trim_text(true);

    let mut buf = Vec::new();
    loop {
        if !matches!(handle_event(&mut reader, &mut buf, visitor)?, Visit::Continue) {
            break;
        }
    }
    Ok(())
}
//------------------------------------------

View File

@ -18,28 +18,19 @@ pub fn is_file_or_blk_(info: FileStat) -> bool {
}
pub fn file_exists(path: &Path) -> bool {
match stat::stat(path) {
Ok(_) => true,
_ => false,
}
matches!(stat::stat(path), Ok(_))
}
pub fn is_file_or_blk(path: &Path) -> bool {
match stat::stat(path) {
Ok(info) =>is_file_or_blk_(info),
Ok(info) => is_file_or_blk_(info),
_ => false,
}
}
pub fn is_file(path: &Path) -> bool {
match stat::stat(path) {
Ok(info) => {
if test_bit(info.st_mode, SFlag::S_IFREG) {
true
} else {
false
}
}
Ok(info) => test_bit(info.st_mode, SFlag::S_IFREG),
_ => false,
}
}
@ -113,5 +104,3 @@ pub fn create_sized_file(path: &Path, nr_bytes: u64) -> io::Result<std::fs::File
}
//---------------------------------------

View File

@ -109,7 +109,7 @@ impl<'a> Deref for FileGuard<'a> {
type Target = File;
fn deref(&self) -> &File {
&self.file.as_ref().expect("empty file guard")
self.file.as_ref().expect("empty file guard")
}
}
@ -131,21 +131,30 @@ impl<'a> Drop for FileGuard<'a> {
}
impl SyncIoEngine {
fn open_file(path: &Path, writable: bool) -> Result<File> {
fn open_file(path: &Path, writable: bool, excl: bool) -> Result<File> {
let file = OpenOptions::new()
.read(true)
.write(writable)
.custom_flags(libc::O_EXCL)
.custom_flags(if excl { libc::O_EXCL } else { 0 })
.open(path)?;
Ok(file)
}
pub fn new(path: &Path, nr_files: usize, writable: bool) -> Result<SyncIoEngine> {
SyncIoEngine::new_with(path, nr_files, writable, true)
}
pub fn new_with(
path: &Path,
nr_files: usize,
writable: bool,
excl: bool,
) -> Result<SyncIoEngine> {
let nr_blocks = get_nr_blocks(path)?; // check file mode eariler
let mut files = Vec::with_capacity(nr_files);
for _n in 0..nr_files {
files.push(SyncIoEngine::open_file(path, writable)?);
files.push(SyncIoEngine::open_file(path, writable, excl)?);
}
Ok(SyncIoEngine {
@ -180,7 +189,7 @@ impl SyncIoEngine {
fn write_(output: &mut File, b: &Block) -> Result<()> {
output.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
output.write_all(&b.get_data())?;
output.write_all(b.get_data())?;
Ok(())
}
}
@ -237,11 +246,24 @@ pub struct AsyncIoEngine {
impl AsyncIoEngine {
pub fn new(path: &Path, queue_len: u32, writable: bool) -> Result<AsyncIoEngine> {
AsyncIoEngine::new_with(path, queue_len, writable, true)
}
pub fn new_with(
path: &Path,
queue_len: u32,
writable: bool,
excl: bool,
) -> Result<AsyncIoEngine> {
let nr_blocks = get_nr_blocks(path)?; // check file mode earlier
let mut flags = libc::O_DIRECT;
if excl {
flags |= libc::O_EXCL;
}
let input = OpenOptions::new()
.read(true)
.write(writable)
.custom_flags(libc::O_DIRECT | libc::O_EXCL)
.custom_flags(flags)
.open(path)?;
Ok(AsyncIoEngine {

View File

@ -18,6 +18,7 @@ extern crate quickcheck_macros;
pub mod cache;
pub mod checksum;
pub mod commands;
pub mod era;
pub mod file_utils;
pub mod io_engine;
pub mod math;
@ -26,6 +27,7 @@ pub mod pdata;
pub mod report;
pub mod shrink;
pub mod thin;
pub mod units;
pub mod version;
pub mod write_batcher;
pub mod xml;

View File

@ -66,7 +66,7 @@ fn mk_chunk_vecs(nr_blocks: u64, nr_jobs: u64) -> Vec<Vec<(u64, u64)>> {
}
pub fn pack(input_file: &Path, output_file: &Path) -> Result<(), Box<dyn Error>> {
let nr_blocks = get_nr_blocks(&input_file)?;
let nr_blocks = get_nr_blocks(input_file)?;
let nr_jobs = std::cmp::max(1, std::cmp::min(num_cpus::get() as u64, nr_blocks / 128));
let chunk_vecs = mk_chunk_vecs(nr_blocks, nr_jobs);
@ -122,7 +122,7 @@ where
let kind = metadata_block_type(data);
if kind != BT::UNKNOWN {
z.write_u64::<LittleEndian>(b)?;
pack_block(&mut z, kind, &data)?;
pack_block(&mut z, kind, data)?;
written += 1;
if written == 1024 {

View File

@ -442,7 +442,7 @@ mod tests {
let mut r = Cursor::new(&mut bs);
let unpacked = unpack(&mut r, ns.len() * 8).unwrap();
check_u64s_match(&ns, &unpacked[0..])
check_u64s_match(ns, &unpacked[0..])
}
#[test]
@ -457,7 +457,7 @@ mod tests {
];
for t in &cases {
assert!(check_pack_u64s(&t));
assert!(check_pack_u64s(t));
}
}

View File

@ -77,7 +77,7 @@ impl<'a, V: Unpack + Copy> NodeVisitor<u64> for BlockValueVisitor<'a, V> {
for (i, b) in values.iter().enumerate() {
// TODO: report indices of array entries based on the type size
let mut array_errs = self.array_errs.lock().unwrap();
array_errs.push(array::io_err(&path, *b).index_context(keys[i]));
array_errs.push(array::io_err(path, *b).index_context(keys[i]));
}
}
Ok(rblocks) => {
@ -85,7 +85,7 @@ impl<'a, V: Unpack + Copy> NodeVisitor<u64> for BlockValueVisitor<'a, V> {
match rb {
Err(_) => {
let mut array_errs = self.array_errs.lock().unwrap();
array_errs.push(array::io_err(&path, values[i]).index_context(keys[i]));
array_errs.push(array::io_err(path, values[i]).index_context(keys[i]));
}
Ok(b) => {
let mut path = path.to_vec();

View File

@ -2,10 +2,13 @@ use fixedbitset::FixedBitSet;
use std::sync::{Arc, Mutex};
use crate::io_engine::IoEngine;
use crate::math::div_up;
use crate::pdata::array::{self, ArrayBlock};
use crate::pdata::array_walker::{ArrayVisitor, ArrayWalker};
use crate::pdata::space_map::*;
//------------------------------------------
pub struct CheckedBitSet {
bits: FixedBitSet,
}
@ -30,6 +33,8 @@ impl CheckedBitSet {
}
}
//------------------------------------------
struct BitsetVisitor {
nr_bits: usize,
bits: Mutex<CheckedBitSet>,
@ -72,6 +77,55 @@ impl ArrayVisitor<u64> for BitsetVisitor {
}
}
//------------------------------------------
struct BitsetCollector {
bits: Mutex<FixedBitSet>,
nr_bits: usize,
}
impl BitsetCollector {
fn new(nr_bits: usize) -> BitsetCollector {
BitsetCollector {
bits: Mutex::new(FixedBitSet::with_capacity(nr_bits)),
nr_bits,
}
}
pub fn get_bitset(self) -> FixedBitSet {
self.bits.into_inner().unwrap()
}
}
impl ArrayVisitor<u64> for BitsetCollector {
fn visit(&self, index: u64, b: ArrayBlock<u64>) -> array::Result<()> {
let mut bitset = self.bits.lock().unwrap();
let mut idx = (index as usize * b.header.max_entries as usize) << 1; // index of u32 in bitset array
let idx_end = div_up(self.nr_bits, 32);
let mut dest = bitset.as_mut_slice().iter_mut().skip(idx);
for entry in b.values.iter() {
let lower = (*entry & (u32::MAX as u64)) as u32;
*(dest.next().ok_or_else(|| {
array::value_err(format!("bitset size exceeds limit: {} bits", self.nr_bits))
})?) = lower;
idx += 1;
if idx == idx_end {
break;
}
let upper = (*entry >> 32) as u32;
*(dest.next().ok_or_else(|| {
array::value_err(format!("bitset size exceeds limit: {} bits", self.nr_bits))
})?) = upper;
idx += 1;
}
Ok(())
}
}
//------------------------------------------
// TODO: multi-threaded is possible
pub fn read_bitset(
engine: Arc<dyn IoEngine + Send + Sync>,
@ -106,3 +160,15 @@ pub fn read_bitset_with_sm(
};
Ok((v.get_bitset(), e))
}
pub fn read_bitset_no_err(
engine: Arc<dyn IoEngine + Send + Sync>,
root: u64,
nr_bits: usize,
ignore_none_fatal: bool,
) -> array::Result<FixedBitSet> {
let w = ArrayWalker::new(engine, ignore_none_fatal);
let mut v = BitsetCollector::new(nr_bits);
w.walk(&mut v, root)?;
Ok(v.get_bitset())
}

View File

@ -561,7 +561,7 @@ pub fn unpack_node<V: Unpack>(
if let Some(l) = last {
if k <= l {
return Err(node_err(
&path,
path,
&format!("keys out of order: {} <= {}", k, l),
));
}
@ -582,7 +582,7 @@ pub fn unpack_node<V: Unpack>(
values,
})
} else {
let (_i, values) = convert_result(&path, count(le_u64, header.nr_entries as usize)(i))?;
let (_i, values) = convert_result(path, count(le_u64, header.nr_entries as usize)(i))?;
Ok(Node::Internal {
header,
keys,

View File

@ -132,10 +132,10 @@ impl<'a> LeafWalker<'a> {
.keys_context(kr));
}
let node = unpack_node::<V>(path, &b.get_data(), self.ignore_non_fatal, is_root)?;
let node = unpack_node::<V>(path, b.get_data(), self.ignore_non_fatal, is_root)?;
if let Internal { keys, values, .. } = node {
let krs = split_key_ranges(path, &kr, &keys)?;
let krs = split_key_ranges(path, kr, &keys)?;
if depth == 0 {
// it is the lowest internal
for i in 0..krs.len() {
@ -187,7 +187,7 @@ impl<'a> LeafWalker<'a> {
));
}
let node = unpack_node::<V>(path, &b.get_data(), self.ignore_non_fatal, is_root)?;
let node = unpack_node::<V>(path, b.get_data(), self.ignore_non_fatal, is_root)?;
match node {
Internal { values, .. } => {

View File

@ -195,11 +195,11 @@ impl BTreeWalker {
.keys_context(kr));
}
let node = unpack_node::<V>(path, &b.get_data(), self.ignore_non_fatal, is_root)?;
let node = unpack_node::<V>(path, b.get_data(), self.ignore_non_fatal, is_root)?;
match node {
Internal { keys, values, .. } => {
let krs = split_key_ranges(path, &kr, &keys)?;
let krs = split_key_ranges(path, kr, &keys)?;
let errs = self.walk_nodes(path, visitor, &krs, &values);
return self.build_aggregate(b.loc, errs);
}
@ -208,7 +208,7 @@ impl BTreeWalker {
keys,
values,
} => {
if let Err(e) = visitor.visit(path, &kr, &header, &keys, &values) {
if let Err(e) = visitor.visit(path, kr, &header, &keys, &values) {
let e = BTreeError::Path(path.clone(), Box::new(e));
self.set_fail(b.loc, e.clone());
return Err(e);
@ -286,11 +286,11 @@ where
.keys_context(kr));
}
let node = unpack_node::<V>(path, &b.get_data(), w.ignore_non_fatal, is_root)?;
let node = unpack_node::<V>(path, b.get_data(), w.ignore_non_fatal, is_root)?;
match node {
Internal { keys, values, .. } => {
let krs = split_key_ranges(path, &kr, &keys)?;
let krs = split_key_ranges(path, kr, &keys)?;
let errs = walk_nodes_threaded(w.clone(), path, pool, visitor, &krs, &values);
return w.build_aggregate(b.loc, errs);
}

View File

@ -109,7 +109,7 @@ fn check_low_ref_counts(
return Err(anyhow!("Unable to read bitmap block"));
}
Ok(b) => {
if checksum::metadata_block_type(&b.get_data()) != checksum::BT::BITMAP {
if checksum::metadata_block_type(b.get_data()) != checksum::BT::BITMAP {
report.fatal(&format!(
"Index entry points to block ({}) that isn't a bitmap",
b.loc

View File

@ -117,7 +117,7 @@ impl ReportInner for PBInner {
fn set_sub_title(&mut self, txt: &str) {
//let mut fmt = "".to_string(); //Checking thin metadata".to_string(); //self.title.clone();
let mut fmt = "Checking thin metadata [{bar:40}] Remaining {eta}, ".to_string();
fmt.push_str(&txt);
fmt.push_str(txt);
self.bar.set_style(
ProgressStyle::default_bar()
.template(&fmt)

View File

@ -343,16 +343,16 @@ fn find_first(r: &BlockRange, remaps: &[(BlockRange, BlockRange)]) -> Option<usi
Err(n) => {
if n == 0 {
let (from, _) = &remaps[n];
overlaps(&r, &from, n)
overlaps(r, from, n)
} else if n == remaps.len() {
let (from, _) = &remaps[n - 1];
overlaps(&r, from, n - 1)
overlaps(r, from, n - 1)
} else {
// Need to check the previous entry
let (from, _) = &remaps[n - 1];
overlaps(&r, &from, n - 1).or_else(|| {
overlaps(r, from, n - 1).or_else(|| {
let (from, _) = &remaps[n];
overlaps(&r, &from, n)
overlaps(r, from, n)
})
}
}
@ -368,7 +368,7 @@ fn remap(r: &BlockRange, remaps: &[(BlockRange, BlockRange)]) -> Vec<BlockRange>
let mut remap = Vec::new();
let mut r = r.start..r.end;
if let Some(index) = find_first(&r, &remaps) {
if let Some(index) = find_first(&r, remaps) {
let mut index = index;
loop {
let (from, to) = &remaps[index];
@ -487,7 +487,7 @@ fn build_copy_regions(remaps: &[(BlockRange, BlockRange)], block_size: u64) -> V
rs.push(Region {
src: from.start * block_size,
dest: to.start * block_size,
len: range_len(&from) * block_size,
len: range_len(from) * block_size,
});
}

View File

@ -188,7 +188,7 @@ fn emit_leaf(v: &mut MappingVisitor, b: &Block) -> Result<()> {
)));
}
let node = unpack_node::<BlockTime>(&path, &b.get_data(), true, true)?;
let node = unpack_node::<BlockTime>(&path, b.get_data(), true, true)?;
match node {
Internal { .. } => {

21
src/thin/metadata_size.rs Normal file
View File

@ -0,0 +1,21 @@
use anyhow::Result;
use crate::math::div_up;
/// Inputs for estimating the size of a thin-pool metadata device.
pub struct ThinMetadataSizeOptions {
    /// Number of data blocks the pool will manage.
    pub nr_blocks: u64,
    /// Maximum number of thin devices the pool will hold.
    pub max_thins: u64,
}
/// Estimate the metadata device size, in sectors, needed for the
/// given pool configuration.
pub fn metadata_size(opts: &ThinMetadataSizeOptions) -> Result<u64> {
    const ENTRIES_PER_NODE: u64 = 126; // assumed the mapping leaves are half populated
    const BLOCK_SIZE: u64 = 8; // sectors

    // size of all the leaf nodes for data mappings
    let nr_leaves = div_up(opts.nr_blocks, ENTRIES_PER_NODE);
    let mapping_size = nr_leaves * BLOCK_SIZE;

    // space required by root nodes
    let roots_overhead = opts.max_thins * BLOCK_SIZE;

    Ok(mapping_size + roots_overhead)
}

View File

@ -5,6 +5,7 @@ pub mod dump;
pub mod ir;
pub mod metadata;
pub mod metadata_repair;
pub mod metadata_size;
pub mod repair;
pub mod restore;
pub mod runs;

View File

@ -153,7 +153,7 @@ impl<'a> Restorer<'a> {
};
for (_, leaves) in self.sub_trees.iter() {
release_leaves(self.w, &leaves, &mut value_rc)?;
release_leaves(self.w, leaves, &mut value_rc)?;
}
Ok(())

View File

@ -125,7 +125,7 @@ impl Gatherer {
// Now we need to mark entries that follow a tail as heads.
let mut heads = mem::take(&mut self.heads);
for t in &self.tails {
if let Some(e) = self.entries.get(&t) {
if let Some(e) = self.entries.get(t) {
for n in &e.neighbours {
heads.insert(*n);
}

104
src/units.rs Normal file
View File

@ -0,0 +1,104 @@
use anyhow::anyhow;
use std::str::FromStr;
//------------------------------------------
/// Size units accepted on the command line: bytes, 512-byte sectors,
/// SI (base-10) multiples, and IEC (base-2) multiples.
#[derive(Clone)]
pub enum Units {
    Byte,
    Sector,
    // base 10
    Kilobyte,
    Megabyte,
    Gigabyte,
    Terabyte,
    Petabyte,
    Exabyte,
    // base 2
    Kibibyte,
    Mebibyte,
    Gibibyte,
    Tebibyte,
    Pebibyte,
    Exbibyte,
}
impl Units {
    /// Number of bytes in one of this unit.
    fn size_bytes(&self) -> u64 {
        match self {
            Units::Byte => 1,
            Units::Sector => 512,
            // base 2 (powers of 1024, written as shifts)
            Units::Kibibyte => 1 << 10,
            Units::Mebibyte => 1 << 20,
            Units::Gibibyte => 1 << 30,
            Units::Tebibyte => 1 << 40,
            Units::Pebibyte => 1 << 50,
            Units::Exbibyte => 1 << 60,
            // base 10 (powers of 1000)
            Units::Kilobyte => 1_000,
            Units::Megabyte => 1_000_000,
            Units::Gigabyte => 1_000_000_000,
            Units::Terabyte => 1_000_000_000_000,
            Units::Petabyte => 1_000_000_000_000_000,
            Units::Exabyte => 1_000_000_000_000_000_000,
        }
    }
}
impl FromStr for Units {
    type Err = anyhow::Error;

    /// Parse a unit name or its single-letter shorthand.
    ///
    /// Lowercase letters select base-2 units, uppercase base-10.
    /// The historical misspelling "mibibyte" is kept for backward
    /// compatibility alongside the correct spelling "mebibyte".
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "byte" | "b" => Ok(Units::Byte),
            "sector" | "s" => Ok(Units::Sector),
            // base 2
            "kibibyte" | "k" => Ok(Units::Kibibyte),
            "mebibyte" | "mibibyte" | "m" => Ok(Units::Mebibyte),
            "gibibyte" | "g" => Ok(Units::Gibibyte),
            "tebibyte" | "t" => Ok(Units::Tebibyte),
            "pebibyte" | "p" => Ok(Units::Pebibyte),
            "exbibyte" | "e" => Ok(Units::Exbibyte),
            // base 10
            "kilobyte" | "K" => Ok(Units::Kilobyte),
            "megabyte" | "M" => Ok(Units::Megabyte),
            "gigabyte" | "G" => Ok(Units::Gigabyte),
            "terabyte" | "T" => Ok(Units::Terabyte),
            "petabyte" | "P" => Ok(Units::Petabyte),
            "exabyte" | "E" => Ok(Units::Exabyte),
            _ => Err(anyhow!("Invalid unit specifier")),
        }
    }
}
impl ToString for Units {
    /// Full name of the unit, parseable by `FromStr`.
    fn to_string(&self) -> String {
        String::from(match self {
            Units::Byte => "byte",
            Units::Sector => "sector",
            // base 2
            Units::Kibibyte => "kibibyte",
            // NOTE: "mibibyte" is misspelled but matches what FromStr accepts
            Units::Mebibyte => "mibibyte",
            Units::Gibibyte => "gibibyte",
            // was "terabyte", which collided with the base-10 Terabyte
            // variant; FromStr already accepts "tebibyte"
            Units::Tebibyte => "tebibyte",
            Units::Pebibyte => "pebibyte",
            Units::Exbibyte => "exbibyte",
            // base 10
            Units::Kilobyte => "kilobyte",
            Units::Megabyte => "megabyte",
            Units::Gigabyte => "gigabyte",
            Units::Terabyte => "terabyte",
            Units::Petabyte => "petabyte",
            Units::Exabyte => "exabyte",
        })
    }
}
/// Convert a size expressed in `unit`s to bytes.
pub fn to_bytes(size: u64, unit: Units) -> u64 {
    size * unit.size_bytes()
}
/// Convert a byte count to a (possibly fractional) number of `unit`s.
pub fn to_units(bytes: u64, unit: Units) -> f64 {
    bytes as f64 / unit.size_bytes() as f64
}
//------------------------------------------

View File

@ -32,8 +32,14 @@ pub fn bool_val(kv: &Attribute) -> anyhow::Result<bool> {
Ok(n)
}
pub fn bad_attr<T>(tag: &str, _attr: &[u8]) -> anyhow::Result<T> {
Err(anyhow!("unknown attribute in tag '{}'", tag))
pub fn bad_attr<T>(tag: &str, attr: &[u8]) -> anyhow::Result<T> {
Err(anyhow!(
"unknown attribute {}in tag '{}'",
std::str::from_utf8(attr)
.map(|s| format!("'{}' ", s))
.unwrap_or_default(),
tag
))
}
pub fn check_attr<T>(tag: &str, name: &str, maybe_v: Option<T>) -> anyhow::Result<T> {

View File

@ -13,8 +13,7 @@ use common::test_dir::*;
//------------------------------------------
const USAGE: &str =
"cache_check 0.9.0
const USAGE: &str = "cache_check 0.9.0
USAGE:
cache_check [FLAGS] <INPUT>
@ -105,7 +104,10 @@ fn failing_q() -> Result<()> {
let md = mk_zeroed_md(&mut td)?;
let output = run_fail_raw(cache_check_cmd(args!["-q", &md]))?;
assert_eq!(output.stdout.len(), 0);
eprintln!("stderr = '{}'", std::str::from_utf8(&output.stderr).unwrap());
eprintln!(
"stderr = '{}'",
std::str::from_utf8(&output.stderr).unwrap()
);
assert_eq!(output.stderr.len(), 0);
Ok(())
}
@ -128,7 +130,6 @@ fn valid_metadata_passes() -> Result<()> {
Ok(())
}
// FIXME: put back in, I don't want to add the --debug- arg to the
// tool again, so we should have a little library function for tweaking
// metadata version.

View File

@ -0,0 +1,186 @@
use anyhow::Result;
mod common;
use common::common_args::*;
use common::process::*;
use common::program::*;
use common::target::*;
//------------------------------------------
// Expected --help output; compared byte-for-byte against the tool's
// actual output, so it must match the clap definition exactly
// (including the existing "Specity" typo).
const USAGE: &str = "cache_metadata_size 0.9.0
Estimate the size of the metadata device needed for a given configuration.

USAGE:
    cache_metadata_size [OPTIONS] <--device-size <SECTORS> --block-size <SECTORS> | --nr-blocks <NUM>>

FLAGS:
    -h, --help       Prints help information
    -V, --version    Prints version information

OPTIONS:
        --block-size <SECTORS>      Specify the size of each cache block
        --device-size <SECTORS>     Specify total size of the fast device used in the cache
        --max-hint-width <BYTES>    Specity the per-block hint width [default: 4]
        --nr-blocks <NUM>           Specify the number of cache blocks";
//------------------------------------------
// Marker type binding the shared Program test harness to the
// cache_metadata_size binary.
struct CacheMetadataSize;

impl<'a> Program<'a> for CacheMetadataSize {
    fn name() -> &'a str {
        "cache_metadata_size"
    }

    // Build a command invoking the tool with the given arguments.
    fn cmd<I>(args: I) -> Command
    where
        I: IntoIterator,
        I::Item: Into<std::ffi::OsString>,
    {
        cache_metadata_size_cmd(args)
    }

    fn usage() -> &'a str {
        USAGE
    }

    fn arg_type() -> ArgType {
        ArgType::InputArg
    }

    fn bad_option_hint(option: &str) -> String {
        msg::bad_option_hint(option)
    }
}
//------------------------------------------
test_accepts_help!(CacheMetadataSize);
test_accepts_version!(CacheMetadataSize);
test_rejects_bad_option!(CacheMetadataSize);
//------------------------------------------
#[test]
fn no_args() -> Result<()> {
    // with no sizing options at all the tool must refuse to run
    let _stderr = run_fail(cache_metadata_size_cmd([""; 0]))?;
    Ok(())
}
#[test]
fn device_size_only() -> Result<()> {
    // --device-size without --block-size is insufficient
    let _stderr = run_fail(cache_metadata_size_cmd(args!["--device-size", "204800"]))?;
    Ok(())
}
#[test]
fn block_size_only() -> Result<()> {
    // --block-size without --device-size is insufficient
    let _stderr = run_fail(cache_metadata_size_cmd(args!["--block-size", "128"]))?;
    Ok(())
}
/*
#[test]
fn conradictory_info_fails() -> Result<()> {
let stderr = run_fail(cache_metadata_size_cmd(
args![
"--device-size",
"102400",
"--block-size",
"1000",
"--nr-blocks",
"6"
],
))?;
assert_eq!(stderr, "Contradictory arguments given, --nr-blocks doesn't match the --device-size and --block-size.");
Ok(())
}
#[test]
fn all_args_agree() -> Result<()> {
let out = run_ok_raw(cache_metadata_size_cmd(
args![
"--device-size",
"102400",
"--block-size",
"100",
"--nr-blocks",
"1024"
],
))?;
let stdout = std::str::from_utf8(&out.stdout[..])
.unwrap()
.trim_end_matches(|c| c == '\n' || c == '\r')
.to_string();
assert_eq!(stdout, "8248 sectors");
assert_eq!(out.stderr.len(), 0);
Ok(())
}
*/
#[test]
fn dev_size_and_nr_blocks_conflicts() -> Result<()> {
    // --device-size and --nr-blocks are mutually exclusive
    run_fail(cache_metadata_size_cmd(args![
        "--device-size",
        "102400",
        "--nr-blocks",
        "1024"
    ]))?;
    Ok(())
}
#[test]
fn block_size_and_nr_blocks_conflicts() -> Result<()> {
    // Supplying both --block-size and --nr-blocks is contradictory and
    // must fail.
    let cmd = cache_metadata_size_cmd(args![
        "--block-size",
        "100",
        "--nr-blocks",
        "1024"
    ]);
    run_fail(cmd)?;
    Ok(())
}
#[test]
fn nr_blocks_alone() -> Result<()> {
    // 1024 cache blocks with default hint width -> 8248 sectors of metadata.
    let out = run_ok_raw(cache_metadata_size_cmd(args!["--nr-blocks", "1024"]))?;
    let text = std::str::from_utf8(&out.stdout)
        .unwrap()
        .trim_end_matches(|c: char| matches!(c, '\n' | '\r'));
    assert_eq!(text, "8248 sectors");
    assert_eq!(out.stderr.len(), 0);
    Ok(())
}
#[test]
fn dev_size_and_block_size_succeeds() -> Result<()> {
    // 102400 sectors / 100-sector blocks = 1024 blocks -> 8248 sectors.
    let cmd = cache_metadata_size_cmd(args![
        "--device-size",
        "102400",
        "--block-size",
        "100"
    ]);
    let out = run_ok_raw(cmd)?;
    let text = std::str::from_utf8(&out.stdout)
        .unwrap()
        .trim_end_matches(|c: char| matches!(c, '\n' | '\r'));
    assert_eq!(text, "8248 sectors");
    assert_eq!(out.stderr.len(), 0);
    Ok(())
}
#[test]
fn large_nr_blocks() -> Result<()> {
    // A 64M-block cache still computes: expect 3678208 sectors of metadata.
    let out = run_ok_raw(cache_metadata_size_cmd(args!["--nr-blocks", "67108864"]))?;
    let text = std::str::from_utf8(&out.stdout)
        .unwrap()
        .trim_end_matches(|c: char| matches!(c, '\n' | '\r'));
    assert_eq!(text, "3678208 sectors");
    assert_eq!(out.stderr.len(), 0);
    Ok(())
}
//------------------------------------------

View File

@ -12,8 +12,7 @@ use common::test_dir::*;
//------------------------------------------
const USAGE: &str =
"cache_repair 0.9.0
const USAGE: &str = "cache_repair 0.9.0
Repair binary cache metadata, and write it to a different device or file
USAGE:

View File

@ -14,8 +14,7 @@ use common::test_dir::*;
//------------------------------------------
const USAGE: &str =
"cache_restore 0.9.0
const USAGE: &str = "cache_restore 0.9.0
Convert XML format metadata to binary.
USAGE:

View File

@ -64,7 +64,7 @@ impl XmlGen for CacheGen {
let mut cblocks = (0..self.nr_cache_blocks).collect::<Vec<u32>>();
cblocks.shuffle(&mut rand::thread_rng());
cblocks.truncate(nr_resident as usize);
cblocks.sort();
cblocks.sort_unstable();
v.mappings_b()?;
{

34
tests/common/era.rs Normal file
View File

@ -0,0 +1,34 @@
use anyhow::Result;
use std::path::PathBuf;
use thinp::file_utils;
use crate::args;
use crate::common::era_xml_generator::{write_xml, CleanShutdownMeta};
use crate::common::process::*;
use crate::common::target::*;
use crate::common::test_dir::TestDir;
//-----------------------------------------------
/// Writes a well-formed era metadata XML file into the test dir and
/// returns its path.
pub fn mk_valid_xml(td: &mut TestDir) -> Result<PathBuf> {
    let xml = td.mk_path("meta.xml");
    // bs, nr_blocks, era, nr_wsets
    let mut gen = CleanShutdownMeta::new(128, 256, 32, 4);
    write_xml(&xml, &mut gen)?;
    Ok(xml)
}
/// Builds a valid binary era metadata file: generates XML, sizes a
/// destination file, then runs era_restore into it. Returns the path to
/// the binary metadata.
pub fn mk_valid_md(td: &mut TestDir) -> Result<PathBuf> {
    let xml = td.mk_path("meta.xml");
    let md = td.mk_path("meta.bin");

    // Same generator parameters as mk_valid_xml: bs, nr_blocks, era, nr_wsets.
    let mut gen = CleanShutdownMeta::new(128, 256, 32, 4);
    write_xml(&xml, &mut gen)?;

    // Size the destination before restoring. Propagate failure with `?`
    // instead of silently discarding the Result (as the sibling helpers in
    // input_arg.rs do); otherwise the restore below fails with a more
    // confusing error.
    let _file = file_utils::create_sized_file(&md, 4096 * 4096)?;

    run_ok(era_restore_cmd(args!["-i", &xml, "-o", &md]))?;
    Ok(md)
}
//-----------------------------------------------

View File

@ -0,0 +1,157 @@
use anyhow::Result;
use rand::prelude::*;
use std::fs::OpenOptions;
use std::path::Path;
use thinp::era::ir::{self, MetadataVisitor};
use thinp::era::xml;
//------------------------------------------
/// Anything that can emit era metadata XML through a MetadataVisitor.
pub trait XmlGen {
    fn generate_xml(&mut self, v: &mut dyn MetadataVisitor) -> Result<()>;
}
/// Renders `g`'s metadata as XML into the file at `path`, creating or
/// truncating it first so no stale bytes survive.
pub fn write_xml(path: &Path, g: &mut dyn XmlGen) -> Result<()> {
    let xml_out = OpenOptions::new()
        .read(false)
        .write(true)
        .create(true)
        .truncate(true)
        .open(path)?;
    let mut w = xml::XmlWriter::new(xml_out, false);
    g.generate_xml(&mut w)
}
//------------------------------------------
// Ordered sequence generator where each element has an independent probability
// of being present.
struct IndependentSequence {
    begin: u32, // next candidate index (advanced by the iterator)
    end: u32,   // one past the last candidate index
    prob: u32,  // per-element presence probability, in percent (0..=100)
    rng: ThreadRng,
}

impl IndependentSequence {
    /// Creates a generator over `[begin, end)` where each element is
    /// present with probability `prob`/100.
    fn new(begin: u32, end: u32, prob: u32) -> IndependentSequence {
        IndependentSequence {
            begin,
            end,
            prob,
            rng: rand::thread_rng(),
        }
    }
}
impl Iterator for IndependentSequence {
    type Item = std::ops::Range<u32>;

    // FIXME: reduce complexity
    /// Yields the next run of consecutive "present" elements as a half-open
    /// range. Presence is decided lazily, one RNG draw per element.
    fn next(&mut self) -> Option<std::ops::Range<u32>> {
        if self.begin >= self.end {
            return None;
        }

        // Skip leading absent elements to find the start of a run.
        let mut b = self.begin;
        while b < self.end && self.rng.gen_range(0..100) >= self.prob {
            b += 1;
        }
        if b == self.end {
            return None;
        }

        // Extend the run while elements keep testing as present.
        let mut e = b + 1;
        while e < self.end && self.rng.gen_range(0..100) < self.prob {
            e += 1;
        }

        // Element `e` either tested absent or is the end, so resume after it.
        self.begin = e + 1;
        Some(std::ops::Range { start: b, end: e })
    }
}
//------------------------------------------
/// Builds an era superblock IR value with an empty uuid and the given
/// geometry/era.
fn create_superblock(block_size: u32, nr_blocks: u32, current_era: u32) -> ir::Superblock {
    ir::Superblock {
        uuid: "".to_string(),
        block_size,
        nr_blocks,
        current_era,
    }
}
/// Generates era metadata XML describing a cleanly shut down device:
/// a superblock, `nr_writesets` writesets, and a full era array.
pub struct CleanShutdownMeta {
    block_size: u32,   // passed through to the superblock
    nr_blocks: u32,    // number of blocks; also each writeset's bit count
    current_era: u32,  // era recorded in the superblock
    nr_writesets: u32, // how many trailing eras get an explicit writeset
}

impl CleanShutdownMeta {
    pub fn new(block_size: u32, nr_blocks: u32, current_era: u32, nr_writesets: u32) -> Self {
        CleanShutdownMeta {
            block_size,
            nr_blocks,
            current_era,
            nr_writesets,
        }
    }

    /// Emits one writeset: random runs of marked blocks, each block having
    /// a 10% chance of being marked.
    fn generate_writeset(v: &mut dyn MetadataVisitor, ws: &ir::Writeset) -> Result<()> {
        v.writeset_b(ws)?;
        let gen = IndependentSequence::new(0, ws.nr_bits, 10);
        for seq in gen {
            v.writeset_blocks(&ir::MarkedBlocks {
                begin: seq.start,
                len: seq.end - seq.start,
            })?;
        }
        v.writeset_e()?;
        Ok(())
    }

    /// Emits an era entry for every block, with eras drawn uniformly
    /// from 0..max_era.
    fn generate_era_array(v: &mut dyn MetadataVisitor, nr_blocks: u32, max_era: u32) -> Result<()> {
        let mut rng = rand::thread_rng();
        v.era_b()?;
        for b in 0..nr_blocks {
            let era = rng.gen_range(0..max_era);
            v.era(&ir::Era { block: b, era })?;
        }
        v.era_e()?;
        Ok(())
    }
}
impl XmlGen for CleanShutdownMeta {
    /// Emits superblock, one writeset per era in the last `nr_writesets`
    /// eras (inclusive of `current_era`), then the era array.
    fn generate_xml(&mut self, v: &mut dyn MetadataVisitor) -> Result<()> {
        v.superblock_b(&create_superblock(
            self.block_size,
            self.nr_blocks,
            self.current_era,
        ))?;

        // NOTE(review): underflows if nr_writesets > current_era + 1; current
        // callers pass current_era well above nr_writesets — confirm before
        // reusing with other parameters.
        let era_low = self.current_era - self.nr_writesets + 1;
        for era in era_low..self.current_era + 1 {
            Self::generate_writeset(
                v,
                &ir::Writeset {
                    era,
                    nr_bits: self.nr_blocks,
                },
            )?;
        }

        // Era array values stay below era_low, i.e. older than any writeset.
        Self::generate_era_array(v, self.nr_blocks, era_low)?;
        v.superblock_e()?;
        Ok(())
    }
}
//------------------------------------------

View File

@ -180,6 +180,32 @@ macro_rules! test_unreadable_input_file {
//------------------------------------------
// test invalid content
/// Shared scenario: a reader program must fail cleanly on an input file
/// that is far too small (1 KiB) to hold valid metadata.
pub fn test_tiny_input_file<'a, P>() -> Result<()>
where
    P: MetadataReader<'a>,
{
    let mut td = TestDir::new()?;
    let input = td.mk_path("meta.bin");
    file_utils::create_sized_file(&input, 1024)?;
    let wrapper = build_args_fn(P::arg_type())?;
    wrapper(&mut td, input.as_ref(), &|args: &[&OsStr]| {
        run_fail(P::cmd(args))?;
        Ok(())
    })
}
// Instantiates the tiny-input-file scenario as a #[test] for $program.
#[macro_export]
macro_rules! test_tiny_input_file {
    ($program: ident) => {
        #[test]
        fn tiny_input_file() -> Result<()> {
            test_tiny_input_file::<$program>()
        }
    };
}
pub fn test_help_message_for_tiny_input_file<'a, P>() -> Result<()>
where
P: MetadataReader<'a>,
@ -222,9 +248,8 @@ where
let wrapper = build_args_fn(P::arg_type())?;
wrapper(&mut td, input.as_ref(), &|args: &[&OsStr]| {
let stderr = run_fail(P::cmd(args))?;
let msg = format!(
"This looks like XML. This tool only checks the binary metadata format.",
);
let msg =
"This looks like XML. This tool only checks the binary metadata format.".to_string();
assert!(stderr.contains(&msg));
Ok(())
})

View File

@ -5,6 +5,8 @@
pub mod cache;
pub mod cache_xml_generator;
pub mod common_args;
pub mod era;
pub mod era_xml_generator;
pub mod fixture;
pub mod input_arg;
pub mod output_option;

View File

@ -57,10 +57,10 @@ impl fmt::Display for Command {
fn log_output(output: &process::Output) {
use std::str::from_utf8;
if output.stdout.len() > 0 {
if !output.stdout.is_empty() {
eprintln!("stdout: \n{}<<END>>", from_utf8(&output.stdout).unwrap());
}
if output.stderr.len() > 0 {
if !output.stderr.is_empty() {
eprintln!("stderr: \n{}<<END>>", from_utf8(&output.stderr).unwrap());
}
}
@ -100,7 +100,11 @@ pub fn run_fail(command: Command) -> Result<String> {
let output = command.unchecked().run()?;
log_output(&output);
assert!(!output.status.success());
let stderr = std::str::from_utf8(&output.stderr[..]).unwrap().to_string();
let stderr = std::str::from_utf8(&output.stderr[..])
.unwrap()
.trim_end_matches(|c| c == '\n' || c == '\r')
.to_string();
Ok(stderr)
}

View File

@ -143,6 +143,14 @@ where
rust_cmd("cache_dump", args)
}
/// Builds a Command invoking the Rust cache_metadata_size binary.
pub fn cache_metadata_size_cmd<I>(args: I) -> Command
where
    I: IntoIterator,
    I::Item: Into<OsString>,
{
    rust_cmd("cache_metadata_size", args)
}
pub fn cache_restore_cmd<I>(args: I) -> Command
where
I: IntoIterator,
@ -159,6 +167,38 @@ where
rust_cmd("cache_repair", args)
}
/// Builds a Command invoking the Rust era_check binary.
pub fn era_check_cmd<I>(args: I) -> Command
where
    I: IntoIterator,
    I::Item: Into<OsString>,
{
    rust_cmd("era_check", args)
}

/// Builds a Command invoking the Rust era_dump binary.
pub fn era_dump_cmd<I>(args: I) -> Command
where
    I: IntoIterator,
    I::Item: Into<OsString>,
{
    rust_cmd("era_dump", args)
}

/// Builds a Command invoking the Rust era_restore binary.
pub fn era_restore_cmd<I>(args: I) -> Command
where
    I: IntoIterator,
    I::Item: Into<OsString>,
{
    rust_cmd("era_restore", args)
}

/// Builds a Command invoking the Rust era_repair binary.
pub fn era_repair_cmd<I>(args: I) -> Command
where
    I: IntoIterator,
    I::Item: Into<OsString>,
{
    rust_cmd("era_repair", args)
}
//------------------------------------------
pub mod msg {

View File

@ -118,7 +118,7 @@ pub fn generate_metadata_leaks(
pub fn get_needs_check(md: &PathBuf) -> Result<bool> {
use thinp::thin::superblock::*;
let engine = SyncIoEngine::new(&md, 1, false)?;
let engine = SyncIoEngine::new(md, 1, false)?;
let sb = read_superblock(&engine, SUPERBLOCK_LOCATION)?;
Ok(sb.flags.needs_check)
}

118
tests/era_check.rs Normal file
View File

@ -0,0 +1,118 @@
use anyhow::Result;
mod common;
use common::cache::*;
use common::common_args::*;
use common::fixture::*;
use common::input_arg::*;
use common::process::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
// Expected --help output; compared verbatim by the shared help/usage tests.
// Do not edit the string contents independently of the tool itself.
const USAGE: &str = "era_check 0.9.0
USAGE:
era_check [FLAGS] <INPUT>
FLAGS:
--ignore-non-fatal-errors Only return a non-zero exit code if a fatal error is found.
-q, --quiet Suppress output messages, return only exit code.
--super-block-only Only check the superblock.
-h, --help Prints help information
-V, --version Prints version information
ARGS:
<INPUT> Specify the input device to check";
//------------------------------------------
// Harness glue: identifies the era_check binary to the shared test macros.
struct EraCheck;

impl<'a> Program<'a> for EraCheck {
    /// Executable name, as reported in usage/help assertions.
    fn name() -> &'a str {
        "era_check"
    }

    /// Builds a runnable command for the tool with the given arguments.
    fn cmd<I>(args: I) -> Command
    where
        I: IntoIterator,
        I::Item: Into<std::ffi::OsString>,
    {
        era_check_cmd(args)
    }

    /// Expected full usage text, compared against the tool's help output.
    fn usage() -> &'a str {
        USAGE
    }

    /// era_check takes its metadata device as a positional input argument.
    fn arg_type() -> ArgType {
        ArgType::InputArg
    }

    /// Expected stderr hint when an unknown option is passed.
    fn bad_option_hint(option: &str) -> String {
        msg::bad_option_hint(option)
    }
}

impl<'a> InputProgram<'a> for EraCheck {
    /// A valid input is a restored binary era metadata file.
    fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
        mk_valid_md(td)
    }

    fn file_not_found() -> &'a str {
        msg::FILE_NOT_FOUND
    }

    fn missing_input_arg() -> &'a str {
        msg::MISSING_INPUT_ARG
    }

    /// Corrupt input manifests as a bad superblock.
    fn corrupted_input() -> &'a str {
        msg::BAD_SUPERBLOCK
    }
}

impl<'a> MetadataReader<'a> for EraCheck {}
//------------------------------------------
// Instantiate the shared CLI and input-validation scenarios for era_check.
test_accepts_help!(EraCheck);
test_accepts_version!(EraCheck);
test_rejects_bad_option!(EraCheck);
test_missing_input_arg!(EraCheck);
test_input_file_not_found!(EraCheck);
test_input_cannot_be_a_directory!(EraCheck);
test_unreadable_input_file!(EraCheck);
test_help_message_for_tiny_input_file!(EraCheck);
test_spot_xml_data!(EraCheck);
test_corrupted_input_data!(EraCheck);
//------------------------------------------
#[test]
fn failing_q() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let output = run_fail_raw(era_check_cmd(args!["-q", &md]))?;
assert_eq!(output.stdout.len(), 0);
assert_eq!(output.stderr.len(), 0);
Ok(())
}
#[test]
fn failing_quiet() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_zeroed_md(&mut td)?;
let output = run_fail_raw(era_check_cmd(args!["--quiet", &md]))?;
assert_eq!(output.stdout.len(), 0);
assert_eq!(output.stderr.len(), 0);
Ok(())
}
//------------------------------------------

123
tests/era_dump.rs Normal file
View File

@ -0,0 +1,123 @@
use anyhow::Result;
use std::fs::OpenOptions;
use std::io::Write;
mod common;
use common::common_args::*;
use common::era::*;
use common::fixture::*;
use common::input_arg::*;
use common::process::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
// Expected --help output; compared verbatim by the shared help/usage tests.
// Do not edit the string contents independently of the tool itself.
const USAGE: &str = "era_dump 0.9.0
Dump the era metadata to stdout in XML format
USAGE:
era_dump [FLAGS] [OPTIONS] <INPUT>
FLAGS:
--logical Fold any unprocessed write sets into the final era array
-r, --repair Repair the metadata whilst dumping it
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
-o, --output <FILE> Specify the output file rather than stdout
ARGS:
<INPUT> Specify the input device to dump";
//------------------------------------------
// Harness glue: identifies the era_dump binary to the shared test macros.
struct EraDump;

impl<'a> Program<'a> for EraDump {
    /// Executable name, as reported in usage/help assertions.
    fn name() -> &'a str {
        "era_dump"
    }

    /// Builds a runnable command for the tool with the given arguments.
    fn cmd<I>(args: I) -> Command
    where
        I: IntoIterator,
        I::Item: Into<std::ffi::OsString>,
    {
        era_dump_cmd(args)
    }

    /// Expected full usage text, compared against the tool's help output.
    fn usage() -> &'a str {
        USAGE
    }

    /// era_dump takes its metadata device as a positional input argument.
    fn arg_type() -> ArgType {
        ArgType::InputArg
    }

    /// Expected stderr hint when an unknown option is passed.
    fn bad_option_hint(option: &str) -> String {
        msg::bad_option_hint(option)
    }
}

impl<'a> InputProgram<'a> for EraDump {
    /// A valid input is a restored binary era metadata file.
    fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
        mk_valid_md(td)
    }

    fn file_not_found() -> &'a str {
        msg::FILE_NOT_FOUND
    }

    fn missing_input_arg() -> &'a str {
        msg::MISSING_INPUT_ARG
    }

    /// Corrupt input manifests as a bad superblock.
    fn corrupted_input() -> &'a str {
        msg::BAD_SUPERBLOCK
    }
}

impl<'a> MetadataReader<'a> for EraDump {}
//------------------------------------------
// Instantiate the shared CLI and input-validation scenarios for era_dump.
test_accepts_help!(EraDump);
test_accepts_version!(EraDump);
test_rejects_bad_option!(EraDump);
test_missing_input_arg!(EraDump);
test_input_file_not_found!(EraDump);
test_input_cannot_be_a_directory!(EraDump);
test_unreadable_input_file!(EraDump);
test_tiny_input_file!(EraDump);
//------------------------------------------
// TODO: share with thin_dump
/// Round-trip test: dump -> restore -> dump must be lossless, i.e. the
/// two XML dumps are byte-identical.
#[test]
fn dump_restore_cycle() -> Result<()> {
    let mut td = TestDir::new()?;
    let md = mk_valid_md(&mut td)?;

    let output = run_ok_raw(era_dump_cmd(args![&md]))?;

    // Persist the dumped XML. Truncate on open (matching write_xml in
    // era_xml_generator) so stale bytes can never survive past our content
    // if the path already exists.
    let xml = td.mk_path("meta.xml");
    let mut file = OpenOptions::new()
        .read(false)
        .write(true)
        .create(true)
        .truncate(true)
        .open(&xml)?;
    file.write_all(&output.stdout)?;
    drop(file);

    // Restore into a fresh zeroed device, dump again, and compare.
    let md2 = mk_zeroed_md(&mut td)?;
    run_ok(era_restore_cmd(args!["-i", &xml, "-o", &md2]))?;
    let output2 = run_ok_raw(era_dump_cmd(args![&md2]))?;
    assert_eq!(output.stdout, output2.stdout);
    Ok(())
}

143
tests/era_restore.rs Normal file
View File

@ -0,0 +1,143 @@
use anyhow::Result;
mod common;
use common::common_args::*;
use common::era::*;
use common::fixture::*;
use common::input_arg::*;
use common::output_option::*;
use common::process::*;
use common::program::*;
use common::target::*;
use common::test_dir::*;
//------------------------------------------
// Expected --help output; compared verbatim by the shared help/usage tests.
// Do not edit the string contents independently of the tool itself.
const USAGE: &str = "era_restore 0.9.0
Convert XML format metadata to binary.
USAGE:
era_restore [FLAGS] --input <FILE> --output <FILE>
FLAGS:
-q, --quiet Suppress output messages, return only exit code.
-h, --help Prints help information
-V, --version Prints version information
OPTIONS:
-i, --input <FILE> Specify the input xml
-o, --output <FILE> Specify the output device to check";
//------------------------------------------
// Harness glue: identifies the era_restore binary to the shared test macros.
struct EraRestore;

impl<'a> Program<'a> for EraRestore {
    /// Executable name, as reported in usage/help assertions.
    fn name() -> &'a str {
        "era_restore"
    }

    /// Builds a runnable command for the tool with the given arguments.
    fn cmd<I>(args: I) -> Command
    where
        I: IntoIterator,
        I::Item: Into<std::ffi::OsString>,
    {
        era_restore_cmd(args)
    }

    /// Expected full usage text, compared against the tool's help output.
    fn usage() -> &'a str {
        USAGE
    }

    /// era_restore takes -i/-o option pairs rather than positional args.
    fn arg_type() -> ArgType {
        ArgType::IoOptions
    }

    /// Expected stderr hint when an unknown option is passed.
    fn bad_option_hint(option: &str) -> String {
        msg::bad_option_hint(option)
    }
}
impl<'a> InputProgram<'a> for EraRestore {
    /// A valid input is an era metadata XML file (not binary metadata).
    fn mk_valid_input(td: &mut TestDir) -> Result<std::path::PathBuf> {
        mk_valid_xml(td)
    }

    fn file_not_found() -> &'a str {
        msg::FILE_NOT_FOUND
    }

    fn missing_input_arg() -> &'a str {
        msg::MISSING_INPUT_ARG
    }

    fn corrupted_input() -> &'a str {
        "" // we don't intend to verify error messages of XML parsing
    }
}
impl<'a> OutputProgram<'a> for EraRestore {
    /// Expected stderr when -o is omitted.
    fn missing_output_arg() -> &'a str {
        msg::MISSING_OUTPUT_ARG
    }
}

impl<'a> MetadataWriter<'a> for EraRestore {
    /// Expected stderr when the output path does not exist.
    fn file_not_found() -> &'a str {
        msg::FILE_NOT_FOUND
    }
}
//-----------------------------------------
test_accepts_help!(EraRestore);
test_accepts_version!(EraRestore);
test_missing_input_option!(EraRestore);
test_input_file_not_found!(EraRestore);
test_corrupted_input_data!(EraRestore);
test_missing_output_option!(EraRestore);
test_tiny_output_file!(EraRestore);
test_unwritable_output_file!(EraRestore);
//-----------------------------------------
// TODO: share with thin_restore (this file IS era_restore — presumably the
// original comment meant the other restore tools; confirm before sharing).
/// A successful restore with a quiet flag must emit nothing on either stream.
fn quiet_flag(flag: &str) -> Result<()> {
    let mut td = TestDir::new()?;
    let xml = mk_valid_xml(&mut td)?;
    let md = mk_zeroed_md(&mut td)?;
    let output = run_ok_raw(era_restore_cmd(args!["-i", &xml, "-o", &md, flag]))?;
    assert_eq!(output.stdout.len(), 0);
    assert_eq!(output.stderr.len(), 0);
    Ok(())
}
// Both spellings of the quiet flag go through the shared helper above.
#[test]
fn accepts_q() -> Result<()> {
    quiet_flag("-q")
}

#[test]
fn accepts_quiet() -> Result<()> {
    quiet_flag("--quiet")
}
//-----------------------------------------
/// Happy path: restoring valid XML into a zeroed device succeeds.
#[test]
fn successfully_restores() -> Result<()> {
    let mut td = TestDir::new()?;
    let xml = mk_valid_xml(&mut td)?;
    let md = mk_zeroed_md(&mut td)?;
    run_ok(era_restore_cmd(args!["-i", &xml, "-o", &md]))?;
    Ok(())
}
//-----------------------------------------

View File

@ -145,11 +145,11 @@ fn accepts_quiet() -> Result<()> {
let md = mk_valid_md(&mut td)?;
let output = run_ok_raw(thin_check_cmd(args!["--quiet", &md]))?;
if output.stdout.len() > 0 {
if !output.stdout.is_empty() {
eprintln!("stdout: {:?}", &std::str::from_utf8(&output.stdout));
}
if output.stderr.len() > 0 {
if !output.stderr.is_empty() {
eprintln!("stderr: {:?}", &std::str::from_utf8(&output.stderr));
}
assert_eq!(output.stdout.len(), 0);

View File

@ -15,8 +15,7 @@ use common::thin::*;
//------------------------------------------
const USAGE: &str =
"thin_dump 0.9.0
const USAGE: &str = "thin_dump 0.9.0
Dump thin-provisioning metadata to stdout in XML format
USAGE:
@ -178,15 +177,13 @@ fn repair_superblock() -> Result<()> {
let before = run_ok_raw(thin_dump_cmd(args![&md]))?;
damage_superblock(&md)?;
let after = run_ok_raw(thin_dump_cmd(
args![
"--repair",
"--transaction-id=1",
"--data-block-size=128",
"--nr-data-blocks=20480",
&md
],
))?;
let after = run_ok_raw(thin_dump_cmd(args![
"--repair",
"--transaction-id=1",
"--data-block-size=128",
"--nr-data-blocks=20480",
&md
]))?;
if !cfg!(feature = "rust_tests") {
assert_eq!(after.stderr.len(), 0);
}
@ -204,15 +201,12 @@ fn missing_transaction_id() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
damage_superblock(&md)?;
let stderr = run_fail(
thin_dump_cmd(
args![
"--repair",
"--data-block-size=128",
"--nr-data-blocks=20480",
&md
],
))?;
let stderr = run_fail(thin_dump_cmd(args![
"--repair",
"--data-block-size=128",
"--nr-data-blocks=20480",
&md
]))?;
assert!(stderr.contains("transaction id"));
Ok(())
}
@ -222,15 +216,12 @@ fn missing_data_block_size() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
damage_superblock(&md)?;
let stderr = run_fail(
thin_dump_cmd(
args![
"--repair",
"--transaction-id=1",
"--nr-data-blocks=20480",
&md
],
))?;
let stderr = run_fail(thin_dump_cmd(args![
"--repair",
"--transaction-id=1",
"--nr-data-blocks=20480",
&md
]))?;
assert!(stderr.contains("data block size"));
Ok(())
}
@ -240,15 +231,12 @@ fn missing_nr_data_blocks() -> Result<()> {
let mut td = TestDir::new()?;
let md = mk_valid_md(&mut td)?;
damage_superblock(&md)?;
let stderr = run_fail(
thin_dump_cmd(
args![
"--repair",
"--transaction-id=1",
"--data-block-size=128",
&md
],
))?;
let stderr = run_fail(thin_dump_cmd(args![
"--repair",
"--transaction-id=1",
"--data-block-size=128",
&md
]))?;
assert!(stderr.contains("nr data blocks"));
Ok(())
}

View File

@ -106,8 +106,18 @@ fn end_to_end() -> Result<()> {
let mut td = TestDir::new()?;
let md_in = mk_valid_md(&mut td)?;
let md_out = mk_zeroed_md(&mut td)?;
run_ok(thin_metadata_pack_cmd(args!["-i", &md_in, "-o", "meta.pack"]))?;
run_ok(thin_metadata_unpack_cmd(args!["-i", "meta.pack", "-o", &md_out]))?;
run_ok(thin_metadata_pack_cmd(args![
"-i",
&md_in,
"-o",
"meta.pack"
]))?;
run_ok(thin_metadata_unpack_cmd(args![
"-i",
"meta.pack",
"-o",
&md_out
]))?;
let dump1 = run_ok(thin_dump_cmd(args![&md_in]))?;
let dump2 = run_ok(thin_dump_cmd(args![&md_out]))?;

View File

@ -161,18 +161,15 @@ fn superblock_succeeds() -> Result<()> {
}
damage_superblock(&md1)?;
let md2 = mk_zeroed_md(&mut td)?;
run_ok(
thin_repair_cmd(
args![
"--transaction-id=1",
"--data-block-size=128",
"--nr-data-blocks=20480",
"-i",
&md1,
"-o",
&md2
],
))?;
run_ok(thin_repair_cmd(args![
"--transaction-id=1",
"--data-block-size=128",
"--nr-data-blocks=20480",
"-i",
&md1,
"-o",
&md2
]))?;
let repaired = run_ok_raw(thin_dump_cmd(args![&md2]))?;
if !cfg!(feature = "rust_tests") {
assert_eq!(repaired.stderr.len(), 0);

View File

@ -26,7 +26,7 @@ namespace thin_provisioning {
};
inline bool operator==(device_details const& lhs, device_details const& rhs) {
return false; // device_details are not compariable
return false; // device_details are not comparable
}
inline bool operator!=(device_details const& lhs, device_details const& rhs) {

View File

@ -135,7 +135,9 @@ namespace {
public:
ll_mapping_tree_emitter(block_manager::ptr bm,
indented_stream &out)
: bm_(bm), out_(out) {
: bm_(bm),
nv_(create_btree_node_validator()),
out_(out) {
}
void visit(btree_path const &path, block_address tree_root) {
@ -147,6 +149,7 @@ namespace {
try {
block_manager::read_ref rr = bm_->read_lock(tree_root);
node_ref<uint64_traits> n = btree_detail::to_node<uint64_traits>(rr);
nv_->check(n.raw(), tree_root);
node_info ni;
convert_to_node_info(n, ni);
output_node_info(out_, ni);
@ -160,6 +163,7 @@ namespace {
}
private:
block_manager::ptr bm_;
bcache::validator::ptr nv_;
indented_stream& out_;
};