Merge branch '2020-08-13-thin-check-rewrite' into main

Joe Thornber 2020-10-09 11:21:12 +01:00
commit c42b623e39
21 changed files with 2822 additions and 458 deletions

Cargo.lock (generated, 122 lines changed)

@ -49,9 +49,9 @@ dependencies = [
[[package]]
name = "autocfg"
version = "1.0.0"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d"
checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a"
[[package]]
name = "base64"
@ -72,10 +72,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de"
[[package]]
name = "cc"
version = "1.0.58"
name = "cassowary"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9a06fb2e53271d7c279ec1efea6ab691c35a2ae67ec0d91d7acec0caf13b518"
checksum = "df8670b8c7b9dae1793364eafadf7239c40d669904660c5960d74cfd80b46a53"
[[package]]
name = "cc"
version = "1.0.60"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ef611cc68ff783f18535d77ddd080185275713d852c4f5cbb6122c462a7a825c"
[[package]]
name = "cfg-if"
@ -85,9 +91,9 @@ checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822"
[[package]]
name = "clap"
version = "2.33.2"
version = "2.33.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10040cdf04294b565d9e0319955430099ec3813a64c952b86a41200ad714ae48"
checksum = "37e58ac78573c40708d45522f0d80fa2f01cc4f9b4e2bf749807255454312002"
dependencies = [
"ansi_term",
"atty",
@ -130,6 +136,12 @@ dependencies = [
"cfg-if",
]
[[package]]
name = "data-encoding"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4d0e2d24e5ee3b23a01de38eefdcd978907890701f08ffffd4cb457ca4ee8d6"
[[package]]
name = "duct"
version = "0.13.4"
@ -160,15 +172,15 @@ dependencies = [
[[package]]
name = "fixedbitset"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fc4fcacf5cd3681968f6524ea159383132937739c6c40dabab9e37ed515911b"
checksum = "4e08c8bc7575d7e091fe0706963bd22e2a4be6a64da995f03b2a5a57d66ad015"
[[package]]
name = "flate2"
version = "1.0.16"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68c90b0fc46cf89d227cc78b40e494ff81287a92dd07631e5af0d06fe3cf885e"
checksum = "766d0e77a2c1502169d4a93ff3b8c15a71fd946cd0126309752104e5f3c46d94"
dependencies = [
"cfg-if",
"crc32fast",
@ -273,9 +285,9 @@ dependencies = [
[[package]]
name = "getrandom"
version = "0.1.14"
version = "0.1.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb"
checksum = "fc587bc0ec293155d5bfa6b9891ec18a1e330c234f896ea47fbada4cadbe47e6"
dependencies = [
"cfg-if",
"libc",
@ -340,9 +352,9 @@ dependencies = [
[[package]]
name = "libc"
version = "0.2.74"
version = "0.2.77"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a2f02823cf78b754822df5f7f268fb59822e7296276d3e069d8e8cb26a14bd10"
checksum = "f2f96b10ec2560088a8e76961b00d47107b3a625fecb76dedb29ee7ccbf98235"
[[package]]
name = "log"
@ -361,11 +373,12 @@ checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400"
[[package]]
name = "miniz_oxide"
version = "0.4.0"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be0f75932c1f6cfae3c04000e40114adf955636e19040f9c0a2c380702aa1c7f"
checksum = "c60c0dfe32c10b43a144bad8fc83538c52f58302c92300ea7ec7bf7b38d5a7b9"
dependencies = [
"adler",
"autocfg",
]
[[package]]
@ -383,9 +396,7 @@ dependencies = [
[[package]]
name = "nom"
version = "5.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ffb4262d26ed83a1c0a33a38fe2bb15797329c85770da05e6b828ddb782627af"
version = "6.0.0-alpha1"
dependencies = [
"lexical-core",
"memchr",
@ -394,9 +405,9 @@ dependencies = [
[[package]]
name = "num-derive"
version = "0.3.1"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e0396233fb2d5b0ae3f05ff6aba9a09185f7f6e70f87fb01147d545f85364665"
checksum = "6f09b9841adb6b5e1f89ef7087ea636e0fd94b2851f887c1e3eb5d5f8228fab3"
dependencies = [
"proc-macro2",
"quote",
@ -429,10 +440,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17b02fc0ff9a9e4b35b3342880f48e896ebf69f2967921fe8646bf5b7125956a"
[[package]]
name = "once_cell"
version = "1.4.0"
name = "numtoa"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b631f7e854af39a1739f401cf34a8a013dfe09eac4fa4dba91e9768bd28168d"
checksum = "b8f8bdf33df195859076e54ab11ee78a1b208382d3a26ec40d142ffc1ecc49ef"
[[package]]
name = "once_cell"
version = "1.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "260e51e7efe62b592207e9e13a68e43692a7a279171d6ba57abd208bf23645ad"
[[package]]
name = "os_pipe"
@ -472,9 +489,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "ppv-lite86"
version = "0.2.8"
version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "237a5ed80e274dbc66f86bd59c1e25edc039660be53194b5fe0a482e0f2612ea"
checksum = "c36fa947111f5c62a733b652544dd0016a43ce89619538a8ef92724a6f501a20"
[[package]]
name = "proc-macro-hack"
@ -490,9 +507,9 @@ checksum = "eba180dafb9038b050a4c280019bbedf9f2467b61e5d892dcad585bb57aadc5a"
[[package]]
name = "proc-macro2"
version = "1.0.19"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "04f5f085b5d71e2188cb8271e5da0161ad52c3f227a661a3c135fdf28e258b12"
checksum = "36e28516df94f3dd551a587da5357459d9b36d945a7c37c3557928c1c2ff2a2c"
dependencies = [
"unicode-xid",
]
@ -585,6 +602,15 @@ version = "0.1.57"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41cc0f7e4d5d4544e8861606a285bb08d3e70712ccc7d2b84d7c0ccfaf4b05ce"
[[package]]
name = "redox_termios"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
dependencies = [
"redox_syscall",
]
[[package]]
name = "regex"
version = "1.3.9"
@ -648,9 +674,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a"
[[package]]
name = "syn"
version = "1.0.38"
version = "1.0.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e69abc24912995b3038597a7a593be5053eb0fb44f3cc5beec0deb421790c1f4"
checksum = "6690e3e9f692504b941dc6c3b188fd28df054f7fb8469ab40680df52fdcc842b"
dependencies = [
"proc-macro2",
"quote",
@ -681,6 +707,18 @@ dependencies = [
"winapi",
]
[[package]]
name = "termion"
version = "1.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c22cec9d8978d906be5ac94bceb5a010d885c626c4c8855721a4dbd20e3ac905"
dependencies = [
"libc",
"numtoa",
"redox_syscall",
"redox_termios",
]
[[package]]
name = "termios"
version = "0.3.2"
@ -709,6 +747,7 @@ dependencies = [
"byteorder",
"clap",
"crc32c",
"data-encoding",
"duct",
"fixedbitset",
"flate2",
@ -727,8 +766,10 @@ dependencies = [
"quickcheck_macros",
"rand",
"tempfile",
"termion",
"thiserror",
"threadpool",
"tui",
]
[[package]]
@ -769,6 +810,25 @@ dependencies = [
"num_cpus",
]
[[package]]
name = "tui"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a977b0bb2e2033a6fef950f218f13622c3c34e59754b704ce3492dedab1dfe95"
dependencies = [
"bitflags",
"cassowary",
"termion",
"unicode-segmentation",
"unicode-width",
]
[[package]]
name = "unicode-segmentation"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0"
[[package]]
name = "unicode-width"
version = "0.1.8"

Cargo.toml

@ -12,6 +12,7 @@ base64 = "0.12"
byteorder = "1.3"
clap = "2.33"
crc32c = "0.4"
data-encoding = "2.3"
duct = "0.13"
fixedbitset = "0.3"
futures = "0.3"
@ -20,7 +21,7 @@ io-uring = "0.3"
indicatif = "0.15"
libc = "0.2.71"
nix = "0.17"
nom = "5.1"
nom = { path = "/home/ejt/builds/nom/" }
num_cpus = "1.13"
num-derive = "0.3"
num-traits = "0.2"
@ -29,8 +30,13 @@ rand = "0.7"
tempfile = "3.1"
threadpool = "1.8"
thiserror = "1.0"
tui = "0.10"
termion = "1.5"
[dev-dependencies]
json = "0.12"
quickcheck = "0.9"
quickcheck_macros = "0.9"
[profile.release]
debug = true

src/bin/thin_check.rs

@ -26,8 +26,15 @@ fn main() {
.help("Only check the superblock.")
.long("super-block-only")
.value_name("SB_ONLY"),
) .arg(
Arg::with_name("AUTO_REPAIR")
)
.arg(
Arg::with_name("SKIP_MAPPINGS")
.help("Don't check the mapping tree")
.long("skip-mappings")
.value_name("SKIP_MAPPINGS"),
)
.arg(
Arg::with_name("AUTO_REPAIR")
.help("Auto repair trivial issues.")
.long("auto-repair"),
)
@ -88,6 +95,8 @@ fn main() {
let opts = ThinCheckOptions {
dev: &input_file,
async_io: !matches.is_present("SYNC_IO"),
sb_only: matches.is_present("SB_ONLY"),
skip_mappings: matches.is_present("SKIP_MAPPINGS"),
ignore_non_fatal: matches.is_present("IGNORE_NON_FATAL"),
auto_repair: matches.is_present("AUTO_REPAIR"),
report,
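The two new flags surface in ThinCheckOptions alongside the existing ones. As a point of reference, here is a minimal sketch of driving the checker programmatically rather than via the CLI; the module paths, the report constructor and the device path are assumptions for illustration, not taken from this diff:

```rust
use std::path::Path;
use std::sync::Arc;
use thinp::report::mk_simple_report;
use thinp::thin::check::{check, ThinCheckOptions};

fn main() -> anyhow::Result<()> {
    // Hypothetical metadata device path, for illustration only.
    let dev = Path::new("/dev/mapper/pool_tmeta");
    let opts = ThinCheckOptions {
        dev,
        async_io: false,        // use the synchronous engine
        sb_only: false,         // new: stop after validating the superblock
        skip_mappings: false,   // new: skip walking the bottom-level mapping trees
        ignore_non_fatal: false,
        auto_repair: false,
        report: Arc::new(mk_simple_report()),
    };
    check(opts)
}
```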

src/bin/thin_dump.rs (new file, 105 lines)

@ -0,0 +1,105 @@
extern crate clap;
extern crate thinp;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::process::exit;
use std::sync::Arc;
use thinp::file_utils;
use thinp::report::*;
use thinp::thin::dump::{dump, ThinDumpOptions};
fn main() {
let parser = App::new("thin_check")
.version(thinp::version::TOOLS_VERSION)
.about("Validates thin provisioning metadata on a device or file.")
.arg(
Arg::with_name("QUIET")
.help("Suppress output messages, return only exit code.")
.short("q")
.long("quiet"),
)
.arg(
Arg::with_name("SB_ONLY")
.help("Only check the superblock.")
.long("super-block-only")
.value_name("SB_ONLY"),
)
.arg(
Arg::with_name("SKIP_MAPPINGS")
.help("Don't check the mapping tree")
.long("skip-mappings")
.value_name("SKIP_MAPPINGS"),
)
.arg(
Arg::with_name("AUTO_REPAIR")
.help("Auto repair trivial issues.")
.long("auto-repair"),
)
.arg(
Arg::with_name("IGNORE_NON_FATAL")
.help("Only return a non-zero exit code if a fatal error is found.")
.long("ignore-non-fatal-errors"),
)
.arg(
Arg::with_name("CLEAR_NEEDS_CHECK")
.help("Clears the 'needs_check' flag in the superblock")
.long("clear-needs-check"),
)
.arg(
Arg::with_name("OVERRIDE_MAPPING_ROOT")
.help("Specify a mapping root to use")
.long("override-mapping-root")
.value_name("OVERRIDE_MAPPING_ROOT")
.takes_value(true),
)
.arg(
Arg::with_name("METADATA_SNAPSHOT")
.help("Check the metadata snapshot on a live pool")
.short("m")
.long("metadata-snapshot")
.value_name("METADATA_SNAPSHOT"),
)
.arg(
Arg::with_name("INPUT")
.help("Specify the input device to check")
.required(true)
.index(1),
)
.arg(
Arg::with_name("SYNC_IO")
.help("Force use of synchronous io")
.long("sync-io"),
);
let matches = parser.get_matches();
let input_file = Path::new(matches.value_of("INPUT").unwrap());
if !file_utils::file_exists(input_file) {
eprintln!("Couldn't find input file '{:?}'.", &input_file);
exit(1);
}
let report;
if matches.is_present("QUIET") {
report = std::sync::Arc::new(mk_quiet_report());
} else if atty::is(Stream::Stdout) {
report = std::sync::Arc::new(mk_progress_bar_report());
} else {
report = Arc::new(mk_simple_report());
}
let opts = ThinDumpOptions {
dev: &input_file,
async_io: !matches.is_present("SYNC_IO"),
report,
};
if let Err(reason) = dump(opts) {
println!("{}", reason);
process::exit(1);
}
}

src/bin/thin_explore.rs (new file, 746 lines)

@ -0,0 +1,746 @@
extern crate clap;
use anyhow::{anyhow, Result};
use clap::{App, Arg};
use std::fmt;
use std::io::{self, Write};
use std::path::Path;
use std::sync::mpsc;
use std::sync::{
atomic::{AtomicBool, Ordering},
Arc,
};
use std::thread;
use std::time::Duration;
use termion::event::Key;
use termion::input::TermRead;
use termion::raw::IntoRawMode;
use tui::{
backend::{TermionBackend},
buffer::Buffer,
layout::{Constraint, Direction, Layout, Rect},
style::{Color, Modifier, Style},
terminal::Frame,
text::{Span},
widgets::{Block, Borders, List, ListItem, ListState, Row, StatefulWidget, Table, Widget},
Terminal,
};
use thinp::io_engine::*;
use thinp::pdata::btree;
use thinp::pdata::unpack::*;
use thinp::thin::block_time::*;
use thinp::thin::superblock::*;
//------------------------------------
pub enum Event<I> {
Input(I),
Tick,
}
pub struct Events {
rx: mpsc::Receiver<Event<Key>>,
input_handle: thread::JoinHandle<()>,
ignore_exit_key: Arc<AtomicBool>,
}
#[derive(Debug, Clone, Copy)]
pub struct Config {
pub exit_key: Key,
pub tick_rate: Duration,
}
impl Default for Config {
fn default() -> Config {
Config {
exit_key: Key::Char('q'),
tick_rate: Duration::from_millis(250),
}
}
}
impl Events {
pub fn new() -> Events {
Events::with_config(Config::default())
}
pub fn with_config(config: Config) -> Events {
let (tx, rx) = mpsc::channel();
let ignore_exit_key = Arc::new(AtomicBool::new(false));
let input_handle = {
let tx = tx.clone();
let ignore_exit_key = ignore_exit_key.clone();
thread::spawn(move || {
let stdin = io::stdin();
for evt in stdin.keys() {
if let Ok(key) = evt {
if let Err(err) = tx.send(Event::Input(key)) {
eprintln!("{}", err);
return;
}
if !ignore_exit_key.load(Ordering::Relaxed) && key == config.exit_key {
return;
}
}
}
})
};
Events {
rx,
ignore_exit_key,
input_handle,
}
}
pub fn next(&self) -> Result<Event<Key>, mpsc::RecvError> {
self.rx.recv()
}
pub fn disable_exit_key(&mut self) {
self.ignore_exit_key.store(true, Ordering::Relaxed);
}
pub fn enable_exit_key(&mut self) {
self.ignore_exit_key.store(false, Ordering::Relaxed);
}
}
//------------------------------------
fn ls_next(ls: &mut ListState, max: usize) {
let i = match ls.selected() {
Some(i) => {
if i >= max - 1 {
max - 1
} else {
i + 1
}
}
None => 0,
};
ls.select(Some(i));
}
fn ls_previous(ls: &mut ListState) {
let i = match ls.selected() {
Some(i) => {
if i == 0 {
0
} else {
i - 1
}
}
None => 0,
};
ls.select(Some(i));
}
//------------------------------------
struct SBWidget {
sb: Superblock,
}
impl Widget for SBWidget {
fn render(self, area: Rect, buf: &mut Buffer) {
let sb = &self.sb;
let flags = ["flags".to_string(), format!("{}", sb.flags)];
let block = ["block".to_string(), format!("{}", sb.block)];
let uuid = ["uuid".to_string(), format!("-")];
let version = ["version".to_string(), format!("{}", sb.version)];
let time = ["time".to_string(), format!("{}", sb.time)];
let transaction_id = [
"transaction_id".to_string(),
format!("{}", sb.transaction_id),
];
let metadata_snap = [
"metadata_snap".to_string(),
if sb.metadata_snap == 0 {
"-".to_string()
} else {
format!("{}", sb.metadata_snap)
},
];
let mapping_root = ["mapping root".to_string(), format!("{}", sb.mapping_root)];
let details_root = ["details root".to_string(), format!("{}", sb.details_root)];
let data_block_size = [
"data block size".to_string(),
format!("{}k", sb.data_block_size * 2),
];
let table = Table::new(
["Field", "Value"].iter(),
vec![
Row::Data(flags.iter()),
Row::Data(block.iter()),
Row::Data(uuid.iter()),
Row::Data(version.iter()),
Row::Data(time.iter()),
Row::Data(transaction_id.iter()),
Row::Data(metadata_snap.iter()),
Row::Data(mapping_root.iter()),
Row::Data(details_root.iter()),
Row::Data(data_block_size.iter()),
]
.into_iter(),
)
.block(
Block::default()
.borders(Borders::ALL)
.title("Superblock".to_string()),
)
.header_style(Style::default().fg(Color::Yellow))
.widths(&[Constraint::Length(20), Constraint::Length(60)])
.style(Style::default().fg(Color::White))
.column_spacing(1);
Widget::render(table, area, buf);
}
}
//------------------------------------
struct HeaderWidget<'a> {
title: String,
hdr: &'a btree::NodeHeader,
}
impl<'a> Widget for HeaderWidget<'a> {
fn render(self, area: Rect, buf: &mut Buffer) {
let hdr = &self.hdr;
let block = ["block".to_string(), format!("{}", hdr.block)];
let kind = [
"type".to_string(),
match hdr.is_leaf {
true => "LEAF".to_string(),
false => "INTERNAL".to_string(),
},
];
let nr_entries = ["nr_entries".to_string(), format!("{}", hdr.nr_entries)];
let max_entries = ["max_entries".to_string(), format!("{}", hdr.max_entries)];
let value_size = ["value size".to_string(), format!("{}", hdr.value_size)];
let table = Table::new(
["Field", "Value"].iter(),
vec![
Row::Data(block.iter()),
Row::Data(kind.iter()),
Row::Data(nr_entries.iter()),
Row::Data(max_entries.iter()),
Row::Data(value_size.iter()),
]
.into_iter(),
)
.block(Block::default().borders(Borders::ALL).title(self.title))
.header_style(Style::default().fg(Color::Yellow))
.widths(&[Constraint::Length(20), Constraint::Length(60)])
.style(Style::default().fg(Color::White))
.column_spacing(1);
Widget::render(table, area, buf);
}
}
/*
fn read_node_header(engine: &dyn IoEngine, loc: u64) -> Result<btree::NodeHeader> {
let b = engine.read(loc)?;
unpack(&b.get_data()).map_err(|_| anyhow!("couldn't unpack btree header"))
}
*/
fn read_node<V: Unpack>(engine: &dyn IoEngine, loc: u64) -> Result<btree::Node<V>> {
let b = engine.read(loc)?;
let path = Vec::new();
btree::unpack_node(&path, &b.get_data(), true, false)
.map_err(|_| anyhow!("couldn't unpack btree node"))
}
//------------------------------------
// For types that have a concept of adjacency, but not of a distance
// between values. For instance, with a BlockTime there is no delta that
// will get you from one value to another with a different time.
trait Adjacent {
fn adjacent(&self, rhs: &Self) -> bool;
}
impl Adjacent for u64 {
fn adjacent(&self, rhs: &Self) -> bool {
(*self + 1) == *rhs
}
}
impl Adjacent for BlockTime {
fn adjacent(&self, rhs: &Self) -> bool {
if self.time != rhs.time {
return false;
}
self.block + 1 == rhs.block
}
}
impl<X: Adjacent, Y: Adjacent> Adjacent for (X, Y) {
fn adjacent(&self, rhs: &Self) -> bool {
self.0.adjacent(&rhs.0) && self.1.adjacent(&rhs.1)
}
}
fn adjacent_runs<V: Adjacent + Copy>(mut ns: Vec<V>) -> Vec<(V, usize)> {
let mut result = Vec::new();
if ns.len() == 0 {
return result;
}
// Reverse so we can pop without cloning the value.
ns.reverse();
let mut base = ns.pop().unwrap();
let mut current = base;
let mut len = 1;
while let Some(v) = ns.pop() {
if current.adjacent(&v) {
current = v;
len += 1;
} else {
result.push((base.clone(), len));
base = v.clone();
current = v.clone();
len = 1;
}
}
result.push((base.clone(), len));
result
}
fn mk_runs<V: Adjacent + Sized + Copy>(keys: &[u64], values: &[V]) -> Vec<((u64, V), usize)> {
let mut pairs = Vec::new();
for (k, v) in keys.iter().zip(values.iter()) {
pairs.push((k.clone(), v.clone()));
}
adjacent_runs(pairs)
}
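// Editor's note, not part of this commit: a hypothetical test to make the run
// collapsing concrete. Plain u64s are adjacent when they differ by exactly one,
// so adjacent_runs() folds consecutive values into (start, length) pairs;
// mk_runs() does the same for (key, value) pairs, which is what lets mk_item()
// render a whole run as a single "key x len -> value" line.
#[test]
fn collapses_consecutive_values_into_runs() {
    // 1,2,3 form a run of length 3; 7,8 a run of length 2; 20 stands alone.
    let runs = adjacent_runs(vec![1u64, 2, 3, 7, 8, 20]);
    assert_eq!(runs, vec![(1, 3), (7, 2), (20, 1)]);
}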
struct NodeWidget<'a, V: Unpack + Adjacent + Clone> {
title: String,
node: &'a btree::Node<V>,
}
fn mk_item<'a, V: fmt::Display>(k: u64, v: &V, len: usize) -> ListItem<'a> {
if len > 1 {
ListItem::new(Span::raw(format!("{} x {} -> {}", k, len as u64, v)))
} else {
ListItem::new(Span::raw(format!("{} -> {}", k, v)))
}
}
fn mk_items<'a, V>(keys: &[u64], values: &[V], selected: usize) -> (Vec<ListItem<'a>>, usize)
where
V: Adjacent + Copy + fmt::Display,
{
let mut items = Vec::new();
let bkeys = &keys[0..selected];
let key = keys[selected];
let akeys = &keys[(selected + 1)..];
let bvalues = &values[0..selected];
let value = values[selected];
let avalues = &values[(selected + 1)..];
let bruns = mk_runs(bkeys, bvalues);
let aruns = mk_runs(akeys, avalues);
let i = bruns.len();
for ((k, v), len) in bruns {
items.push(mk_item(k, &v, len));
}
items.push(ListItem::new(Span::raw(format!("{} -> {}", key, value))));
for ((k, v), len) in aruns {
items.push(mk_item(k, &v, len));
}
(items, i)
}
impl<'a, V: Unpack + fmt::Display + Adjacent + Copy> StatefulWidget for NodeWidget<'a, V> {
type State = ListState;
fn render(self, area: Rect, buf: &mut Buffer, state: &mut ListState) {
let chunks = Layout::default()
.direction(Direction::Vertical)
.constraints([Constraint::Min(10), Constraint::Percentage(80)].as_ref())
.split(area);
let hdr = HeaderWidget {
title: self.title,
hdr: self.node.get_header(),
};
hdr.render(chunks[0], buf);
let items: Vec<ListItem>;
let i: usize;
let selected = state.selected().unwrap();
let mut state = ListState::default();
match self.node {
btree::Node::Internal { keys, values, .. } => {
let (items_, i_) = mk_items(keys, values, selected);
items = items_;
i = i_;
}
btree::Node::Leaf { keys, values, .. } => {
let (items_, i_) = mk_items(keys, values, selected);
items = items_;
i = i_;
}
}
state.select(Some(i));
let items = List::new(items)
.block(Block::default().borders(Borders::ALL).title("Entries"))
.highlight_style(
Style::default()
.bg(Color::LightGreen)
.add_modifier(Modifier::BOLD),
);
StatefulWidget::render(items, chunks[1], buf, &mut state);
}
}
//------------------------------------
enum Action {
PushTopLevel(u64),
PushBottomLevel(u32, u64),
PopPanel,
}
use Action::*;
type Frame_<'a, 'b> = Frame<'a, TermionBackend<termion::raw::RawTerminal<std::io::StdoutLock<'b>>>>;
trait Panel {
fn render(&mut self, area: Rect, f: &mut Frame_);
fn input(&mut self, k: Key) -> Option<Action>;
fn path_action(&mut self, child: u64) -> Option<Action>;
}
struct SBPanel {
sb: Superblock,
}
impl Panel for SBPanel {
fn render(&mut self, area: Rect, f: &mut Frame_) {
// FIXME: get rid of clone
let w = SBWidget {
sb: self.sb.clone(),
};
f.render_widget(w, area);
}
fn input(&mut self, _k: Key) -> Option<Action> {
None
}
fn path_action(&mut self, child: u64) -> Option<Action> {
if child == self.sb.mapping_root {
Some(PushTopLevel(child))
} else {
None
}
}
}
struct TopLevelPanel {
node: btree::Node<u64>,
nr_entries: usize,
state: ListState,
}
impl TopLevelPanel {
fn new(node: btree::Node<u64>) -> TopLevelPanel {
let nr_entries = node.get_header().nr_entries as usize;
let mut state = ListState::default();
state.select(Some(0));
TopLevelPanel {
node,
nr_entries,
state,
}
}
}
impl Panel for TopLevelPanel {
fn render(&mut self, area: Rect, f: &mut Frame_) {
let w = NodeWidget {
title: "Top Level".to_string(),
node: &self.node, // FIXME: get rid of clone
};
f.render_stateful_widget(w, area, &mut self.state);
}
fn input(&mut self, k: Key) -> Option<Action> {
match k {
Key::Char('j') | Key::Down => {
ls_next(&mut self.state, self.nr_entries);
None
}
Key::Char('k') | Key::Up => {
ls_previous(&mut self.state);
None
}
Key::Char('l') | Key::Right => match &self.node {
btree::Node::Internal { values, .. } => {
Some(PushTopLevel(values[self.state.selected().unwrap()]))
}
btree::Node::Leaf { values, keys, .. } => {
let index = self.state.selected().unwrap();
Some(PushBottomLevel(keys[index] as u32, values[index]))
}
},
Key::Char('h') | Key::Left => Some(PopPanel),
_ => None,
}
}
fn path_action(&mut self, child: u64) -> Option<Action> {
match &self.node {
btree::Node::Internal { values, .. } => {
for i in 0..values.len() {
if values[i] == child {
self.state.select(Some(i));
return Some(PushTopLevel(child));
}
}
return None;
}
btree::Node::Leaf { keys, values, .. } => {
for i in 0..values.len() {
if values[i] == child {
self.state.select(Some(i));
return Some(PushBottomLevel(keys[i] as u32, child));
}
}
return None;
}
}
}
}
struct BottomLevelPanel {
thin_id: u32,
node: btree::Node<BlockTime>,
nr_entries: usize,
state: ListState,
}
impl BottomLevelPanel {
fn new(thin_id: u32, node: btree::Node<BlockTime>) -> BottomLevelPanel {
let nr_entries = node.get_header().nr_entries as usize;
let mut state = ListState::default();
state.select(Some(0));
BottomLevelPanel {
thin_id,
node,
nr_entries,
state,
}
}
}
impl Panel for BottomLevelPanel {
fn render(&mut self, area: Rect, f: &mut Frame_) {
let w = NodeWidget {
title: format!("Thin dev #{}", self.thin_id),
node: &self.node,
};
f.render_stateful_widget(w, area, &mut self.state);
}
fn input(&mut self, k: Key) -> Option<Action> {
match k {
Key::Char('j') | Key::Down => {
ls_next(&mut self.state, self.nr_entries);
None
}
Key::Char('k') | Key::Up => {
ls_previous(&mut self.state);
None
}
Key::Char('l') | Key::Right => match &self.node {
btree::Node::Internal { values, .. } => Some(PushBottomLevel(
self.thin_id,
values[self.state.selected().unwrap()],
)),
_ => None,
},
Key::Char('h') | Key::Left => Some(PopPanel),
_ => None,
}
}
fn path_action(&mut self, child: u64) -> Option<Action> {
match &self.node {
btree::Node::Internal { values, .. } => {
for i in 0..values.len() {
if values[i] == child {
self.state.select(Some(i));
return Some(PushBottomLevel(self.thin_id, child));
}
}
return None;
}
btree::Node::Leaf { .. } => None,
}
}
}
//------------------------------------
fn perform_action(
panels: &mut Vec<Box<dyn Panel>>,
engine: &dyn IoEngine,
action: Action,
) -> Result<()> {
match action {
PushTopLevel(b) => {
let node = read_node::<u64>(engine, b)?;
panels.push(Box::new(TopLevelPanel::new(node)));
}
PushBottomLevel(thin_id, b) => {
let node = read_node::<BlockTime>(engine, b)?;
panels.push(Box::new(BottomLevelPanel::new(thin_id, node)));
}
PopPanel => {
if panels.len() > 2 {
panels.pop();
}
}
};
Ok(())
}
fn explore(path: &Path, node_path: Option<Vec<u64>>) -> Result<()> {
let stdout = io::stdout();
let mut stdout = stdout.lock().into_raw_mode()?;
write!(stdout, "{}", termion::clear::All)?;
let backend = TermionBackend::new(stdout);
let mut terminal = Terminal::new(backend)?;
let engine = SyncIoEngine::new(&path, 1, false)?;
let mut panels: Vec<Box<dyn Panel>> = Vec::new();
if let Some(path) = node_path {
assert_eq!(path[0], 0);
let sb = read_superblock(&engine, path[0])?;
panels.push(Box::new(SBPanel { sb }));
for b in &path[1..] {
let action = panels.last_mut().unwrap().path_action(*b);
if let Some(action) = action {
perform_action(&mut panels, &engine, action)?;
} else {
return Err(anyhow!("bad node path: couldn't find child node {}", b));
}
}
} else {
let sb = read_superblock(&engine, 0)?;
panels.push(Box::new(SBPanel { sb: sb.clone() }));
let node = read_node::<u64>(&engine, sb.mapping_root)?;
panels.push(Box::new(TopLevelPanel::new(node)));
}
let events = Events::new();
'main: loop {
let render_panels = |f: &mut Frame_| {
let chunks = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(f.size());
let mut base = panels.len();
if base >= 2 {
base -= 2;
} else {
base = 0;
}
for i in base..panels.len() {
panels[i].render(chunks[i - base], f);
}
};
terminal.draw(render_panels)?;
let last = panels.len() - 1;
let active_panel = &mut panels[last];
if let Event::Input(key) = events.next()? {
match key {
Key::Char('q') => break 'main,
_ => match active_panel.input(key) {
Some(action) => {
perform_action(&mut panels, &engine, action)?;
}
_ => {}
},
}
}
}
events.input_handle.join().unwrap();
Ok(())
}
//------------------------------------
fn main() -> Result<()> {
let parser = App::new("thin_explore")
.version(thinp::version::TOOLS_VERSION)
.about("A text user interface for examining thin metadata.")
.arg(
Arg::with_name("NODE_PATH")
.help("Pass in a node path as output by thin_check")
.short("p")
.long("node-path")
.value_name("NODE_PATH"),
)
.arg(
Arg::with_name("INPUT")
.help("Specify the input device to check")
.required(true)
.index(1),
);
let matches = parser.get_matches();
let node_path = matches
.value_of("NODE_PATH")
.map(|text| btree::decode_node_path(text).unwrap());
let input_file = Path::new(matches.value_of("INPUT").unwrap());
explore(&input_file, node_path)
}
//------------------------------------

src/io_engine.rs

@ -1,10 +1,11 @@
use anyhow::Result;
use io_uring::opcode::{self, types};
use io_uring::IoUring;
use std::alloc::{alloc, dealloc, Layout};
use std::fs::File;
use std::fs::OpenOptions;
use std::io::Result;
use std::io::{self, Read, Seek, Write};
use std::ops::{Deref, DerefMut};
use std::os::unix::fs::OpenOptionsExt;
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;
@ -15,10 +16,10 @@ use std::sync::{Arc, Condvar, Mutex};
pub const BLOCK_SIZE: usize = 4096;
const ALIGN: usize = 4096;
#[derive(Debug)]
#[derive(Clone, Debug)]
pub struct Block {
pub loc: u64,
pub data: *mut u8,
data: *mut u8,
}
impl Block {
@ -49,10 +50,14 @@ unsafe impl Send for Block {}
pub trait IoEngine {
fn get_nr_blocks(&self) -> u64;
fn read(&self, block: &mut Block) -> Result<()>;
fn read_many(&self, blocks: &mut [Block]) -> Result<()>;
fn read(&self, b: u64) -> Result<Block>;
// The whole io could fail, or individual blocks
fn read_many(&self, blocks: &[u64]) -> Result<Vec<Result<Block>>>;
fn write(&self, block: &Block) -> Result<()>;
fn write_many(&self, blocks: &[Block]) -> Result<()>;
// The whole io could fail, or individual blocks
fn write_many(&self, blocks: &[Block]) -> Result<Vec<Result<()>>>;
}
fn get_nr_blocks(path: &Path) -> io::Result<u64> {
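As the comments in the trait note, read_many and write_many now separate failure of the whole submission from failures of individual blocks, so callers deal with two layers of Result. A minimal sketch of the intended handling (the helper name is made up; it assumes only the trait signatures above, with results returned in submission order as both engines below do):

```rust
fn count_readable(engine: &dyn IoEngine, locs: &[u64]) -> anyhow::Result<usize> {
    // Outer Result: the submission as a whole failed (e.g. the io_uring submit).
    let blocks = engine.read_many(locs)?;

    // Inner Results: individual blocks may still fail (EIO, short read, ...).
    let mut good = 0;
    for (loc, b) in locs.iter().zip(blocks) {
        match b {
            Ok(_block) => good += 1,
            Err(e) => eprintln!("block {} failed: {}", loc, e),
        }
    }
    Ok(good)
}
```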
@ -68,19 +73,54 @@ pub struct SyncIoEngine {
cvar: Condvar,
}
struct FileGuard<'a> {
engine: &'a SyncIoEngine,
file: Option<File>,
}
impl<'a> FileGuard<'a> {
fn new(engine: &'a SyncIoEngine, file: File) -> FileGuard<'a> {
FileGuard {
engine,
file: Some(file),
}
}
}
impl<'a> Deref for FileGuard<'a> {
type Target = File;
fn deref(&self) -> &File {
&self.file.as_ref().expect("empty file guard")
}
}
impl<'a> DerefMut for FileGuard<'a> {
fn deref_mut(&mut self) -> &mut File {
match &mut self.file {
None => {
todo!();
}
Some(f) => f,
}
}
}
impl<'a> Drop for FileGuard<'a> {
fn drop(&mut self) {
self.engine.put(self.file.take().expect("empty file guard"));
}
}
impl SyncIoEngine {
fn open_file(path: &Path, writeable: bool) -> Result<File> {
let file = OpenOptions::new()
.read(true)
.write(writeable)
.custom_flags(libc::O_DIRECT)
.open(path)?;
let file = OpenOptions::new().read(true).write(writeable).open(path)?;
Ok(file)
}
pub fn new(path: &Path, nr_files: usize, writeable: bool) -> Result<SyncIoEngine> {
let mut files = Vec::new();
let mut files = Vec::with_capacity(nr_files);
for _n in 0..nr_files {
files.push(SyncIoEngine::open_file(path, writeable)?);
}
@ -92,13 +132,14 @@ impl SyncIoEngine {
})
}
fn get(&self) -> File {
fn get(&self) -> FileGuard {
let mut files = self.files.lock().unwrap();
while files.len() == 0 {
files = self.cvar.wait(files).unwrap();
}
files.pop().unwrap()
FileGuard::new(self, files.pop().unwrap())
}
fn put(&self, f: File) {
@ -106,6 +147,19 @@ impl SyncIoEngine {
files.push(f);
self.cvar.notify_one();
}
fn read_(input: &mut File, loc: u64) -> Result<Block> {
let b = Block::new(loc);
input.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
input.read_exact(b.get_data())?;
Ok(b)
}
fn write_(output: &mut File, b: &Block) -> Result<()> {
output.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
output.write_all(&b.get_data())?;
Ok(())
}
}
impl IoEngine for SyncIoEngine {
@ -113,44 +167,30 @@ impl IoEngine for SyncIoEngine {
self.nr_blocks
}
fn read(&self, b: &mut Block) -> Result<()> {
let mut input = self.get();
input.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
input.read_exact(&mut b.get_data())?;
self.put(input);
Ok(())
fn read(&self, loc: u64) -> Result<Block> {
SyncIoEngine::read_(&mut self.get(), loc)
}
fn read_many(&self, blocks: &mut [Block]) -> Result<()> {
fn read_many(&self, blocks: &[u64]) -> Result<Vec<Result<Block>>> {
let mut input = self.get();
let mut bs = Vec::new();
for b in blocks {
input.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
input.read_exact(&mut b.get_data())?;
bs.push(SyncIoEngine::read_(&mut input, *b));
}
self.put(input);
Ok(())
Ok(bs)
}
fn write(&self, b: &Block) -> Result<()> {
let mut input = self.get();
input.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
input.write_all(&b.get_data())?;
self.put(input);
Ok(())
SyncIoEngine::write_(&mut self.get(), b)
}
fn write_many(&self, blocks: &[Block]) -> Result<()> {
let mut input = self.get();
fn write_many(&self, blocks: &[Block]) -> Result<Vec<Result<()>>> {
let mut output = self.get();
let mut bs = Vec::new();
for b in blocks {
input.seek(io::SeekFrom::Start(b.loc * BLOCK_SIZE as u64))?;
input.write_all(&b.get_data())?;
bs.push(SyncIoEngine::write_(&mut output, b));
}
self.put(input);
Ok(())
Ok(bs)
}
}
@ -188,19 +228,21 @@ impl AsyncIoEngine {
}
// FIXME: refactor next two fns
fn read_many_(&self, blocks: &mut [Block]) -> Result<()> {
fn read_many_(&self, blocks: Vec<Block>) -> Result<Vec<Result<Block>>> {
use std::io::*;
let mut inner = self.inner.lock().unwrap();
let count = blocks.len();
let fd = types::Target::Fd(inner.input.as_raw_fd());
for b in blocks.iter_mut() {
for (i, b) in blocks.iter().enumerate() {
let read_e = opcode::Read::new(fd, b.data, BLOCK_SIZE as u32)
.offset(b.loc as i64 * BLOCK_SIZE as i64);
unsafe {
let mut queue = inner.ring.submission().available();
queue
.push(read_e.build().user_data(1))
.push(read_e.build().user_data(i as u64))
.ok()
.expect("queue is full");
}
@ -208,30 +250,52 @@ impl AsyncIoEngine {
inner.ring.submit_and_wait(count)?;
let cqes = inner.ring.completion().available().collect::<Vec<_>>();
let mut cqes = inner.ring.completion().available().collect::<Vec<_>>();
// FIXME: return proper errors
assert_eq!(cqes.len(), count);
for c in &cqes {
assert_eq!(c.result(), BLOCK_SIZE as i32);
if cqes.len() != count {
return Err(Error::new(
ErrorKind::Other,
"insufficient io_uring completions",
));
}
Ok(())
// reorder cqes
cqes.sort_by(|a, b| a.user_data().partial_cmp(&b.user_data()).unwrap());
let mut rs = Vec::new();
let mut i = 0;
for b in blocks {
let c = &cqes[i];
i += 1;
let r = c.result();
if r < 0 {
let error = Error::from_raw_os_error(-r);
rs.push(Err(error));
} else if c.result() != BLOCK_SIZE as i32 {
rs.push(Err(Error::new(ErrorKind::UnexpectedEof, "short read")));
} else {
rs.push(Ok(b));
}
}
Ok(rs)
}
fn write_many_(&self, blocks: &[Block]) -> Result<()> {
fn write_many_(&self, blocks: &[Block]) -> Result<Vec<Result<()>>> {
use std::io::*;
let mut inner = self.inner.lock().unwrap();
let count = blocks.len();
let fd = types::Target::Fd(inner.input.as_raw_fd());
for b in blocks.iter() {
for (i, b) in blocks.iter().enumerate() {
let write_e = opcode::Write::new(fd, b.data, BLOCK_SIZE as u32)
.offset(b.loc as i64 * BLOCK_SIZE as i64);
unsafe {
let mut queue = inner.ring.submission().available();
queue
.push(write_e.build().user_data(1))
.push(write_e.build().user_data(i as u64))
.ok()
.expect("queue is full");
}
@ -239,15 +303,24 @@ impl AsyncIoEngine {
inner.ring.submit_and_wait(count)?;
let cqes = inner.ring.completion().available().collect::<Vec<_>>();
let mut cqes = inner.ring.completion().available().collect::<Vec<_>>();
// FIXME: return proper errors
assert_eq!(cqes.len(), count);
for c in &cqes {
assert_eq!(c.result(), BLOCK_SIZE as i32);
// reorder cqes
cqes.sort_by(|a, b| a.user_data().partial_cmp(&b.user_data()).unwrap());
let mut rs = Vec::new();
for c in cqes {
let r = c.result();
if r < 0 {
let error = Error::from_raw_os_error(-r);
rs.push(Err(error));
} else if r != BLOCK_SIZE as i32 {
rs.push(Err(Error::new(ErrorKind::UnexpectedEof, "short write")));
} else {
rs.push(Ok(()));
}
}
Ok(())
Ok(rs)
}
}
@ -273,16 +346,17 @@ impl IoEngine for AsyncIoEngine {
inner.nr_blocks
}
fn read(&self, b: &mut Block) -> Result<()> {
fn read(&self, b: u64) -> Result<Block> {
let mut inner = self.inner.lock().unwrap();
let fd = types::Target::Fd(inner.input.as_raw_fd());
let b = Block::new(b);
let read_e = opcode::Read::new(fd, b.data, BLOCK_SIZE as u32)
.offset(b.loc as i64 * BLOCK_SIZE as i64);
unsafe {
let mut queue = inner.ring.submission().available();
queue
.push(read_e.build().user_data(1))
.push(read_e.build().user_data(0))
.ok()
.expect("queue is full");
}
@ -291,26 +365,34 @@ impl IoEngine for AsyncIoEngine {
let cqes = inner.ring.completion().available().collect::<Vec<_>>();
// FIXME: return proper errors
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 1);
assert_eq!(cqes[0].result(), BLOCK_SIZE as i32);
Ok(())
let r = cqes[0].result();
use std::io::*;
if r < 0 {
let error = Error::from_raw_os_error(-r);
Err(error)
} else if r != BLOCK_SIZE as i32 {
Err(Error::new(ErrorKind::UnexpectedEof, "short read"))
} else {
Ok(b)
}
}
fn read_many(&self, blocks: &mut [Block]) -> Result<()> {
fn read_many(&self, blocks: &[u64]) -> Result<Vec<Result<Block>>> {
let inner = self.inner.lock().unwrap();
let queue_len = inner.queue_len as usize;
drop(inner);
let mut done = 0;
while done != blocks.len() {
let len = usize::min(blocks.len() - done, queue_len);
self.read_many_(&mut blocks[done..(done + len)])?;
done += len;
let mut results = Vec::new();
for cs in blocks.chunks(queue_len) {
let mut bs = Vec::new();
for b in cs {
bs.push(Block::new(*b));
}
results.append(&mut self.read_many_(bs)?);
}
Ok(())
Ok(results)
}
fn write(&self, b: &Block) -> Result<()> {
@ -322,7 +404,7 @@ impl IoEngine for AsyncIoEngine {
unsafe {
let mut queue = inner.ring.submission().available();
queue
.push(write_e.build().user_data(1))
.push(write_e.build().user_data(0))
.ok()
.expect("queue is full");
}
@ -331,26 +413,32 @@ impl IoEngine for AsyncIoEngine {
let cqes = inner.ring.completion().available().collect::<Vec<_>>();
// FIXME: return proper errors
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 1);
assert_eq!(cqes[0].result(), BLOCK_SIZE as i32);
Ok(())
let r = cqes[0].result();
use std::io::*;
if r < 0 {
let error = Error::from_raw_os_error(-r);
Err(error)
} else if r != BLOCK_SIZE as i32 {
Err(Error::new(ErrorKind::UnexpectedEof, "short write"))
} else {
Ok(())
}
}
fn write_many(&self, blocks: &[Block]) -> Result<()> {
fn write_many(&self, blocks: &[Block]) -> Result<Vec<Result<()>>> {
let inner = self.inner.lock().unwrap();
let queue_len = inner.queue_len as usize;
drop(inner);
let mut results = Vec::new();
let mut done = 0;
while done != blocks.len() {
let len = usize::min(blocks.len() - done, queue_len);
self.write_many_(&blocks[done..(done + len)])?;
results.append(&mut self.write_many_(&blocks[done..(done + len)])?);
done += len;
}
Ok(())
Ok(results)
}
}

src/pack/delta_list.rs

@ -13,7 +13,7 @@ use Delta::*;
pub fn to_delta(ns: &[u64]) -> Vec<Delta> {
use std::cmp::Ordering::*;
let mut ds = Vec::new();
let mut ds = Vec::with_capacity(ns.len());
if !ns.is_empty() {
let mut base = ns[0];

src/pack/mod.rs

@ -1,5 +1,5 @@
pub mod node_encode;
pub mod toplevel;
pub mod vm;
mod delta_list;
mod node_encode;
mod vm;

src/pack/node_encode.rs

@ -1,5 +1,5 @@
use thiserror::Error;
use std::{io, io::Write};
use thiserror::Error;
use nom::{bytes::complete::*, number::complete::*, IResult};
@ -23,7 +23,7 @@ fn nom_to_pr<T>(r: IResult<&[u8], T>) -> PResult<(&[u8], T)> {
}
fn io_to_pr<T>(r: io::Result<T>) -> PResult<T> {
r.map_err(|source| PackError::WriteError {source})
r.map_err(|source| PackError::WriteError { source })
}
//-------------------------------------------
@ -36,7 +36,7 @@ fn run64(i: &[u8], count: usize) -> IResult<&[u8], Vec<u64>> {
struct NodeSummary {
is_leaf: bool,
max_entries: usize,
value_size: usize
value_size: usize,
}
fn summarise_node(data: &[u8]) -> IResult<&[u8], NodeSummary> {
@ -47,11 +47,14 @@ fn summarise_node(data: &[u8]) -> IResult<&[u8], NodeSummary> {
let (i, max_entries) = le_u32(i)?;
let (i, value_size) = le_u32(i)?;
let (i, _padding) = le_u32(i)?;
Ok((i, NodeSummary {
is_leaf: flags == 2,
max_entries: max_entries as usize,
value_size: value_size as usize,
}))
Ok((
i,
NodeSummary {
is_leaf: flags == 2,
max_entries: max_entries as usize,
value_size: value_size as usize,
},
))
}
pub fn pack_btree_node<W: Write>(w: &mut W, data: &[u8]) -> PResult<()> {

src/pack/vm.rs

@ -146,8 +146,8 @@ pub fn pack_u64s<W: Write>(w: &mut W, ns: &[u64]) -> io::Result<()> {
}
fn unshift_nrs(shift: usize, ns: &[u64]) -> (Vec<u64>, Vec<u64>) {
let mut values = Vec::new();
let mut shifts = Vec::new();
let mut values = Vec::with_capacity(ns.len());
let mut shifts = Vec::with_capacity(ns.len());
let mask = (1 << shift) - 1;
for n in ns {
@ -206,8 +206,8 @@ fn unpack_with_width<R: Read>(r: &mut R, nibble: u8) -> io::Result<u64> {
Ok(v)
}
fn unpack_u64s<R: Read>(r: &mut R, count: usize) -> io::Result<Vec<u64>> {
let mut v = Vec::new();
pub fn unpack_u64s<R: Read>(r: &mut R, count: usize) -> io::Result<Vec<u64>> {
let mut v = Vec::with_capacity(count);
for _ in 0..count {
let n = r.read_u64::<LittleEndian>()?;
v.push(n);
@ -215,13 +215,13 @@ fn unpack_u64s<R: Read>(r: &mut R, count: usize) -> io::Result<Vec<u64>> {
Ok(v)
}
struct VM {
pub struct VM {
base: u64,
bytes_written: usize,
}
impl VM {
fn new() -> VM {
pub fn new() -> VM {
VM {
base: 0,
bytes_written: 0,
@ -356,7 +356,7 @@ impl VM {
}
// Runs until at least the requested number of bytes has been emitted. Returns the number emitted.
fn exec<R: Read, W: Write>(
pub fn exec<R: Read, W: Write>(
&mut self,
r: &mut R,
w: &mut W,

File diff suppressed because it is too large

src/pdata/space_map.rs

@ -49,7 +49,7 @@ impl Unpack for SMRoot {
//------------------------------------------
#[derive(Clone, Debug)]
#[derive(Clone, Copy, Debug)]
pub struct IndexEntry {
pub blocknr: u64,
pub nr_free: u32,
@ -159,8 +159,8 @@ impl Unpack for Bitmap {
fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
let (mut i, header) = BitmapHeader::unpack(data)?;
let mut entries = Vec::new();
let nr_words = (BLOCK_SIZE - BitmapHeader::disk_size() as usize) / 8;
let mut entries = Vec::with_capacity(nr_words * 32);
for _w in 0..nr_words {
let (tmp, mut word) = le_u64(i)?;
@ -225,6 +225,10 @@ pub trait SpaceMap {
fn get_nr_blocks(&self) -> Result<u64>;
fn get_nr_allocated(&self) -> Result<u64>;
fn get(&self, b: u64) -> Result<u32>;
// Returns the old ref count
fn set(&mut self, b: u64, v: u32) -> Result<u32>;
fn inc(&mut self, begin: u64, len: u64) -> Result<()>;
}
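The new set method overwrites a block's reference count and hands back the previous one, with both implementations below keeping nr_allocated in step. A small usage sketch, assuming the core_sm constructor that src/thin/check.rs calls (going by its use there, it returns an Arc<Mutex<dyn SpaceMap + Send + Sync>>); the sizes and the helper name are made up:

```rust
fn space_map_demo() -> anyhow::Result<()> {
    // Hypothetical sizes: 1024 blocks, counts sized for a single device.
    let sm = core_sm(1024, 1);
    let mut sm = sm.lock().unwrap();

    sm.inc(0, 4)?;                         // blocks 0..4 now have a count of 1
    assert_eq!(sm.get(2)?, 1);

    let old = sm.set(2, 0)?;               // set() returns the previous count...
    assert_eq!(old, 1);
    assert_eq!(sm.get_nr_allocated()?, 3); // ...and nr_allocated stays in step
    Ok(())
}
```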
@ -265,6 +269,20 @@ where
Ok(self.counts[b as usize].into())
}
fn set(&mut self, b: u64, v: u32) -> Result<u32> {
let old = self.counts[b as usize];
assert!(v < 0xff); // FIXME: we can't assume this
self.counts[b as usize] = V::from(v as u8);
if old == V::from(0u8) && v != 0 {
self.nr_allocated += 1;
} else if old != V::from(0u8) && v == 0 {
self.nr_allocated -= 1;
}
Ok(old.into())
}
fn inc(&mut self, begin: u64, len: u64) -> Result<()> {
for b in begin..(begin + len) {
if self.counts[b as usize] == V::from(0u8) {
@ -325,6 +343,24 @@ impl SpaceMap for RestrictedSpaceMap {
}
}
fn set(&mut self, b: u64, v: u32) -> Result<u32> {
let old = self.counts.contains(b as usize);
if v > 0 {
if !old {
self.nr_allocated += 1;
}
self.counts.insert(b as usize);
} else {
if old {
self.nr_allocated -= 1;
}
self.counts.set(b as usize, false);
}
Ok(if old {1} else {0})
}
fn inc(&mut self, begin: u64, len: u64) -> Result<()> {
for b in begin..(begin + len) {
if !self.counts.contains(b as usize) {

src/shrink/toplevel.rs

@ -45,6 +45,14 @@ impl xml::MetadataVisitor for Pass1 {
Ok(Visit::Continue)
}
fn def_shared_b(&mut self, _name: &str) -> Result<Visit> {
todo!();
}
fn def_shared_e(&mut self) -> Result<Visit> {
todo!();
}
fn device_b(&mut self, _d: &xml::Device) -> Result<Visit> {
Ok(Visit::Continue)
}
@ -63,6 +71,10 @@ impl xml::MetadataVisitor for Pass1 {
Ok(Visit::Continue)
}
fn ref_shared(&mut self, _name: &str) -> Result<Visit> {
todo!();
}
fn eof(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
@ -96,6 +108,14 @@ impl<W: Write> xml::MetadataVisitor for Pass2<W> {
self.writer.superblock_e()
}
fn def_shared_b(&mut self, _name: &str) -> Result<Visit> {
todo!();
}
fn def_shared_e(&mut self) -> Result<Visit> {
todo!();
}
fn device_b(&mut self, d: &xml::Device) -> Result<Visit> {
self.writer.device_b(d)
}
@ -127,6 +147,10 @@ impl<W: Write> xml::MetadataVisitor for Pass2<W> {
Ok(Visit::Continue)
}
fn ref_shared(&mut self, _name: &str) -> Result<Visit> {
todo!();
}
fn eof(&mut self) -> Result<Visit> {
self.writer.eof()
}

src/thin/block_time.rs (new file, 40 lines)

@ -0,0 +1,40 @@
use nom::{number::complete::*, IResult};
use std::fmt;
use crate::pdata::unpack::*;
//------------------------------------------
#[derive(Clone, Copy)]
pub struct BlockTime {
pub block: u64,
pub time: u32,
}
impl Unpack for BlockTime {
fn disk_size() -> u32 {
8
}
fn unpack(i: &[u8]) -> IResult<&[u8], BlockTime> {
let (i, n) = le_u64(i)?;
let block = n >> 24;
let time = n & ((1 << 24) - 1);
Ok((
i,
BlockTime {
block,
time: time as u32,
},
))
}
}
impl fmt::Display for BlockTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{} @ {}", self.block, self.time)
}
}
//------------------------------------------
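The packed on-disk value keeps the block address in the upper 40 bits and the time in the lower 24. A quick worked example of that split, written as a hypothetical test against the Unpack impl above:

```rust
#[test]
fn unpack_splits_block_and_time() {
    // Packed as (block << 24) | time, stored little-endian: block 5, time 3.
    let n: u64 = (5 << 24) | 3;
    let bytes = n.to_le_bytes();
    let (_rest, bt) = BlockTime::unpack(&bytes).unwrap();
    assert_eq!(bt.block, 5);
    assert_eq!(bt.time, 3);
}
```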

src/thin/check.rs

@ -1,5 +1,4 @@
use anyhow::{anyhow, Result};
use nom::{number::complete::*, IResult};
use std::collections::BTreeMap;
use std::io::Cursor;
use std::path::Path;
@ -8,41 +7,17 @@ use std::thread::{self, JoinHandle};
use threadpool::ThreadPool;
use crate::checksum;
use crate::io_engine::{AsyncIoEngine, Block, IoEngine, SyncIoEngine};
use crate::pdata::btree::{btree_to_map, btree_to_map_with_sm, BTreeWalker, Node, NodeVisitor};
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::btree::{self, *};
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::report::*;
use crate::thin::block_time::*;
use crate::thin::device_detail::*;
use crate::thin::superblock::*;
//------------------------------------------
#[allow(dead_code)]
struct BlockTime {
block: u64,
time: u32,
}
impl Unpack for BlockTime {
fn disk_size() -> u32 {
8
}
fn unpack(i: &[u8]) -> IResult<&[u8], BlockTime> {
let (i, n) = le_u64(i)?;
let block = n >> 24;
let time = n & ((1 << 24) - 1);
Ok((
i,
BlockTime {
block,
time: time as u32,
},
))
}
}
struct BottomLevelVisitor {
data_sm: ASpaceMap,
}
@ -50,72 +25,46 @@ struct BottomLevelVisitor {
//------------------------------------------
impl NodeVisitor<BlockTime> for BottomLevelVisitor {
fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<BlockTime>) -> Result<()> {
fn visit(
&self,
_path: &Vec<u64>,
_kr: &KeyRange,
_h: &NodeHeader,
_k: &[u64],
values: &[BlockTime],
) -> btree::Result<()> {
// FIXME: do other checks
if let Node::Leaf {
header: _h,
keys: _k,
values,
} = node
{
if values.len() == 0 {
return Ok(());
}
let mut data_sm = self.data_sm.lock().unwrap();
let mut start = values[0].block;
let mut len = 1;
for n in 1..values.len() {
let block = values[n].block;
if block == start + len {
len += 1;
} else {
data_sm.inc(start, len)?;
start = block;
len = 1;
}
}
data_sm.inc(start, len)?;
if values.len() == 0 {
return Ok(());
}
let mut data_sm = self.data_sm.lock().unwrap();
let mut start = values[0].block;
let mut len = 1;
for n in 1..values.len() {
let block = values[n].block;
if block == start + len {
len += 1;
} else {
data_sm.inc(start, len).unwrap();
start = block;
len = 1;
}
}
data_sm.inc(start, len).unwrap();
Ok(())
}
}
//------------------------------------------
#[derive(Clone)]
struct DeviceDetail {
mapped_blocks: u64,
transaction_id: u64,
creation_time: u32,
snapshotted_time: u32,
}
impl Unpack for DeviceDetail {
fn disk_size() -> u32 {
24
fn visit_again(&self, _path: &Vec<u64>, _b: u64) -> btree::Result<()> {
Ok(())
}
fn unpack(i: &[u8]) -> IResult<&[u8], DeviceDetail> {
let (i, mapped_blocks) = le_u64(i)?;
let (i, transaction_id) = le_u64(i)?;
let (i, creation_time) = le_u32(i)?;
let (i, snapshotted_time) = le_u32(i)?;
Ok((
i,
DeviceDetail {
mapped_blocks,
transaction_id,
creation_time,
snapshotted_time,
},
))
fn end_walk(&self) -> btree::Result<()> {
Ok(())
}
}
@ -132,26 +81,34 @@ impl<'a> OverflowChecker<'a> {
}
impl<'a> NodeVisitor<u32> for OverflowChecker<'a> {
fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<u32>) -> Result<()> {
if let Node::Leaf {
header: _h,
keys,
values,
} = node
{
for n in 0..keys.len() {
let k = keys[n];
let v = values[n];
let expected = self.data_sm.get(k)?;
if expected != v {
return Err(anyhow!("Bad reference count for data block {}. Expected {}, but space map contains {}.",
k, expected, v));
}
fn visit(
&self,
_path: &Vec<u64>,
_kr: &KeyRange,
_h: &NodeHeader,
keys: &[u64],
values: &[u32],
) -> btree::Result<()> {
for n in 0..keys.len() {
let k = keys[n];
let v = values[n];
let expected = self.data_sm.get(k).unwrap();
if expected != v {
return Err(value_err(format!("Bad reference count for data block {}. Expected {}, but space map contains {}.",
k, expected, v)));
}
}
Ok(())
}
fn visit_again(&self, _path: &Vec<u64>, _b: u64) -> btree::Result<()> {
Ok(())
}
fn end_walk(&self) -> btree::Result<()> {
Ok(())
}
}
//------------------------------------------
@ -163,6 +120,7 @@ struct BitmapLeak {
// This checks the space map and returns any leak blocks for auto-repair to process.
fn check_space_map(
path: &mut Vec<u64>,
ctx: &Context,
kind: &str,
entries: Vec<IndexEntry>,
@ -177,70 +135,77 @@ fn check_space_map(
// overflow btree
{
let mut v = OverflowChecker::new(&*sm);
let mut w;
let v = OverflowChecker::new(&*sm);
let w;
if metadata_sm.is_none() {
w = BTreeWalker::new(engine.clone(), false);
} else {
w = BTreeWalker::new_with_sm(engine.clone(), metadata_sm.unwrap().clone(), false)?;
}
w.walk(&mut v, root.ref_count_root)?;
w.walk(path, &v, root.ref_count_root)?;
}
let mut blocks = Vec::new();
let mut blocks = Vec::with_capacity(entries.len());
for i in &entries {
blocks.push(Block::new(i.blocknr));
blocks.push(i.blocknr);
}
// FIXME: we should do this in batches
engine.read_many(&mut blocks)?;
let blocks = engine.read_many(&mut blocks)?;
let mut leaks = 0;
let mut blocknr = 0;
let mut bitmap_leaks = Vec::new();
for n in 0..entries.len() {
let b = &blocks[n];
if checksum::metadata_block_type(&b.get_data()) != checksum::BT::BITMAP {
report.fatal(&format!(
"Index entry points to block ({}) that isn't a bitmap",
b.loc
));
}
let bitmap = unpack::<Bitmap>(b.get_data())?;
let first_blocknr = blocknr;
let mut contains_leak = false;
for e in bitmap.entries.iter() {
if blocknr >= root.nr_blocks {
break;
match b {
Err(_e) => {
return Err(anyhow!("Unable to read bitmap block"));
}
match e {
BitmapEntry::Small(actual) => {
let expected = sm.get(blocknr)?;
if *actual == 1 && expected == 0 {
leaks += 1;
contains_leak = true;
} else if *actual != expected as u8 {
report.fatal(&format!("Bad reference count for {} block {}. Expected {}, but space map contains {}.",
kind, blocknr, expected, actual));
}
Ok(b) => {
if checksum::metadata_block_type(&b.get_data()) != checksum::BT::BITMAP {
report.fatal(&format!(
"Index entry points to block ({}) that isn't a bitmap",
b.loc
));
}
BitmapEntry::Overflow => {
let expected = sm.get(blocknr)?;
if expected < 3 {
report.fatal(&format!("Bad reference count for {} block {}. Expected {}, but space map says it's >= 3.",
kind, blocknr, expected));
let bitmap = unpack::<Bitmap>(b.get_data())?;
let first_blocknr = blocknr;
let mut contains_leak = false;
for e in bitmap.entries.iter() {
if blocknr >= root.nr_blocks {
break;
}
match e {
BitmapEntry::Small(actual) => {
let expected = sm.get(blocknr)?;
if *actual == 1 && expected == 0 {
leaks += 1;
contains_leak = true;
} else if *actual != expected as u8 {
report.fatal(&format!("Bad reference count for {} block {}. Expected {}, but space map contains {}.",
kind, blocknr, expected, actual));
}
}
BitmapEntry::Overflow => {
let expected = sm.get(blocknr)?;
if expected < 3 {
report.fatal(&format!("Bad reference count for {} block {}. Expected {}, but space map says it's >= 3.",
kind, blocknr, expected));
}
}
}
blocknr += 1;
}
if contains_leak {
bitmap_leaks.push(BitmapLeak {
blocknr: first_blocknr,
loc: b.loc,
});
}
}
blocknr += 1;
}
if contains_leak {
bitmap_leaks.push(BitmapLeak {
blocknr: first_blocknr,
loc: b.loc,
});
}
}
@ -258,38 +223,50 @@ fn repair_space_map(ctx: &Context, entries: Vec<BitmapLeak>, sm: ASpaceMap) -> R
let sm = sm.lock().unwrap();
let mut blocks = Vec::new();
let mut blocks = Vec::with_capacity(entries.len());
for i in &entries {
blocks.push(Block::new(i.loc));
blocks.push(i.loc);
}
// FIXME: we should do this in batches
engine.read_many(&mut blocks)?;
let rblocks = engine.read_many(&blocks[0..])?;
let mut write_blocks = Vec::new();
for (be, b) in entries.iter().zip(blocks.iter()) {
let mut blocknr = be.blocknr;
let mut bitmap = unpack::<Bitmap>(b.get_data())?;
for e in bitmap.entries.iter_mut() {
if blocknr >= sm.get_nr_blocks()? {
break;
}
if let BitmapEntry::Small(actual) = e {
let expected = sm.get(blocknr)?;
if *actual == 1 && expected == 0 {
*e = BitmapEntry::Small(0);
let mut i = 0;
for rb in rblocks {
if rb.is_err() {
return Err(anyhow!("Unable to reread bitmap blocks for repair"));
} else {
let b = rb.unwrap();
let be = &entries[i];
let mut blocknr = be.blocknr;
let mut bitmap = unpack::<Bitmap>(b.get_data())?;
for e in bitmap.entries.iter_mut() {
if blocknr >= sm.get_nr_blocks()? {
break;
}
if let BitmapEntry::Small(actual) = e {
let expected = sm.get(blocknr)?;
if *actual == 1 && expected == 0 {
*e = BitmapEntry::Small(0);
}
}
blocknr += 1;
}
blocknr += 1;
let mut out = Cursor::new(b.get_data());
bitmap.pack(&mut out)?;
checksum::write_checksum(b.get_data(), checksum::BT::BITMAP)?;
write_blocks.push(b);
}
let mut out = Cursor::new(b.get_data());
bitmap.pack(&mut out)?;
checksum::write_checksum(b.get_data(), checksum::BT::BITMAP)?;
i += 1;
}
engine.write_many(&blocks)?;
engine.write_many(&write_blocks[0..])?;
Ok(())
}
@ -316,6 +293,8 @@ const MAX_CONCURRENT_IO: u32 = 1024;
pub struct ThinCheckOptions<'a> {
pub dev: &'a Path,
pub async_io: bool,
pub sb_only: bool,
pub skip_mappings: bool,
pub ignore_non_fatal: bool,
pub auto_repair: bool,
pub report: Arc<Report>,
@ -368,30 +347,63 @@ fn check_mapping_bottom_level(
ctx: &Context,
metadata_sm: &Arc<Mutex<dyn SpaceMap + Send + Sync>>,
data_sm: &Arc<Mutex<dyn SpaceMap + Send + Sync>>,
roots: &BTreeMap<u64, u64>,
roots: &BTreeMap<u64, (Vec<u64>, u64)>,
) -> Result<()> {
ctx.report.set_sub_title("mapping tree");
for (_thin_id, root) in roots {
let mut w = BTreeWalker::new_with_sm(ctx.engine.clone(), metadata_sm.clone(), false)?;
let data_sm = data_sm.clone();
let root = *root;
ctx.pool.execute(move || {
let mut v = BottomLevelVisitor { data_sm };
let w = Arc::new(BTreeWalker::new_with_sm(
ctx.engine.clone(),
metadata_sm.clone(),
false,
)?);
// FIXME: return error
match w.walk(&mut v, root) {
Err(e) => {
eprintln!("walk failed {:?}", e);
std::process::abort();
// We want to print out errors as we progress, so we aggregate for each thin and print
// at that point.
let mut failed = false;
if roots.len() > 64 {
let errs = Arc::new(Mutex::new(Vec::new()));
for (_thin_id, (path, root)) in roots {
let data_sm = data_sm.clone();
let root = *root;
let v = BottomLevelVisitor { data_sm };
let w = w.clone();
let mut path = path.clone();
let errs = errs.clone();
ctx.pool.execute(move || {
if let Err(e) = w.walk(&mut path, &v, root) {
let mut errs = errs.lock().unwrap();
errs.push(e);
}
Ok(_result) => {}
}
});
}
ctx.pool.join();
});
}
ctx.pool.join();
let errs = Arc::try_unwrap(errs).unwrap().into_inner().unwrap();
if errs.len() > 0 {
ctx.report.fatal(&format!("{}", aggregate_error(errs)));
failed = true;
}
} else {
for (_thin_id, (path, root)) in roots {
let w = w.clone();
let data_sm = data_sm.clone();
let root = *root;
let v = Arc::new(BottomLevelVisitor { data_sm });
let mut path = path.clone();
Ok(())
if let Err(e) = walk_threaded(&mut path, w, &ctx.pool, v, root) {
failed = true;
ctx.report.fatal(&format!("{}", e));
}
}
}
if failed {
Err(anyhow!("Check of mappings failed"))
} else {
Ok(())
}
}
fn mk_context(opts: &ThinCheckOptions) -> Result<Context> {
@ -400,9 +412,13 @@ fn mk_context(opts: &ThinCheckOptions) -> Result<Context> {
if opts.async_io {
nr_threads = std::cmp::min(4, num_cpus::get());
engine = Arc::new(AsyncIoEngine::new(opts.dev, MAX_CONCURRENT_IO, opts.auto_repair)?);
engine = Arc::new(AsyncIoEngine::new(
opts.dev,
MAX_CONCURRENT_IO,
opts.auto_repair,
)?);
} else {
nr_threads = num_cpus::get() * 2;
nr_threads = std::cmp::max(8, num_cpus::get() * 2);
engine = Arc::new(SyncIoEngine::new(opts.dev, nr_threads, opts.auto_repair)?);
}
let pool = ThreadPool::new(nr_threads);
@ -437,18 +453,28 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
// superblock
let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;
report.info(&format!("TRANSACTION_ID={}", sb.transaction_id));
if opts.sb_only {
return Ok(());
}
let metadata_root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
let mut path = Vec::new();
path.push(0);
// Device details. We read this once to get the number of thin devices, and hence the
// maximum metadata ref count. Then create metadata space map, and reread to increment
// the ref counts for that metadata.
let devs = btree_to_map::<DeviceDetail>(engine.clone(), false, sb.details_root)?;
let devs = btree_to_map::<DeviceDetail>(&mut path, engine.clone(), false, sb.details_root)?;
let nr_devs = devs.len();
let metadata_sm = core_sm(engine.get_nr_blocks(), nr_devs as u32);
inc_superblock(&metadata_sm)?;
report.set_sub_title("device details tree");
let _devs = btree_to_map_with_sm::<DeviceDetail>(
&mut path,
engine.clone(),
metadata_sm.clone(),
false,
@ -462,11 +488,20 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
)?;
// mapping top level
let roots =
btree_to_map_with_sm::<u64>(engine.clone(), metadata_sm.clone(), false, sb.mapping_root)?;
report.set_sub_title("mapping tree");
let roots = btree_to_map_with_path::<u64>(
&mut path,
engine.clone(),
metadata_sm.clone(),
false,
sb.mapping_root,
)?;
if opts.skip_mappings {
return Ok(());
}
// mapping bottom level
report.set_sub_title("mapping tree");
let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
let data_sm = core_sm(root.nr_blocks, nr_devs as u32);
check_mapping_bottom_level(&ctx, &metadata_sm, &data_sm, &roots)?;
@ -476,6 +511,7 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
let entries = btree_to_map_with_sm::<IndexEntry>(
&mut path,
engine.clone(),
metadata_sm.clone(),
false,
@ -485,6 +521,7 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
inc_entries(&metadata_sm, &entries[0..])?;
let data_leaks = check_space_map(
&mut path,
&ctx,
"data",
entries,
@ -496,8 +533,12 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
report.set_sub_title("metadata space map");
let root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
let mut b = Block::new(root.bitmap_root);
engine.read(&mut b)?;
report.info(&format!(
"METADATA_FREE_BLOCKS={}",
root.nr_blocks - root.nr_allocated
));
let b = engine.read(root.bitmap_root)?;
metadata_sm.lock().unwrap().inc(root.bitmap_root, 1)?;
let entries = unpack::<MetadataIndex>(b.get_data())?.indexes;
@ -512,6 +553,7 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
// We call this for the side effect of incrementing the ref counts
// for the metadata that holds the tree.
let _counts = btree_to_map_with_sm::<u32>(
&mut path,
engine.clone(),
metadata_sm.clone(),
false,
@ -519,28 +561,34 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
)?;
// Now the counts should be correct and we can check it.
let metadata_leaks = check_space_map(&ctx, "metadata", entries, None, metadata_sm.clone(), root)?;
let metadata_leaks = check_space_map(
&mut path,
&ctx,
"metadata",
entries,
None,
metadata_sm.clone(),
root,
)?;
bail_out(&ctx, "metadata space map")?;
if opts.auto_repair {
if data_leaks.len() > 0 {
ctx.report.info("Repairing data leaks.");
repair_space_map(&ctx, data_leaks, data_sm.clone());
repair_space_map(&ctx, data_leaks, data_sm.clone())?;
}
if metadata_leaks.len() > 0 {
ctx.report.info("Repairing metadata leaks.");
repair_space_map(&ctx, metadata_leaks, metadata_sm.clone());
repair_space_map(&ctx, metadata_leaks, metadata_sm.clone())?;
}
}
// Completing consumes the report.
{
let mut stop_progress = stop_progress.lock().unwrap();
*stop_progress = true;
}
tid.join();
bail_out(&ctx, "metadata space map")?;
tid.join().unwrap();
Ok(())
}

37
src/thin/device_detail.rs Normal file
View File

@ -0,0 +1,37 @@
use crate::pdata::unpack::*;
use nom::{number::complete::*, IResult};
//------------------------------------------
#[derive(Clone, Copy)]
pub struct DeviceDetail {
pub mapped_blocks: u64,
pub transaction_id: u64,
pub creation_time: u32,
pub snapshotted_time: u32,
}
impl Unpack for DeviceDetail {
fn disk_size() -> u32 {
24
}
fn unpack(i: &[u8]) -> IResult<&[u8], DeviceDetail> {
let (i, mapped_blocks) = le_u64(i)?;
let (i, transaction_id) = le_u64(i)?;
let (i, creation_time) = le_u32(i)?;
let (i, snapshotted_time) = le_u32(i)?;
Ok((
i,
DeviceDetail {
mapped_blocks,
transaction_id,
creation_time,
snapshotted_time,
},
))
}
}
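As an illustration only (not part of device_detail.rs): given the Unpack impl above, decoding a DeviceDetail from a raw 24-byte little-endian buffer could look like the sketch below; the helper name and the field values are made up.

    // Hypothetical usage sketch for DeviceDetail::unpack.
    fn demo_unpack_device_detail() -> anyhow::Result<()> {
        let mut buf = Vec::new();
        buf.extend_from_slice(&1024u64.to_le_bytes()); // mapped_blocks
        buf.extend_from_slice(&7u64.to_le_bytes());    // transaction_id
        buf.extend_from_slice(&1u32.to_le_bytes());    // creation_time
        buf.extend_from_slice(&2u32.to_le_bytes());    // snapshotted_time
        assert_eq!(buf.len() as u32, DeviceDetail::disk_size());

        let (_rest, detail) = DeviceDetail::unpack(&buf)
            .map_err(|_| anyhow::anyhow!("couldn't unpack device detail"))?;
        assert_eq!(detail.mapped_blocks, 1024);
        assert_eq!(detail.snapshotted_time, 2);
        Ok(())
    }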
//------------------------------------------

323
src/thin/dump.rs Normal file
View File

@ -0,0 +1,323 @@
use anyhow::Result;
use std::collections::{BTreeMap, BTreeSet};
use std::path::Path;
use std::sync::{Arc, Mutex};
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::btree::{self, *};
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::report::*;
use crate::thin::block_time::*;
use crate::thin::device_detail::*;
use crate::thin::superblock::*;
use crate::thin::xml::{self, MetadataVisitor};
//------------------------------------------
struct RunBuilder {
run: Option<xml::Map>,
}
impl RunBuilder {
fn new() -> RunBuilder {
RunBuilder { run: None }
}
fn next(&mut self, thin_block: u64, data_block: u64, time: u32) -> Option<xml::Map> {
use xml::Map;
match self.run {
None => {
self.run = Some(xml::Map {
thin_begin: thin_block,
data_begin: data_block,
time: time,
len: 1,
});
None
}
Some(xml::Map {
thin_begin,
data_begin,
time: mtime,
len,
}) => {
if thin_block == (thin_begin + len)
&& data_block == (data_begin + len)
&& mtime == time
{
self.run.as_mut().unwrap().len += 1;
None
} else {
self.run.replace(Map {
thin_begin: thin_block,
data_begin: data_block,
time: time,
len: 1,
})
}
}
}
}
fn complete(&mut self) -> Option<xml::Map> {
self.run.take()
}
}
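Not part of the patch, but to make the coalescing rule concrete: contiguous mappings with the same time collapse into a single run, and the first discontinuity flushes it. A small sketch under that assumption:

    // Illustrative sketch of RunBuilder's coalescing behaviour.
    fn demo_run_builder() {
        let mut builder = RunBuilder::new();
        assert!(builder.next(0, 100, 1).is_none()); // starts a run
        assert!(builder.next(1, 101, 1).is_none()); // extends it
        assert!(builder.next(2, 102, 1).is_none()); // extends it

        // A discontinuity emits the finished run and starts a new one.
        let run = builder.next(10, 200, 1).unwrap();
        assert_eq!(run.thin_begin, 0);
        assert_eq!(run.data_begin, 100);
        assert_eq!(run.len, 3);

        // complete() flushes whatever is left.
        assert_eq!(builder.complete().unwrap().thin_begin, 10);
    }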
//------------------------------------------
struct MVInner<'a> {
md_out: &'a mut dyn xml::MetadataVisitor,
builder: RunBuilder,
}
struct MappingVisitor<'a> {
inner: Mutex<MVInner<'a>>,
}
//------------------------------------------
impl<'a> MappingVisitor<'a> {
fn new(md_out: &'a mut dyn xml::MetadataVisitor) -> MappingVisitor<'a> {
MappingVisitor {
inner: Mutex::new(MVInner {
md_out,
builder: RunBuilder::new(),
}),
}
}
}
impl<'a> NodeVisitor<BlockTime> for MappingVisitor<'a> {
fn visit(
&self,
_path: &Vec<u64>,
_kr: &KeyRange,
_h: &NodeHeader,
keys: &[u64],
values: &[BlockTime],
) -> btree::Result<()> {
let mut inner = self.inner.lock().unwrap();
for (k, v) in keys.iter().zip(values.iter()) {
if let Some(run) = inner.builder.next(*k, v.block, v.time) {
inner
.md_out
.map(&run)
.map_err(|e| btree::value_err(format!("{}", e)))?;
}
}
Ok(())
}
fn visit_again(&self, _path: &Vec<u64>, b: u64) -> btree::Result<()> {
let mut inner = self.inner.lock().unwrap();
inner
.md_out
.ref_shared(&format!("{}", b))
.map_err(|e| btree::value_err(format!("{}", e)))?;
Ok(())
}
fn end_walk(&self) -> btree::Result<()> {
let mut inner = self.inner.lock().unwrap();
if let Some(run) = inner.builder.complete() {
inner
.md_out
.map(&run)
.map_err(|e| btree::value_err(format!("{}", e)))?;
}
Ok(())
}
}
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
pub struct ThinDumpOptions<'a> {
pub dev: &'a Path,
pub async_io: bool,
pub report: Arc<Report>,
}
struct Context {
report: Arc<Report>,
engine: Arc<dyn IoEngine + Send + Sync>,
}
fn mk_context(opts: &ThinDumpOptions) -> Result<Context> {
let engine: Arc<dyn IoEngine + Send + Sync>;
if opts.async_io {
engine = Arc::new(AsyncIoEngine::new(opts.dev, MAX_CONCURRENT_IO, false)?);
} else {
let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
engine = Arc::new(SyncIoEngine::new(opts.dev, nr_threads, false)?);
}
Ok(Context {
report: opts.report.clone(),
engine,
})
}
//------------------------------------------
struct NoopVisitor {}
impl<V: Unpack> btree::NodeVisitor<V> for NoopVisitor {
fn visit(
&self,
_path: &Vec<u64>,
_kr: &btree::KeyRange,
_h: &btree::NodeHeader,
_k: &[u64],
_values: &[V],
) -> btree::Result<()> {
Ok(())
}
fn visit_again(&self, _path: &Vec<u64>, _b: u64) -> btree::Result<()> {
Ok(())
}
fn end_walk(&self) -> btree::Result<()> {
Ok(())
}
}
fn find_shared_nodes(
ctx: &Context,
nr_metadata_blocks: u64,
roots: &BTreeMap<u64, u64>,
) -> Result<(BTreeSet<u64>, Arc<Mutex<dyn SpaceMap + Send + Sync>>)> {
// By default the walker uses a restricted space map that can only count to 1. So
// we explicitly create a full sm.
let sm = core_sm(nr_metadata_blocks, roots.len() as u32);
let w = BTreeWalker::new_with_sm(ctx.engine.clone(), sm.clone(), false)?;
let mut path = Vec::new();
path.push(0);
for (thin_id, root) in roots {
ctx.report.info(&format!("scanning {}", thin_id));
let v = NoopVisitor {};
w.walk::<NoopVisitor, BlockTime>(&mut path, &v, *root)?;
}
let mut shared = BTreeSet::new();
{
let sm = sm.lock().unwrap();
for i in 0..sm.get_nr_blocks().unwrap() {
if sm.get(i).expect("couldn't get count from space map.") > 1 {
shared.insert(i);
}
}
}
    Ok((shared, sm))
}
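The restricted space map mentioned in the comment above saturates at a count of one, which is why a full core_sm is needed here: sharing is detected purely by a reference count climbing above one. A hypothetical sketch of that test, assuming core_sm and the SpaceMap trait behave as they are used in this function:

    // Illustrative only: blocks referenced by more than one device tree are shared.
    fn demo_shared_detection() -> anyhow::Result<()> {
        let sm = core_sm(16, 2); // 16 blocks, counts may reach the number of devices (2)
        {
            let mut sm = sm.lock().unwrap();
            sm.inc(3, 1)?; // block 3 seen while walking the first tree...
            sm.inc(3, 1)?; // ...and again from a second tree
            sm.inc(5, 1)?; // block 5 seen only once
        }
        let sm = sm.lock().unwrap();
        assert!(sm.get(3)? > 1);  // shared
        assert!(sm.get(5)? == 1); // exclusive
        Ok(())
    }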
//------------------------------------------
fn dump_node(
ctx: &Context,
out: &mut dyn xml::MetadataVisitor,
root: u64,
sm: &Arc<Mutex<dyn SpaceMap + Send + Sync>>,
force: bool, // sets the ref count for the root to zero to force output.
) -> Result<()> {
let w = BTreeWalker::new_with_sm(ctx.engine.clone(), sm.clone(), false)?;
let mut path = Vec::new();
path.push(0);
let v = MappingVisitor::new(out);
// Temporarily set the ref count for the root to zero.
let mut old_count = 0;
if force {
let mut sm = sm.lock().unwrap();
old_count = sm.get(root).unwrap();
sm.set(root, 0)?;
}
w.walk::<MappingVisitor, BlockTime>(&mut path, &v, root)?;
// Reset the ref count for root.
if force {
let mut sm = sm.lock().unwrap();
sm.set(root, old_count)?;
}
Ok(())
}
//------------------------------------------
pub fn dump(opts: ThinDumpOptions) -> Result<()> {
let ctx = mk_context(&opts)?;
let report = &ctx.report;
let engine = &ctx.engine;
// superblock
report.set_title("Reading superblock");
let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;
let metadata_root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
let data_root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
let mut path = Vec::new();
path.push(0);
report.set_title("Reading device details");
let devs = btree_to_map::<DeviceDetail>(&mut path, engine.clone(), true, sb.details_root)?;
report.set_title("Reading mappings roots");
let roots = btree_to_map::<u64>(&mut path, engine.clone(), true, sb.mapping_root)?;
report.set_title("Finding shared mappings");
let (shared, sm) = find_shared_nodes(&ctx, metadata_root.nr_blocks, &roots)?;
report.info(&format!("{} shared nodes found", shared.len()));
let mut out = xml::XmlWriter::new(std::io::stdout());
let xml_sb = xml::Superblock {
uuid: "".to_string(),
time: sb.time as u64,
transaction: sb.transaction_id,
flags: None,
version: Some(2),
data_block_size: sb.data_block_size,
nr_data_blocks: data_root.nr_blocks,
metadata_snap: None,
};
out.superblock_b(&xml_sb)?;
report.set_title("Dumping shared regions");
for b in shared {
out.def_shared_b(&format!("{}", b))?;
dump_node(&ctx, &mut out, b, &sm, true)?;
out.def_shared_e()?;
}
report.set_title("Dumping mappings");
for (thin_id, detail) in devs {
let d = xml::Device {
dev_id: thin_id as u32,
mapped_blocks: detail.mapped_blocks,
transaction: detail.transaction_id,
creation_time: detail.creation_time as u64,
snap_time: detail.snapshotted_time as u64,
};
out.device_b(&d)?;
let root = roots.get(&thin_id).unwrap();
dump_node(&ctx, &mut out, *root, &sm, false)?;
out.device_e()?;
}
out.superblock_e()?;
Ok(())
}
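A hypothetical driver for the new dump entry point, just to show how ThinDumpOptions is filled in; the caller supplies the Report, and constructing one is out of scope here.

    // Illustrative sketch, not part of the patch.
    fn run_dump(dev: &std::path::Path, report: std::sync::Arc<Report>) -> anyhow::Result<()> {
        dump(ThinDumpOptions {
            dev,
            async_io: false, // use the sync engine with a small thread pool
            report,
        })
    }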
//------------------------------------------

View File

@ -1,3 +1,6 @@
pub mod block_time;
pub mod device_detail;
pub mod superblock;
pub mod check;
pub mod dump;
pub mod xml;

View File

@ -1,17 +1,27 @@
use crate::io_engine::*;
use anyhow::{anyhow, Result};
use nom::{bytes::complete::*, number::complete::*, IResult};
use std::fmt;
pub const SUPERBLOCK_LOCATION: u64 = 0;
//const UUID_SIZE: usize = 16;
const SPACE_MAP_ROOT_SIZE: usize = 128;
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct SuperblockFlags {
pub needs_check: bool,
}
#[derive(Debug)]
impl fmt::Display for SuperblockFlags {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.needs_check {
write!(f, "NEEDS_CHECK")
} else {
write!(f, "-")
}
}
}
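A quick illustration (not part of superblock.rs) of what the new Display impl produces:

    // Illustrative sketch of the Display output.
    fn demo_flags_display() {
        assert_eq!(format!("{}", SuperblockFlags { needs_check: true }), "NEEDS_CHECK");
        assert_eq!(format!("{}", SuperblockFlags { needs_check: false }), "-");
    }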
#[derive(Debug, Clone)]
pub struct Superblock {
pub flags: SuperblockFlags,
pub block: u64,
@ -76,7 +86,9 @@ fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
Ok((
i,
Superblock {
flags: SuperblockFlags {needs_check: (flags & 0x1) != 0},
flags: SuperblockFlags {
needs_check: (flags & 0x1) != 0,
},
block,
//uuid: uuid[0..UUID_SIZE],
version,
@ -93,8 +105,7 @@ fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
}
pub fn read_superblock(engine: &dyn IoEngine, loc: u64) -> Result<Superblock> {
let mut b = Block::new(loc);
engine.read(&mut b)?;
let b = engine.read(loc)?;
if let Ok((_, sb)) = unpack(&b.get_data()) {
Ok(sb)

View File

@ -46,10 +46,14 @@ pub trait MetadataVisitor {
fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit>;
fn superblock_e(&mut self) -> Result<Visit>;
fn def_shared_b(&mut self, name: &str) -> Result<Visit>;
fn def_shared_e(&mut self) -> Result<Visit>;
fn device_b(&mut self, d: &Device) -> Result<Visit>;
fn device_e(&mut self) -> Result<Visit>;
fn map(&mut self, m: &Map) -> Result<Visit>;
fn ref_shared(&mut self, name: &str) -> Result<Visit>;
fn eof(&mut self) -> Result<Visit>;
}
@ -110,6 +114,19 @@ impl<W: Write> MetadataVisitor for XmlWriter<W> {
Ok(Visit::Continue)
}
fn def_shared_b(&mut self, name: &str) -> Result<Visit> {
let tag = b"def";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"name", name));
self.w.write_event(Event::Start(elem))?;
Ok(Visit::Continue)
}
fn def_shared_e(&mut self) -> Result<Visit> {
self.w.write_event(Event::End(BytesEnd::borrowed(b"def")))?;
Ok(Visit::Continue)
}
fn device_b(&mut self, d: &Device) -> Result<Visit> {
let tag = b"device";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
@ -151,6 +168,14 @@ impl<W: Write> MetadataVisitor for XmlWriter<W> {
Ok(Visit::Continue)
}
fn ref_shared(&mut self, name: &str) -> Result<Visit> {
let tag = b"ref";
let mut elem = BytesStart::owned(tag.to_vec(), tag.len());
elem.push_attribute(mk_attr(b"name", name));
self.w.write_event(Event::Empty(elem))?;
Ok(Visit::Continue)
}
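For context, a hedged sketch of how the new def/ref hooks are meant to be driven; the XML in the comment is only the rough shape of the output, and the mappings themselves would be written by map():

    // Illustrative sketch, not part of the patch. Shared subtrees are dumped once
    // inside <def name="..."> ... </def> and later referenced with <ref name="..."/>.
    fn demo_def_ref() -> anyhow::Result<()> {
        let mut w = XmlWriter::new(std::io::stdout());
        w.def_shared_b("1234")?;
        // ... mappings for the shared subtree would be written here via w.map(...) ...
        w.def_shared_e()?;
        w.ref_shared("1234")?;
        Ok(())
    }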
fn eof(&mut self) -> Result<Visit> {
let w = self.w.inner();
w.flush()?;
@ -379,6 +404,14 @@ impl MetadataVisitor for SBVisitor {
Ok(Visit::Continue)
}
fn def_shared_b(&mut self, _name: &str) -> Result<Visit> {
Ok(Visit::Continue)
}
fn def_shared_e(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
fn device_b(&mut self, _d: &Device) -> Result<Visit> {
Ok(Visit::Continue)
}
@ -390,6 +423,10 @@ impl MetadataVisitor for SBVisitor {
Ok(Visit::Continue)
}
fn ref_shared(&mut self, _name: &str) -> Result<Visit> {
Ok(Visit::Continue)
}
fn eof(&mut self) -> Result<Visit> {
Ok(Visit::Stop)
}

View File

@ -103,6 +103,14 @@ impl<'a, V: ThinVisitor> xml::MetadataVisitor for ThinXmlVisitor<'a, V> {
Ok(Visit::Continue)
}
fn def_shared_b(&mut self, _name: &str) -> Result<Visit> {
todo!();
}
fn def_shared_e(&mut self) -> Result<Visit> {
todo!();
}
fn device_b(&mut self, d: &xml::Device) -> Result<Visit> {
self.thin_id = Some(d.dev_id);
Ok(Visit::Continue)
@ -125,6 +133,10 @@ impl<'a, V: ThinVisitor> xml::MetadataVisitor for ThinXmlVisitor<'a, V> {
Ok(Visit::Continue)
}
fn ref_shared(&mut self, _name: &str) -> Result<Visit> {
todo!();
}
fn eof(&mut self) -> Result<Visit> {
Ok(Visit::Stop)
}