[all] Apply cargo fmt, and fix clippy warnings

Ming-Hung Tsai 2021-05-11 20:41:30 +08:00
parent 0553a78c04
commit 965fbb6e8f
19 changed files with 166 additions and 124 deletions


@@ -6,8 +6,8 @@ use clap::{App, Arg};
 use std::path::Path;
 use std::sync::Arc;
-use thinp::report::*;
 use thinp::cache::check::{check, CacheCheckOptions};
+use thinp::report::*;
 
 //------------------------------------------


@@ -106,14 +106,19 @@ fn main() {
     let engine: Arc<dyn IoEngine + Send + Sync>;
 
     if matches.is_present("ASYNC_IO") {
-        engine = Arc::new(AsyncIoEngine::new(&input_file, MAX_CONCURRENT_IO, false).expect("unable to open input file"));
+        engine = Arc::new(
+            AsyncIoEngine::new(&input_file, MAX_CONCURRENT_IO, false)
+                .expect("unable to open input file"),
+        );
     } else {
         let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
-        engine = Arc::new(SyncIoEngine::new(&input_file, nr_threads, false).expect("unable to open input file"));
+        engine = Arc::new(
+            SyncIoEngine::new(&input_file, nr_threads, false).expect("unable to open input file"),
+        );
     }
 
     let opts = ThinCheckOptions {
-        engine: engine,
+        engine,
         sb_only: matches.is_present("SB_ONLY"),
         skip_mappings: matches.is_present("SKIP_MAPPINGS"),
         ignore_non_fatal: matches.is_present("IGNORE_NON_FATAL"),
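Note: the `engine: engine` → `engine` change clears clippy's `redundant_field_names` lint, which fires when a struct field is initialized from a variable of the same name. A minimal sketch, with an illustrative struct rather than the real `ThinCheckOptions`:

```rust
// Sketch of clippy's redundant_field_names lint (illustrative types).
struct Options {
    engine: String,
    sb_only: bool,
}

fn main() {
    let engine = String::from("sync");
    let sb_only = false;

    // clippy warns here: redundant field names in struct initialization
    // let opts = Options { engine: engine, sb_only: sb_only };

    // The shorthand clippy suggests:
    let opts = Options { engine, sb_only };
    println!("{} {}", opts.engine, opts.sb_only);
}
```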


@@ -75,15 +75,13 @@ impl Events {
         let ignore_exit_key = ignore_exit_key.clone();
         thread::spawn(move || {
             let stdin = io::stdin();
-            for evt in stdin.keys() {
-                if let Ok(key) = evt {
-                    if let Err(err) = tx.send(Event::Input(key)) {
-                        eprintln!("{}", err);
-                        return;
-                    }
-                    if !ignore_exit_key.load(Ordering::Relaxed) && key == config.exit_key {
-                        return;
-                    }
-                }
-            }
+            for key in stdin.keys().flatten() {
+                if let Err(err) = tx.send(Event::Input(key)) {
+                    eprintln!("{}", err);
+                    return;
+                }
+                if !ignore_exit_key.load(Ordering::Relaxed) && key == config.exit_key {
+                    return;
+                }
+            }
         })
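Note: rewriting `for evt in stdin.keys() { if let Ok(key) = evt { ... } }` as `for key in stdin.keys().flatten()` clears clippy's `manual_flatten` lint: `Result` (like `Option`) is an iterator of zero or one item, so flattening an iterator of `Result`s yields just the `Ok` values. A standalone sketch:

```rust
// Sketch of the manual_flatten fix on an iterator of Results.
fn main() {
    let events: Vec<Result<char, String>> = vec![Ok('a'), Err("eof".into()), Ok('q')];

    // clippy::manual_flatten flags this shape:
    // for evt in events.iter() {
    //     if let Ok(key) = evt { println!("{}", key); }
    // }

    // flatten() skips the Errs and yields the Ok payloads directly.
    for key in events.into_iter().flatten() {
        println!("{}", key);
    }
}
```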
@@ -91,8 +89,8 @@ impl Events {
         Events {
             rx,
-            ignore_exit_key,
             input_handle,
+            ignore_exit_key,
         }
     }

src/cache/check.rs

@@ -41,17 +41,26 @@ mod format1 {
     impl MappingChecker {
         pub fn new(nr_origin_blocks: Option<u64>) -> MappingChecker {
             MappingChecker {
-                nr_origin_blocks: if let Some(n) = nr_origin_blocks {n} else {MAX_ORIGIN_BLOCKS},
+                nr_origin_blocks: if let Some(n) = nr_origin_blocks {
+                    n
+                } else {
+                    MAX_ORIGIN_BLOCKS
+                },
                 seen_oblocks: Mutex::new(BTreeSet::new()),
             }
         }
 
         fn check_flags(&self, m: &Mapping) -> array::Result<()> {
             if (m.flags & !(MappingFlags::Valid as u32 | MappingFlags::Dirty as u32)) != 0 {
-                return Err(array::value_err(format!("unknown flags in mapping: {}", m.flags)));
+                return Err(array::value_err(format!(
+                    "unknown flags in mapping: {}",
+                    m.flags
+                )));
             }
             if !m.is_valid() && m.is_dirty() {
-                return Err(array::value_err(format!("dirty bit found on an unmapped block")));
+                return Err(array::value_err(
+                    "dirty bit found on an unmapped block".to_string(),
+                ));
             }
             Ok(())
         }
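Note: several hunks in this file replace `format!("some literal")` with `"some literal".to_string()`, clearing clippy's `useless_format` lint; `format!` without interpolation is just a slower way to build a `String`. A sketch:

```rust
// Sketch of clippy's useless_format lint.
fn main() {
    // clippy warns: useless use of format! (nothing is interpolated)
    // let msg = format!("dirty bit found on an unmapped block");
    let msg = "dirty bit found on an unmapped block".to_string();

    // format! is still the right tool once arguments are interpolated:
    let flags = 0x4u32;
    let detailed = format!("unknown flags in mapping: {}", flags);

    println!("{}\n{}", msg, detailed);
}
```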
@@ -59,16 +68,18 @@ mod format1 {
         fn check_oblock(&self, m: &Mapping) -> array::Result<()> {
             if !m.is_valid() {
                 if m.oblock > 0 {
-                    return Err(array::value_err(format!("invalid block is mapped")));
+                    return Err(array::value_err("invalid block is mapped".to_string()));
                 }
                 return Ok(());
             }
             if m.oblock >= self.nr_origin_blocks {
-                return Err(array::value_err(format!("mapping beyond end of the origin device")));
+                return Err(array::value_err(
+                    "mapping beyond end of the origin device".to_string(),
+                ));
             }
             let mut seen_oblocks = self.seen_oblocks.lock().unwrap();
             if seen_oblocks.contains(&m.oblock) {
-                return Err(array::value_err(format!("origin block already mapped")));
+                return Err(array::value_err("origin block already mapped".to_string()));
             }
             seen_oblocks.insert(m.oblock);
             Ok(())
@@ -93,12 +104,8 @@ mod format1 {
             // FIXME: duplicate to BTreeWalker::build_aggregrate()
             match errs.len() {
                 0 => Ok(()),
-                1 => {
-                    Err(errs[0].clone())
-                }
-                _ => {
-                    Err(array::aggregate_error(errs))
-                }
+                1 => Err(errs[0].clone()),
+                _ => Err(array::aggregate_error(errs)),
             }
         }
     }
@@ -120,7 +127,11 @@ mod format2 {
     impl MappingChecker {
         pub fn new(nr_origin_blocks: Option<u64>, dirty_bits: CheckedBitSet) -> MappingChecker {
             MappingChecker {
-                nr_origin_blocks: if let Some(n) = nr_origin_blocks {n} else {MAX_ORIGIN_BLOCKS},
+                nr_origin_blocks: if let Some(n) = nr_origin_blocks {
+                    n
+                } else {
+                    MAX_ORIGIN_BLOCKS
+                },
                 inner: Mutex::new(Inner {
                     seen_oblocks: BTreeSet::new(),
                     dirty_bits,
@@ -130,10 +141,15 @@ mod format2 {
         fn check_flags(&self, m: &Mapping, dirty_bit: Option<bool>) -> array::Result<()> {
             if (m.flags & !(MappingFlags::Valid as u32)) != 0 {
-                return Err(array::value_err(format!("unknown flags in mapping: {}", m.flags)));
+                return Err(array::value_err(format!(
+                    "unknown flags in mapping: {}",
+                    m.flags
+                )));
             }
 
             if !m.is_valid() && dirty_bit.is_some() && dirty_bit.unwrap() {
-                return Err(array::value_err(format!("dirty bit found on an unmapped block")));
+                return Err(array::value_err(
+                    "dirty bit found on an unmapped block".to_string(),
+                ));
             }
             Ok(())
         }
@@ -141,15 +157,17 @@ mod format2 {
         fn check_oblock(&self, m: &Mapping, seen_oblocks: &mut BTreeSet<u64>) -> array::Result<()> {
             if !m.is_valid() {
                 if m.oblock > 0 {
-                    return Err(array::value_err(format!("invalid mapped block")));
+                    return Err(array::value_err("invalid mapped block".to_string()));
                 }
                 return Ok(());
             }
 
             if m.oblock >= self.nr_origin_blocks {
-                return Err(array::value_err(format!("mapping beyond end of the origin device")));
+                return Err(array::value_err(
+                    "mapping beyond end of the origin device".to_string(),
+                ));
             }
             if seen_oblocks.contains(&m.oblock) {
-                return Err(array::value_err(format!("origin block already mapped")));
+                return Err(array::value_err("origin block already mapped".to_string()));
             }
             seen_oblocks.insert(m.oblock);
@@ -166,7 +184,8 @@ mod format2 {
             for i in 0..b.header.nr_entries {
                 let m = b.values[i as usize];
 
-                if let Err(e) = self.check_flags(&m, inner.dirty_bits.contains(begin + i as usize)) {
+                if let Err(e) = self.check_flags(&m, inner.dirty_bits.contains(begin + i as usize))
+                {
                     errs.push(e);
                 }
                 if let Err(e) = self.check_oblock(&m, &mut inner.seen_oblocks) {
@@ -177,12 +196,8 @@ mod format2 {
             // FIXME: duplicate to BTreeWalker::build_aggregrate()
             match errs.len() {
                 0 => Ok(()),
-                1 => {
-                    Err(errs[0].clone())
-                }
-                _ => {
-                    Err(array::aggregate_error(errs))
-                }
+                1 => Err(errs[0].clone()),
+                _ => Err(array::aggregate_error(errs)),
             }
         }
     }
@@ -273,7 +288,8 @@ pub fn check(opts: CacheCheckOptions) -> anyhow::Result<()> {
     // TODO: factor out into check_mappings()
     if !opts.skip_mappings {
-        let w = ArrayWalker::new_with_sm(engine.clone(), metadata_sm.clone(), opts.ignore_non_fatal)?;
+        let w =
+            ArrayWalker::new_with_sm(engine.clone(), metadata_sm.clone(), opts.ignore_non_fatal)?;
         match sb.version {
             1 => {
                 let mut c = format1::MappingChecker::new(nr_origin_blocks);
@@ -307,7 +323,8 @@ pub fn check(opts: CacheCheckOptions) -> anyhow::Result<()> {
     if sb.policy_hint_size != 4 {
         return Err(anyhow!("cache_check only supports policy hint size of 4"));
     }
-    let w = ArrayWalker::new_with_sm(engine.clone(), metadata_sm.clone(), opts.ignore_non_fatal)?;
+    let w =
+        ArrayWalker::new_with_sm(engine.clone(), metadata_sm.clone(), opts.ignore_non_fatal)?;
     let mut c = HintChecker::new();
     if let Err(e) = w.walk(&mut c, sb.hint_root) {
         ctx.report.fatal(&format!("{}", e));
@@ -338,11 +355,9 @@ pub fn check(opts: CacheCheckOptions) -> anyhow::Result<()> {
         opts.ignore_non_fatal,
     )?;
 
-    if opts.auto_repair {
-        if !metadata_leaks.is_empty() {
-            ctx.report.info("Repairing metadata leaks.");
-            repair_space_map(ctx.engine.clone(), metadata_leaks, metadata_sm.clone())?;
-        }
+    if opts.auto_repair && !metadata_leaks.is_empty() {
+        ctx.report.info("Repairing metadata leaks.");
+        repair_space_map(ctx.engine.clone(), metadata_leaks, metadata_sm.clone())?;
     }
 
     Ok(())
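Note: merging the nested `if opts.auto_repair { if !metadata_leaks.is_empty() { ... } }` into one condition clears clippy's `collapsible_if` lint. A sketch of the shape:

```rust
// Sketch of clippy's collapsible_if lint.
fn maybe_repair(auto_repair: bool, leaks: &[u64]) {
    // clippy flags this nesting as collapsible:
    // if auto_repair {
    //     if !leaks.is_empty() { println!("repairing {} leaks", leaks.len()); }
    // }

    // Collapsed form: one condition, one indentation level.
    if auto_repair && !leaks.is_empty() {
        println!("repairing {} leaks", leaks.len());
    }
}

fn main() {
    maybe_repair(true, &[5, 9]);
}
```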

src/cache/dump.rs

@@ -61,7 +61,10 @@ mod format1 {
 
             let mut inner = self.inner.lock().unwrap();
             inner.valid_mappings.set(index as usize, true);
-            inner.visitor.mapping(&m).map_err(|e| array::value_err(format!("{}", e)))?;
+            inner
+                .visitor
+                .mapping(&m)
+                .map_err(|e| array::value_err(format!("{}", e)))?;
         }
 
         Ok(())
@@ -96,7 +99,7 @@ mod format2 {
 impl ArrayVisitor<u64> for DirtyVisitor {
     fn visit(&self, index: u64, b: ArrayBlock<u64>) -> array::Result<()> {
-        let mut pos = index as usize * (b.header.max_entries as usize) << 6;
+        let mut pos = (index as usize * (b.header.max_entries as usize)) << 6;
 
         for i in 0..b.header.nr_entries as usize {
            let bits = b.values[i];
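Note: the parentheses added around the multiplication here (and in the bitset visitor further down) are what clippy's `precedence` lint asks for when shifts mix with arithmetic. The value is unchanged, since `*` binds tighter than `<<` in Rust; the lint only wants the grouping to be explicit. A sketch:

```rust
// Sketch of clippy's precedence lint on mixed * and << expressions.
fn main() {
    let index = 3usize;
    let max_entries = 8usize;

    // Same value either way: * binds tighter than <<.
    let implicit = index * max_entries << 6;
    let explicit = (index * max_entries) << 6;

    assert_eq!(implicit, explicit);
    println!("{}", explicit); // 1536
}
```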
@@ -127,7 +130,11 @@ mod format2 {
     }
 
     impl<'a> MappingEmitter<'a> {
-        pub fn new(nr_entries: usize, dirty_bits: FixedBitSet, visitor: &'a mut dyn MetadataVisitor) -> MappingEmitter<'a> {
+        pub fn new(
+            nr_entries: usize,
+            dirty_bits: FixedBitSet,
+            visitor: &'a mut dyn MetadataVisitor,
+        ) -> MappingEmitter<'a> {
             MappingEmitter {
                 inner: Mutex::new(Inner {
                     visitor,
@@ -161,7 +168,10 @@ mod format2 {
             };
 
             inner.valid_mappings.set(index as usize, true);
-            inner.visitor.mapping(&m).map_err(|e| array::value_err(format!("{}", e)))?;
+            inner
+                .visitor
+                .mapping(&m)
+                .map_err(|e| array::value_err(format!("{}", e)))?;
         }
         Ok(())
     }
@@ -272,7 +282,8 @@ fn dump_metadata(ctx: &Context, sb: &Superblock, _repair: bool) -> anyhow::Resul
             let dirty_bits = v.get_bits();
 
             let w = ArrayWalker::new(engine.clone(), false);
-            let mut emitter = format2::MappingEmitter::new(sb.cache_blocks as usize, dirty_bits, &mut out);
+            let mut emitter =
+                format2::MappingEmitter::new(sb.cache_blocks as usize, dirty_bits, &mut out);
             w.walk(&mut emitter, sb.mapping_root)?;
             emitter.get_valid()
         }

src/cache/hint.rs

@@ -1,6 +1,5 @@
 use nom::IResult;
 use std::convert::TryInto;
-use std::marker::PhantomData;
 
 use crate::pdata::unpack::*;


@@ -17,6 +17,7 @@ fn checksum(buf: &[u8]) -> u32 {
 }
 
 #[derive(Debug, PartialEq)]
+#[allow(clippy::upper_case_acronyms)]
 pub enum BT {
     SUPERBLOCK,
     NODE,
@@ -59,7 +60,9 @@ pub fn write_checksum(buf: &mut [u8], kind: BT) -> Result<()> {
         BITMAP => BITMAP_CSUM_XOR,
         INDEX => INDEX_CSUM_XOR,
         ARRAY => ARRAY_CSUM_XOR,
-        UNKNOWN => {return Err(anyhow!("Invalid block type"));}
+        UNKNOWN => {
+            return Err(anyhow!("Invalid block type"));
+        }
     };
 
     let csum = checksum(buf) ^ salt;


@@ -135,15 +135,16 @@ pub fn unpack_array_block<V: Unpack>(path: &[u64], data: &[u8]) -> Result<ArrayB
     if bt != checksum::BT::ARRAY {
         return Err(array_block_err(
             path,
-            &format!("checksum failed for array block {}, {:?}", path.last().unwrap(), bt)
+            &format!(
+                "checksum failed for array block {}, {:?}",
+                path.last().unwrap(),
+                bt
+            ),
         ));
     }
 
-    let (i, header) =
-        ArrayBlockHeader::unpack(data).map_err(|_| array_block_err(
-            path,
-            "Couldn't parse array block header"
-        ))?;
+    let (i, header) = ArrayBlockHeader::unpack(data)
+        .map_err(|_| array_block_err(path, "Couldn't parse array block header"))?;
 
     // check value_size
     if header.value_size != V::disk_size() {
@@ -153,7 +154,7 @@ pub fn unpack_array_block<V: Unpack>(path: &[u64], data: &[u8]) -> Result<ArrayB
                 "value_size mismatch: expected {}, was {}",
                 V::disk_size(),
                 header.value_size
-            )
+            ),
         ));
     }
 
@@ -161,7 +162,7 @@ pub fn unpack_array_block<V: Unpack>(path: &[u64], data: &[u8]) -> Result<ArrayB
     if header.value_size * header.max_entries + ARRAY_BLOCK_HEADER_SIZE > BLOCK_SIZE as u32 {
         return Err(array_block_err(
             path,
-            &format!("max_entries is too large ({})", header.max_entries)
+            &format!("max_entries is too large ({})", header.max_entries),
         ));
     }


@@ -38,7 +38,7 @@ impl<'a, V: Unpack + Copy> BlockValueVisitor<'a, V> {
         BlockValueVisitor {
             engine: e,
             array_visitor: v,
-            sm: sm,
+            sm,
             array_errs: Mutex::new(Vec::new()),
         }
     }
@@ -58,11 +58,13 @@ impl<'a, V: Unpack + Copy> NodeVisitor<u64> for BlockValueVisitor<'a, V> {
         // The ordering of array indices had been verified in unpack_node(),
         // thus checking the upper bound implies key continuity among siblings.
         if *keys.first().unwrap() + keys.len() as u64 != *keys.last().unwrap() + 1 {
-            return Err(btree::value_err(format!("gaps in array indicies")));
+            return Err(btree::value_err("gaps in array indicies".to_string()));
        }
 
         if let Some(end) = kr.end {
             if *keys.last().unwrap() + 1 != end {
-                return Err(btree::value_err(format!("gaps or overlaps in array indicies")));
+                return Err(btree::value_err(
+                    "gaps or overlaps in array indicies".to_string(),
+                ));
             }
         }
@@ -82,7 +84,7 @@ impl<'a, V: Unpack + Copy> NodeVisitor<u64> for BlockValueVisitor<'a, V> {
                 Err(_) => {
                     let mut array_errs = self.array_errs.lock().unwrap();
                     array_errs.push(array::io_err(&path, values[i]).index_context(keys[i]));
-                },
+                }
                 Ok(b) => {
                     path.push(b.loc);
                     match unpack_array_block::<V>(&path, b.get_data()) {
@@ -92,13 +94,13 @@ impl<'a, V: Unpack + Copy> NodeVisitor<u64> for BlockValueVisitor<'a, V> {
                             }
                             let mut sm = self.sm.lock().unwrap();
                             sm.inc(b.loc, 1).unwrap();
-                        },
+                        }
                         Err(e) => {
                             self.array_errs.lock().unwrap().push(e);
                         }
                     }
                     path.pop();
-                },
+                }
             }
         }
     }
@@ -150,15 +152,11 @@ impl ArrayWalker {
     where
         V: Unpack + Copy,
     {
-        let w = BTreeWalker::new_with_sm(
-            self.engine.clone(),
-            self.sm.clone(),
-            self.ignore_non_fatal
-        )?;
-        let mut path = Vec::new();
-        path.push(0);
+        let w =
+            BTreeWalker::new_with_sm(self.engine.clone(), self.sm.clone(), self.ignore_non_fatal)?;
+        let mut path = vec![0];
 
         let v = BlockValueVisitor::<V>::new(self.engine.clone(), self.sm.clone(), visitor);
-        let btree_err = w.walk(&mut path, &v, root).map_err(|e| ArrayError::BTreeError(e));
+        let btree_err = w.walk(&mut path, &v, root).map_err(ArrayError::BTreeError);
 
         let mut array_errs = v.array_errs.into_inner().unwrap();
 
         if let Err(e) = btree_err {
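Note: two separate lints are cleared in this hunk. `map_err(|e| ArrayError::BTreeError(e))` becomes `map_err(ArrayError::BTreeError)` (clippy's `redundant_closure`: a tuple enum variant is already a function), and `Vec::new()` plus `push(0)` becomes `vec![0]` (clippy's `vec_init_then_push`). A combined sketch with a stand-in error type:

```rust
// Sketch of redundant_closure and vec_init_then_push.
#[derive(Debug)]
enum ArrayError {
    BTreeError(String),
}

fn walk() -> Result<(), String> {
    Err("bad node".to_string())
}

fn main() {
    // redundant_closure: the closure |e| ArrayError::BTreeError(e)
    // just forwards to the variant constructor, so pass it by name.
    let res: Result<(), ArrayError> = walk().map_err(ArrayError::BTreeError);
    println!("{:?}", res);

    // vec_init_then_push: prefer the vec! macro over new-then-push.
    // let mut path = Vec::new();
    // path.push(0);
    let path: Vec<u64> = vec![0];
    println!("{:?}", path);
}
```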


@@ -50,11 +50,13 @@ impl BitsetVisitor {
 
 impl ArrayVisitor<u64> for BitsetVisitor {
     fn visit(&self, index: u64, b: ArrayBlock<u64>) -> array::Result<()> {
-        let mut begin = index as usize * (b.header.max_entries as usize) << 6;
+        let mut begin = (index as usize * (b.header.max_entries as usize)) << 6;
 
         for i in 0..b.header.nr_entries as usize {
             if begin > self.nr_entries as usize {
-                return Err(array::value_err("bitset size exceeds expectation".to_string()));
+                return Err(array::value_err(
+                    "bitset size exceeds expectation".to_string(),
+                ));
             }
 
             let end: usize = std::cmp::min(begin + 64, self.nr_entries as usize);
@@ -77,7 +79,7 @@ pub fn read_bitset(
     root: u64,
     nr_entries: usize,
     ignore_none_fatal: bool,
-)-> (CheckedBitSet, Option<array::ArrayError>) {
+) -> (CheckedBitSet, Option<array::ArrayError>) {
     let w = ArrayWalker::new(engine, ignore_none_fatal);
     let mut v = BitsetVisitor::new(nr_entries);
     let err = w.walk(&mut v, root);
@@ -85,7 +87,7 @@ pub fn read_bitset(
         Ok(()) => None,
         Err(e) => Some(e),
     };
-    return (v.get_bitset(), e);
+    (v.get_bitset(), e)
 }
 
 // TODO: multi-threaded is possible
@@ -95,7 +97,7 @@ pub fn read_bitset_with_sm(
     nr_entries: usize,
     sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
     ignore_none_fatal: bool,
-)-> array::Result<(CheckedBitSet, Option<array::ArrayError>)> {
+) -> array::Result<(CheckedBitSet, Option<array::ArrayError>)> {
     let w = ArrayWalker::new_with_sm(engine, sm, ignore_none_fatal)?;
     let mut v = BitsetVisitor::new(nr_entries);
     let err = w.walk(&mut v, root);
@@ -103,5 +105,5 @@ pub fn read_bitset_with_sm(
         Ok(()) => None,
         Err(e) => Some(e),
     };
-    return Ok((v.get_bitset(), e));
+    Ok((v.get_bitset(), e))
 }
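Note: dropping the trailing `return` statements clears clippy's `needless_return` lint; the final expression of a function body is already its value. A sketch:

```rust
// Sketch of clippy's needless_return lint.
fn bitset_summary(bits: &[bool]) -> (usize, usize) {
    let set = bits.iter().filter(|b| **b).count();
    // clippy flags an explicit return in tail position:
    // return (set, bits.len());
    (set, bits.len()) // the tail expression is the return value
}

fn main() {
    println!("{:?}", bitset_summary(&[true, false, true]));
}
```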


@@ -560,7 +560,10 @@ pub fn unpack_node<V: Unpack>(
     for k in &keys {
         if let Some(l) = last {
             if k <= l {
-                return Err(node_err(&path, &format!("keys out of order: {} <= {}", k, l)));
+                return Err(node_err(
+                    &path,
+                    &format!("keys out of order: {} <= {}", k, l),
+                ));
             }
         }


@@ -138,7 +138,7 @@ pub struct WriteResult {
 /// Write a node to a free metadata block.
 fn write_node_<V: Unpack + Pack>(w: &mut WriteBatcher, mut node: Node<V>) -> Result<WriteResult> {
     let keys = node.get_keys();
-    let first_key = keys.first().unwrap_or(&0u64).clone();
+    let first_key = *keys.first().unwrap_or(&0u64);
 
     let b = w.alloc()?;
     node.set_block(b.loc);
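Note: `keys.first().unwrap_or(&0u64).clone()` becoming a dereference fixes clippy's `clone_on_copy` lint: `u64` is `Copy`, so `*` copies it and `clone()` on the reference is just noise. A sketch:

```rust
// Sketch of clippy's clone_on_copy lint.
fn main() {
    let keys: Vec<u64> = vec![42, 7];

    // clippy warns: using clone on a Copy type
    // let first_key = keys.first().unwrap_or(&0u64).clone();

    // A plain dereference does the same work:
    let first_key = *keys.first().unwrap_or(&0u64);
    println!("{}", first_key); // 42
}
```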
@@ -285,8 +285,8 @@ impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {
     /// Any shared nodes that are used have their block incremented in
     /// the space map. Will only increment the ref count for values
     /// contained in the nodes if it unpacks them.
-    pub fn push_nodes(&mut self, w: &mut WriteBatcher, nodes: &Vec<NodeSummary>) -> Result<()> {
-        assert!(nodes.len() > 0);
+    pub fn push_nodes(&mut self, w: &mut WriteBatcher, nodes: &[NodeSummary]) -> Result<()> {
+        assert!(!nodes.is_empty());
 
         // As a sanity check we make sure that all the shared nodes contain the
         // minimum nr of entries.
@@ -298,7 +298,7 @@ impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {
         }
 
         // Decide if we're going to use the pre-built nodes.
-        if (self.values.len() > 0) && (self.values.len() < half_full) {
+        if !self.values.is_empty() && (self.values.len() < half_full) {
             // To avoid writing an under populated node we have to grab some
             // values from the first of the shared nodes.
             let (keys, values) = self.read_node(w, nodes.get(0).unwrap().block)?;
@@ -336,7 +336,7 @@ impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {
     pub fn complete(mut self, w: &mut WriteBatcher) -> Result<Vec<NodeSummary>> {
         let half_full = self.max_entries_per_node / 2;
 
-        if (self.values.len() > 0) && (self.values.len() < half_full) && (self.nodes.len() > 0) {
+        if !self.values.is_empty() && (self.values.len() < half_full) && !self.nodes.is_empty() {
             // We don't have enough values to emit a node. So we're going to
             // have to rebalance with the previous node.
             self.unshift_node(w)?;
@@ -344,7 +344,7 @@ impl<'a, V: Pack + Unpack + Clone> NodeBuilder<V> {
 
         self.emit_all(w)?;
 
-        if self.nodes.len() == 0 {
+        if self.nodes.is_empty() {
             self.emit_empty_leaf(w)?
         }
@@ -461,7 +461,7 @@ impl<V: Unpack + Pack + Clone> Builder<V> {
         self.leaf_builder.push_value(w, k, v)
     }
 
-    pub fn push_leaves(&mut self, w: &mut WriteBatcher, leaves: &Vec<NodeSummary>) -> Result<()> {
+    pub fn push_leaves(&mut self, w: &mut WriteBatcher, leaves: &[NodeSummary]) -> Result<()> {
         self.leaf_builder.push_nodes(w, leaves)
     }
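Note: the builder changes clear two more lints: `ptr_arg` (accept `&[T]` instead of `&Vec<T>`, so any slice can be passed) and `len_zero` (use `is_empty()` rather than comparing `len()` against zero). A sketch:

```rust
// Sketch of clippy's ptr_arg and len_zero lints.

// ptr_arg: &Vec<u64> would force callers to hold a Vec;
// &[u64] accepts vectors, arrays, and sub-slices alike.
fn push_nodes(nodes: &[u64]) {
    // len_zero: assert!(nodes.len() > 0) reads better as is_empty().
    assert!(!nodes.is_empty());
    println!("pushing {} nodes", nodes.len());
}

fn main() {
    let owned = vec![1, 2, 3];
    push_nodes(&owned);      // &Vec<u64> coerces to &[u64]
    push_nodes(&owned[..2]); // an explicit slice works too
}
```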


@@ -69,10 +69,7 @@ impl BTreeWalker {
     fn failed(&self, b: u64) -> Option<BTreeError> {
         let fails = self.fails.lock().unwrap();
-        match fails.get(&b) {
-            None => None,
-            Some(e) => Some(e.clone()),
-        }
+        fails.get(&b).cloned()
     }
 
     fn set_fail(&self, b: u64, err: BTreeError) {
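Note: the hand-rolled `match` that clones an `Option<&BTreeError>` into an `Option<BTreeError>` is exactly what `Option::cloned` does. A sketch with `String` standing in for `BTreeError`:

```rust
// Sketch of collapsing a match into Option::cloned.
use std::collections::BTreeMap;

fn main() {
    let mut fails: BTreeMap<u64, String> = BTreeMap::new();
    fails.insert(7, "io error".to_string());

    // The long form this commit removes:
    // let err = match fails.get(&7) {
    //     None => None,
    //     Some(e) => Some(e.clone()),
    // };

    // Option<&String>::cloned() produces the owned Option<String>.
    let err: Option<String> = fails.get(&7).cloned();
    println!("{:?}", err);
}
```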


@@ -45,8 +45,10 @@ impl<'a> NodeVisitor<u32> for OverflowChecker<'a> {
             let v = values[n];
             let expected = self.sm.get(k).unwrap();
             if expected != v {
-                return Err(value_err(format!("Bad reference count for {} block {}. Expected {}, but space map contains {}.",
-                                             self.kind, k, expected, v)));
+                return Err(value_err(format!(
+                    "Bad reference count for {} block {}. Expected {}, but space map contains {}.",
+                    self.kind, k, expected, v
+                )));
             }
         }
@@ -76,10 +78,10 @@ fn inc_entries(sm: &ASpaceMap, entries: &[IndexEntry]) -> Result<()> {
 // Compare the refernece counts in bitmaps against the expected values
 //
 // `sm` - The in-core space map of expected reference counts
-fn check_low_ref_counts<'a>(
+fn check_low_ref_counts(
     engine: Arc<dyn IoEngine + Send + Sync>,
     report: Arc<Report>,
-    kind: &'a str,
+    kind: &str,
     entries: Vec<IndexEntry>,
     sm: ASpaceMap,
 ) -> Result<Vec<BitmapLeak>> {
@@ -215,7 +217,12 @@ pub fn check_disk_space_map(
     metadata_sm: ASpaceMap,
     ignore_non_fatal: bool,
 ) -> Result<Vec<BitmapLeak>> {
-    let entries = gather_disk_index_entries(engine.clone(), root.bitmap_root, metadata_sm.clone(), ignore_non_fatal)?;
+    let entries = gather_disk_index_entries(
+        engine.clone(),
+        root.bitmap_root,
+        metadata_sm.clone(),
+        ignore_non_fatal,
+    )?;
 
     // check overflow ref-counts
     {
@@ -239,8 +246,15 @@ pub fn check_metadata_space_map(
     metadata_sm: ASpaceMap,
     ignore_non_fatal: bool,
 ) -> Result<Vec<BitmapLeak>> {
-    count_btree_blocks::<u32>(engine.clone(), &mut vec![0], root.ref_count_root, metadata_sm.clone(), false)?;
-    let entries = gather_metadata_index_entries(engine.clone(), root.bitmap_root, metadata_sm.clone())?;
+    count_btree_blocks::<u32>(
+        engine.clone(),
+        &mut vec![0],
+        root.ref_count_root,
+        metadata_sm.clone(),
+        false,
+    )?;
+    let entries =
+        gather_metadata_index_entries(engine.clone(), root.bitmap_root, metadata_sm.clone())?;
 
     // check overflow ref-counts
     {
@@ -259,7 +273,7 @@ pub fn check_metadata_space_map(
 
 pub fn repair_space_map(
     engine: Arc<dyn IoEngine + Send + Sync>,
     entries: Vec<BitmapLeak>,
-    sm: ASpaceMap
+    sm: ASpaceMap,
 ) -> Result<()> {
     let sm = sm.lock().unwrap();


@@ -371,7 +371,7 @@ pub fn write_metadata_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRo
     let mut by_bitmap = BTreeMap::new();
     for b in allocations {
         let bitmap = block_to_bitmap(b);
-        (*by_bitmap.entry(bitmap).or_insert(Vec::new())).push(b % ENTRIES_PER_BITMAP as u64);
+        (*by_bitmap.entry(bitmap).or_insert_with(Vec::new)).push(b % ENTRIES_PER_BITMAP as u64);
     }
 
     for (bitmap, allocs) in by_bitmap {
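Note: `or_insert(Vec::new())` evaluates its argument on every call, even when the key is already present; `or_insert_with(Vec::new)` defers construction to the vacant case, which is what clippy's `or_fun_call` lint suggests (for `Vec::new` the saving is trivial since it does not allocate, but the lint applies uniformly). A sketch:

```rust
// Sketch of or_insert_with on a BTreeMap entry, as fixed above.
use std::collections::BTreeMap;

fn main() {
    let allocations = [3u64, 65, 67, 130];
    let entries_per_bitmap = 64u64;

    let mut by_bitmap: BTreeMap<u64, Vec<u64>> = BTreeMap::new();
    for b in allocations {
        let bitmap = b / entries_per_bitmap;
        // or_insert_with only invokes Vec::new when the entry is vacant.
        by_bitmap
            .entry(bitmap)
            .or_insert_with(Vec::new)
            .push(b % entries_per_bitmap);
    }

    println!("{:?}", by_bitmap); // {0: [3], 1: [1, 3], 2: [2]}
}
```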


@@ -1,12 +1,11 @@
 use anyhow::{anyhow, Result};
 use std::collections::BTreeMap;
 use std::path::Path;
 use std::sync::atomic::{AtomicBool, Ordering};
 use std::sync::{Arc, Mutex};
 use std::thread::{self, JoinHandle};
 use threadpool::ThreadPool;
 
-use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
+use crate::io_engine::IoEngine;
 use crate::pdata::btree::{self, *};
 use crate::pdata::btree_walker::*;
 use crate::pdata::space_map::*;
@@ -236,8 +235,7 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
     }
 
     let metadata_root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
-    let mut path = Vec::new();
-    path.push(0);
+    let mut path = vec![0];
 
     // Device details. We read this once to get the number of thin devices, and hence the
     // maximum metadata ref count. Then create metadata space map, and reread to increment
@@ -349,7 +347,10 @@ pub struct CheckMaps {
     pub data_sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
 }
 
-pub fn check_with_maps(engine: Arc<dyn IoEngine + Send + Sync>, report: Arc<Report>) -> Result<CheckMaps> {
+pub fn check_with_maps(
+    engine: Arc<dyn IoEngine + Send + Sync>,
+    report: Arc<Report>,
+) -> Result<CheckMaps> {
     let ctx = mk_context(engine.clone(), report.clone())?;
     report.set_title("Checking thin metadata");
@@ -359,18 +360,12 @@ pub fn check_with_maps(engine: Arc<dyn IoEngine + Send + Sync>, report: Arc<Repo
     report.info(&format!("TRANSACTION_ID={}", sb.transaction_id));
 
     let metadata_root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
-    let mut path = Vec::new();
-    path.push(0);
+    let mut path = vec![0];
 
     // Device details. We read this once to get the number of thin devices, and hence the
     // maximum metadata ref count. Then create metadata space map, and reread to increment
     // the ref counts for that metadata.
-    let devs = btree_to_map::<DeviceDetail>(
-        &mut path,
-        engine.clone(),
-        false,
-        sb.details_root,
-    )?;
+    let devs = btree_to_map::<DeviceDetail>(&mut path, engine.clone(), false, sb.details_root)?;
     let nr_devs = devs.len();
 
     let metadata_sm = core_sm(engine.get_nr_blocks(), nr_devs as u32);
     inc_superblock(&metadata_sm)?;


@@ -255,8 +255,7 @@ fn collect_leaves(
         let mut w = LeafWalker::new(ctx.engine.clone(), sm.deref_mut(), false);
         let mut v = CollectLeaves::new();
 
-        let mut path = Vec::new();
-        path.push(0);
+        let mut path = vec![0];
 
         // ctx.report.set_title(&format!("collecting {}", *r));
         w.walk::<CollectLeaves, BlockTime>(&mut path, &mut v, *r)?;
@@ -323,8 +322,7 @@ fn build_metadata(ctx: &Context, sb: &Superblock) -> Result<Metadata> {
     report.set_title("Reading superblock");
     //let metadata_root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
     //let data_root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
-    let mut path = Vec::new();
-    path.push(0);
+    let mut path = vec![0];
 
     report.set_title("Reading device details");
     let details = btree_to_map::<DeviceDetail>(&mut path, engine.clone(), true, sb.details_root)?;
@@ -385,6 +383,7 @@ fn build_metadata(ctx: &Context, sb: &Superblock) -> Result<Metadata> {
 
 //------------------------------------------
 
+#[allow(dead_code)]
 fn gather_entries(g: &mut Gatherer, es: &[Entry]) {
     g.new_seq();
     for e in es {
@@ -399,6 +398,7 @@ fn gather_entries(g: &mut Gatherer, es: &[Entry]) {
     }
 }
 
+#[allow(dead_code)]
 fn entries_to_runs(runs: &BTreeMap<u64, Vec<u64>>, es: &[Entry]) -> Vec<Entry> {
     use Entry::*;
@@ -427,6 +427,7 @@ fn entries_to_runs(runs: &BTreeMap<u64, Vec<u64>>, es: &[Entry]) -> Vec<Entry> {
 
 // FIXME: do we really need to track kr?
 // FIXME: I think this may be better done as part of restore.
+#[allow(dead_code)]
 fn optimise_metadata(md: Metadata) -> Result<Metadata> {
     use Entry::*;


@@ -89,7 +89,7 @@ impl<'a> Pass1<'a> {
         if let Some((name, nodes)) = current {
             Ok((name, nodes.complete(self.w)?))
         } else {
-            let msg = format!("Unbalanced </def> tag");
+            let msg = "Unbalanced </def> tag".to_string();
             Err(anyhow!(msg))
         }
     }
@@ -154,7 +154,7 @@ impl<'a> MetadataVisitor for Pass1<'a> {
             }
             Ok(Visit::Continue)
         } else {
-            let msg = format!("Mapping tags must appear within a <def> or <device> tag.");
+            let msg = "Mapping tags must appear within a <def> or <device> tag.".to_string();
             Err(anyhow!(msg))
         }
     }


@@ -110,14 +110,14 @@ fn pack_superblock<W: WriteBytesExt>(sb: &Superblock, w: &mut W) -> Result<()> {
     }
 
     w.write_u64::<LittleEndian>(sb.block)?;
-    w.write_all(&vec![0; UUID_SIZE])?;
+    w.write_all(&[0; UUID_SIZE])?;
     w.write_u64::<LittleEndian>(MAGIC)?;
     w.write_u32::<LittleEndian>(sb.version)?;
     w.write_u32::<LittleEndian>(sb.time)?;
     w.write_u64::<LittleEndian>(sb.transaction_id)?;
     w.write_u64::<LittleEndian>(sb.metadata_snap)?;
-    w.write_all(&vec![0; SPACE_MAP_ROOT_SIZE])?; // data sm root
-    w.write_all(&vec![0; SPACE_MAP_ROOT_SIZE])?; // metadata sm root
+    w.write_all(&[0; SPACE_MAP_ROOT_SIZE])?; // data sm root
+    w.write_all(&[0; SPACE_MAP_ROOT_SIZE])?; // metadata sm root
     w.write_u64::<LittleEndian>(sb.mapping_root)?;
     w.write_u64::<LittleEndian>(sb.details_root)?;
     w.write_u32::<LittleEndian>(sb.data_block_size)?;
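Note: `&vec![0; N]` heap-allocates a `Vec` only to borrow it as a slice and drop it; `&[0; N]` is a stack array that coerces to the same `&[u8]`, which is clippy's `useless_vec` suggestion (possible here because the sizes are compile-time constants). A sketch:

```rust
// Sketch of clippy's useless_vec lint when only a slice is needed.
use std::io::Write;

const UUID_SIZE: usize = 16;

fn main() -> std::io::Result<()> {
    let mut out: Vec<u8> = Vec::new();

    // clippy warns: useless use of vec! (built, borrowed, dropped)
    // out.write_all(&vec![0; UUID_SIZE])?;

    // A fixed-size array coerces to &[u8] without a heap allocation.
    out.write_all(&[0u8; UUID_SIZE])?;

    println!("wrote {} bytes", out.len());
    Ok(())
}
```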