[thin_check (rust)] Factor out pdata/unpack

This commit is contained in:
Joe Thornber 2020-08-10 15:42:10 +01:00
parent 55ee4bfad8
commit 50bde693a1
5 changed files with 63 additions and 46 deletions

View File

@ -1,31 +1,17 @@
use anyhow::{anyhow, Result};
use fixedbitset::FixedBitSet;
use nom::{number::complete::*, IResult};
use std::sync::{Arc, Mutex};
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};
use crate::checksum;
use crate::io_engine::*;
use crate::pdata::unpack::*;
// FIXME: check that keys are in ascending order between nodes.
//------------------------------------------
/// A value that can be deserialized from its on-disk byte representation.
pub trait Unpack {
    // The size of the value when on disk.
    // NOTE(review): callers appear to rely on this being a fixed width
    // per type (e.g. 8 for u64, 4 for u32 below) — confirm no
    // variable-width implementors exist.
    fn disk_size() -> u32;

    /// Parses one value from `data`, returning the unconsumed remainder
    /// alongside the parsed value (standard nom convention).
    fn unpack(data: &[u8]) -> IResult<&[u8], Self>
    where
        Self: std::marker::Sized;
}
/// Runs `U`'s nom parser over `data`, discarding any unconsumed trailing
/// bytes and converting nom's error into an `anyhow` error.
///
/// # Errors
/// Returns an error naming the target type when parsing fails.
pub fn unpack<U: Unpack>(data: &[u8]) -> Result<U> {
    match U::unpack(data) {
        // Bug fix: the message hard-coded "SMRoot" (copy-pasted from the
        // space-map caller) even though this function is fully generic;
        // report the actual type so other failures aren't misattributed.
        Err(_e) => Err(anyhow!("couldn't parse {}", std::any::type_name::<U>())),
        Ok((_i, v)) => Ok(v),
    }
}
const NODE_HEADER_SIZE: usize = 32;
pub struct NodeHeader {
@ -160,28 +146,6 @@ pub fn unpack_node<V: Unpack>(
//------------------------------------------
impl Unpack for u64 {
    // Width of a u64 on disk: 8 bytes.
    fn disk_size() -> u32 {
        std::mem::size_of::<u64>() as u32
    }

    // Metadata values are stored little-endian.
    fn unpack(data: &[u8]) -> IResult<&[u8], u64> {
        le_u64(data)
    }
}
impl Unpack for u32 {
    // Width of a u32 on disk: 4 bytes.
    fn disk_size() -> u32 {
        std::mem::size_of::<u32>() as u32
    }

    // Metadata values are stored little-endian.
    fn unpack(data: &[u8]) -> IResult<&[u8], u32> {
        le_u32(data)
    }
}
//------------------------------------------
/// Callback interface implemented by consumers of a btree walk.
///
/// NOTE(review): presumably `BTreeWalker` calls `visit` for each decoded
/// node, passing the raw `Block` the node was read from — confirm against
/// the walker implementation (not fully visible in this hunk).
pub trait NodeVisitor<V: Unpack> {
    fn visit(&mut self, w: &BTreeWalker, b: &Block, node: &Node<V>) -> Result<()>;
}
@ -315,7 +279,9 @@ struct ValueCollector<V> {
impl<V> ValueCollector<V> {
fn new() -> ValueCollector<V> {
ValueCollector { values: BTreeMap::new() }
ValueCollector {
values: BTreeMap::new(),
}
}
}
@ -338,9 +304,11 @@ impl<V: Unpack + Clone> NodeVisitor<V> for ValueCollector<V> {
}
}
pub fn btree_to_map<V: Unpack + Clone>(engine: Arc<dyn IoEngine + Send + Sync>,
ignore_non_fatal: bool,
root: u64) -> Result<BTreeMap<u64, V>> {
pub fn btree_to_map<V: Unpack + Clone>(
engine: Arc<dyn IoEngine + Send + Sync>,
ignore_non_fatal: bool,
root: u64,
) -> Result<BTreeMap<u64, V>> {
let mut walker = BTreeWalker::new(engine, ignore_non_fatal);
let mut visitor = ValueCollector::<V>::new();

View File

@ -1,3 +1,4 @@
pub mod btree;
pub mod space_map;
pub mod unpack;

View File

@ -3,7 +3,7 @@ use nom::{number::complete::*, IResult};
use std::sync::{Arc, Mutex};
use crate::io_engine::*;
use crate::pdata::btree::Unpack;
use crate::pdata::unpack::Unpack;
//------------------------------------------

43
src/pdata/unpack.rs Normal file
View File

@ -0,0 +1,43 @@
use anyhow::{anyhow, Result};
use nom::{number::complete::*, IResult};
//------------------------------------------
/// A value that can be deserialized from its on-disk byte representation.
pub trait Unpack {
    // The size of the value when on disk.
    // NOTE(review): implementors in this file return fixed widths
    // (8 for u64, 4 for u32); confirm all implementors are fixed-width.
    fn disk_size() -> u32;

    /// Parses one value from `data`, returning the unconsumed remainder
    /// alongside the parsed value (standard nom convention).
    fn unpack(data: &[u8]) -> IResult<&[u8], Self>
    where
        Self: std::marker::Sized;
}
/// Runs `U`'s nom parser over `data`, discarding any unconsumed trailing
/// bytes and converting nom's error into an `anyhow` error.
///
/// # Errors
/// Returns an error naming the target type when parsing fails.
pub fn unpack<U: Unpack>(data: &[u8]) -> Result<U> {
    match U::unpack(data) {
        // Bug fix: the message hard-coded "SMRoot" even though this
        // function is generic over every Unpack type; report the actual
        // type so parse failures for other types aren't misattributed.
        Err(_e) => Err(anyhow!("couldn't parse {}", std::any::type_name::<U>())),
        Ok((_i, v)) => Ok(v),
    }
}
//------------------------------------------
impl Unpack for u64 {
    // Width of a u64 on disk: 8 bytes.
    fn disk_size() -> u32 {
        std::mem::size_of::<u64>() as u32
    }

    // Metadata values are stored little-endian.
    fn unpack(data: &[u8]) -> IResult<&[u8], u64> {
        le_u64(data)
    }
}
impl Unpack for u32 {
    // Width of a u32 on disk: 4 bytes.
    fn disk_size() -> u32 {
        std::mem::size_of::<u32>() as u32
    }

    // Metadata values are stored little-endian.
    fn unpack(data: &[u8]) -> IResult<&[u8], u32> {
        le_u32(data)
    }
}
//------------------------------------------

View File

@ -8,7 +8,8 @@ use threadpool::ThreadPool;
use crate::checksum;
use crate::io_engine::{AsyncIoEngine, Block, IoEngine, SyncIoEngine};
use crate::pdata::btree::{btree_to_map, unpack, BTreeWalker, Node, NodeVisitor, Unpack};
use crate::pdata::unpack::*;
use crate::pdata::btree::{btree_to_map, BTreeWalker, Node, NodeVisitor};
use crate::pdata::space_map::*;
use crate::thin::superblock::*;
@ -207,7 +208,7 @@ pub fn check(opts: &ThinCheckOptions) -> Result<()> {
// mapping top level
let roots = btree_to_map::<u64>(engine.clone(), false, sb.mapping_root)?;
// mapping bottom level
// Check the mappings filling in the data_sm as we go.
let data_sm;
{
// FIXME: with a thread pool we need to return errors another way.
@ -242,7 +243,7 @@ pub fn check(opts: &ThinCheckOptions) -> Result<()> {
pool.join();
}
// data space map
// Check the data space map.
{
let data_sm = data_sm.lock().unwrap();
let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
@ -265,6 +266,7 @@ pub fn check(opts: &ThinCheckOptions) -> Result<()> {
blocks.push(Block::new(i.blocknr));
}
// FIXME: we should do this in batches
engine.read_many(&mut blocks)?;
let mut leaks = 0;
@ -322,6 +324,9 @@ pub fn check(opts: &ThinCheckOptions) -> Result<()> {
}
}
// Check the metadata space map.
Ok(())
}