[thin_check (rust)] data space map now checked.

This commit is contained in:
Joe Thornber 2020-08-08 16:42:32 +01:00
parent 4054b1be4c
commit fd0c0ffc1d
2 changed files with 140 additions and 61 deletions

View File

@ -135,6 +135,37 @@ impl AsyncIoEngine {
}), }),
}) })
} }
// Reads every block in `blocks` from the input device as a single
// io_uring batch: queue one read SQE per block, submit, then wait for
// all completions.  Caller must keep blocks.len() within the ring's
// queue depth — push() panics via expect() if the submission queue fills.
fn read_many_(&self, blocks: &mut [Block]) -> Result<()> {
let mut inner = self.inner.lock().unwrap();
let count = blocks.len();
let fd = types::Target::Fd(inner.input.as_raw_fd());
for b in blocks.iter_mut() {
// One BLOCK_SIZE read into b.data at byte offset loc * BLOCK_SIZE.
// NOTE(review): assumes b.data stays valid until completion — the
// buffer must outlive submit_and_wait() below.
let read_e = opcode::Read::new(fd, b.data, BLOCK_SIZE as u32)
.offset(b.loc as i64 * BLOCK_SIZE as i64);
// SAFETY: pushing to the submission queue is unsafe in the io_uring
// crate; the entry references b.data, which lives past the wait.
unsafe {
let mut queue = inner.ring.submission().available();
queue
.push(read_e.build().user_data(1))
.ok()
.expect("queue is full");
}
}
// Block until all `count` reads complete.
inner.ring.submit_and_wait(count)?;
let cqes = inner.ring.completion().available().collect::<Vec<_>>();
// FIXME: return proper errors
assert_eq!(cqes.len(), count);
for c in &cqes {
// A short or failed read shows up as a result != BLOCK_SIZE.
assert_eq!(c.result(), BLOCK_SIZE as i32);
}
Ok(())
}
} }
impl Clone for AsyncIoEngine { impl Clone for AsyncIoEngine {
@ -186,33 +217,16 @@ impl IoEngine for AsyncIoEngine {
} }
/// Reads all `blocks`, splitting the work into batches that fit the
/// ring's submission queue and delegating each batch to `read_many_`.
fn read_many(&self, blocks: &mut Vec<Block>) -> Result<()> {
    // Grab the queue depth, then release the lock so read_many_()
    // can re-acquire it for each batch.
    let queue_len = {
        let inner = self.inner.lock().unwrap();
        inner.queue_len as usize
    };

    let mut done = 0;
    while done != blocks.len() {
        let remaining = blocks.len() - done;
        let len = usize::min(remaining, queue_len);
        self.read_many_(&mut blocks[done..(done + len)])?;
        done += len;
    }

    Ok(())
}
} }

View File

@ -1,17 +1,17 @@
use anyhow::{anyhow, Result}; use anyhow::{anyhow, Result};
use fixedbitset::FixedBitSet; use fixedbitset::FixedBitSet;
use nom::{number::complete::*, IResult}; use nom::{number::complete::*, IResult};
use std::collections::{BTreeMap}; use std::collections::BTreeMap;
use std::path::Path; use std::path::Path;
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use std::time::Instant; use std::time::Instant;
use threadpool::ThreadPool; use threadpool::ThreadPool;
use crate::block_manager::{AsyncIoEngine, Block, IoEngine}; use crate::block_manager::{AsyncIoEngine, Block, IoEngine};
use crate::pdata::btree::{BTreeWalker, Node, NodeVisitor, Unpack, unpack}; use crate::checksum;
use crate::pdata::btree::{unpack, BTreeWalker, Node, NodeVisitor, Unpack};
use crate::pdata::space_map::*; use crate::pdata::space_map::*;
use crate::thin::superblock::*; use crate::thin::superblock::*;
use crate::checksum;
//------------------------------------------ //------------------------------------------
@ -74,7 +74,12 @@ impl NodeVisitor<BlockTime> for BottomLevelVisitor {
fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<BlockTime>) -> Result<()> { fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<BlockTime>) -> Result<()> {
// FIXME: do other checks // FIXME: do other checks
if let Node::Leaf {header: _h, keys: _k, values} = node { if let Node::Leaf {
header: _h,
keys: _k,
values,
} = node
{
if values.len() > 0 { if values.len() > 0 {
let mut data_sm = self.data_sm.lock().unwrap(); let mut data_sm = self.data_sm.lock().unwrap();
@ -175,7 +180,8 @@ impl NodeVisitor<IndexEntry> for IndexVisitor {
header: _h, header: _h,
keys: _k, keys: _k,
values, values,
} = node { } = node
{
for v in values { for v in values {
// FIXME: check keys are in incremental order // FIXME: check keys are in incremental order
let v = v.clone(); let v = v.clone();
@ -196,9 +202,7 @@ struct ValueCollector<V> {
impl<V> ValueCollector<V> { impl<V> ValueCollector<V> {
fn new() -> ValueCollector<V> { fn new() -> ValueCollector<V> {
ValueCollector { ValueCollector { values: Vec::new() }
values: Vec::new(),
}
} }
} }
@ -208,7 +212,8 @@ impl<V: Unpack + Clone> NodeVisitor<V> for ValueCollector<V> {
header: _h, header: _h,
keys, keys,
values, values,
} = node { } = node
{
for n in 0..keys.len() { for n in 0..keys.len() {
let k = keys[n]; let k = keys[n];
let v = values[n].clone(); let v = values[n].clone();
@ -222,8 +227,45 @@ impl<V: Unpack + Clone> NodeVisitor<V> for ValueCollector<V> {
//------------------------------------------ //------------------------------------------
/// Validates overflow-btree reference counts against the in-core
/// space map built while walking the mapping trees.
struct OverflowChecker<'a> {
    data_sm: &'a dyn SpaceMap,
}

impl<'a> OverflowChecker<'a> {
    /// Wraps `data_sm` so it can be consulted during a btree walk.
    fn new(data_sm: &'a dyn SpaceMap) -> OverflowChecker<'a> {
        Self { data_sm }
    }
}
impl<'a> NodeVisitor<u32> for OverflowChecker<'a> {
    /// Checks each (block, count) entry in an overflow-btree leaf against
    /// the expected count recorded in the in-core space map.
    ///
    /// Returns an error on the first mismatch; internal nodes are ignored.
    fn visit(&mut self, _w: &BTreeWalker, _b: &Block, node: &Node<u32>) -> Result<()> {
        if let Node::Leaf {
            header: _h,
            keys,
            values,
        } = node
        {
            // Iterate keys/values in lock-step; zip avoids the
            // bounds-checked parallel indexing of keys[n]/values[n].
            for (k, v) in keys.iter().zip(values.iter()) {
                let expected = self.data_sm.get(*k)?;
                if expected != *v {
                    return Err(anyhow!("Bad reference count for data block {}. Expected {}, but space map contains {}.",
                        k, expected, v));
                }
            }
        }
        Ok(())
    }
}
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
pub fn check(dev: &Path) -> Result<()> { pub fn check(dev: &Path) -> Result<()> {
let engine = Arc::new(AsyncIoEngine::new(dev, 256)?); let engine = Arc::new(AsyncIoEngine::new(dev, MAX_CONCURRENT_IO)?);
let now = Instant::now(); let now = Instant::now();
let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?; let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;
@ -249,6 +291,7 @@ pub fn check(dev: &Path) -> Result<()> {
} }
// mapping bottom level // mapping bottom level
let data_sm;
{ {
// FIXME: with a thread pool we need to return errors another way. // FIXME: with a thread pool we need to return errors another way.
let nr_workers = 4; let nr_workers = 4;
@ -258,7 +301,7 @@ pub fn check(dev: &Path) -> Result<()> {
))); )));
let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?; let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
let data_sm = core_sm(root.nr_blocks, nr_devs as u32); data_sm = core_sm(root.nr_blocks, nr_devs as u32);
for (thin_id, root) in roots { for (thin_id, root) in roots {
let mut w = BTreeWalker::new_with_seen(engine.clone(), seen.clone(), false); let mut w = BTreeWalker::new_with_seen(engine.clone(), seen.clone(), false);
@ -275,39 +318,61 @@ pub fn check(dev: &Path) -> Result<()> {
// data space map // data space map
{ {
let data_sm = data_sm.lock().unwrap();
let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?; let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
eprintln!("data root: {:?}", root); eprintln!("data root: {:?}", root);
// overflow btree // overflow btree
let mut overflow: BTreeMap<u64, u32> = BTreeMap::new();
{ {
let mut v: ValueCollector<u32> = ValueCollector::new(); let mut v = OverflowChecker::new(&*data_sm);
let mut w = BTreeWalker::new(engine.clone(), false); let mut w = BTreeWalker::new(engine.clone(), false);
w.walk(&mut v, root.ref_count_root)?; w.walk(&mut v, root.ref_count_root)?;
for (k, v) in v.values {
overflow.insert(k, v);
} }
}
eprintln!("{} overflow entries", overflow.len());
// Bitmaps // Bitmaps
let mut v = IndexVisitor {entries: Vec::new()}; let mut v = IndexVisitor {
entries: Vec::new(),
};
let mut w = BTreeWalker::new(engine.clone(), false); let mut w = BTreeWalker::new(engine.clone(), false);
let _result = w.walk(&mut v, root.bitmap_root); let _result = w.walk(&mut v, root.bitmap_root);
eprintln!("{} index entries", v.entries.len()); eprintln!("{} index entries", v.entries.len());
for i in v.entries { let mut blocks = Vec::new();
let mut b = Block::new(i.blocknr); for i in &v.entries {
engine.read(&mut b)?; blocks.push(Block::new(i.blocknr));
}
engine.read_many(&mut blocks)?;
let mut blocknr = 0;
for (n, _i) in v.entries.iter().enumerate() {
let b = &blocks[n];
if checksum::metadata_block_type(&b.get_data()) != checksum::BT::BITMAP { if checksum::metadata_block_type(&b.get_data()) != checksum::BT::BITMAP {
return Err(anyhow!("Index entry points to block ({}) that isn't a bitmap", b.loc)); return Err(anyhow!(
"Index entry points to block ({}) that isn't a bitmap",
b.loc
));
} }
let bitmap = unpack::<Bitmap>(b.get_data())?; let bitmap = unpack::<Bitmap>(b.get_data())?;
for _e in bitmap.entries { for e in bitmap.entries {
//builder.push(&e); match e {
BitmapEntry::Small(actual) => {
let expected = data_sm.get(blocknr)?;
if actual != expected as u8 {
return Err(anyhow!("Bad reference count for data block {}. Expected {}, but space map contains {}.",
blocknr, expected, actual));
}
}
BitmapEntry::Overflow => {
let expected = data_sm.get(blocknr)?;
if expected < 3 {
return Err(anyhow!("Bad reference count for data block {}. Expected {}, but space map says it's >= 3.",
blocknr, expected));
}
}
}
blocknr += 1;
} }
} }
} }