use anyhow::{anyhow, Result};
use nom::{number::complete::*, IResult};
use std::collections::BTreeMap;
use std::io::Cursor;
use std::path::Path;
use std::sync::{Arc, Mutex};
use std::thread::{self, JoinHandle};
use threadpool::ThreadPool;

use crate::checksum;
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::btree::{self, *};
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::report::*;
use crate::thin::superblock::*;

//------------------------------------------
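
// The value type of the bottom-level mapping trees: a data block address
// packed into the top 40 bits of a u64, with the 24-bit time (snapshot
// epoch) in the low bits.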
#[allow(dead_code)]
struct BlockTime {
    block: u64,
    time: u32,
}

impl Unpack for BlockTime {
    fn disk_size() -> u32 {
        8
    }

    fn unpack(i: &[u8]) -> IResult<&[u8], BlockTime> {
        let (i, n) = le_u64(i)?;
        let block = n >> 24;
        let time = n & ((1 << 24) - 1);

        Ok((
            i,
            BlockTime {
                block,
                time: time as u32,
            },
        ))
    }
}
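
// Walks bottom-level leaves, incrementing the data space map for every
// mapped block.  Runs of adjacent blocks are coalesced into a single
// inc() call.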
struct BottomLevelVisitor {
    data_sm: ASpaceMap,
}

//------------------------------------------

impl NodeVisitor<BlockTime> for BottomLevelVisitor {
    fn visit(
        &self,
        _kr: &KeyRange,
        _h: &NodeHeader,
        _k: &[u64],
        values: &[BlockTime],
    ) -> btree::Result<()> {
        // FIXME: do other checks

        if values.is_empty() {
            return Ok(());
        }

        let mut data_sm = self.data_sm.lock().unwrap();

        let mut start = values[0].block;
        let mut len = 1;

        for n in 1..values.len() {
            let block = values[n].block;
            if block == start + len {
                len += 1;
            } else {
                data_sm.inc(start, len).unwrap();
                start = block;
                len = 1;
            }
        }

        data_sm.inc(start, len).unwrap();
        Ok(())
    }
}

//------------------------------------------
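
// An entry in the device details tree; one per thin device.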
#[derive(Clone)]
struct DeviceDetail {
    mapped_blocks: u64,
    transaction_id: u64,
    creation_time: u32,
    snapshotted_time: u32,
}

impl Unpack for DeviceDetail {
    fn disk_size() -> u32 {
        24
    }

    fn unpack(i: &[u8]) -> IResult<&[u8], DeviceDetail> {
        let (i, mapped_blocks) = le_u64(i)?;
        let (i, transaction_id) = le_u64(i)?;
        let (i, creation_time) = le_u32(i)?;
        let (i, snapshotted_time) = le_u32(i)?;

        Ok((
            i,
            DeviceDetail {
                mapped_blocks,
                transaction_id,
                creation_time,
                snapshotted_time,
            },
        ))
    }
}

//------------------------------------------
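
// Compares each entry in a space map's overflow btree against the
// reference counts we computed while walking the metadata.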
struct OverflowChecker<'a> {
    data_sm: &'a dyn SpaceMap,
}

impl<'a> OverflowChecker<'a> {
    fn new(data_sm: &'a dyn SpaceMap) -> OverflowChecker<'a> {
        OverflowChecker { data_sm }
    }
}

impl<'a> NodeVisitor<u32> for OverflowChecker<'a> {
    fn visit(
        &self,
        _kr: &KeyRange,
        _h: &NodeHeader,
        keys: &[u64],
        values: &[u32],
    ) -> btree::Result<()> {
        for n in 0..keys.len() {
            let k = keys[n];
            let v = values[n];
            let expected = self.data_sm.get(k).unwrap();
            if expected != v {
                return Err(value_err(format!(
                    "Bad reference count for data block {}. Expected {}, but space map contains {}.",
                    k, expected, v
                )));
            }
        }

        Ok(())
    }
}

//------------------------------------------
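
// Identifies a bitmap that holds leaked blocks, so auto-repair can
// rewrite it later.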
struct BitmapLeak {
    blocknr: u64, // blocknr for the first entry in the bitmap
    loc: u64,     // location of the bitmap
}

// This checks the space map and returns any leaked blocks for auto-repair to process.
fn check_space_map(
    ctx: &Context,
    kind: &str,
    entries: Vec<IndexEntry>,
    metadata_sm: Option<ASpaceMap>,
    sm: ASpaceMap,
    root: SMRoot,
) -> Result<Vec<BitmapLeak>> {
    let report = ctx.report.clone();
    let engine = ctx.engine.clone();

    let sm = sm.lock().unwrap();

    // Overflow btree: ref counts too large for a bitmap entry (>= 3)
    // live in a separate btree, keyed by block number.
    {
        let v = OverflowChecker::new(&*sm);

        let w;
        if metadata_sm.is_none() {
            w = BTreeWalker::new(engine.clone(), false);
        } else {
            w = BTreeWalker::new_with_sm(engine.clone(), metadata_sm.unwrap().clone(), false)?;
        }
        w.walk(&v, root.ref_count_root)?;
    }

    let mut blocks = Vec::with_capacity(entries.len());
    for i in &entries {
        blocks.push(i.blocknr);
    }

    // FIXME: we should do this in batches
    let blocks = engine.read_many(&blocks)?;

    let mut leaks = 0;
    let mut blocknr = 0;
    let mut bitmap_leaks = Vec::new();
    for n in 0..entries.len() {
        let b = &blocks[n];
        match b {
            Err(_e) => {
                // FIXME: handle read errors instead of panicking.
                todo!();
            }
            Ok(b) => {
                if checksum::metadata_block_type(b.get_data()) != checksum::BT::BITMAP {
                    report.fatal(&format!(
                        "Index entry points to block ({}) that isn't a bitmap",
                        b.loc
                    ));
                }

                let bitmap = unpack::<Bitmap>(b.get_data())?;
                let first_blocknr = blocknr;
                let mut contains_leak = false;
                for e in bitmap.entries.iter() {
                    if blocknr >= root.nr_blocks {
                        break;
                    }

                    match e {
                        BitmapEntry::Small(actual) => {
                            let expected = sm.get(blocknr)?;
                            if *actual == 1 && expected == 0 {
                                leaks += 1;
                                contains_leak = true;
                            } else if *actual != expected as u8 {
                                report.fatal(&format!(
                                    "Bad reference count for {} block {}. Expected {}, but space map contains {}.",
                                    kind, blocknr, expected, actual
                                ));
                            }
                        }
                        BitmapEntry::Overflow => {
                            let expected = sm.get(blocknr)?;
                            if expected < 3 {
                                report.fatal(&format!(
                                    "Bad reference count for {} block {}. Expected {}, but space map says it's >= 3.",
                                    kind, blocknr, expected
                                ));
                            }
                        }
                    }
                    blocknr += 1;
                }
                if contains_leak {
                    bitmap_leaks.push(BitmapLeak {
                        blocknr: first_blocknr,
                        loc: b.loc,
                    });
                }
            }
        }
    }

    if leaks > 0 {
        report.non_fatal(&format!("{} {} blocks have leaked.", leaks, kind));
    }

    Ok(bitmap_leaks)
}

// This assumes the only errors in the space map are leaks.  Entries should
// just be those that contain leaks.
fn repair_space_map(ctx: &Context, entries: Vec<BitmapLeak>, sm: ASpaceMap) -> Result<()> {
    let engine = ctx.engine.clone();

    let sm = sm.lock().unwrap();

    let mut blocks = Vec::with_capacity(entries.len());
    for i in &entries {
        blocks.push(i.loc);
    }

    // FIXME: we should do this in batches
    let rblocks = engine.read_many(&blocks)?;
    let mut write_blocks = Vec::new();

    for (i, rb) in rblocks.into_iter().enumerate() {
        match rb {
            Err(_) => {
                // FIXME: handle read errors instead of panicking.
                todo!();
            }
            Ok(b) => {
                let be = &entries[i];
                let mut blocknr = be.blocknr;
                let mut bitmap = unpack::<Bitmap>(b.get_data())?;
                for e in bitmap.entries.iter_mut() {
                    if blocknr >= sm.get_nr_blocks()? {
                        break;
                    }

                    // A bit set in the bitmap that the space map says is
                    // unused is a leak; clear it.
                    if let BitmapEntry::Small(actual) = e {
                        let expected = sm.get(blocknr)?;
                        if *actual == 1 && expected == 0 {
                            *e = BitmapEntry::Small(0);
                        }
                    }

                    blocknr += 1;
                }

                let mut out = Cursor::new(b.get_data());
                bitmap.pack(&mut out)?;
                checksum::write_checksum(b.get_data(), checksum::BT::BITMAP)?;

                write_blocks.push(b);
            }
        }
    }

    engine.write_many(&write_blocks)?;
    Ok(())
}

//------------------------------------------
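
// Small helpers for bumping ref counts in a shared space map.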
fn inc_entries(sm: &ASpaceMap, entries: &[IndexEntry]) -> Result<()> {
    let mut sm = sm.lock().unwrap();
    for ie in entries {
        sm.inc(ie.blocknr, 1)?;
    }
    Ok(())
}

fn inc_superblock(sm: &ASpaceMap) -> Result<()> {
    let mut sm = sm.lock().unwrap();
    sm.inc(SUPERBLOCK_LOCATION, 1)?;
    Ok(())
}

//------------------------------------------
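
// Cap on the number of IOs the async engine keeps in flight.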
const MAX_CONCURRENT_IO: u32 = 1024;

pub struct ThinCheckOptions<'a> {
    pub dev: &'a Path,
    pub async_io: bool,
    pub ignore_non_fatal: bool,
    pub auto_repair: bool,
    pub report: Arc<Report>,
}
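
// Spawns a thread that periodically reports progress as the percentage of
// metadata blocks seen so far, and returns the join handle together with
// a shared flag used to stop the thread.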
fn spawn_progress_thread(
    sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
    nr_allocated_metadata: u64,
    report: Arc<Report>,
) -> Result<(JoinHandle<()>, Arc<Mutex<bool>>)> {
    let tid;
    let stop_progress = Arc::new(Mutex::new(false));

    {
        let stop_progress = stop_progress.clone();
        tid = thread::spawn(move || {
            let interval = std::time::Duration::from_millis(250);
            loop {
                {
                    let stop_progress = stop_progress.lock().unwrap();
                    if *stop_progress {
                        break;
                    }
                }

                let sm = sm.lock().unwrap();
                let mut n = sm.get_nr_allocated().unwrap();
                drop(sm);

                // Percentage of the expected metadata visited so far.
                n *= 100;
                n /= nr_allocated_metadata;

                let _r = report.progress(n as u8);
                thread::sleep(interval);
            }
        });
    }

    Ok((tid, stop_progress))
}
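
// State shared by the various phases of the check.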
struct Context {
    report: Arc<Report>,
    engine: Arc<dyn IoEngine + Send + Sync>,
    pool: ThreadPool,
}

// Check the mappings, filling in the data_sm as we go.
fn check_mapping_bottom_level(
    ctx: &Context,
    metadata_sm: &Arc<Mutex<dyn SpaceMap + Send + Sync>>,
    data_sm: &Arc<Mutex<dyn SpaceMap + Send + Sync>>,
    roots: &BTreeMap<u64, u64>,
) -> Result<()> {
    ctx.report.set_sub_title("mapping tree");

    let w = Arc::new(BTreeWalker::new_with_sm(
        ctx.engine.clone(),
        metadata_sm.clone(),
        false,
    )?);

    if roots.len() > 64000 {
        ctx.report.info("spreading load across devices");
        for (_thin_id, root) in roots {
            let data_sm = data_sm.clone();
            let root = *root;
            let v = BottomLevelVisitor { data_sm };
            let w = w.clone();
            ctx.pool.execute(move || {
                // FIXME: propagate errors + share failures.
                let _r = w.walk(&v, root);
            });
        }
        ctx.pool.join();
    } else {
        ctx.report.info("spreading load within device");
        for (_thin_id, root) in roots {
            let w = w.clone();
            let data_sm = data_sm.clone();
            let root = *root;
            let v = Arc::new(BottomLevelVisitor { data_sm });
            // FIXME: propagate errors + share failures.
            walk_threaded(w, &ctx.pool, v, root)?;
        }
    }

    Ok(())
}
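
// Chooses an IO engine (async or sync) and sizes the thread pool to match.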
fn mk_context(opts: &ThinCheckOptions) -> Result<Context> {
    let engine: Arc<dyn IoEngine + Send + Sync>;
    let nr_threads;

    if opts.async_io {
        nr_threads = std::cmp::min(4, num_cpus::get());
        engine = Arc::new(AsyncIoEngine::new(
            opts.dev,
            MAX_CONCURRENT_IO,
            opts.auto_repair,
        )?);
    } else {
        nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        engine = Arc::new(SyncIoEngine::new(opts.dev, nr_threads, opts.auto_repair)?);
    }
    let pool = ThreadPool::new(nr_threads);

    Ok(Context {
        report: opts.report.clone(),
        engine,
        pool,
    })
}
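
// Stops the check early if a fatal error has already been reported.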
fn bail_out(ctx: &Context, task: &str) -> Result<()> {
    use ReportOutcome::*;

    match ctx.report.get_outcome() {
        Fatal => Err(anyhow!("Check of {} failed, ending check early.", task)),
        _ => Ok(()),
    }
}
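
// The top-level check: superblock, device details, mapping trees, then
// the data and metadata space maps, optionally repairing any leaks found.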
pub fn check(opts: ThinCheckOptions) -> Result<()> {
    let ctx = mk_context(&opts)?;

    // FIXME: temporarily get these out
    let report = &ctx.report;
    let engine = &ctx.engine;

    report.set_title("Checking thin metadata");

    // superblock
    let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;
    let metadata_root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;

    // Device details.  We read this once to get the number of thin devices,
    // and hence the maximum metadata ref count.  Then we create the metadata
    // space map and reread, incrementing the ref counts for that metadata.
    let devs = btree_to_map::<DeviceDetail>(engine.clone(), false, sb.details_root)?;
    let nr_devs = devs.len();
    let metadata_sm = core_sm(engine.get_nr_blocks(), nr_devs as u32);
    inc_superblock(&metadata_sm)?;
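
    // Reread the device details, this time recording the btree blocks we
    // visit in the metadata space map.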
    report.set_sub_title("device details tree");
    let _devs = btree_to_map_with_sm::<DeviceDetail>(
        engine.clone(),
        metadata_sm.clone(),
        false,
        sb.details_root,
    )?;

    let (tid, stop_progress) = spawn_progress_thread(
        metadata_sm.clone(),
        metadata_root.nr_allocated,
        report.clone(),
    )?;
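
    // The top level of the mapping tree maps thin device id to the root
    // of that device's bottom-level mapping tree.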
    report.set_sub_title("mapping tree");
    let roots =
        btree_to_map_with_sm::<u64>(engine.clone(), metadata_sm.clone(), false, sb.mapping_root)?;

    // mapping bottom level
    let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
    let data_sm = core_sm(root.nr_blocks, nr_devs as u32);
    check_mapping_bottom_level(&ctx, &metadata_sm, &data_sm, &roots)?;
    bail_out(&ctx, "mapping tree")?;
    eprintln!("checked mapping");

    report.set_sub_title("data space map");
    let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;

    let entries = btree_to_map_with_sm::<IndexEntry>(
        engine.clone(),
        metadata_sm.clone(),
        false,
        root.bitmap_root,
    )?;
    let entries: Vec<IndexEntry> = entries.values().cloned().collect();
    inc_entries(&metadata_sm, &entries[0..])?;

    let data_leaks = check_space_map(
        &ctx,
        "data",
        entries,
        Some(metadata_sm.clone()),
        data_sm.clone(),
        root,
    )?;
    bail_out(&ctx, "data space map")?;

    report.set_sub_title("metadata space map");
    let root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
    let b = engine.read(root.bitmap_root)?;
    metadata_sm.lock().unwrap().inc(root.bitmap_root, 1)?;
    let entries = unpack::<MetadataIndex>(b.get_data())?.indexes;

    // Unused entries will point to block 0.
    let entries: Vec<IndexEntry> = entries
        .iter()
        .take_while(|e| e.blocknr != 0)
        .cloned()
        .collect();
    inc_entries(&metadata_sm, &entries[0..])?;

    // We call this for the side effect of incrementing the ref counts
    // for the metadata that holds the tree.
    let _counts = btree_to_map_with_sm::<u32>(
        engine.clone(),
        metadata_sm.clone(),
        false,
        root.ref_count_root,
    )?;

    // Now the counts should be correct, and we can check them.
    let metadata_leaks =
        check_space_map(&ctx, "metadata", entries, None, metadata_sm.clone(), root)?;
    bail_out(&ctx, "metadata space map")?;

    if opts.auto_repair {
        if !data_leaks.is_empty() {
            ctx.report.info("Repairing data leaks.");
            repair_space_map(&ctx, data_leaks, data_sm.clone())?;
        }

        if !metadata_leaks.is_empty() {
            ctx.report.info("Repairing metadata leaks.");
            repair_space_map(&ctx, metadata_leaks, metadata_sm.clone())?;
        }
    }

    {
        let mut stop_progress = stop_progress.lock().unwrap();
        *stop_progress = true;
    }
    let _ = tid.join();

    Ok(())
}

//------------------------------------------