Merge pull request #173 from mingnus/2021-05-12-cache-restore

cache_restore wip
Joe Thornber 2021-06-01 09:15:38 +01:00 committed by GitHub
commit b9df99fd6a
30 changed files with 1519 additions and 548 deletions

77
src/bin/cache_restore.rs Normal file

@ -0,0 +1,77 @@
extern crate clap;
extern crate thinp;
use atty::Stream;
use clap::{App, Arg};
use std::path::Path;
use std::process;
use std::process::exit;
use std::sync::Arc;
use thinp::cache::restore::{restore, CacheRestoreOptions};
use thinp::file_utils;
use thinp::report::*;
fn main() {
let parser = App::new("cache_restore")
.version(thinp::version::tools_version())
.about("Convert XML format metadata to binary.")
.arg(
Arg::with_name("OVERRIDE_MAPPING_ROOT")
.help("Specify a mapping root to use")
.long("override-mapping-root")
.value_name("OVERRIDE_MAPPING_ROOT")
.takes_value(true),
)
.arg(
Arg::with_name("INPUT")
.help("Specify the input xml")
.short("i")
.long("input")
.value_name("INPUT")
.required(true),
)
.arg(
Arg::with_name("OUTPUT")
.help("Specify the output device to check")
.short("o")
.long("output")
.value_name("OUTPUT")
.required(true),
)
.arg(
Arg::with_name("SYNC_IO")
.help("Force use of synchronous io")
.long("sync-io"),
);
let matches = parser.get_matches();
let input_file = Path::new(matches.value_of("INPUT").unwrap());
let output_file = Path::new(matches.value_of("OUTPUT").unwrap());
if !file_utils::file_exists(input_file) {
eprintln!("Couldn't find input file '{:?}'.", &input_file);
exit(1);
}
let report;
if matches.is_present("QUIET") {
report = std::sync::Arc::new(mk_quiet_report());
} else if atty::is(Stream::Stdout) {
report = std::sync::Arc::new(mk_progress_bar_report());
} else {
report = Arc::new(mk_simple_report());
}
let opts = CacheRestoreOptions {
input: &input_file,
output: &output_file,
async_io: !matches.is_present("SYNC_IO"),
report,
};
if let Err(reason) = restore(opts) {
println!("{}", reason);
process::exit(1);
}
}
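
The same entry point can be driven from library code without the CLI wrapper. A minimal sketch, using the types shown above; the paths here are hypothetical stand-ins:

use std::path::Path;
use std::sync::Arc;
use thinp::cache::restore::{restore, CacheRestoreOptions};
use thinp::report::mk_simple_report;

fn restore_from_xml() -> anyhow::Result<()> {
    // Equivalent to: cache_restore -i meta.xml -o /dev/mapper/cmeta --sync-io
    let opts = CacheRestoreOptions {
        input: Path::new("meta.xml"),           // hypothetical input path
        output: Path::new("/dev/mapper/cmeta"), // hypothetical metadata device
        async_io: false,                        // i.e. --sync-io
        report: Arc::new(mk_simple_report()),
    };
    restore(opts)?;
    Ok(())
}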

7
src/cache/check.rs vendored

@ -12,7 +12,7 @@ use crate::pdata::array_walker::*;
use crate::pdata::bitset::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_checker::*;
use crate::pdata::space_map_disk::*;
use crate::pdata::space_map_common::*;
use crate::pdata::unpack::unpack;
use crate::report::*;
@ -274,8 +274,11 @@ pub fn check(opts: CacheCheckOptions) -> anyhow::Result<()> {
return Ok(());
}
// The discard bitset is optional and could be updated during device suspension.
// Restored metadata therefore comes with a zero-sized discard bitset,
// along with zeroed discard_block_size and discard_nr_blocks.
let nr_origin_blocks;
if sb.flags.clean_shutdown {
if sb.flags.clean_shutdown && sb.discard_block_size > 0 && sb.discard_nr_blocks > 0 {
let origin_sectors = sb.discard_block_size * sb.discard_nr_blocks;
nr_origin_blocks = Some(origin_sectors / sb.data_block_size as u64);
} else {

63
src/cache/dump.rs vendored

@ -10,6 +10,7 @@ use crate::cache::xml::{self, MetadataVisitor};
use crate::io_engine::{AsyncIoEngine, IoEngine, SyncIoEngine};
use crate::pdata::array::{self, ArrayBlock};
use crate::pdata::array_walker::*;
use crate::pdata::bitset::{read_bitset, CheckedBitSet};
//------------------------------------------
@ -78,49 +79,12 @@ mod format1 {
mod format2 {
use super::*;
//-------------------
// Dirty bitset visitor
pub struct DirtyVisitor {
nr_entries: usize,
bits: Mutex<FixedBitSet>,
}
impl DirtyVisitor {
pub fn new(nr_entries: usize) -> Self {
DirtyVisitor {
nr_entries, // number of bits
bits: Mutex::new(FixedBitSet::with_capacity(nr_entries)),
}
}
pub fn get_bits(self) -> FixedBitSet {
self.bits.into_inner().unwrap()
}
}
impl ArrayVisitor<u64> for DirtyVisitor {
fn visit(&self, index: u64, b: ArrayBlock<u64>) -> array::Result<()> {
let mut pos = (index as usize * (b.header.max_entries as usize)) << 6;
for bits in b.values.iter() {
for bi in 0..64u64 {
if pos >= self.nr_entries {
break;
}
self.bits.lock().unwrap().set(pos, bits & (1 << bi) != 0);
pos += 1;
}
}
Ok(())
}
}
//-------------------
// Mapping visitor
struct Inner<'a> {
visitor: &'a mut dyn MetadataVisitor,
dirty_bits: FixedBitSet,
dirty_bits: CheckedBitSet,
valid_mappings: FixedBitSet,
}
@ -131,7 +95,7 @@ mod format2 {
impl<'a> MappingEmitter<'a> {
pub fn new(
nr_entries: usize,
dirty_bits: FixedBitSet,
dirty_bits: CheckedBitSet,
visitor: &'a mut dyn MetadataVisitor,
) -> MappingEmitter<'a> {
MappingEmitter {
@ -159,7 +123,13 @@ mod format2 {
}
let mut inner = self.inner.lock().unwrap();
let dirty = inner.dirty_bits.contains(cblock as usize);
let dirty;
if let Some(bit) = inner.dirty_bits.contains(cblock as usize) {
dirty = bit;
} else {
// default to dirty if the bitset is damaged
dirty = true;
}
let m = xml::Map {
cblock,
oblock: map.oblock,
@ -266,17 +236,20 @@ fn dump_metadata(ctx: &Context, sb: &Superblock, _repair: bool) -> anyhow::Resul
}
2 => {
// We need to walk the dirty bitset first.
let w = ArrayWalker::new(engine.clone(), false);
let mut v = format2::DirtyVisitor::new(sb.cache_blocks as usize);
let dirty_bits;
if let Some(root) = sb.dirty_root {
w.walk(&mut v, root)?;
let (bits, errs) =
read_bitset(engine.clone(), root, sb.cache_blocks as usize, false);
// TODO: allow errors in repair mode
if errs.is_some() {
return Err(anyhow!("errors in bitset {}", errs.unwrap()));
}
dirty_bits = bits;
} else {
// FIXME: is there a way this can legally happen? eg,
// a crash of a freshly created cache?
return Err(anyhow!("format 2 selected, but no dirty bitset present"));
}
let dirty_bits = v.get_bits();
let w = ArrayWalker::new(engine.clone(), false);
let mut emitter =

17
src/cache/hint.rs vendored

@ -1,3 +1,5 @@
use anyhow::Result;
use byteorder::WriteBytesExt;
use nom::IResult;
use std::convert::TryInto;
@ -26,4 +28,19 @@ impl Unpack for Hint {
}
}
impl Pack for Hint {
fn pack<W: WriteBytesExt>(&self, data: &mut W) -> Result<()> {
for v in &self.hint {
data.write_u8(*v)?;
}
Ok(())
}
}
impl Default for Hint {
fn default() -> Self {
Hint { hint: [0; 4] }
}
}
//------------------------------------------

18
src/cache/mapping.rs vendored

@ -1,3 +1,5 @@
use anyhow::Result;
use byteorder::WriteBytesExt;
use nom::number::complete::*;
use nom::IResult;
@ -51,4 +53,20 @@ impl Unpack for Mapping {
}
}
impl Pack for Mapping {
fn pack<W: WriteBytesExt>(&self, data: &mut W) -> Result<()> {
let m: u64 = (self.oblock << 16) | self.flags as u64;
m.pack(data)
}
}
impl Default for Mapping {
fn default() -> Self {
Mapping {
oblock: 0,
flags: 0,
}
}
}
//------------------------------------------
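
The packed mapping is a single little-endian u64 with the origin block in the upper 48 bits and the flags in the low 16. A small sketch of the round trip; the decode side is not shown in this hunk, so it is assumed here to simply mirror the pack:

// Encode as in Mapping::pack: origin block in bits 16..64, flags in bits 0..16.
fn encode(oblock: u64, flags: u32) -> u64 {
    (oblock << 16) | flags as u64
}

// Assumed inverse, simply mirroring the encode above.
fn decode(word: u64) -> (u64, u32) {
    (word >> 16, (word & 0xffff) as u32)
}

fn mapping_round_trip() {
    let w = encode(1234, 1); // flags value illustrative only
    assert_eq!(decode(w), (1234, 1));
}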

1
src/cache/mod.rs vendored

@ -2,5 +2,6 @@ pub mod check;
pub mod dump;
pub mod hint;
pub mod mapping;
pub mod restore;
pub mod superblock;
pub mod xml;

293
src/cache/restore.rs vendored Normal file

@ -0,0 +1,293 @@
use anyhow::{anyhow, Result};
use std::convert::TryInto;
use std::fs::OpenOptions;
use std::io::Cursor;
use std::ops::Deref;
use std::path::Path;
use std::sync::Arc;
use crate::cache::hint::Hint;
use crate::cache::mapping::{Mapping, MappingFlags};
use crate::cache::superblock::*;
use crate::cache::xml::{self, MetadataVisitor, Visit};
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::array_builder::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_metadata::*;
use crate::pdata::unpack::Pack;
use crate::report::*;
use crate::write_batcher::*;
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
//------------------------------------------
pub struct CacheRestoreOptions<'a> {
pub input: &'a Path,
pub output: &'a Path,
pub async_io: bool,
pub report: Arc<Report>,
}
struct Context {
_report: Arc<Report>,
engine: Arc<dyn IoEngine + Send + Sync>,
}
fn mk_context(opts: &CacheRestoreOptions) -> anyhow::Result<Context> {
let engine: Arc<dyn IoEngine + Send + Sync>;
if opts.async_io {
engine = Arc::new(AsyncIoEngine::new(opts.output, MAX_CONCURRENT_IO, true)?);
} else {
let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
engine = Arc::new(SyncIoEngine::new(opts.output, nr_threads, true)?);
}
Ok(Context {
_report: opts.report.clone(),
engine,
})
}
//------------------------------------------
struct RestoreResult {
sb: xml::Superblock,
mapping_root: u64,
dirty_root: Option<u64>,
hint_root: u64,
discard_root: u64,
}
struct Restorer<'a> {
write_batcher: &'a mut WriteBatcher,
sb: Option<xml::Superblock>,
mapping_builder: Option<ArrayBuilder<Mapping>>,
dirty_builder: Option<ArrayBuilder<u64>>,
hint_builder: Option<ArrayBuilder<Hint>>,
mapping_root: Option<u64>,
dirty_root: Option<u64>,
hint_root: Option<u64>,
discard_root: Option<u64>,
dirty_bits: (u32, u64),
}
impl<'a> Restorer<'a> {
fn new(w: &'a mut WriteBatcher) -> Restorer<'a> {
Restorer {
write_batcher: w,
sb: None,
mapping_builder: None,
dirty_builder: None,
hint_builder: None,
mapping_root: None,
dirty_root: None,
hint_root: None,
discard_root: None,
dirty_bits: (0, 0),
}
}
fn get_result(self) -> Result<RestoreResult> {
self.write_batcher.flush()?;
if self.sb.is_none() || self.discard_root.is_none() {
return Err(anyhow!("No superblock found in xml file"));
}
if self.mapping_root.is_none() || self.hint_root.is_none() {
return Err(anyhow!("No mappings or hints sections in xml file"));
}
Ok(RestoreResult {
sb: self.sb.unwrap(),
mapping_root: self.mapping_root.unwrap(),
dirty_root: self.dirty_root,
hint_root: self.hint_root.unwrap(),
discard_root: self.discard_root.unwrap(),
})
}
}
impl<'a> MetadataVisitor for Restorer<'a> {
fn superblock_b(&mut self, sb: &xml::Superblock) -> Result<Visit> {
self.sb = Some(sb.clone());
self.write_batcher.alloc()?;
self.mapping_builder = Some(ArrayBuilder::new(sb.nr_cache_blocks as u64));
self.dirty_builder = Some(ArrayBuilder::new(div_up(sb.nr_cache_blocks as u64, 64)));
self.hint_builder = Some(ArrayBuilder::new(sb.nr_cache_blocks as u64));
let discard_builder = ArrayBuilder::<u64>::new(0); // discard bitset is optional
self.discard_root = Some(discard_builder.complete(self.write_batcher)?);
Ok(Visit::Continue)
}
fn superblock_e(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
fn mappings_b(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
fn mappings_e(&mut self) -> Result<Visit> {
let mut mapping_builder = None;
std::mem::swap(&mut self.mapping_builder, &mut mapping_builder);
if let Some(builder) = mapping_builder {
self.mapping_root = Some(builder.complete(self.write_batcher)?);
}
// push the buffered trailing bits
let b = self.dirty_builder.as_mut().unwrap();
b.push_value(
self.write_batcher,
self.dirty_bits.0 as u64,
self.dirty_bits.1,
)?;
let mut dirty_builder = None;
std::mem::swap(&mut self.dirty_builder, &mut dirty_builder);
if let Some(builder) = dirty_builder {
self.dirty_root = Some(builder.complete(self.write_batcher)?);
}
Ok(Visit::Continue)
}
fn mapping(&mut self, m: &xml::Map) -> Result<Visit> {
let map = Mapping {
oblock: m.oblock,
flags: MappingFlags::Valid as u32,
};
let mapping_builder = self.mapping_builder.as_mut().unwrap();
mapping_builder.push_value(self.write_batcher, m.cblock as u64, map)?;
if m.dirty {
let index = m.cblock >> 6;
let bi = m.cblock & 63;
if index == self.dirty_bits.0 {
self.dirty_bits.1 |= 1 << bi;
} else {
let dirty_builder = self.dirty_builder.as_mut().unwrap();
dirty_builder.push_value(
self.write_batcher,
self.dirty_bits.0 as u64,
self.dirty_bits.1,
)?;
self.dirty_bits.0 = index;
self.dirty_bits.1 = 1 << bi;
}
}
Ok(Visit::Continue)
}
fn hints_b(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
fn hints_e(&mut self) -> Result<Visit> {
let mut hint_builder = None;
std::mem::swap(&mut self.hint_builder, &mut hint_builder);
if let Some(builder) = hint_builder {
self.hint_root = Some(builder.complete(self.write_batcher)?);
}
Ok(Visit::Continue)
}
fn hint(&mut self, h: &xml::Hint) -> Result<Visit> {
let hint = Hint {
hint: h.data[..].try_into().unwrap(),
};
let hint_builder = self.hint_builder.as_mut().unwrap();
hint_builder.push_value(self.write_batcher, h.cblock as u64, hint)?;
Ok(Visit::Continue)
}
fn discards_b(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
fn discards_e(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
fn discard(&mut self, _d: &xml::Discard) -> Result<Visit> {
Ok(Visit::Continue)
}
fn eof(&mut self) -> Result<Visit> {
Ok(Visit::Continue)
}
}
//------------------------------------------
fn build_metadata_sm(w: &mut WriteBatcher) -> Result<Vec<u8>> {
let mut sm_root = vec![0u8; SPACE_MAP_ROOT_SIZE];
let mut cur = Cursor::new(&mut sm_root);
let sm_without_meta = clone_space_map(w.sm.lock().unwrap().deref())?;
let r = write_metadata_sm(w, sm_without_meta.deref())?;
r.pack(&mut cur)?;
Ok(sm_root)
}
//------------------------------------------
pub fn restore(opts: CacheRestoreOptions) -> Result<()> {
let input = OpenOptions::new()
.read(true)
.write(false)
.open(opts.input)?;
let ctx = mk_context(&opts)?;
let sm = core_sm(ctx.engine.get_nr_blocks(), u32::MAX);
let mut w = WriteBatcher::new(ctx.engine.clone(), sm.clone(), ctx.engine.get_batch_size());
// build cache mappings
let mut restorer = Restorer::new(&mut w);
xml::read(input, &mut restorer)?;
let result = restorer.get_result()?;
// build metadata space map
let metadata_sm_root = build_metadata_sm(&mut w)?;
let sb = Superblock {
flags: SuperblockFlags {
clean_shutdown: true,
needs_check: false,
},
block: SUPERBLOCK_LOCATION,
version: 2,
policy_name: result.sb.policy.as_bytes().to_vec(),
policy_version: vec![2, 0, 0],
policy_hint_size: result.sb.hint_width,
metadata_sm_root,
mapping_root: result.mapping_root,
dirty_root: result.dirty_root,
hint_root: result.hint_root,
discard_root: result.discard_root,
discard_block_size: 0,
discard_nr_blocks: 0,
data_block_size: result.sb.block_size,
cache_blocks: result.sb.nr_cache_blocks,
compat_flags: 0,
compat_ro_flags: 0,
incompat_flags: 0,
read_hits: 0,
read_misses: 0,
write_hits: 0,
write_misses: 0,
};
write_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION, &sb)?;
Ok(())
}
//------------------------------------------
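
In Restorer::mapping the dirty bits are buffered one 64-bit word at a time: cblock >> 6 selects the word that eventually goes into the dirty ArrayBuilder, and cblock & 63 the bit within it. A standalone sketch of that bookkeeping; the real code flushes through the WriteBatcher, here the flush just collects words:

// Sketch of the dirty-bit buffering in Restorer::mapping.  Bits for 64
// consecutive cache blocks accumulate in one u64 before being handed to the
// dirty-bitset ArrayBuilder; here the hand-off just collects the words.
struct DirtyBuffer {
    current: (u32, u64),      // (word index, bits gathered so far)
    flushed: Vec<(u32, u64)>, // words that would go to the builder
}

impl DirtyBuffer {
    fn new() -> Self {
        DirtyBuffer { current: (0, 0), flushed: Vec::new() }
    }

    fn set_dirty(&mut self, cblock: u32) {
        let index = cblock >> 6; // which 64-bit word
        let bi = cblock & 63;    // which bit within that word
        if index == self.current.0 {
            self.current.1 |= 1 << bi;
        } else {
            self.flushed.push(self.current);
            self.current = (index, 1 << bi);
        }
    }
}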


@ -1,14 +1,19 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{bytes::complete::*, number::complete::*, IResult};
use std::io::Cursor;
use crate::checksum::*;
use crate::io_engine::*;
//------------------------------------------
pub const SPACE_MAP_ROOT_SIZE: usize = 128;
pub const SUPERBLOCK_LOCATION: u64 = 0;
const MAGIC: u64 = 0o6142003; // 0x18c403 in hex
const POLICY_NAME_SIZE: usize = 16;
const SPACE_MAP_ROOT_SIZE: usize = 128;
const UUID_SIZE: usize = 16;
//------------------------------------------
@ -38,7 +43,6 @@ pub struct Superblock {
pub discard_nr_blocks: u64,
pub data_block_size: u32,
pub metadata_block_size: u32,
pub cache_blocks: u32,
pub compat_flags: u32,
@ -71,7 +75,7 @@ fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
let (i, discard_nr_blocks) = le_u64(i)?;
let (i, data_block_size) = le_u32(i)?;
let (i, metadata_block_size) = le_u32(i)?;
let (i, _metadata_block_size) = le_u32(i)?;
let (i, cache_blocks) = le_u32(i)?;
let (i, compat_flags) = le_u32(i)?;
@ -115,7 +119,6 @@ fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
discard_block_size,
discard_nr_blocks,
data_block_size,
metadata_block_size,
cache_blocks,
compat_flags,
compat_ro_flags,
@ -139,3 +142,79 @@ pub fn read_superblock(engine: &dyn IoEngine, loc: u64) -> Result<Superblock> {
}
//------------------------------------------
fn pack_superblock<W: WriteBytesExt>(sb: &Superblock, w: &mut W) -> Result<()> {
// checksum, which we don't know yet
w.write_u32::<LittleEndian>(0)?;
// flags
let mut flags: u32 = 0;
if sb.flags.clean_shutdown {
flags |= 0x1;
}
if sb.flags.needs_check {
flags |= 0x2;
}
w.write_u32::<LittleEndian>(flags)?;
w.write_u64::<LittleEndian>(sb.block)?;
w.write_all(&[0; UUID_SIZE])?;
w.write_u64::<LittleEndian>(MAGIC)?;
w.write_u32::<LittleEndian>(sb.version)?;
let mut policy_name = [0u8; POLICY_NAME_SIZE];
policy_name[..sb.policy_name.len()].copy_from_slice(&sb.policy_name[..]);
w.write_all(&policy_name)?;
w.write_u32::<LittleEndian>(sb.policy_hint_size)?;
w.write_all(&sb.metadata_sm_root)?;
w.write_u64::<LittleEndian>(sb.mapping_root)?;
w.write_u64::<LittleEndian>(sb.hint_root)?;
w.write_u64::<LittleEndian>(sb.discard_root)?;
w.write_u64::<LittleEndian>(sb.discard_block_size)?;
w.write_u64::<LittleEndian>(sb.discard_nr_blocks)?;
w.write_u32::<LittleEndian>(sb.data_block_size)?;
// metadata block size
w.write_u32::<LittleEndian>((BLOCK_SIZE >> SECTOR_SHIFT) as u32)?;
w.write_u32::<LittleEndian>(sb.cache_blocks)?;
w.write_u32::<LittleEndian>(sb.compat_flags)?;
w.write_u32::<LittleEndian>(sb.compat_ro_flags)?;
w.write_u32::<LittleEndian>(sb.incompat_flags)?;
w.write_u32::<LittleEndian>(sb.read_hits)?;
w.write_u32::<LittleEndian>(sb.read_misses)?;
w.write_u32::<LittleEndian>(sb.write_hits)?;
w.write_u32::<LittleEndian>(sb.write_misses)?;
w.write_u32::<LittleEndian>(sb.policy_version[0])?;
w.write_u32::<LittleEndian>(sb.policy_version[1])?;
w.write_u32::<LittleEndian>(sb.policy_version[2])?;
if sb.dirty_root.is_some() {
w.write_u64::<LittleEndian>(sb.dirty_root.unwrap())?;
}
Ok(())
}
pub fn write_superblock(engine: &dyn IoEngine, _loc: u64, sb: &Superblock) -> Result<()> {
let b = Block::zeroed(SUPERBLOCK_LOCATION);
// pack the superblock
{
let mut cursor = Cursor::new(b.get_data());
pack_superblock(sb, &mut cursor)?;
}
// calculate the checksum
write_checksum(b.get_data(), BT::CACHE_SUPERBLOCK)?;
// write
engine.write(&b)?;
Ok(())
}
//------------------------------------------
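
Related: the BT::CACHE_SUPERBLOCK salt used by write_checksum above feeds the block-type detection in checksum.rs later in this commit, where the stored checksum appears to be the crc of the block body XORed with a per-type salt. The crc details are not shown in these hunks, so the following is only a sketch of the implied relationship:

// Sketch only: the salted checksum scheme implied by metadata_block_type().
fn stored_checksum(body_crc: u32, salt: u32) -> u32 {
    body_crc ^ salt
}

fn recovered_salt(body_crc: u32, stored: u32) -> u32 {
    // Compare against CACHE_SUPERBLOCK_CSUM_XOR, BITMAP_CSUM_XOR, etc.
    body_crc ^ stored
}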

150
src/cache/xml.rs vendored

@ -1,10 +1,12 @@
use anyhow::Result;
use base64::encode;
use std::{borrow::Cow, fmt::Display, io::Write};
use anyhow::{anyhow, Result};
use base64::{decode, encode};
use std::io::{BufRead, BufReader};
use std::io::{Read, Write};
use quick_xml::events::attributes::Attribute;
use quick_xml::events::{BytesEnd, BytesStart, Event};
use quick_xml::Writer;
use quick_xml::{Reader, Writer};
use crate::xml::*;
//---------------------------------------
@ -73,18 +75,6 @@ impl<W: Write> XmlWriter<W> {
}
}
fn mk_attr_<'a, T: Display>(n: T) -> Cow<'a, [u8]> {
let str = format!("{}", n);
Cow::Owned(str.into_bytes())
}
fn mk_attr<T: Display>(key: &[u8], value: T) -> Attribute {
Attribute {
key,
value: mk_attr_(value),
}
}
impl<W: Write> MetadataVisitor for XmlWriter<W> {
fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit> {
let tag = b"superblock";
@ -176,3 +166,129 @@ impl<W: Write> MetadataVisitor for XmlWriter<W> {
Ok(Visit::Continue)
}
}
//------------------------------------------
fn parse_superblock(e: &BytesStart) -> Result<Superblock> {
let mut uuid: Option<String> = None;
let mut block_size: Option<u32> = None;
let mut nr_cache_blocks: Option<u32> = None;
let mut policy: Option<String> = None;
let mut hint_width: Option<u32> = None;
for a in e.attributes() {
let kv = a.unwrap();
match kv.key {
b"uuid" => uuid = Some(string_val(&kv)),
b"block_size" => block_size = Some(u32_val(&kv)?),
b"nr_cache_blocks" => nr_cache_blocks = Some(u32_val(&kv)?),
b"policy" => policy = Some(string_val(&kv)),
b"hint_width" => hint_width = Some(u32_val(&kv)?),
_ => return bad_attr("superblock", kv.key),
}
}
let tag = "cache";
Ok(Superblock {
uuid: check_attr(tag, "uuid", uuid)?,
block_size: check_attr(tag, "block_size", block_size)?,
nr_cache_blocks: check_attr(tag, "nr_cache_blocks", nr_cache_blocks)?,
policy: check_attr(tag, "policy", policy)?,
hint_width: check_attr(tag, "hint_width", hint_width)?,
})
}
fn parse_mapping(e: &BytesStart) -> Result<Map> {
let mut cblock: Option<u32> = None;
let mut oblock: Option<u64> = None;
let mut dirty: Option<bool> = None;
for a in e.attributes() {
let kv = a.unwrap();
match kv.key {
b"cache_block" => cblock = Some(u32_val(&kv)?),
b"origin_block" => oblock = Some(u64_val(&kv)?),
b"dirty" => dirty = Some(bool_val(&kv)?),
_ => return bad_attr("mapping", kv.key),
}
}
let tag = "mapping";
Ok(Map {
cblock: check_attr(tag, "cache_block", cblock)?,
oblock: check_attr(tag, "origin_block", oblock)?,
dirty: check_attr(tag, "dirty", dirty)?,
})
}
fn parse_hint(e: &BytesStart) -> Result<Hint> {
let mut cblock: Option<u32> = None;
let mut data: Option<Vec<u8>> = None;
for a in e.attributes() {
let kv = a.unwrap();
match kv.key {
b"cache_block" => cblock = Some(u32_val(&kv)?),
b"data" => data = Some(decode(bytes_val(&kv))?),
_ => return bad_attr("mapping", kv.key),
}
}
let tag = "hint";
Ok(Hint {
cblock: check_attr(tag, "cache_block", cblock)?,
data: check_attr(tag, "data", data)?,
})
}
fn handle_event<R, M>(reader: &mut Reader<R>, buf: &mut Vec<u8>, visitor: &mut M) -> Result<Visit>
where
R: Read + BufRead,
M: MetadataVisitor,
{
match reader.read_event(buf) {
Ok(Event::Start(ref e)) => match e.name() {
b"superblock" => visitor.superblock_b(&parse_superblock(e)?),
b"mappings" => visitor.mappings_b(),
b"hints" => visitor.hints_b(),
_ => todo!(),
},
Ok(Event::End(ref e)) => match e.name() {
b"superblock" => visitor.superblock_e(),
b"mappings" => visitor.mappings_e(),
b"hints" => visitor.hints_e(),
_ => todo!(),
},
Ok(Event::Empty(ref e)) => match e.name() {
b"mapping" => visitor.mapping(&parse_mapping(e)?),
b"hint" => visitor.hint(&parse_hint(e)?),
_ => todo!(),
},
Ok(Event::Text(_)) => Ok(Visit::Continue),
Ok(Event::Comment(_)) => Ok(Visit::Continue),
Ok(Event::Eof) => {
visitor.eof()?;
Ok(Visit::Stop)
}
Ok(_) => todo!(),
Err(e) => Err(anyhow!("{:?}", e)),
}
}
pub fn read<R, M>(input: R, visitor: &mut M) -> Result<()>
where
R: Read,
M: MetadataVisitor,
{
let input = BufReader::new(input);
let mut reader = Reader::from_reader(input);
reader.trim_text(true);
let mut buf = Vec::new();
while let Visit::Continue = handle_event(&mut reader, &mut buf, visitor)? {}
Ok(())
}
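
For reference, the shape of input this reader accepts, based on the tag and attribute names handled by parse_superblock, parse_mapping and parse_hint. Every value below is illustrative only, and the hint data is base64 for a 4-byte hint:

// Illustrative input only; attribute values are made up.
const EXAMPLE_XML: &str = r#"
<superblock uuid="" block_size="128" nr_cache_blocks="1024" policy="smq" hint_width="4">
  <mappings>
    <mapping cache_block="0" origin_block="1234" dirty="false"/>
  </mappings>
  <hints>
    <hint cache_block="0" data="AAAAAA=="/>
  </hints>
</superblock>
"#;
// e.g. xml::read(EXAMPLE_XML.as_bytes(), &mut some_visitor)?;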


@ -5,8 +5,9 @@ use crc32c::crc32c;
use std::io::Cursor;
const BLOCK_SIZE: u64 = 4096;
#[allow(dead_code)]
const SUPERBLOCK_CSUM_XOR: u32 = 160774;
const THIN_SUPERBLOCK_CSUM_XOR: u32 = 160774;
const CACHE_SUPERBLOCK_CSUM_XOR: u32 = 9031977;
const ERA_SUPERBLOCK_CSUM_XOR: u32 = 146538381;
const BITMAP_CSUM_XOR: u32 = 240779;
const INDEX_CSUM_XOR: u32 = 160478;
const BTREE_CSUM_XOR: u32 = 121107;
@ -18,8 +19,11 @@ fn checksum(buf: &[u8]) -> u32 {
#[derive(Debug, PartialEq)]
#[allow(clippy::upper_case_acronyms)]
#[allow(non_camel_case_types)]
pub enum BT {
SUPERBLOCK,
THIN_SUPERBLOCK,
CACHE_SUPERBLOCK,
ERA_SUPERBLOCK,
NODE,
INDEX,
BITMAP,
@ -39,7 +43,9 @@ pub fn metadata_block_type(buf: &[u8]) -> BT {
let btype = csum ^ sum_on_disk;
match btype {
SUPERBLOCK_CSUM_XOR => BT::SUPERBLOCK,
THIN_SUPERBLOCK_CSUM_XOR => BT::THIN_SUPERBLOCK,
CACHE_SUPERBLOCK_CSUM_XOR => BT::CACHE_SUPERBLOCK,
ERA_SUPERBLOCK_CSUM_XOR => BT::ERA_SUPERBLOCK,
BTREE_CSUM_XOR => BT::NODE,
BITMAP_CSUM_XOR => BT::BITMAP,
INDEX_CSUM_XOR => BT::INDEX,
@ -55,7 +61,9 @@ pub fn write_checksum(buf: &mut [u8], kind: BT) -> Result<()> {
use BT::*;
let salt = match kind {
SUPERBLOCK => SUPERBLOCK_CSUM_XOR,
THIN_SUPERBLOCK => THIN_SUPERBLOCK_CSUM_XOR,
CACHE_SUPERBLOCK => CACHE_SUPERBLOCK_CSUM_XOR,
ERA_SUPERBLOCK => ERA_SUPERBLOCK_CSUM_XOR,
NODE => BTREE_CSUM_XOR,
BITMAP => BITMAP_CSUM_XOR,
INDEX => INDEX_CSUM_XOR,


@ -17,6 +17,7 @@ use crate::file_utils;
//------------------------------------------
pub const BLOCK_SIZE: usize = 4096;
pub const SECTOR_SHIFT: usize = 9;
const ALIGN: usize = 4096;
#[derive(Clone, Debug)]


@ -27,3 +27,4 @@ pub mod shrink;
pub mod thin;
pub mod version;
pub mod write_batcher;
pub mod xml;


@ -1,7 +1,47 @@
pub fn div_up(v: usize, divisor: usize) -> usize {
v / divisor + (v % divisor != 0) as usize
use std::cmp::PartialEq;
use std::ops::{Add, Div, Rem};
//-----------------------------------------
pub trait Integer:
Sized + Copy + Add<Output = Self> + Div<Output = Self> + Rem<Output = Self> + PartialEq
{
fn zero() -> Self;
fn one() -> Self;
}
pub fn div_down(v: usize, divisor: usize) -> usize {
pub fn div_up<T: Integer>(v: T, divisor: T) -> T {
if v % divisor != Integer::zero() {
v / divisor + Integer::one()
} else {
v / divisor
}
}
pub fn div_down<T: Integer>(v: T, divisor: T) -> T {
v / divisor
}
//-----------------------------------------
impl Integer for usize {
fn zero() -> Self {
0
}
fn one() -> Self {
1
}
}
impl Integer for u64 {
fn zero() -> Self {
0
}
fn one() -> Self {
1
}
}
//-----------------------------------------
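
A quick illustration of the generic helpers; elsewhere in this commit the dirty bitset is sized with div_up(nr_cache_blocks, 64):

use crate::math::{div_down, div_up};

fn div_demo() {
    // 1000 cache blocks need 16 64-bit words for the dirty bitset.
    assert_eq!(div_up(1000u64, 64), 16);
    assert_eq!(div_down(1000u64, 64), 15);
    // Exact multiples do not round up.
    assert_eq!(div_up(1024u64, 64), 16);
}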


@ -205,7 +205,9 @@ where
fn pack_block<W: Write>(w: &mut W, kind: BT, buf: &[u8]) -> Result<()> {
match kind {
BT::SUPERBLOCK => pack_superblock(w, buf).context("unable to pack superblock")?,
BT::THIN_SUPERBLOCK | BT::CACHE_SUPERBLOCK | BT::ERA_SUPERBLOCK => {
pack_superblock(w, buf).context("unable to pack superblock")?
}
BT::NODE => pack_btree_node(w, buf).context("unable to pack btree node")?,
BT::INDEX => pack_index(w, buf).context("unable to pack space map index")?,
BT::BITMAP => pack_bitmap(w, buf).context("unable to pack space map bitmap")?,


@ -1,3 +1,4 @@
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{multi::count, number::complete::*, IResult};
use std::fmt;
use thiserror::Error;
@ -5,7 +6,7 @@ use thiserror::Error;
use crate::checksum;
use crate::io_engine::BLOCK_SIZE;
use crate::pdata::btree;
use crate::pdata::unpack::Unpack;
use crate::pdata::unpack::{Pack, Unpack};
//------------------------------------------
@ -44,11 +45,31 @@ impl Unpack for ArrayBlockHeader {
}
}
impl Pack for ArrayBlockHeader {
fn pack<W: WriteBytesExt>(&self, w: &mut W) -> anyhow::Result<()> {
// The csum is computed later over the whole metadata block; write a placeholder for now.
w.write_u32::<LittleEndian>(0)?;
w.write_u32::<LittleEndian>(self.max_entries)?;
w.write_u32::<LittleEndian>(self.nr_entries)?;
w.write_u32::<LittleEndian>(self.value_size)?;
w.write_u64::<LittleEndian>(self.blocknr)?;
Ok(())
}
}
//------------------------------------------
pub struct ArrayBlock<V: Unpack> {
pub header: ArrayBlockHeader,
pub values: Vec<V>,
}
impl<V: Unpack> ArrayBlock<V> {
pub fn set_block(&mut self, b: u64) {
self.header.blocknr = b;
}
}
//------------------------------------------
#[derive(Error, Clone, Debug)]

212
src/pdata/array_builder.rs Normal file

@ -0,0 +1,212 @@
use anyhow::Result;
use byteorder::WriteBytesExt;
use std::collections::VecDeque;
use std::io::Cursor;
use crate::checksum;
use crate::io_engine::*;
use crate::pdata::array::*;
use crate::pdata::btree_builder::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;
//------------------------------------------
pub struct ArrayBlockBuilder<V: Unpack + Pack> {
array_io: ArrayIO<V>,
max_entries_per_block: usize,
values: VecDeque<(u64, V)>,
array_blocks: Vec<u64>,
nr_entries: u64,
nr_emitted: u64,
nr_queued: u64,
}
pub struct ArrayBuilder<V: Unpack + Pack> {
block_builder: ArrayBlockBuilder<V>,
}
struct ArrayIO<V: Unpack + Pack> {
dummy: std::marker::PhantomData<V>,
}
struct WriteResult {
loc: u64,
}
//------------------------------------------
fn calc_max_entries<V: Unpack>() -> usize {
(BLOCK_SIZE - ArrayBlockHeader::disk_size() as usize) / V::disk_size() as usize
}
//------------------------------------------
impl<V: Unpack + Pack + Clone + Default> ArrayBlockBuilder<V> {
pub fn new(nr_entries: u64) -> ArrayBlockBuilder<V> {
ArrayBlockBuilder {
array_io: ArrayIO::new(),
max_entries_per_block: calc_max_entries::<V>(),
values: VecDeque::new(),
array_blocks: Vec::new(),
nr_entries,
nr_emitted: 0,
nr_queued: 0,
}
}
pub fn push_value(&mut self, w: &mut WriteBatcher, index: u64, v: V) -> Result<()> {
assert!(index >= self.nr_emitted + self.nr_queued);
assert!(index < self.nr_entries);
self.values.push_back((index, v));
self.nr_queued = index - self.nr_emitted + 1;
if self.nr_queued > self.max_entries_per_block as u64 {
self.emit_blocks(w)?;
}
Ok(())
}
pub fn complete(mut self, w: &mut WriteBatcher) -> Result<Vec<u64>> {
if self.nr_emitted + self.nr_queued < self.nr_entries {
// FIXME: flushing with default values looks confusing
self.push_value(w, self.nr_entries - 1, Default::default())?;
}
self.emit_all(w)?;
Ok(self.array_blocks)
}
/// Emit all the remaining queued values
fn emit_all(&mut self, w: &mut WriteBatcher) -> Result<()> {
match self.nr_queued {
0 => {
// There's nothing to emit
Ok(())
}
n if n <= self.max_entries_per_block as u64 => self.emit_values(w),
_ => {
panic!(
"There shouldn't be more than {} queued values",
self.max_entries_per_block
);
}
}
}
/// Emit one or more fully utilized array blocks
fn emit_blocks(&mut self, w: &mut WriteBatcher) -> Result<()> {
while self.nr_queued > self.max_entries_per_block as u64 {
self.emit_values(w)?;
}
Ok(())
}
/// Emit an array block with the queued values
fn emit_values(&mut self, w: &mut WriteBatcher) -> Result<()> {
let mut values = Vec::<V>::with_capacity(self.max_entries_per_block);
let mut nr_free = self.max_entries_per_block;
while !self.values.is_empty() && nr_free > 0 {
let len = self.values.front().unwrap().0 - self.nr_emitted + 1;
if len <= nr_free as u64 {
let (_, v) = self.values.pop_front().unwrap();
if len > 1 {
values.resize_with(values.len() + len as usize - 1, Default::default);
}
values.push(v);
nr_free -= len as usize;
self.nr_emitted += len;
self.nr_queued -= len;
} else {
values.resize_with(values.len() + nr_free as usize, Default::default);
self.nr_emitted += nr_free as u64;
self.nr_queued -= nr_free as u64;
nr_free = 0;
}
}
let wresult = self.array_io.write(w, values)?;
self.array_blocks.push(wresult.loc);
Ok(())
}
}
//------------------------------------------
impl<V: Unpack + Pack + Clone + Default> ArrayBuilder<V> {
pub fn new(nr_entries: u64) -> ArrayBuilder<V> {
ArrayBuilder {
block_builder: ArrayBlockBuilder::<V>::new(nr_entries),
}
}
pub fn push_value(&mut self, w: &mut WriteBatcher, index: u64, v: V) -> Result<()> {
self.block_builder.push_value(w, index, v)
}
pub fn complete(self, w: &mut WriteBatcher) -> Result<u64> {
let blocks = self.block_builder.complete(w)?;
let mut index_builder = Builder::<u64>::new(Box::new(NoopRC {}));
for (i, b) in blocks.iter().enumerate() {
index_builder.push_value(w, i as u64, *b)?;
}
index_builder.complete(w)
}
}
//------------------------------------------
impl<V: Unpack + Pack> ArrayIO<V> {
pub fn new() -> ArrayIO<V> {
ArrayIO {
dummy: std::marker::PhantomData,
}
}
fn write(&self, w: &mut WriteBatcher, values: Vec<V>) -> Result<WriteResult> {
let header = ArrayBlockHeader {
csum: 0,
max_entries: calc_max_entries::<V>() as u32,
nr_entries: values.len() as u32,
value_size: V::disk_size(),
blocknr: 0,
};
let ablock = ArrayBlock { header, values };
write_array_block(w, ablock)
}
}
fn write_array_block<V: Unpack + Pack>(
w: &mut WriteBatcher,
mut ablock: ArrayBlock<V>,
) -> Result<WriteResult> {
let b = w.alloc()?;
ablock.set_block(b.loc);
let mut cursor = Cursor::new(b.get_data());
pack_array_block(&ablock, &mut cursor)?;
let loc = b.loc;
w.write(b, checksum::BT::ARRAY)?;
Ok(WriteResult { loc })
}
fn pack_array_block<W: WriteBytesExt, V: Pack + Unpack>(
ablock: &ArrayBlock<V>,
w: &mut W,
) -> Result<()> {
ablock.header.pack(w)?;
for v in ablock.values.iter() {
v.pack(w)?;
}
Ok(())
}
//------------------------------------------
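
To make the sizing concrete: calc_max_entries divides the space left after the array block header by the packed value size. Assuming the header occupies 24 bytes on disk (the four u32s plus one u64 written by ArrayBlockHeader::pack earlier in this commit) and a packed Mapping is a single u64, the arithmetic works out as follows:

// Assumed sizes, matching the Pack impls in this commit: the array block
// header packs four u32s and a u64 (24 bytes), and a packed Mapping is one u64.
const METADATA_BLOCK_SIZE: usize = 4096;
const ARRAY_HEADER_SIZE: usize = 4 * 4 + 8;
const PACKED_MAPPING_SIZE: usize = 8;

fn max_mappings_per_array_block() -> usize {
    (METADATA_BLOCK_SIZE - ARRAY_HEADER_SIZE) / PACKED_MAPPING_SIZE // = 509
}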


@ -1,4 +1,5 @@
pub mod array;
pub mod array_builder;
pub mod array_walker;
pub mod bitset;
pub mod btree;
@ -8,5 +9,7 @@ pub mod btree_merge;
pub mod btree_walker;
pub mod space_map;
pub mod space_map_checker;
pub mod space_map_common;
pub mod space_map_disk;
pub mod space_map_metadata;
pub mod unpack;


@ -103,6 +103,7 @@ where
if self.counts[b as usize] == V::from(0u8) {
self.counts[b as usize] = V::from(1u8);
self.first_free = b + 1;
self.nr_allocated += 1;
return Ok(Some(b));
}
}
@ -132,6 +133,16 @@ pub fn core_sm_without_mutex(nr_entries: u64, max_count: u32) -> Box<dyn SpaceMa
}
}
// FIXME: replace it by using the Clone trait
pub fn clone_space_map(src: &dyn SpaceMap) -> Result<Box<dyn SpaceMap>> {
let nr_blocks = src.get_nr_blocks()?;
let mut dest = Box::new(CoreSpaceMap::<u32>::new(nr_blocks));
for i in 0..nr_blocks {
dest.set(i, src.get(i)?)?;
}
Ok(dest)
}
//------------------------------------------
// This in core space map can only count to one, useful when walking


@ -7,7 +7,8 @@ use crate::io_engine::IoEngine;
use crate::pdata::btree::{self, *};
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_disk::*;
use crate::pdata::space_map_common::*;
use crate::pdata::space_map_metadata::*;
use crate::pdata::unpack::*;
use crate::report::Report;


@ -0,0 +1,257 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use std::io::Cursor;
use crate::checksum;
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::btree_builder::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;
//------------------------------------------
pub const ENTRIES_PER_BITMAP: usize = WORDS_PER_BITMAP * 8 * ENTRIES_PER_BYTE;
const WORDS_PER_BITMAP: usize = (BLOCK_SIZE - 16) / 8;
const ENTRIES_PER_BYTE: usize = 4;
//------------------------------------------
#[derive(Clone, Copy, Debug)]
pub struct IndexEntry {
pub blocknr: u64,
pub nr_free: u32,
pub none_free_before: u32,
}
impl Unpack for IndexEntry {
fn disk_size() -> u32 {
16
}
fn unpack(i: &[u8]) -> IResult<&[u8], IndexEntry> {
let (i, blocknr) = le_u64(i)?;
let (i, nr_free) = le_u32(i)?;
let (i, none_free_before) = le_u32(i)?;
Ok((
i,
IndexEntry {
blocknr,
nr_free,
none_free_before,
},
))
}
}
impl Pack for IndexEntry {
fn pack<W: WriteBytesExt>(&self, w: &mut W) -> Result<()> {
w.write_u64::<LittleEndian>(self.blocknr)?;
w.write_u32::<LittleEndian>(self.nr_free)?;
w.write_u32::<LittleEndian>(self.none_free_before)?;
Ok(())
}
}
//------------------------------------------
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BitmapEntry {
Small(u8),
Overflow,
}
#[derive(Debug)]
pub struct Bitmap {
pub blocknr: u64,
pub entries: Vec<BitmapEntry>,
}
impl Unpack for Bitmap {
fn disk_size() -> u32 {
BLOCK_SIZE as u32
}
fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
let (i, _csum) = le_u32(data)?;
let (i, _not_used) = le_u32(i)?;
let (mut i, blocknr) = le_u64(i)?;
let header_size = 16;
let nr_words = (BLOCK_SIZE - header_size) / 8;
let mut entries = Vec::with_capacity(nr_words * 32);
for _w in 0..nr_words {
let (tmp, mut word) = le_u64(i)?;
for _b in 0..32 {
let val = word & 0x3;
word >>= 2;
// The bits are stored with the high bit at b * 2 + 1,
// and low at b * 2. So we have to interpret this val.
entries.push(match val {
0 => BitmapEntry::Small(0),
1 => BitmapEntry::Small(2),
2 => BitmapEntry::Small(1),
_ => BitmapEntry::Overflow,
});
}
i = tmp;
}
Ok((i, Bitmap { blocknr, entries }))
}
}
impl Pack for Bitmap {
fn pack<W: WriteBytesExt>(&self, out: &mut W) -> Result<()> {
use BitmapEntry::*;
out.write_u32::<LittleEndian>(0)?;
out.write_u32::<LittleEndian>(0)?;
out.write_u64::<LittleEndian>(self.blocknr)?;
for chunk in self.entries.chunks(32) {
let mut w = 0u64;
for e in chunk {
w >>= 2;
match e {
Small(0) => {}
Small(1) => {
w |= 0x2 << 62;
}
Small(2) => {
w |= 0x1 << 62;
}
Small(_) => {
return Err(anyhow!("Bad small value in bitmap entry"));
}
Overflow => {
w |= 0x3 << 62;
}
}
}
u64::pack(&w, out)?;
}
Ok(())
}
}
//------------------------------------------
#[derive(Debug)]
pub struct SMRoot {
pub nr_blocks: u64,
pub nr_allocated: u64,
pub bitmap_root: u64,
pub ref_count_root: u64,
}
impl Unpack for SMRoot {
fn disk_size() -> u32 {
32
}
fn unpack(i: &[u8]) -> IResult<&[u8], Self> {
let (i, nr_blocks) = le_u64(i)?;
let (i, nr_allocated) = le_u64(i)?;
let (i, bitmap_root) = le_u64(i)?;
let (i, ref_count_root) = le_u64(i)?;
Ok((
i,
SMRoot {
nr_blocks,
nr_allocated,
bitmap_root,
ref_count_root,
},
))
}
}
pub fn unpack_root(data: &[u8]) -> Result<SMRoot> {
match SMRoot::unpack(data) {
Err(_e) => Err(anyhow!("couldn't parse SMRoot")),
Ok((_i, v)) => Ok(v),
}
}
impl Pack for SMRoot {
fn pack<W: WriteBytesExt>(&self, w: &mut W) -> Result<()> {
w.write_u64::<LittleEndian>(self.nr_blocks)?;
w.write_u64::<LittleEndian>(self.nr_allocated)?;
w.write_u64::<LittleEndian>(self.bitmap_root)?;
w.write_u64::<LittleEndian>(self.ref_count_root)?;
Ok(())
}
}
//------------------------------------------
pub fn write_common(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<(Vec<IndexEntry>, u64)> {
use BitmapEntry::*;
let mut index_entries = Vec::new();
let mut overflow_builder: Builder<u32> = Builder::new(Box::new(NoopRC {}));
// how many bitmaps do we need?
for bm in 0..div_up(sm.get_nr_blocks()? as usize, ENTRIES_PER_BITMAP) {
let mut entries = Vec::with_capacity(ENTRIES_PER_BITMAP);
let mut first_free: Option<u32> = None;
let mut nr_free: u32 = 0;
for i in 0..ENTRIES_PER_BITMAP {
let b: u64 = ((bm * ENTRIES_PER_BITMAP) as u64) + i as u64;
if b >= sm.get_nr_blocks()? {
break;
}
let rc = sm.get(b)?;
let e = match rc {
0 => {
nr_free += 1;
if first_free.is_none() {
first_free = Some(i as u32);
}
Small(0)
}
1 => Small(1),
2 => Small(2),
_ => {
overflow_builder.push_value(w, b as u64, rc)?;
Overflow
}
};
entries.push(e);
}
// allocate a new block
let b = w.alloc()?;
let mut cursor = Cursor::new(b.get_data());
// write the bitmap to it
let blocknr = b.loc;
let bitmap = Bitmap { blocknr, entries };
bitmap.pack(&mut cursor)?;
w.write(b, checksum::BT::BITMAP)?;
// Insert into the index tree
let ie = IndexEntry {
blocknr,
nr_free,
none_free_before: first_free.unwrap_or(ENTRIES_PER_BITMAP as u32),
};
index_entries.push(ie);
}
let ref_count_root = overflow_builder.complete(w)?;
Ok((index_entries, ref_count_root))
}
//------------------------------------------
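
Reference counts in a space-map bitmap take two bits each, and the unpack/pack pair above uses an asymmetric code: 0b00 is count 0, 0b01 is count 2, 0b10 is count 1, and 0b11 means the count lives in the overflow btree. A small sketch of that translation table, extracted from the match arms above:

enum Entry {
    Small(u8),
    Overflow,
}

// Disk value -> entry, as in Bitmap::unpack.
fn from_disk(val: u64) -> Entry {
    match val & 0x3 {
        0 => Entry::Small(0),
        1 => Entry::Small(2),
        2 => Entry::Small(1),
        _ => Entry::Overflow,
    }
}

// Entry -> disk value, as in Bitmap::pack.
fn to_disk(e: &Entry) -> u64 {
    match e {
        Entry::Small(0) => 0x0,
        Entry::Small(1) => 0x2,
        Entry::Small(2) => 0x1,
        Entry::Small(_) => panic!("counts above 2 live in the overflow btree"),
        Entry::Overflow => 0x3,
    }
}

fn encoding_round_trip() {
    for v in 0..4u64 {
        assert_eq!(to_disk(&from_disk(v)), v);
    }
}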


@ -1,301 +1,11 @@
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use std::collections::BTreeMap;
use std::io::Cursor;
use anyhow::Result;
use crate::checksum;
use crate::io_engine::*;
use crate::math::*;
use crate::pdata::btree_builder::*;
use crate::pdata::space_map::*;
use crate::pdata::unpack::*;
use crate::pdata::space_map_common::*;
use crate::write_batcher::*;
//--------------------------------
const MAX_METADATA_BITMAPS: usize = 255;
// const MAX_METADATA_BLOCKS: u64 = 255 * ((1 << 14) - 64);
const ENTRIES_PER_BYTE: usize = 4;
const ENTRIES_PER_BITMAP: usize = WORDS_PER_BITMAP * 8 * ENTRIES_PER_BYTE;
//--------------------------------
#[derive(Clone, Copy, Debug)]
pub struct IndexEntry {
pub blocknr: u64,
pub nr_free: u32,
pub none_free_before: u32,
}
impl Unpack for IndexEntry {
fn disk_size() -> u32 {
16
}
fn unpack(i: &[u8]) -> IResult<&[u8], IndexEntry> {
let (i, blocknr) = le_u64(i)?;
let (i, nr_free) = le_u32(i)?;
let (i, none_free_before) = le_u32(i)?;
Ok((
i,
IndexEntry {
blocknr,
nr_free,
none_free_before,
},
))
}
}
impl Pack for IndexEntry {
fn pack<W: WriteBytesExt>(&self, w: &mut W) -> Result<()> {
w.write_u64::<LittleEndian>(self.blocknr)?;
w.write_u32::<LittleEndian>(self.nr_free)?;
w.write_u32::<LittleEndian>(self.none_free_before)?;
Ok(())
}
}
//--------------------------------
pub struct MetadataIndex {
pub blocknr: u64,
pub indexes: Vec<IndexEntry>,
}
impl Unpack for MetadataIndex {
fn disk_size() -> u32 {
BLOCK_SIZE as u32
}
fn unpack(i: &[u8]) -> IResult<&[u8], MetadataIndex> {
// FIXME: check the checksum
let (i, _csum) = le_u32(i)?;
let (i, _padding) = le_u32(i)?;
let (i, blocknr) = le_u64(i)?;
let (i, indexes) = nom::multi::count(IndexEntry::unpack, MAX_METADATA_BITMAPS)(i)?;
Ok((i, MetadataIndex { blocknr, indexes }))
}
}
impl Pack for MetadataIndex {
fn pack<W: WriteBytesExt>(&self, w: &mut W) -> Result<()> {
w.write_u32::<LittleEndian>(0)?; // csum
w.write_u32::<LittleEndian>(0)?; // padding
w.write_u64::<LittleEndian>(self.blocknr)?;
assert!(self.indexes.len() <= MAX_METADATA_BITMAPS);
for ie in &self.indexes {
ie.pack(w)?;
}
Ok(())
}
}
//--------------------------------
const WORDS_PER_BITMAP: usize = (BLOCK_SIZE - 16) / 8;
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum BitmapEntry {
Small(u8),
Overflow,
}
#[derive(Debug)]
pub struct Bitmap {
pub blocknr: u64,
pub entries: Vec<BitmapEntry>,
}
impl Unpack for Bitmap {
fn disk_size() -> u32 {
BLOCK_SIZE as u32
}
fn unpack(data: &[u8]) -> IResult<&[u8], Self> {
let (i, _csum) = le_u32(data)?;
let (i, _not_used) = le_u32(i)?;
let (mut i, blocknr) = le_u64(i)?;
let header_size = 16;
let nr_words = (BLOCK_SIZE - header_size) / 8;
let mut entries = Vec::with_capacity(nr_words * 32);
for _w in 0..nr_words {
let (tmp, mut word) = le_u64(i)?;
for _b in 0..32 {
let val = word & 0x3;
word >>= 2;
// The bits are stored with the high bit at b * 2 + 1,
// and low at b * 2. So we have to interpret this val.
entries.push(match val {
0 => BitmapEntry::Small(0),
1 => BitmapEntry::Small(2),
2 => BitmapEntry::Small(1),
_ => BitmapEntry::Overflow,
});
}
i = tmp;
}
Ok((i, Bitmap { blocknr, entries }))
}
}
impl Pack for Bitmap {
fn pack<W: WriteBytesExt>(&self, out: &mut W) -> Result<()> {
use BitmapEntry::*;
out.write_u32::<LittleEndian>(0)?;
out.write_u32::<LittleEndian>(0)?;
out.write_u64::<LittleEndian>(self.blocknr)?;
for chunk in self.entries.chunks(32) {
let mut w = 0u64;
for e in chunk {
w >>= 2;
match e {
Small(0) => {}
Small(1) => {
w |= 0x2 << 62;
}
Small(2) => {
w |= 0x1 << 62;
}
Small(_) => {
return Err(anyhow!("Bad small value in bitmap entry"));
}
Overflow => {
w |= 0x3 << 62;
}
}
}
u64::pack(&w, out)?;
}
Ok(())
}
}
//--------------------------------
#[derive(Debug)]
pub struct SMRoot {
pub nr_blocks: u64,
pub nr_allocated: u64,
pub bitmap_root: u64,
pub ref_count_root: u64,
}
impl Unpack for SMRoot {
fn disk_size() -> u32 {
32
}
fn unpack(i: &[u8]) -> IResult<&[u8], Self> {
let (i, nr_blocks) = le_u64(i)?;
let (i, nr_allocated) = le_u64(i)?;
let (i, bitmap_root) = le_u64(i)?;
let (i, ref_count_root) = le_u64(i)?;
Ok((
i,
SMRoot {
nr_blocks,
nr_allocated,
bitmap_root,
ref_count_root,
},
))
}
}
pub fn unpack_root(data: &[u8]) -> Result<SMRoot> {
match SMRoot::unpack(data) {
Err(_e) => Err(anyhow!("couldn't parse SMRoot")),
Ok((_i, v)) => Ok(v),
}
}
impl Pack for SMRoot {
fn pack<W: WriteBytesExt>(&self, w: &mut W) -> Result<()> {
w.write_u64::<LittleEndian>(self.nr_blocks)?;
w.write_u64::<LittleEndian>(self.nr_allocated)?;
w.write_u64::<LittleEndian>(self.bitmap_root)?;
w.write_u64::<LittleEndian>(self.ref_count_root)?;
Ok(())
}
}
//--------------------------------
pub fn write_common(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<(Vec<IndexEntry>, u64)> {
use BitmapEntry::*;
let mut index_entries = Vec::new();
let mut overflow_builder: Builder<u32> = Builder::new(Box::new(NoopRC {}));
// how many bitmaps do we need?
for bm in 0..div_up(sm.get_nr_blocks()? as usize, ENTRIES_PER_BITMAP) {
let mut entries = Vec::with_capacity(ENTRIES_PER_BITMAP);
let mut first_free: Option<u32> = None;
let mut nr_free: u32 = 0;
for i in 0..ENTRIES_PER_BITMAP {
let b: u64 = ((bm * ENTRIES_PER_BITMAP) as u64) + i as u64;
if b > sm.get_nr_blocks()? {
break;
}
let rc = sm.get(b)?;
let e = match rc {
0 => {
nr_free += 1;
if first_free.is_none() {
first_free = Some(i as u32);
}
Small(0)
}
1 => Small(1),
2 => Small(2),
_ => {
overflow_builder.push_value(w, b as u64, rc)?;
Overflow
}
};
entries.push(e);
}
// allocate a new block
let b = w.alloc()?;
let mut cursor = Cursor::new(b.get_data());
// write the bitmap to it
let blocknr = b.loc;
let bitmap = Bitmap { blocknr, entries };
bitmap.pack(&mut cursor)?;
w.write(b, checksum::BT::BITMAP)?;
// Insert into the index tree
let ie = IndexEntry {
blocknr,
nr_free,
none_free_before: first_free.unwrap_or(ENTRIES_PER_BITMAP as u32),
};
index_entries.push(ie);
}
let ref_count_root = overflow_builder.complete(w)?;
Ok((index_entries, ref_count_root))
}
//------------------------------------------
pub fn write_disk_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRoot> {
let (index_entries, ref_count_root) = write_common(w, sm)?;
@ -306,6 +16,7 @@ pub fn write_disk_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRoot>
}
let bitmap_root = index_builder.complete(w)?;
w.flush()?;
Ok(SMRoot {
nr_blocks: sm.get_nr_blocks()?,
@ -315,85 +26,4 @@ pub fn write_disk_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRoot>
})
}
//----------------------------
fn block_to_bitmap(b: u64) -> usize {
(b / ENTRIES_PER_BITMAP as u64) as usize
}
fn adjust_counts(w: &mut WriteBatcher, ie: &IndexEntry, allocs: &[u64]) -> Result<IndexEntry> {
use BitmapEntry::*;
let mut first_free = ie.none_free_before;
let mut nr_free = ie.nr_free - allocs.len() as u32;
// Read the bitmap
let bitmap_block = w.engine.read(ie.blocknr)?;
let (_, mut bitmap) = Bitmap::unpack(bitmap_block.get_data())?;
// Update all the entries
for a in allocs {
if first_free == *a as u32 {
first_free = *a as u32 + 1;
}
if bitmap.entries[*a as usize] == Small(0) {
nr_free -= 1;
}
bitmap.entries[*a as usize] = Small(1);
}
// Write the bitmap
let mut cur = Cursor::new(bitmap_block.get_data());
bitmap.pack(&mut cur)?;
w.write(bitmap_block, checksum::BT::BITMAP)?;
// Return the adjusted index entry
Ok(IndexEntry {
blocknr: ie.blocknr,
nr_free,
none_free_before: first_free,
})
}
pub fn write_metadata_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRoot> {
w.clear_allocations();
let (mut indexes, ref_count_root) = write_common(w, sm)?;
let bitmap_root = w.alloc()?;
// Now we need to patch up the counts for the metadata that was used for storing
// the space map itself. These ref counts all went from 0 to 1.
let allocations = w.clear_allocations();
// Sort the allocations by bitmap
let mut by_bitmap = BTreeMap::new();
for b in allocations {
let bitmap = block_to_bitmap(b);
(*by_bitmap.entry(bitmap).or_insert_with(Vec::new)).push(b % ENTRIES_PER_BITMAP as u64);
}
for (bitmap, allocs) in by_bitmap {
indexes[bitmap] = adjust_counts(w, &indexes[bitmap], &allocs)?;
}
// Write out the metadata index
let metadata_index = MetadataIndex {
blocknr: bitmap_root.loc,
indexes,
};
let mut cur = Cursor::new(bitmap_root.get_data());
metadata_index.pack(&mut cur)?;
let loc = bitmap_root.loc;
w.write(bitmap_root, checksum::BT::INDEX)?;
Ok(SMRoot {
nr_blocks: sm.get_nr_blocks()?,
nr_allocated: sm.get_nr_allocated()?,
bitmap_root: loc,
ref_count_root,
})
}
//--------------------------------
//------------------------------------------


@ -0,0 +1,139 @@
use anyhow::Result;
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use std::collections::BTreeMap;
use std::io::Cursor;
use crate::checksum;
use crate::io_engine::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_common::*;
use crate::pdata::unpack::*;
use crate::write_batcher::*;
//------------------------------------------
const MAX_METADATA_BITMAPS: usize = 255;
//------------------------------------------
pub struct MetadataIndex {
pub blocknr: u64,
pub indexes: Vec<IndexEntry>,
}
impl Unpack for MetadataIndex {
fn disk_size() -> u32 {
BLOCK_SIZE as u32
}
fn unpack(i: &[u8]) -> IResult<&[u8], MetadataIndex> {
// FIXME: check the checksum
let (i, _csum) = le_u32(i)?;
let (i, _padding) = le_u32(i)?;
let (i, blocknr) = le_u64(i)?;
let (i, indexes) = nom::multi::count(IndexEntry::unpack, MAX_METADATA_BITMAPS)(i)?;
Ok((i, MetadataIndex { blocknr, indexes }))
}
}
impl Pack for MetadataIndex {
fn pack<W: WriteBytesExt>(&self, w: &mut W) -> Result<()> {
w.write_u32::<LittleEndian>(0)?; // csum
w.write_u32::<LittleEndian>(0)?; // padding
w.write_u64::<LittleEndian>(self.blocknr)?;
assert!(self.indexes.len() <= MAX_METADATA_BITMAPS);
for ie in &self.indexes {
ie.pack(w)?;
}
Ok(())
}
}
//------------------------------------------
fn block_to_bitmap(b: u64) -> usize {
(b / ENTRIES_PER_BITMAP as u64) as usize
}
fn adjust_counts(w: &mut WriteBatcher, ie: &IndexEntry, allocs: &[u64]) -> Result<IndexEntry> {
use BitmapEntry::*;
let mut first_free = ie.none_free_before;
let mut nr_free = ie.nr_free - allocs.len() as u32;
// Read the bitmap
let bitmap_block = w.engine.read(ie.blocknr)?;
let (_, mut bitmap) = Bitmap::unpack(bitmap_block.get_data())?;
// Update all the entries
for a in allocs {
if first_free == *a as u32 {
first_free = *a as u32 + 1;
}
if bitmap.entries[*a as usize] == Small(0) {
nr_free -= 1;
}
bitmap.entries[*a as usize] = Small(1);
}
// Write the bitmap
let mut cur = Cursor::new(bitmap_block.get_data());
bitmap.pack(&mut cur)?;
w.write(bitmap_block, checksum::BT::BITMAP)?;
// Return the adjusted index entry
Ok(IndexEntry {
blocknr: ie.blocknr,
nr_free,
none_free_before: first_free,
})
}
pub fn write_metadata_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<SMRoot> {
w.clear_allocations();
let (mut indexes, ref_count_root) = write_common(w, sm)?;
let bitmap_root = w.alloc()?;
// Now we need to patch up the counts for the metadata that was used for storing
// the space map itself. These ref counts all went from 0 to 1.
let allocations = w.clear_allocations();
// Sort the allocations by bitmap
let mut by_bitmap = BTreeMap::new();
for b in allocations {
let bitmap = block_to_bitmap(b);
(*by_bitmap.entry(bitmap).or_insert_with(Vec::new)).push(b % ENTRIES_PER_BITMAP as u64);
}
for (bitmap, allocs) in by_bitmap {
indexes[bitmap] = adjust_counts(w, &indexes[bitmap], &allocs)?;
}
// Write out the metadata index
let metadata_index = MetadataIndex {
blocknr: bitmap_root.loc,
indexes,
};
let mut cur = Cursor::new(bitmap_root.get_data());
metadata_index.pack(&mut cur)?;
let loc = bitmap_root.loc;
w.write(bitmap_root, checksum::BT::INDEX)?;
w.flush()?;
Ok(SMRoot {
nr_blocks: sm.get_nr_blocks()?,
nr_allocated: sm.get_nr_allocated()?,
bitmap_root: loc,
ref_count_root,
})
}
//------------------------------------------
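
For orientation, each on-disk bitmap covers ENTRIES_PER_BITMAP metadata blocks: with 4096-byte blocks that is (4096 - 16) / 8 = 510 words, and 510 * 8 * 4 = 16320 two-bit entries, so block_to_bitmap is plain integer division. A worked example:

// Assuming BLOCK_SIZE = 4096, as in io_engine.rs.
const ENTRIES_PER_BITMAP: u64 = ((4096 - 16) / 8) * 8 * 4; // 16320

fn block_to_bitmap(b: u64) -> u64 {
    b / ENTRIES_PER_BITMAP
}

fn bitmap_index_demo() {
    assert_eq!(block_to_bitmap(16319), 0);
    assert_eq!(block_to_bitmap(16320), 1);
    // Metadata block 40000 lands in bitmap 2, at entry 40000 % 16320 = 7360.
    assert_eq!(block_to_bitmap(40_000), 2);
}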


@ -10,7 +10,7 @@ use crate::pdata::btree::{self, *};
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_checker::*;
use crate::pdata::space_map_disk::*;
use crate::pdata::space_map_common::*;
use crate::pdata::unpack::*;
use crate::report::*;
use crate::thin::block_time::*;


@ -11,7 +11,7 @@ use crate::pdata::btree::{self, *};
use crate::pdata::btree_leaf_walker::*;
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_disk::*;
use crate::pdata::space_map_common::*;
use crate::pdata::unpack::*;
use crate::report::*;
use crate::thin::block_time::*;


@ -2,12 +2,17 @@ use anyhow::{anyhow, Result};
use std::collections::BTreeMap;
use std::fs::OpenOptions;
use std::io::Cursor;
use std::ops::Deref;
use std::path::Path;
use std::sync::Arc;
use std::sync::{Arc, Mutex};
use crate::io_engine::*;
use crate::pdata::btree_builder::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_disk::*;
use crate::pdata::space_map_metadata::*;
use crate::pdata::unpack::Pack;
use crate::report::*;
use crate::thin::block_time::*;
use crate::thin::device_detail::*;
@ -17,6 +22,25 @@ use crate::write_batcher::*;
//------------------------------------------
struct MappingRC {
sm: Arc<Mutex<dyn SpaceMap>>,
}
impl RefCounter<BlockTime> for MappingRC {
fn get(&self, v: &BlockTime) -> Result<u32> {
return self.sm.lock().unwrap().get(v.block);
}
fn inc(&mut self, v: &BlockTime) -> Result<()> {
self.sm.lock().unwrap().inc(v.block, 1)
}
fn dec(&mut self, v: &BlockTime) -> Result<()> {
self.sm.lock().unwrap().dec(v.block)?;
Ok(())
}
}
//------------------------------------------
enum MappedSection {
Def(String),
Dev(u32),
@ -31,9 +55,12 @@ impl std::fmt::Display for MappedSection {
}
}
//------------------------------------------
struct Pass1Result {
sb: Option<xml::Superblock>,
sb: xml::Superblock,
devices: BTreeMap<u32, (DeviceDetail, Vec<NodeSummary>)>,
data_sm: Arc<Mutex<dyn SpaceMap>>,
}
struct Pass1<'a> {
@ -45,7 +72,9 @@ struct Pass1<'a> {
// The builder for the current shared sub tree or device
map: Option<(MappedSection, NodeBuilder<BlockTime>)>,
result: Pass1Result,
sb: Option<xml::Superblock>,
devices: BTreeMap<u32, (DeviceDetail, Vec<NodeSummary>)>,
data_sm: Option<Arc<Mutex<dyn SpaceMap>>>,
}
impl<'a> Pass1<'a> {
@ -55,15 +84,21 @@ impl<'a> Pass1<'a> {
current_dev: None,
sub_trees: BTreeMap::new(),
map: None,
result: Pass1Result {
sb: None,
devices: BTreeMap::new(),
},
sb: None,
devices: BTreeMap::new(),
data_sm: None,
}
}
fn get_result(self) -> Pass1Result {
self.result
fn get_result(self) -> Result<Pass1Result> {
if self.sb.is_none() {
return Err(anyhow!("No superblock found in xml file"));
}
Ok(Pass1Result {
sb: self.sb.unwrap(),
devices: self.devices,
data_sm: self.data_sm.unwrap(),
})
}
fn begin_section(&mut self, section: MappedSection) -> Result<Visit> {
@ -75,7 +110,9 @@ impl<'a> Pass1<'a> {
return Err(anyhow!(msg));
}
let value_rc = Box::new(NoopRC {});
let value_rc = Box::new(MappingRC {
sm: self.data_sm.as_ref().unwrap().clone(),
});
let leaf_builder = NodeBuilder::new(Box::new(LeafIO {}), value_rc);
self.map = Some((section, leaf_builder));
@ -97,7 +134,8 @@ impl<'a> Pass1<'a> {
impl<'a> MetadataVisitor for Pass1<'a> {
fn superblock_b(&mut self, sb: &xml::Superblock) -> Result<Visit> {
self.result.sb = Some(sb.clone());
self.sb = Some(sb.clone());
self.data_sm = Some(core_sm(sb.nr_data_blocks, u32::MAX));
self.w.alloc()?;
Ok(Visit::Continue)
}
@ -132,7 +170,7 @@ impl<'a> MetadataVisitor for Pass1<'a> {
fn device_e(&mut self) -> Result<Visit> {
if let Some(detail) = self.current_dev.take() {
if let (MappedSection::Dev(thin_id), nodes) = self.end_section()? {
self.result.devices.insert(thin_id, (detail, nodes));
self.devices.insert(thin_id, (detail, nodes));
Ok(Visit::Continue)
} else {
Err(anyhow!("internal error, couldn't find device details"))
@ -191,13 +229,29 @@ impl<'a> MetadataVisitor for Pass1<'a> {
}
//------------------------------------------
/*
/// Writes a data space map to disk. Returns the space map root that needs
/// to be written to the superblock.
fn build_data_sm(batcher: WriteBatcher, sm: Box<dyn SpaceMap>) -> Result<Vec<u8>> {
fn build_data_sm(w: &mut WriteBatcher, sm: &dyn SpaceMap) -> Result<Vec<u8>> {
let mut sm_root = vec![0u8; SPACE_MAP_ROOT_SIZE];
let mut cur = Cursor::new(&mut sm_root);
let r = write_disk_sm(w, sm)?;
r.pack(&mut cur)?;
Ok(sm_root)
}
/// Writes the metadata space map to disk. Returns the space map root that needs
/// to be written to the superblock.
fn build_metadata_sm(w: &mut WriteBatcher) -> Result<Vec<u8>> {
let mut sm_root = vec![0u8; SPACE_MAP_ROOT_SIZE];
let mut cur = Cursor::new(&mut sm_root);
let sm_without_meta = clone_space_map(w.sm.lock().unwrap().deref())?;
let r = write_metadata_sm(w, sm_without_meta.deref())?;
r.pack(&mut cur)?;
Ok(sm_root)
}
*/
//------------------------------------------
@ -246,7 +300,7 @@ pub fn restore(opts: ThinRestoreOptions) -> Result<()> {
let mut w = WriteBatcher::new(ctx.engine.clone(), sm.clone(), ctx.engine.get_batch_size());
let mut pass = Pass1::new(&mut w);
xml::read(input, &mut pass)?;
let pass = pass.get_result();
let pass = pass.get_result()?;
// Build the device details tree.
let mut details_builder: Builder<DeviceDetail> = Builder::new(Box::new(NoopRC {}));
@ -274,33 +328,28 @@ pub fn restore(opts: ThinRestoreOptions) -> Result<()> {
let mapping_root = builder.complete(&mut w)?;
// Build data space map
let data_sm_root = build_data_sm(&mut w, pass.data_sm.lock().unwrap().deref())?;
// FIXME: I think we need to decrement the shared leaves
// Build metadata space map
w.flush()?;
let metadata_sm_root = build_metadata_sm(&mut w)?;
// Write the superblock
if let Some(xml_sb) = pass.sb {
let sb = superblock::Superblock {
flags: SuperblockFlags { needs_check: false },
block: SUPERBLOCK_LOCATION,
version: 2,
time: xml_sb.time as u32,
transaction_id: xml_sb.transaction,
metadata_snap: 0,
data_sm_root: vec![0; SPACE_MAP_ROOT_SIZE],
metadata_sm_root: vec![0; SPACE_MAP_ROOT_SIZE],
mapping_root,
details_root,
data_block_size: xml_sb.data_block_size,
nr_metadata_blocks: ctx.engine.get_nr_blocks(),
};
write_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION, &sb)?;
} else {
return Err(anyhow!("No superblock found in xml file"));
}
let sb = superblock::Superblock {
flags: SuperblockFlags { needs_check: false },
block: SUPERBLOCK_LOCATION,
version: 2,
time: pass.sb.time as u32,
transaction_id: pass.sb.transaction,
metadata_snap: 0,
data_sm_root,
metadata_sm_root,
mapping_root,
details_root,
data_block_size: pass.sb.data_block_size,
nr_metadata_blocks: ctx.engine.get_nr_blocks(),
};
write_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION, &sb)?;
Ok(())
}


@ -116,12 +116,12 @@ fn pack_superblock<W: WriteBytesExt>(sb: &Superblock, w: &mut W) -> Result<()> {
w.write_u32::<LittleEndian>(sb.time)?;
w.write_u64::<LittleEndian>(sb.transaction_id)?;
w.write_u64::<LittleEndian>(sb.metadata_snap)?;
w.write_all(&[0; SPACE_MAP_ROOT_SIZE])?; // data sm root
w.write_all(&[0; SPACE_MAP_ROOT_SIZE])?; // metadata sm root
w.write_all(&sb.data_sm_root)?;
w.write_all(&sb.metadata_sm_root)?;
w.write_u64::<LittleEndian>(sb.mapping_root)?;
w.write_u64::<LittleEndian>(sb.details_root)?;
w.write_u32::<LittleEndian>(sb.data_block_size)?;
w.write_u32::<LittleEndian>(BLOCK_SIZE as u32)?;
w.write_u32::<LittleEndian>((BLOCK_SIZE >> SECTOR_SHIFT) as u32)?; // metadata block size
w.write_u64::<LittleEndian>(sb.nr_metadata_blocks)?;
Ok(())
@ -137,7 +137,7 @@ pub fn write_superblock(engine: &dyn IoEngine, _loc: u64, sb: &Superblock) -> Re
}
// calculate the checksum
write_checksum(b.get_data(), BT::SUPERBLOCK)?;
write_checksum(b.get_data(), BT::THIN_SUPERBLOCK)?;
// write
engine.write(&b)?;


@ -1,10 +1,11 @@
use anyhow::{anyhow, Result};
use std::{borrow::Cow, fmt::Display, io::prelude::*, io::BufReader, io::Write};
use anyhow::Result;
use std::{io::prelude::*, io::BufReader, io::Write};
use quick_xml::events::attributes::Attribute;
use quick_xml::events::{BytesEnd, BytesStart, Event};
use quick_xml::{Reader, Writer};
use crate::xml::*;
//---------------------------------------
#[derive(Clone)]
@ -72,18 +73,6 @@ impl<W: Write> XmlWriter<W> {
}
}
fn mk_attr_<'a, T: Display>(n: T) -> Cow<'a, [u8]> {
let str = format!("{}", n);
Cow::Owned(str.into_bytes())
}
fn mk_attr<T: Display>(key: &[u8], value: T) -> Attribute {
Attribute {
key,
value: mk_attr_(value),
}
}
const XML_VERSION: u32 = 2;
impl<W: Write> MetadataVisitor for XmlWriter<W> {
@ -187,40 +176,6 @@ impl<W: Write> MetadataVisitor for XmlWriter<W> {
//---------------------------------------
// FIXME: nasty unwraps
fn string_val(kv: &Attribute) -> String {
let v = kv.unescaped_value().unwrap();
let bytes = v.to_vec();
String::from_utf8(bytes).unwrap()
}
// FIXME: there's got to be a way of doing this without copying the string
fn u64_val(kv: &Attribute) -> Result<u64> {
let n = string_val(kv).parse::<u64>()?;
Ok(n)
}
fn u32_val(kv: &Attribute) -> Result<u32> {
let n = string_val(kv).parse::<u32>()?;
Ok(n)
}
fn bad_attr<T>(_tag: &str, _attr: &[u8]) -> Result<T> {
todo!();
}
fn missing_attr<T>(tag: &str, attr: &str) -> Result<T> {
let msg = format!("missing attribute '{}' for tag '{}", attr, tag);
Err(anyhow!(msg))
}
fn check_attr<T>(tag: &str, name: &str, maybe_v: Option<T>) -> Result<T> {
match maybe_v {
None => missing_attr(tag, name),
Some(v) => Ok(v),
}
}
fn parse_superblock(e: &BytesStart) -> Result<Superblock> {
let mut uuid: Option<String> = None;
let mut time: Option<u64> = None;


@ -43,6 +43,7 @@ impl WriteBatcher {
return Err(anyhow!("out of metadata space"));
}
self.allocations.insert(b.unwrap());
Ok(Block::new(b.unwrap()))
}

63
src/xml.rs Normal file

@ -0,0 +1,63 @@
use anyhow::anyhow;
use quick_xml::events::attributes::Attribute;
use std::borrow::Cow;
use std::fmt::Display;
//------------------------------------------
pub fn bytes_val<'a>(kv: &'a Attribute) -> Cow<'a, [u8]> {
kv.unescaped_value().unwrap()
}
// FIXME: nasty unwraps
pub fn string_val(kv: &Attribute) -> String {
let v = kv.unescaped_value().unwrap();
let bytes = v.to_vec();
String::from_utf8(bytes).unwrap()
}
// FIXME: there's got to be a way of doing this without copying the string
pub fn u64_val(kv: &Attribute) -> anyhow::Result<u64> {
let n = string_val(kv).parse::<u64>()?;
Ok(n)
}
pub fn u32_val(kv: &Attribute) -> anyhow::Result<u32> {
let n = string_val(kv).parse::<u32>()?;
Ok(n)
}
pub fn bool_val(kv: &Attribute) -> anyhow::Result<bool> {
let n = string_val(kv).parse::<bool>()?;
Ok(n)
}
pub fn bad_attr<T>(_tag: &str, _attr: &[u8]) -> anyhow::Result<T> {
todo!();
}
pub fn check_attr<T>(tag: &str, name: &str, maybe_v: Option<T>) -> anyhow::Result<T> {
match maybe_v {
None => missing_attr(tag, name),
Some(v) => Ok(v),
}
}
fn missing_attr<T>(tag: &str, attr: &str) -> anyhow::Result<T> {
let msg = format!("missing attribute '{}' for tag '{}", attr, tag);
Err(anyhow!(msg))
}
pub fn mk_attr<T: Display>(key: &[u8], value: T) -> Attribute {
Attribute {
key,
value: mk_attr_(value),
}
}
fn mk_attr_<'a, T: Display>(n: T) -> Cow<'a, [u8]> {
let str = format!("{}", n);
Cow::Owned(str.into_bytes())
}
//------------------------------------------
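
A quick round trip of these helpers from inside this module; the key and value are illustrative, and this assumes quick-xml's Attribute behaves the same for hand-built attributes as for parsed ones:

fn attr_round_trip() -> anyhow::Result<()> {
    let a = mk_attr(b"block_size", 128u32);
    assert_eq!(u32_val(&a)?, 128);
    Ok(())
}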


@ -67,7 +67,7 @@ impl XmlGen for CacheGen {
v.mappings_b()?;
{
let nr_resident = (self.nr_cache_blocks * 100 as u32) / (self.percent_resident as u32);
let nr_resident = (self.nr_cache_blocks * 100u32) / (self.percent_resident as u32);
let mut used = HashSet::new();
for n in 0..nr_resident {
let mut oblock = 0u64;