Merge branch 'main' into rust-cache-tools

This commit is contained in:
Ming-Hung Tsai
2021-05-11 23:05:03 +08:00
40 changed files with 1510 additions and 581 deletions

View File

@@ -1,3 +1,5 @@
use anyhow::Result;
use byteorder::WriteBytesExt;
use nom::{number::complete::*, IResult};
use std::fmt;
@@ -31,6 +33,13 @@ impl Unpack for BlockTime {
}
}
impl Pack for BlockTime {
    /// Serialises the pair as a single u64: the block number shifted into
    /// the high bits, the time stamp occupying the low 24 bits.
    /// NOTE(review): assumes `time` fits in 24 bits — confirm upstream.
    fn pack<W: WriteBytesExt>(&self, data: &mut W) -> Result<()> {
        let packed: u64 = (self.block << 24) | self.time as u64;
        packed.pack(data)
    }
}
impl fmt::Display for BlockTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{} @ {}", self.block, self.time)

View File

@@ -11,6 +11,7 @@ use crate::pdata::btree::{self, *};
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_checker::*;
use crate::pdata::space_map_disk::*;
use crate::pdata::unpack::*;
use crate::report::*;
use crate::thin::block_time::*;
@@ -79,11 +80,10 @@ fn inc_superblock(sm: &ASpaceMap) -> Result<()> {
//------------------------------------------
const MAX_CONCURRENT_IO: u32 = 1024;
pub const MAX_CONCURRENT_IO: u32 = 1024;
pub struct ThinCheckOptions<'a> {
pub dev: &'a Path,
pub async_io: bool,
pub struct ThinCheckOptions {
pub engine: Arc<dyn IoEngine + Send + Sync>,
pub sb_only: bool,
pub skip_mappings: bool,
pub ignore_non_fatal: bool,
@@ -194,25 +194,12 @@ fn check_mapping_bottom_level(
}
}
fn mk_context(opts: &ThinCheckOptions) -> Result<Context> {
let engine: Arc<dyn IoEngine + Send + Sync>;
let nr_threads;
if opts.async_io {
nr_threads = std::cmp::min(4, num_cpus::get());
engine = Arc::new(AsyncIoEngine::new(
opts.dev,
MAX_CONCURRENT_IO,
opts.auto_repair,
)?);
} else {
nr_threads = std::cmp::max(8, num_cpus::get() * 2);
engine = Arc::new(SyncIoEngine::new(opts.dev, nr_threads, opts.auto_repair)?);
}
fn mk_context(engine: Arc<dyn IoEngine + Send + Sync>, report: Arc<Report>) -> Result<Context> {
let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
let pool = ThreadPool::new(nr_threads);
Ok(Context {
report: opts.report.clone(),
report,
engine,
pool,
})
@@ -231,7 +218,7 @@ fn bail_out(ctx: &Context, task: &str) -> Result<()> {
}
pub fn check(opts: ThinCheckOptions) -> Result<()> {
let ctx = mk_context(&opts)?;
let ctx = mk_context(opts.engine.clone(), opts.report.clone())?;
// FIXME: temporarily get these out
let report = &ctx.report;
@@ -355,3 +342,107 @@ pub fn check(opts: ThinCheckOptions) -> Result<()> {
}
//------------------------------------------
// Some callers wish to know which blocks are allocated.
// These are the in-core space maps built while checking; each is behind a
// mutex so callers can inspect the final reference counts afterwards.
pub struct CheckMaps {
// Reference counts for the metadata device blocks.
pub metadata_sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
// Reference counts for the data device blocks.
pub data_sm: Arc<Mutex<dyn SpaceMap + Send + Sync>>,
}
/// Runs the full thin metadata check and, on success, returns the in-core
/// metadata and data space maps so the caller can see which blocks are
/// allocated.
///
/// Errors out (via `bail_out`) as soon as a stage reports a fatal problem.
pub fn check_with_maps(engine: Arc<dyn IoEngine + Send + Sync>, report: Arc<Report>) -> Result<CheckMaps> {
    let ctx = mk_context(engine.clone(), report.clone())?;
    report.set_title("Checking thin metadata");

    // superblock
    let sb = read_superblock(engine.as_ref(), SUPERBLOCK_LOCATION)?;

    report.info(&format!("TRANSACTION_ID={}", sb.transaction_id));

    let metadata_root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;

    // Btree walks start from the superblock, hence the initial 0 in the path.
    let mut path = vec![0];

    // Device details. We read this once to get the number of thin devices, and hence the
    // maximum metadata ref count. Then create metadata space map, and reread to increment
    // the ref counts for that metadata.
    let devs = btree_to_map::<DeviceDetail>(&mut path, engine.clone(), false, sb.details_root)?;
    let nr_devs = devs.len();
    let metadata_sm = core_sm(engine.get_nr_blocks(), nr_devs as u32);
    inc_superblock(&metadata_sm)?;

    report.set_sub_title("device details tree");
    let _devs = btree_to_map_with_sm::<DeviceDetail>(
        &mut path,
        engine.clone(),
        metadata_sm.clone(),
        false,
        sb.details_root,
    )?;

    // NOTE(review): any early `?` return below leaves this progress thread
    // running; consider ensuring it is stopped on all exit paths.
    let (tid, stop_progress) = spawn_progress_thread(
        metadata_sm.clone(),
        metadata_root.nr_allocated,
        report.clone(),
    )?;

    // mapping top level
    report.set_sub_title("mapping tree");
    let roots = btree_to_map_with_path::<u64>(
        &mut path,
        engine.clone(),
        metadata_sm.clone(),
        false,
        sb.mapping_root,
    )?;

    // mapping bottom level
    let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
    let data_sm = core_sm(root.nr_blocks, nr_devs as u32);
    check_mapping_bottom_level(&ctx, &metadata_sm, &data_sm, &roots)?;
    bail_out(&ctx, "mapping tree")?;

    //-----------------------------------------

    report.set_sub_title("data space map");
    let root = unpack::<SMRoot>(&sb.data_sm_root[0..])?;
    let _data_leaks = check_disk_space_map(
        engine.clone(),
        report.clone(),
        root,
        data_sm.clone(),
        metadata_sm.clone(),
        false,
    )?;
    bail_out(&ctx, "data space map")?;

    //-----------------------------------------

    report.set_sub_title("metadata space map");
    let root = unpack::<SMRoot>(&sb.metadata_sm_root[0..])?;
    report.info(&format!(
        "METADATA_FREE_BLOCKS={}",
        root.nr_blocks - root.nr_allocated
    ));

    // Now the counts should be correct and we can check it.
    let _metadata_leaks =
        check_metadata_space_map(engine.clone(), report, root, metadata_sm.clone(), false)?;
    bail_out(&ctx, "metadata space map")?;

    //-----------------------------------------

    stop_progress.store(true, Ordering::Relaxed);
    tid.join().unwrap();

    // Hand ownership of the maps to the caller; no clones needed since
    // these are the last uses of the Arcs.
    Ok(CheckMaps {
        metadata_sm,
        data_sm,
    })
}
//------------------------------------------

View File

@@ -1,7 +1,9 @@
use anyhow::Result;
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{number::complete::*, IResult};
use std::fmt;
use crate::pdata::unpack::*;
use nom::{number::complete::*, IResult};
//------------------------------------------
@@ -15,11 +17,11 @@ pub struct DeviceDetail {
impl fmt::Display for DeviceDetail {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "mapped = {}, trans = {}, create = {}, snap = {}",
self.mapped_blocks,
self.transaction_id,
self.creation_time,
self.snapshotted_time)?;
write!(
f,
"mapped = {}, trans = {}, create = {}, snap = {}",
self.mapped_blocks, self.transaction_id, self.creation_time, self.snapshotted_time
)?;
Ok(())
}
}
@@ -47,4 +49,14 @@ impl Unpack for DeviceDetail {
}
}
impl Pack for DeviceDetail {
    /// Writes the detail record in on-disk order: two little-endian u64s
    /// (mapped blocks, transaction id) followed by two little-endian u32s
    /// (creation time, snapshot time).
    fn pack<W: WriteBytesExt>(&self, out: &mut W) -> Result<()> {
        out.write_u64::<LittleEndian>(self.mapped_blocks)?;
        out.write_u64::<LittleEndian>(self.transaction_id)?;
        out.write_u32::<LittleEndian>(self.creation_time)?;
        out.write_u32::<LittleEndian>(self.snapshotted_time)?;
        Ok(())
    }
}
//------------------------------------------

View File

@@ -11,6 +11,7 @@ use crate::pdata::btree::{self, *};
use crate::pdata::btree_leaf_walker::*;
use crate::pdata::btree_walker::*;
use crate::pdata::space_map::*;
use crate::pdata::space_map_disk::*;
use crate::pdata::unpack::*;
use crate::report::*;
use crate::thin::block_time::*;
@@ -287,7 +288,7 @@ fn find_shared_nodes(
// We have to get the leaves so w is consumed and the &mut on sm
// is dropped.
let leaves = w.get_leaves();
let _leaves = w.get_leaves();
let mut shared = BTreeSet::new();
{
for i in 0..sm.get_nr_blocks().unwrap() {
@@ -297,6 +298,8 @@ fn find_shared_nodes(
}
}
/*
// FIXME: why?!!
// we're not interested in leaves (roots will get re-added later).
{
for i in 0..leaves.len() {
@@ -305,6 +308,7 @@ fn find_shared_nodes(
}
}
}
*/
Ok((shared, sm))
}
@@ -616,9 +620,11 @@ pub fn dump(opts: ThinDumpOptions) -> Result<()> {
let sb = read_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION)?;
let md = build_metadata(&ctx, &sb)?;
/*
ctx.report
.set_title("Optimising metadata to improve leaf packing");
let md = optimise_metadata(md)?;
*/
dump_metadata(&ctx, &sb, &md)
}

View File

@@ -1,8 +1,203 @@
use anyhow::Result;
use anyhow::{anyhow, Result};
use std::collections::BTreeMap;
use std::fs::OpenOptions;
use std::path::Path;
use std::sync::Arc;
use crate::io_engine::*;
use crate::pdata::btree_builder::*;
use crate::pdata::space_map::*;
use crate::report::*;
use crate::thin::block_time::*;
use crate::thin::device_detail::*;
use crate::thin::superblock::{self, *};
use crate::thin::xml::{self, *};
use crate::write_batcher::*;
//------------------------------------------
/// Identifies the xml section currently being built: either a shared
/// sub tree (`<def>`, keyed by name) or a device (`<device>`, keyed by
/// thin id).  The Display impl is used in error messages.
enum MappedSection {
    Def(String),
    Dev(u32),
}

impl std::fmt::Display for MappedSection {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Def(name) => write!(f, "Def {}", name),
            Self::Dev(thin_id) => write!(f, "Device {}", thin_id),
        }
    }
}
// Output of the first restore pass: the xml superblock (if one was seen)
// plus, per thin device, its detail record and the leaf nodes of its
// mapping tree.
struct Pass1Result {
sb: Option<xml::Superblock>,
devices: BTreeMap<u32, (DeviceDetail, Vec<NodeSummary>)>,
}
// First pass over the xml input: builds the bottom-level mapping leaves
// for every <def> and <device> section through the write batcher.
struct Pass1<'a> {
w: &'a mut WriteBatcher,
// Detail record of the <device> currently being parsed, if any.
current_dev: Option<DeviceDetail>,
// Completed shared sub trees (<def>), keyed by name, for later <ref>s.
sub_trees: BTreeMap<String, Vec<NodeSummary>>,
// The builder for the current shared sub tree or device
map: Option<(MappedSection, NodeBuilder<BlockTime>)>,
result: Pass1Result,
}
impl<'a> Pass1<'a> {
    fn new(w: &'a mut WriteBatcher) -> Self {
        Pass1 {
            w,
            current_dev: None,
            sub_trees: BTreeMap::new(),
            map: None,
            result: Pass1Result {
                sb: None,
                devices: BTreeMap::new(),
            },
        }
    }

    /// Consumes the pass and yields the superblock + per-device results.
    fn get_result(self) -> Pass1Result {
        self.result
    }

    /// Opens a <def> or <device> section, creating the leaf builder that
    /// will receive its mappings.  Sections may not nest.
    fn begin_section(&mut self, section: MappedSection) -> Result<Visit> {
        if let Some((outer, _)) = self.map.as_ref() {
            return Err(anyhow!(
                "Nested subtrees are not allowed '{}' within '{}'",
                section,
                outer
            ));
        }

        let value_rc = Box::new(NoopRC {});
        let leaf_builder = NodeBuilder::new(Box::new(LeafIO {}), value_rc);

        self.map = Some((section, leaf_builder));
        Ok(Visit::Continue)
    }

    /// Closes the current section, completing its leaf builder and
    /// returning the summaries of the nodes that were written.
    fn end_section(&mut self) -> Result<(MappedSection, Vec<NodeSummary>)> {
        // take() moves the builder out so complete() can consume it.
        if let Some((name, nodes)) = self.map.take() {
            Ok((name, nodes.complete(self.w)?))
        } else {
            Err(anyhow!("Unbalanced </def> tag"))
        }
    }
}
impl<'a> MetadataVisitor for Pass1<'a> {
    fn superblock_b(&mut self, sb: &xml::Superblock) -> Result<Visit> {
        self.result.sb = Some(sb.clone());
        // Reserve a block up front so nothing else lands on the
        // superblock's location.
        self.w.alloc()?;
        Ok(Visit::Continue)
    }

    fn superblock_e(&mut self) -> Result<Visit> {
        Ok(Visit::Continue)
    }

    fn def_shared_b(&mut self, name: &str) -> Result<Visit> {
        self.begin_section(MappedSection::Def(name.to_string()))
    }

    fn def_shared_e(&mut self) -> Result<Visit> {
        if let (MappedSection::Def(name), nodes) = self.end_section()? {
            self.sub_trees.insert(name, nodes);
            Ok(Visit::Continue)
        } else {
            Err(anyhow!("unexpected </def>"))
        }
    }

    fn device_b(&mut self, d: &Device) -> Result<Visit> {
        self.current_dev = Some(DeviceDetail {
            mapped_blocks: d.mapped_blocks,
            transaction_id: d.transaction,
            creation_time: d.creation_time as u32,
            snapshotted_time: d.snap_time as u32,
        });
        self.begin_section(MappedSection::Dev(d.dev_id))
    }

    fn device_e(&mut self) -> Result<Visit> {
        if let Some(detail) = self.current_dev.take() {
            if let (MappedSection::Dev(thin_id), nodes) = self.end_section()? {
                self.result.devices.insert(thin_id, (detail, nodes));
                Ok(Visit::Continue)
            } else {
                // The open section was a <def>, not a <device>.
                Err(anyhow!("unexpected </device>: no device section is open"))
            }
        } else {
            Err(anyhow!("unexpected </device>"))
        }
    }

    fn map(&mut self, m: &Map) -> Result<Visit> {
        // Borrow the builder once rather than re-unwrapping per iteration.
        if let Some((_section, builder)) = self.map.as_mut() {
            // Expand the (possibly ranged) mapping into individual entries.
            for i in 0..m.len {
                let bt = BlockTime {
                    block: m.data_begin + i,
                    time: m.time,
                };
                builder.push_value(self.w, m.thin_begin + i, bt)?;
            }
            Ok(Visit::Continue)
        } else {
            Err(anyhow!(
                "Mapping tags must appear within a <def> or <device> tag."
            ))
        }
    }

    fn ref_shared(&mut self, name: &str) -> Result<Visit> {
        if self.current_dev.is_none() {
            return Err(anyhow!(
                "<ref> tags may only occur within <device> sections."
            ));
        }

        if let Some(leaves) = self.sub_trees.get(name) {
            // We could be in a <def> or <device>
            if let Some((_name, builder)) = self.map.as_mut() {
                builder.push_nodes(self.w, leaves)?;
            } else {
                return Err(anyhow!(
                    "<ref name=\"{}\"> tag must be within either a <def> or <device> section",
                    name
                ));
            }
            Ok(Visit::Continue)
        } else {
            Err(anyhow!("Couldn't find sub tree '{}'.", name))
        }
    }

    fn eof(&mut self) -> Result<Visit> {
        // FIXME: build the rest of the device trees
        Ok(Visit::Continue)
    }
}
//------------------------------------------
/*
/// Writes a data space map to disk. Returns the space map root that needs
/// to be written to the superblock.
fn build_data_sm(batcher: WriteBatcher, sm: Box<dyn SpaceMap>) -> Result<Vec<u8>> {
}
*/
//------------------------------------------
@@ -13,10 +208,101 @@ pub struct ThinRestoreOptions<'a> {
pub report: Arc<Report>,
}
//------------------------------------------
// Shared state for the restore stages: where progress/info is reported and
// the io engine the new metadata is written through.
struct Context {
report: Arc<Report>,
engine: Arc<dyn IoEngine + Send + Sync>,
}
pub fn restore(_opts: ThinRestoreOptions) -> Result<()> {
todo!();
const MAX_CONCURRENT_IO: u32 = 1024;
/// Builds the restore context, choosing an async or sync io engine based
/// on the options.
fn new_context(opts: &ThinRestoreOptions) -> Result<Context> {
    // NOTE(review): the trailing `true` presumably opens the device
    // writable — confirm against the engine constructors.
    let engine: Arc<dyn IoEngine + Send + Sync> = if opts.async_io {
        Arc::new(AsyncIoEngine::new(opts.output, MAX_CONCURRENT_IO, true)?)
    } else {
        let nr_threads = std::cmp::max(8, num_cpus::get() * 2);
        Arc::new(SyncIoEngine::new(opts.output, nr_threads, true)?)
    };

    Ok(Context {
        report: opts.report.clone(),
        engine,
    })
}
//------------------------------------------
// Restores thin metadata from an xml dump: parses the xml, rebuilds the
// detail and mapping btrees through a write batcher, then writes a fresh
// superblock.  Space maps are not yet rebuilt (see FIXMEs below).
pub fn restore(opts: ThinRestoreOptions) -> Result<()> {
let input = OpenOptions::new()
.read(true)
.write(false)
.open(opts.input)?;
let ctx = new_context(&opts)?;
// Allow every block's ref count to go as high as u32 permits while
// building; real counts are tracked by the core space map.
let max_count = u32::MAX;
let sm = core_sm(ctx.engine.get_nr_blocks(), max_count);
let mut w = WriteBatcher::new(ctx.engine.clone(), sm.clone(), ctx.engine.get_batch_size());
let mut pass = Pass1::new(&mut w);
xml::read(input, &mut pass)?;
let pass = pass.get_result();
// Build the device details tree.
let mut details_builder: Builder<DeviceDetail> = Builder::new(Box::new(NoopRC {}));
for (thin_id, (detail, _)) in &pass.devices {
details_builder.push_value(&mut w, *thin_id as u64, *detail)?;
}
let details_root = details_builder.complete(&mut w)?;
// Build the individual mapping trees that make up the bottom layer.
let mut devs: BTreeMap<u32, u64> = BTreeMap::new();
for (thin_id, (_, nodes)) in &pass.devices {
ctx.report
.info(&format!("building btree for device {}", thin_id));
let mut builder: Builder<BlockTime> = Builder::new(Box::new(NoopRC {}));
builder.push_leaves(&mut w, nodes)?;
let root = builder.complete(&mut w)?;
devs.insert(*thin_id, root);
}
// Build the top level mapping tree
let mut builder: Builder<u64> = Builder::new(Box::new(NoopRC {}));
for (thin_id, root) in devs {
builder.push_value(&mut w, thin_id as u64, root)?;
}
let mapping_root = builder.complete(&mut w)?;
// Build data space map
// FIXME: I think we need to decrement the shared leaves
// Build metadata space map
// Flush any batched blocks before the superblock is written.
w.flush()?;
// Write the superblock
if let Some(xml_sb) = pass.sb {
let sb = superblock::Superblock {
flags: SuperblockFlags { needs_check: false },
block: SUPERBLOCK_LOCATION,
version: 2,
time: xml_sb.time as u32,
transaction_id: xml_sb.transaction,
metadata_snap: 0,
// NOTE(review): space map roots are zeroed because the space maps
// are not rebuilt yet (see FIXMEs above) — confirm downstream
// tools tolerate this.
data_sm_root: vec![0; SPACE_MAP_ROOT_SIZE],
metadata_sm_root: vec![0; SPACE_MAP_ROOT_SIZE],
mapping_root,
details_root,
data_block_size: xml_sb.data_block_size,
nr_metadata_blocks: ctx.engine.get_nr_blocks(),
};
write_superblock(ctx.engine.as_ref(), SUPERBLOCK_LOCATION, &sb)?;
} else {
return Err(anyhow!("No superblock found in xml file"));
}
Ok(())
}
//------------------------------------------

View File

@@ -1,10 +1,18 @@
use crate::io_engine::*;
use anyhow::{anyhow, Result};
use byteorder::{LittleEndian, WriteBytesExt};
use nom::{bytes::complete::*, number::complete::*, IResult};
use std::fmt;
use std::io::Cursor;
use crate::checksum::*;
use crate::io_engine::*;
//----------------------------------------
pub const MAGIC: u64 = 27022010;
pub const SUPERBLOCK_LOCATION: u64 = 0;
//const UUID_SIZE: usize = 16;
const SPACE_MAP_ROOT_SIZE: usize = 128;
const UUID_SIZE: usize = 16;
pub const SPACE_MAP_ROOT_SIZE: usize = 128;
#[derive(Debug, Clone)]
pub struct SuperblockFlags {
@@ -35,36 +43,9 @@ pub struct Superblock {
pub mapping_root: u64,
pub details_root: u64,
pub data_block_size: u32,
pub nr_metadata_blocks: u64,
}
/*
pub enum CheckSeverity {
Fatal,
NonFatal,
}
pub trait CheckError {
fn severity(&self) -> CheckSeverity;
fn block(&self) -> u64;
fn sub_errors(&self) -> Vec<Box<dyn CheckError>>;
}
enum ErrorType {
BadChecksum,
BadBlockType(&'static str),
BadBlock(u64),
BadVersion(u32),
MetadataSnapOutOfBounds(u64),
MappingRootOutOfBounds(u64),
DetailsRootOutOfBounds(u64),
}
struct SuperblockError {
severity: CheckSeverity,
kind: ErrorType,
}
*/
fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
let (i, _csum) = le_u32(data)?;
let (i, flags) = le_u32(i)?;
@@ -81,7 +62,7 @@ fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
let (i, details_root) = le_u64(i)?;
let (i, data_block_size) = le_u32(i)?;
let (i, _metadata_block_size) = le_u32(i)?;
let (i, _metadata_nr_blocks) = le_u64(i)?;
let (i, nr_metadata_blocks) = le_u64(i)?;
Ok((
i,
@@ -100,6 +81,7 @@ fn unpack(data: &[u8]) -> IResult<&[u8], Superblock> {
mapping_root,
details_root,
data_block_size,
nr_metadata_blocks,
},
))
}
@@ -115,3 +97,51 @@ pub fn read_superblock(engine: &dyn IoEngine, loc: u64) -> Result<Superblock> {
}
//------------------------------
/// Serialises `sb` into the on-disk superblock layout (little-endian).
/// The checksum field is written as 0; `write_superblock` fills it in
/// afterwards via `write_checksum`.
fn pack_superblock<W: WriteBytesExt>(sb: &Superblock, w: &mut W) -> Result<()> {
    // checksum, which we don't know yet
    w.write_u32::<LittleEndian>(0)?;

    // flags: only bit 0 (needs_check) is currently defined.
    w.write_u32::<LittleEndian>(if sb.flags.needs_check { 0x1 } else { 0 })?;

    w.write_u64::<LittleEndian>(sb.block)?;

    // Stack arrays avoid the heap allocations of `vec![0; N]` here.
    w.write_all(&[0u8; UUID_SIZE])?;

    w.write_u64::<LittleEndian>(MAGIC)?;
    w.write_u32::<LittleEndian>(sb.version)?;
    w.write_u32::<LittleEndian>(sb.time)?;
    w.write_u64::<LittleEndian>(sb.transaction_id)?;
    w.write_u64::<LittleEndian>(sb.metadata_snap)?;

    // FIXME(review): zeroes are written instead of sb.data_sm_root /
    // sb.metadata_sm_root — confirm this is intentional for the restore
    // path, which does not rebuild space maps yet.
    w.write_all(&[0u8; SPACE_MAP_ROOT_SIZE])?; // data sm root
    w.write_all(&[0u8; SPACE_MAP_ROOT_SIZE])?; // metadata sm root

    w.write_u64::<LittleEndian>(sb.mapping_root)?;
    w.write_u64::<LittleEndian>(sb.details_root)?;
    w.write_u32::<LittleEndian>(sb.data_block_size)?;
    w.write_u32::<LittleEndian>(BLOCK_SIZE as u32)?;
    w.write_u64::<LittleEndian>(sb.nr_metadata_blocks)?;

    Ok(())
}
// Packs, checksums and writes the superblock.
// NOTE(review): `_loc` is ignored — the block is always written at
// SUPERBLOCK_LOCATION; consider dropping the parameter or honouring it.
pub fn write_superblock(engine: &dyn IoEngine, _loc: u64, sb: &Superblock) -> Result<()> {
let b = Block::zeroed(SUPERBLOCK_LOCATION);
// pack the superblock
{
let mut cursor = Cursor::new(b.get_data());
pack_superblock(sb, &mut cursor)?;
}
// calculate the checksum
write_checksum(b.get_data(), BT::SUPERBLOCK)?;
// write
engine.write(&b)?;
Ok(())
}
//------------------------------

View File

@@ -1,4 +1,4 @@
use anyhow::Result;
use anyhow::{anyhow, Result};
use std::{borrow::Cow, fmt::Display, io::prelude::*, io::BufReader, io::Write};
use quick_xml::events::attributes::Attribute;
@@ -46,9 +46,11 @@ pub trait MetadataVisitor {
fn superblock_b(&mut self, sb: &Superblock) -> Result<Visit>;
fn superblock_e(&mut self) -> Result<Visit>;
// Defines a shared sub tree. May only contain a 'map' (no 'ref' allowed).
fn def_shared_b(&mut self, name: &str) -> Result<Visit>;
fn def_shared_e(&mut self) -> Result<Visit>;
// A device contains a number of 'map' or 'ref' items.
fn device_b(&mut self, d: &Device) -> Result<Visit>;
fn device_e(&mut self) -> Result<Visit>;
@@ -207,8 +209,9 @@ fn bad_attr<T>(_tag: &str, _attr: &[u8]) -> Result<T> {
todo!();
}
fn missing_attr<T>(_tag: &str, _attr: &str) -> Result<T> {
todo!();
/// Error helper for a required xml attribute that was not present.
fn missing_attr<T>(tag: &str, attr: &str) -> Result<T> {
    // Fixed: the original format string was missing the closing quote
    // around the tag name.
    Err(anyhow!("missing attribute '{}' for tag '{}'", attr, tag))
}
fn check_attr<T>(tag: &str, name: &str, maybe_v: Option<T>) -> Result<T> {
@@ -257,6 +260,22 @@ fn parse_superblock(e: &BytesStart) -> Result<Superblock> {
})
}
fn parse_def(e: &BytesStart, tag: &str) -> Result<String> {
let mut name: Option<String> = None;
for a in e.attributes() {
let kv = a.unwrap();
match kv.key {
b"name" => {
name = Some(string_val(&kv));
}
_ => return bad_attr(tag, kv.key),
}
}
Ok(name.unwrap())
}
fn parse_device(e: &BytesStart) -> Result<Device> {
let mut dev_id: Option<u32> = None;
let mut mapped_blocks: Option<u64> = None;
@@ -348,16 +367,19 @@ where
Ok(Event::Start(ref e)) => match e.name() {
b"superblock" => visitor.superblock_b(&parse_superblock(e)?),
b"device" => visitor.device_b(&parse_device(e)?),
b"def" => visitor.def_shared_b(&parse_def(e, "def")?),
_ => todo!(),
},
Ok(Event::End(ref e)) => match e.name() {
b"superblock" => visitor.superblock_e(),
b"device" => visitor.device_e(),
b"def" => visitor.def_shared_e(),
_ => todo!(),
},
Ok(Event::Empty(ref e)) => match e.name() {
b"single_mapping" => visitor.map(&parse_single_map(e)?),
b"range_mapping" => visitor.map(&parse_range_map(e)?),
b"ref" => visitor.ref_shared(&parse_def(e, "ref")?),
_ => todo!(),
},
Ok(Event::Text(_)) => Ok(Visit::Continue),