move space maps to their own sub directory

Joe Thornber
2013-01-10 21:36:38 +00:00
parent 6a4facf03b
commit 326fd3408b
15 changed files with 31 additions and 29 deletions


@@ -0,0 +1,189 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#include "persistent-data/space-maps/careful_alloc.h"
#include <set>
//----------------------------------------------------------------
namespace {
class sm_careful_alloc : public checked_space_map {
private:
typedef set<block_address> block_set;
public:
typedef shared_ptr<sm_careful_alloc> ptr;
sm_careful_alloc(checked_space_map::ptr sm)
: sm_(sm) {
}
virtual block_address get_nr_blocks() const {
return sm_->get_nr_blocks();
}
virtual block_address get_nr_free() const {
return sm_->get_nr_free();
}
virtual ref_t get_count(block_address b) const {
return sm_->get_count(b);
}
virtual void set_count(block_address b, ref_t c) {
if (!c && sm_->get_count(b))
mark_freed(b);
sm_->set_count(b, c);
}
virtual void commit() {
sm_->commit();
clear_freed();
}
virtual void inc(block_address b) {
if (was_freed(b))
throw runtime_error("inc of block freed within current transaction");
sm_->inc(b);
}
virtual void dec(block_address b) {
sm_->dec(b);
if (!sm_->get_count(b))
mark_freed(b);
}
// FIXME: rewrite with tests using the run_list stuff.
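// Filters another span_iterator so the spans it yields never include
// blocks recorded in freed_blocks_.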
class no_freed_blocks_iterator : public span_iterator {
public:
no_freed_blocks_iterator(span_iterator &sub_it,
block_set const &freed_blocks)
: sub_it_(sub_it),
freed_blocks_(freed_blocks) {
}
virtual maybe_span first() {
current_span_ = sub_it_.first();
if (current_span_)
current_begin_ = current_span_->first;
return next();
}
virtual maybe_span next() {
if (!current_span_)
return current_span_;
if (current_begin_ == current_span_->second) {
current_span_ = sub_it_.next();
if (!current_span_)
return current_span_;
current_begin_ = current_span_->first;
}
// FIXME: slow
while (current_begin_ != current_span_->second &&
freed_blocks_.count(current_begin_))
current_begin_++;
block_address b = current_begin_;
// FIXME: factor out common code
while (current_begin_ != current_span_->second &&
!freed_blocks_.count(current_begin_))
current_begin_++;
block_address e = current_begin_;
return maybe_span(span(b, e));
}
private:
span_iterator &sub_it_;
block_set const &freed_blocks_;
maybe_span current_span_;
block_address current_begin_;
};
virtual maybe_block new_block(span_iterator &it) {
no_freed_blocks_iterator filtered_it(it, freed_blocks_);
return sm_->new_block(filtered_it);
}
virtual bool count_possibly_greater_than_one(block_address b) const {
return sm_->count_possibly_greater_than_one(b);
}
virtual void extend(block_address extra_blocks) {
return sm_->extend(extra_blocks);
}
virtual void iterate(iterator &it) const {
sm_->iterate(it);
}
virtual size_t root_size() const {
return sm_->root_size();
}
virtual void copy_root(void *dest, size_t len) const {
return sm_->copy_root(dest, len);
}
virtual void check(block_counter &counter) const {
return sm_->check(counter);
}
virtual checked_space_map::ptr clone() const {
return checked_space_map::ptr(new sm_careful_alloc(sm_));
}
private:
void clear_freed() {
freed_blocks_.clear();
}
void mark_freed(block_address b) {
freed_blocks_.insert(b);
}
bool was_freed(block_address b) const {
return freed_blocks_.count(b) > 0;
}
checked_space_map::ptr sm_;
block_set freed_blocks_;
};
}
//----------------------------------------------------------------
checked_space_map::ptr
persistent_data::create_careful_alloc_sm(checked_space_map::ptr sm)
{
return checked_space_map::ptr(new sm_careful_alloc(sm));
}
//----------------------------------------------------------------


@@ -0,0 +1,35 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef SPACE_MAP_CAREFUL_ALLOC_H
#define SPACE_MAP_CAREFUL_ALLOC_H
#include "persistent-data/space_map.h"
//----------------------------------------------------------------
namespace persistent_data {
// This space map ensures no blocks are allocated which have been
// freed within the current transaction. This is a common
// requirement when we want resilience to crashes.
checked_space_map::ptr create_careful_alloc_sm(checked_space_map::ptr sm);
}
//----------------------------------------------------------------
#endif
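A usage sketch (not part of the commit; the core.h path is assumed from this commit's new layout): wrapping any checked_space_map with create_careful_alloc_sm keeps blocks freed in the current transaction out of circulation until commit().

#include "persistent-data/space-maps/careful_alloc.h"
#include "persistent-data/space-maps/core.h"	// path assumed

using namespace persistent_data;

int main()
{
	checked_space_map::ptr core(new core_map(1024));
	checked_space_map::ptr sm = create_careful_alloc_sm(core);

	sm->inc(7);	// allocate block 7
	sm->dec(7);	// free it again within the same transaction
	// sm->inc(7) here would throw: block was freed in this transaction
	sm->commit();	// transaction boundary clears the freed set
	sm->inc(7);	// fine now

	return 0;
}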


@@ -0,0 +1,122 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef CORE_MAP_H
#define CORE_MAP_H
#include "persistent-data/space_map.h"
//----------------------------------------------------------------
namespace persistent_data {
class core_map : public checked_space_map {
public:
typedef boost::shared_ptr<core_map> ptr;
core_map(block_address nr_blocks)
: counts_(nr_blocks, 0),
nr_free_(nr_blocks) {
}
block_address get_nr_blocks() const {
return counts_.size();
}
block_address get_nr_free() const {
return nr_free_;
}
ref_t get_count(block_address b) const {
return counts_[b];
}
void set_count(block_address b, ref_t c) {
if (counts_[b] == 0 && c > 0)
nr_free_--;
else if (counts_[b] > 0 && c == 0)
nr_free_++;
counts_[b] = c;
}
void commit() {
}
void inc(block_address b) {
if (counts_[b] == 0)
nr_free_--;
counts_[b]++;
}
void dec(block_address b) {
counts_[b]--;
if (counts_[b] == 0)
nr_free_++;
}
maybe_block new_block(span_iterator &it) {
for (maybe_span ms = it.first(); ms; ms = it.next()) {
for (block_address b = ms->first; b < ms->second; b++) {
if (b >= counts_.size())
throw std::runtime_error("block out of bounds");
if (!counts_[b]) {
counts_[b] = 1;
nr_free_--;
return maybe_block(b);
}
}
}
return maybe_block();
}
bool count_possibly_greater_than_one(block_address b) const {
return counts_[b] > 1;
}
void extend(block_address extra_blocks) {
throw std::runtime_error("'extend' not implemented");
}
// FIXME: meaningless, but this class is only used for testing
size_t root_size() const {
return 0;
}
// FIXME: meaningless, but this class is only used for testing
virtual void copy_root(void *dest, size_t len) const {
throw std::runtime_error("'copy root' not implemented");
}
checked_space_map::ptr clone() const {
return ptr(new core_map(*this));
}
private:
std::vector<ref_t> counts_;
unsigned nr_free_;
};
}
//----------------------------------------------------------------
#endif


@@ -0,0 +1,727 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#include "disk.h"
#include "disk_structures.h"
#include "recursive.h"
#include "careful_alloc.h"
#include "persistent-data/checksum.h"
#include "persistent-data/endian_utils.h"
#include "persistent-data/math_utils.h"
#include "persistent-data/transaction_manager.h"
using namespace boost;
using namespace persistent_data;
using namespace std;
using namespace sm_disk_detail;
//----------------------------------------------------------------
namespace {
uint64_t const BITMAP_CSUM_XOR = 240779;
struct bitmap_block_validator : public block_manager<>::validator {
virtual void check(buffer<> const &b, block_address location) const {
bitmap_header const *data = reinterpret_cast<bitmap_header const *>(&b);
crc32c sum(BITMAP_CSUM_XOR);
sum.append(&data->not_used, MD_BLOCK_SIZE - sizeof(uint32_t));
if (sum.get_sum() != to_cpu<uint32_t>(data->csum))
throw checksum_error("bad checksum in space map bitmap");
if (to_cpu<uint64_t>(data->blocknr) != location)
throw checksum_error("bad block nr in space map bitmap");
}
virtual void prepare(buffer<> &b, block_address location) const {
bitmap_header *data = reinterpret_cast<bitmap_header *>(&b);
data->blocknr = to_disk<base::__le64, uint64_t>(location);
crc32c sum(BITMAP_CSUM_XOR);
sum.append(&data->not_used, MD_BLOCK_SIZE - sizeof(uint32_t));
data->csum = to_disk<base::__le32>(sum.get_sum());
}
};
block_manager<>::validator::ptr
bitmap_validator() {
return block_manager<>::validator::ptr(new bitmap_block_validator());
}
//--------------------------------
uint64_t const INDEX_CSUM_XOR = 160478;
// FIXME: factor out the common code in these validators
struct index_block_validator : public block_manager<>::validator {
virtual void check(buffer<> const &b, block_address location) const {
metadata_index const *mi = reinterpret_cast<metadata_index const *>(&b);
crc32c sum(INDEX_CSUM_XOR);
sum.append(&mi->padding_, MD_BLOCK_SIZE - sizeof(uint32_t));
if (sum.get_sum() != to_cpu<uint32_t>(mi->csum_))
throw checksum_error("bad checksum in metadata index block");
if (to_cpu<uint64_t>(mi->blocknr_) != location)
throw checksum_error("bad block nr in metadata index block");
}
virtual void prepare(buffer<> &b, block_address location) const {
metadata_index *mi = reinterpret_cast<metadata_index *>(&b);
mi->blocknr_ = to_disk<base::__le64, uint64_t>(location);
crc32c sum(INDEX_CSUM_XOR);
sum.append(&mi->padding_, MD_BLOCK_SIZE - sizeof(uint32_t));
mi->csum_ = to_disk<base::__le32>(sum.get_sum());
}
};
block_manager<>::validator::ptr
index_validator() {
return block_manager<>::validator::ptr(new index_block_validator());
}
//--------------------------------
class bitmap {
public:
typedef transaction_manager::read_ref read_ref;
typedef transaction_manager::write_ref write_ref;
bitmap(transaction_manager::ptr tm,
index_entry const &ie)
: tm_(tm),
ie_(ie) {
}
ref_t lookup(unsigned b) const {
read_ref rr = tm_->read_lock(ie_.blocknr_, bitmap_validator());
void const *bits = bitmap_data(rr);
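// Two bits per entry: b1 is the high bit, b2 the low. A stored value of
// 3 means the real count has overflowed into the ref-count btree (see
// sm_disk::get_count below).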
ref_t b1 = test_bit_le(bits, b * 2);
ref_t b2 = test_bit_le(bits, b * 2 + 1);
ref_t result = b2 ? 1 : 0;
result |= b1 ? 0b10 : 0;
return result;
}
void insert(unsigned b, ref_t n) {
write_ref wr = tm_->shadow(ie_.blocknr_, bitmap_validator()).first;
void *bits = bitmap_data(wr);
bool was_free = !test_bit_le(bits, b * 2) && !test_bit_le(bits, b * 2 + 1);
if (n == 1 || n == 3)
set_bit_le(bits, b * 2 + 1);
else
clear_bit_le(bits, b * 2 + 1);
if (n == 2 || n == 3)
set_bit_le(bits, b * 2);
else
clear_bit_le(bits, b * 2);
ie_.blocknr_ = wr.get_location();
if (was_free && n > 0) {
ie_.nr_free_--;
if (b == ie_.none_free_before_)
ie_.none_free_before_++;
}
if (!was_free && n == 0) {
ie_.nr_free_++;
if (b < ie_.none_free_before_)
ie_.none_free_before_ = b;
}
}
boost::optional<unsigned> find_free(unsigned begin, unsigned end) {
for (unsigned i = max(begin, ie_.none_free_before_); i < end; i++) {
if (lookup(i) == 0) {
insert(i, 1);
ie_.none_free_before_ = i + 1;
return boost::optional<unsigned>(i);
}
}
return boost::optional<unsigned>();
}
index_entry const &get_ie() const {
return ie_;
}
void iterate(block_address offset, block_address hi, space_map::iterator &it) const {
read_ref rr = tm_->read_lock(ie_.blocknr_, bitmap_validator());
void const *bits = bitmap_data(rr);
for (unsigned b = 0; b < hi; b++) {
ref_t b1 = test_bit_le(bits, b * 2);
ref_t b2 = test_bit_le(bits, b * 2 + 1);
ref_t result = b2 ? 1 : 0;
result |= b1 ? 0b10 : 0;
it(offset + b, result);
}
}
private:
void *bitmap_data(transaction_manager::write_ref &wr) {
bitmap_header *h = reinterpret_cast<bitmap_header *>(&wr.data()[0]);
return h + 1;
}
void const *bitmap_data(transaction_manager::read_ref &rr) const {
bitmap_header const *h = reinterpret_cast<bitmap_header const *>(&rr.data()[0]);
return h + 1;
}
transaction_manager::ptr tm_;
index_entry ie_;
};
struct ref_count_traits {
typedef __le32 disk_type;
typedef uint32_t value_type;
typedef NoOpRefCounter<uint32_t> ref_counter;
static void unpack(disk_type const &d, value_type &v) {
v = to_cpu<value_type>(d);
}
static void pack(value_type const &v, disk_type &d) {
d = to_disk<disk_type>(v);
}
};
class ref_count_checker : public btree_checker<1, ref_count_traits> {
public:
typedef boost::shared_ptr<ref_count_checker> ptr;
ref_count_checker(block_counter &counter)
: btree_checker<1, ref_count_traits>(counter) {
}
};
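// Interface to the store of index entries, one per bitmap block. It is
// implemented below by a btree (btree_index_store, used by create_disk_sm)
// and by a flat array held in a single metadata block
// (metadata_index_store, used by create_metadata_sm).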
class index_store {
public:
typedef boost::shared_ptr<index_store> ptr;
virtual void resize(block_address nr_indexes) = 0;
virtual index_entry find_ie(block_address b) const = 0;
virtual void save_ie(block_address b, struct index_entry ie) = 0;
virtual void commit_ies() = 0;
virtual ptr clone() const = 0;
virtual block_address get_root() const = 0;
virtual void check(block_counter &counter, block_address nr_index_entries) const = 0;
};
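// Two bits per entry gives four entries per byte of bitmap payload.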
unsigned const ENTRIES_PER_BLOCK = (MD_BLOCK_SIZE - sizeof(bitmap_header)) * 4;
class sm_disk : public checked_space_map {
public:
typedef boost::shared_ptr<sm_disk> ptr;
typedef transaction_manager::read_ref read_ref;
typedef transaction_manager::write_ref write_ref;
sm_disk(index_store::ptr indexes,
transaction_manager::ptr tm)
: tm_(tm),
indexes_(indexes),
nr_blocks_(0),
nr_allocated_(0),
ref_counts_(tm_, ref_count_traits::ref_counter()) {
}
sm_disk(index_store::ptr indexes,
transaction_manager::ptr tm,
sm_root const &root)
: tm_(tm),
indexes_(indexes),
nr_blocks_(root.nr_blocks_),
nr_allocated_(root.nr_allocated_),
ref_counts_(tm_, root.ref_count_root_, ref_count_traits::ref_counter()) {
}
block_address get_nr_blocks() const {
return nr_blocks_;
}
block_address get_nr_free() const {
return nr_blocks_ - nr_allocated_;
}
ref_t get_count(block_address b) const {
ref_t count = lookup_bitmap(b);
if (count == 3)
return lookup_ref_count(b);
return count;
}
void set_count(block_address b, ref_t c) {
ref_t old = get_count(b);
if (c == old)
return;
if (c > 2) {
if (old < 3)
insert_bitmap(b, 3);
insert_ref_count(b, c);
} else {
if (old > 2)
remove_ref_count(b);
insert_bitmap(b, c);
}
if (old == 0)
nr_allocated_++;
else if (c == 0)
nr_allocated_--;
}
void commit() {
indexes_->commit_ies();
}
void inc(block_address b) {
// FIXME: 2 get_counts
ref_t old = get_count(b);
set_count(b, old + 1);
}
void dec(block_address b) {
ref_t old = get_count(b);
set_count(b, old - 1);
}
// FIXME: keep track of the lowest free block so we
// can start searching from a suitable place.
maybe_block new_block(span_iterator &it) {
for (maybe_span ms = it.first(); ms; ms = it.next()) {
block_address begin = ms->first;
block_address end = ms->second;
block_address begin_index = begin / ENTRIES_PER_BLOCK;
block_address end_index = div_up<block_address>(end, ENTRIES_PER_BLOCK);
for (block_address index = begin_index; index < end_index; index++) {
index_entry ie = indexes_->find_ie(index);
bitmap bm(tm_, ie);
unsigned bit_begin = (index == begin_index) ? (begin % ENTRIES_PER_BLOCK) : 0;
unsigned bit_end = (index == end_index - 1) ? (end % ENTRIES_PER_BLOCK) : ENTRIES_PER_BLOCK;
optional<unsigned> maybe_b = bm.find_free(bit_begin, bit_end);
if (maybe_b) {
indexes_->save_ie(index, bm.get_ie());
nr_allocated_++;
block_address b = (index * ENTRIES_PER_BLOCK) + *maybe_b;
assert(get_count(b) == 1);
return b;
}
}
}
return maybe_block();
}
bool count_possibly_greater_than_one(block_address b) const {
return get_count(b) > 1;
}
virtual void extend(block_address extra_blocks) {
block_address nr_blocks = nr_blocks_ + extra_blocks;
block_address bitmap_count = div_up<block_address>(nr_blocks, ENTRIES_PER_BLOCK);
block_address old_bitmap_count = div_up<block_address>(nr_blocks_, ENTRIES_PER_BLOCK);
indexes_->resize(bitmap_count);
for (block_address i = old_bitmap_count; i < bitmap_count; i++) {
write_ref wr = tm_->new_block(bitmap_validator());
index_entry ie;
ie.blocknr_ = wr.get_location();
ie.nr_free_ = i == (bitmap_count - 1) ?
(nr_blocks % ENTRIES_PER_BLOCK) : ENTRIES_PER_BLOCK;
ie.none_free_before_ = 0;
indexes_->save_ie(i, ie);
}
nr_blocks_ = nr_blocks;
}
virtual void check(block_counter &counter) const {
ref_count_checker::ptr v(new ref_count_checker(counter));
ref_counts_.visit(v);
block_address nr_entries = div_up<block_address>(get_nr_blocks(), ENTRIES_PER_BLOCK);
indexes_->check(counter, nr_entries);
}
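// Wraps a caller-supplied iterator so entries stored as 3 in the bitmap
// are reported with their true count from the ref-count btree.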
struct look_aside_iterator : public iterator {
look_aside_iterator(sm_disk const &smd, iterator &it)
: smd_(smd),
it_(it) {
}
virtual void operator () (block_address b, ref_t c) {
it_(b, c == 3 ? smd_.lookup_ref_count(b) : c);
}
sm_disk const &smd_;
iterator &it_;
};
friend struct look_aside_iterator;
virtual void iterate(iterator &it) const {
look_aside_iterator wrapper(*this, it);
unsigned nr_indexes = div_up<block_address>(nr_blocks_, ENTRIES_PER_BLOCK);
for (unsigned i = 0; i < nr_indexes; i++) {
unsigned hi = (i == nr_indexes - 1) ? (nr_blocks_ % ENTRIES_PER_BLOCK) : ENTRIES_PER_BLOCK;
index_entry ie = indexes_->find_ie(i);
bitmap bm(tm_, ie);
bm.iterate(i * ENTRIES_PER_BLOCK, hi, wrapper);
}
}
virtual size_t root_size() const {
return sizeof(sm_root_disk);
}
virtual void copy_root(void *dest, size_t len) const {
sm_root_disk d;
sm_root v;
if (len < sizeof(d))
throw runtime_error("root too small");
v.nr_blocks_ = sm_disk::get_nr_blocks();
v.nr_allocated_ = sm_disk::get_nr_allocated();
v.bitmap_root_ = get_index_store()->get_root();
v.ref_count_root_ = sm_disk::get_ref_count_root();
sm_root_traits::pack(v, d);
::memcpy(dest, &d, sizeof(d));
}
virtual checked_space_map::ptr clone() const {
sm_root root;
root.nr_blocks_ = nr_blocks_;
root.nr_allocated_ = nr_allocated_;
root.bitmap_root_ = indexes_->get_root();
root.ref_count_root_ = ref_counts_.get_root();
return checked_space_map::ptr(
new sm_disk(indexes_->clone(), tm_, root));
}
protected:
transaction_manager::ptr get_tm() const {
return tm_;
}
block_address get_nr_allocated() const {
return nr_allocated_;
}
block_address get_ref_count_root() const {
return ref_counts_.get_root();
}
index_store::ptr get_index_store() const {
return indexes_;
}
private:
ref_t lookup_bitmap(block_address b) const {
index_entry ie = indexes_->find_ie(b / ENTRIES_PER_BLOCK);
bitmap bm(tm_, ie);
return bm.lookup(b % ENTRIES_PER_BLOCK);
}
void insert_bitmap(block_address b, unsigned n) {
if (n > 3)
throw runtime_error("bitmap can only hold 2 bit values");
index_entry ie = indexes_->find_ie(b / ENTRIES_PER_BLOCK);
bitmap bm(tm_, ie);
bm.insert(b % ENTRIES_PER_BLOCK, n);
indexes_->save_ie(b / ENTRIES_PER_BLOCK, bm.get_ie());
}
ref_t lookup_ref_count(block_address b) const {
uint64_t key[1] = {b};
optional<ref_t> mvalue = ref_counts_.lookup(key);
if (!mvalue)
throw runtime_error("ref count not in tree");
return *mvalue;
}
void insert_ref_count(block_address b, ref_t count) {
uint64_t key[1] = {b};
ref_counts_.insert(key, count);
}
void remove_ref_count(block_address b) {
uint64_t key[1] = {b};
ref_counts_.remove(key);
}
transaction_manager::ptr tm_;
index_store::ptr indexes_;
block_address nr_blocks_;
block_address nr_allocated_;
btree<1, ref_count_traits> ref_counts_;
};
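// Checks the index-entry btree, counting the bitmap blocks it references
// and remembering which indexes were seen so that
// check_all_index_entries_present() can spot missing or out-of-range
// entries.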
class bitmap_tree_validator : public btree_checker<1, index_entry_traits> {
public:
typedef boost::shared_ptr<bitmap_tree_validator> ptr;
bitmap_tree_validator(block_counter &counter)
: btree_checker<1, index_entry_traits>(counter) {
}
bool visit_leaf(unsigned level,
bool sub_root,
optional<uint64_t> key,
btree_detail::node_ref<index_entry_traits> const &n) {
bool r = btree_checker<1, index_entry_traits>::visit_leaf(level, sub_root, key, n);
if (!r)
return r;
for (unsigned i = 0; i < n.get_nr_entries(); i++) {
if (seen_indexes_.count(n.key_at(i)) > 0) {
ostringstream out;
out << "index entry " << i << " is present twice";
throw runtime_error(out.str());
}
seen_indexes_.insert(n.key_at(i));
btree_checker<1, index_entry_traits>::get_counter().inc(n.value_at(i).blocknr_);
}
return true;
}
void check_all_index_entries_present(block_address nr_entries) {
for (block_address i = 0; i < nr_entries; i++) {
if (seen_indexes_.count(i) == 0) {
ostringstream out;
out << "missing index entry " << i;
throw runtime_error(out.str());
}
}
set<block_address>::const_iterator it;
for (it = seen_indexes_.begin(); it != seen_indexes_.end(); ++it) {
if (*it >= nr_entries) {
ostringstream out;
out << "unexpected index entry " << *it;
throw runtime_error(out.str());
}
}
}
private:
set<block_address> seen_indexes_;
};
class btree_index_store : public index_store {
public:
typedef boost::shared_ptr<btree_index_store> ptr;
btree_index_store(transaction_manager::ptr tm)
: tm_(tm),
bitmaps_(tm, index_entry_traits::ref_counter()) {
}
btree_index_store(transaction_manager::ptr tm,
block_address root)
: tm_(tm),
bitmaps_(tm, root, index_entry_traits::ref_counter()) {
}
virtual void resize(block_address nr_entries) {
// No op
}
virtual index_entry find_ie(block_address ie_index) const {
uint64_t key[1] = {ie_index};
optional<index_entry> mindex = bitmaps_.lookup(key);
if (!mindex)
throw runtime_error("Couldn't lookup bitmap");
return *mindex;
}
virtual void save_ie(block_address ie_index, struct index_entry ie) {
uint64_t key[1] = {ie_index};
bitmaps_.insert(key, ie);
}
virtual void commit_ies() {
// No op
}
virtual index_store::ptr clone() const {
return index_store::ptr(new btree_index_store(tm_, bitmaps_.get_root()));
}
virtual block_address get_root() const {
return bitmaps_.get_root();
}
virtual void check(block_counter &counter, block_address nr_index_entries) const {
bitmap_tree_validator::ptr v(new bitmap_tree_validator(counter));
bitmaps_.visit(v);
v->check_all_index_entries_present(nr_index_entries);
}
private:
transaction_manager::ptr tm_;
btree<1, index_entry_traits> bitmaps_;
};
class metadata_index_store : public index_store {
public:
typedef boost::shared_ptr<metadata_index_store> ptr;
metadata_index_store(transaction_manager::ptr tm)
: tm_(tm) {
block_manager<>::write_ref wr = tm_->new_block(index_validator());
bitmap_root_ = wr.get_location();
}
metadata_index_store(transaction_manager::ptr tm, block_address root, block_address nr_indexes)
: tm_(tm),
bitmap_root_(root) {
resize(nr_indexes);
load_ies();
}
virtual void resize(block_address nr_indexes) {
entries_.resize(nr_indexes);
}
virtual index_entry find_ie(block_address ie_index) const {
return entries_[ie_index];
}
virtual void save_ie(block_address ie_index, struct index_entry ie) {
entries_[ie_index] = ie;
}
virtual void commit_ies() {
std::pair<block_manager<>::write_ref, bool> p =
tm_->shadow(bitmap_root_, index_validator());
bitmap_root_ = p.first.get_location();
metadata_index *mdi = reinterpret_cast<metadata_index *>(&p.first.data());
for (unsigned i = 0; i < entries_.size(); i++)
index_entry_traits::pack(entries_[i], mdi->index[i]);
}
virtual index_store::ptr clone() const {
return index_store::ptr(new metadata_index_store(tm_, bitmap_root_, entries_.size()));
}
virtual block_address get_root() const {
return bitmap_root_;
}
virtual void check(block_counter &counter, block_address nr_index_entries) const {
counter.inc(bitmap_root_);
for (unsigned i = 0; i < entries_.size(); i++)
// FIXME: this looks like a hack
if (entries_[i].blocknr_ != 0) // superblock
counter.inc(entries_[i].blocknr_);
}
private:
void load_ies() {
block_manager<>::read_ref rr =
tm_->read_lock(bitmap_root_, index_validator());
metadata_index const *mdi = reinterpret_cast<metadata_index const *>(&rr.data());
for (unsigned i = 0; i < entries_.size(); i++)
index_entry_traits::unpack(*(mdi->index + i), entries_[i]);
}
transaction_manager::ptr tm_;
block_address bitmap_root_;
std::vector<index_entry> entries_;
};
}
//----------------------------------------------------------------
checked_space_map::ptr
persistent_data::create_disk_sm(transaction_manager::ptr tm,
block_address nr_blocks)
{
index_store::ptr store(new btree_index_store(tm));
checked_space_map::ptr sm(new sm_disk(store, tm));
sm->extend(nr_blocks);
sm->commit();
return sm;
}
checked_space_map::ptr
persistent_data::open_disk_sm(transaction_manager::ptr tm, void *root)
{
sm_root_disk d;
sm_root v;
::memcpy(&d, root, sizeof(d));
sm_root_traits::unpack(d, v);
index_store::ptr store(new btree_index_store(tm, v.bitmap_root_));
return checked_space_map::ptr(new sm_disk(store, tm, v));
}
checked_space_map::ptr
persistent_data::create_metadata_sm(transaction_manager::ptr tm, block_address nr_blocks)
{
index_store::ptr store(new metadata_index_store(tm));
checked_space_map::ptr sm(new sm_disk(store, tm));
sm->extend(nr_blocks);
sm->commit();
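// The metadata space map allocates from the same pool its own btrees
// live in, so it is wrapped twice: sm_recursive queues re-entrant count
// updates, and sm_careful_alloc stops blocks freed in this transaction
// from being reused before the commit. open_metadata_sm below applies
// the same wrapping.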
return create_careful_alloc_sm(
create_recursive_sm(sm));
}
checked_space_map::ptr
persistent_data::open_metadata_sm(transaction_manager::ptr tm, void *root)
{
sm_root_disk d;
sm_root v;
::memcpy(&d, root, sizeof(d));
sm_root_traits::unpack(d, v);
block_address nr_indexes = div_up<block_address>(v.nr_blocks_, ENTRIES_PER_BLOCK);
index_store::ptr store(new metadata_index_store(tm, v.bitmap_root_, nr_indexes));
return create_careful_alloc_sm(
create_recursive_sm(
checked_space_map::ptr(new sm_disk(store, tm, v))));
}
//----------------------------------------------------------------


@@ -0,0 +1,44 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef SPACE_MAP_DISK_H
#define SPACE_MAP_DISK_H
// FIXME: why is btree_checker needed?
#include "persistent-data/btree_checker.h"
#include "persistent-data/space_map.h"
//----------------------------------------------------------------
namespace persistent_data {
checked_space_map::ptr
create_disk_sm(transaction_manager::ptr tm, block_address nr_blocks);
checked_space_map::ptr
open_disk_sm(transaction_manager::ptr tm, void *root);
checked_space_map::ptr
create_metadata_sm(transaction_manager::ptr tm, block_address nr_blocks);
checked_space_map::ptr
open_metadata_sm(transaction_manager::ptr tm, void *root);
}
//----------------------------------------------------------------
#endif
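A sketch of how these factories fit together; the include path follows this commit's layout, and the transaction_manager is assumed to come from elsewhere in the library. root_size()/copy_root() serialise the space-map root so open_metadata_sm() can rebuild the map later.

#include "persistent-data/space-maps/disk.h"	// path assumed

#include <vector>

using namespace persistent_data;

// Save the root of a space map into a caller-owned buffer.
std::vector<unsigned char> save_sm_root(checked_space_map::ptr sm)
{
	std::vector<unsigned char> root(sm->root_size());
	sm->copy_root(&root[0], root.size());
	return root;
}

// Reconstruct the metadata space map from a previously saved root.
checked_space_map::ptr reopen_metadata_sm(transaction_manager::ptr tm,
					  std::vector<unsigned char> &root)
{
	return open_metadata_sm(tm, &root[0]);
}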


@@ -0,0 +1,116 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef SPACE_MAP_DISK_STRUCTURES_H
#define SPACE_MAP_DISK_STRUCTURES_H
#include "persistent-data/endian_utils.h"
#include "persistent-data/btree.h"
//----------------------------------------------------------------
namespace persistent_data {
using namespace base;
namespace sm_disk_detail {
struct index_entry_disk {
__le64 blocknr_;
__le32 nr_free_;
__le32 none_free_before_;
} __attribute__ ((packed));
struct index_entry {
uint64_t blocknr_;
uint32_t nr_free_;
uint32_t none_free_before_;
};
struct index_entry_traits {
typedef index_entry_disk disk_type;
typedef index_entry value_type;
typedef NoOpRefCounter<index_entry> ref_counter;
static void unpack(disk_type const &disk, value_type &value) {
value.blocknr_ = to_cpu<uint64_t>(disk.blocknr_);
value.nr_free_ = to_cpu<uint32_t>(disk.nr_free_);
value.none_free_before_ = to_cpu<uint32_t>(disk.none_free_before_);
}
static void pack(value_type const &value, disk_type &disk) {
disk.blocknr_ = to_disk<__le64>(value.blocknr_);
disk.nr_free_ = to_disk<__le32>(value.nr_free_);
disk.none_free_before_ = to_disk<__le32>(value.none_free_before_);
}
};
unsigned const MAX_METADATA_BITMAPS = 255;
unsigned const ENTRIES_PER_BYTE = 4;
struct metadata_index {
__le32 csum_;
__le32 padding_;
__le64 blocknr_;
struct index_entry_disk index[MAX_METADATA_BITMAPS];
} __attribute__ ((packed));
struct sm_root_disk {
__le64 nr_blocks_;
__le64 nr_allocated_;
__le64 bitmap_root_;
__le64 ref_count_root_;
} __attribute__ ((packed));
struct sm_root {
uint64_t nr_blocks_;
uint64_t nr_allocated_;
uint64_t bitmap_root_;
uint64_t ref_count_root_;
};
struct sm_root_traits {
typedef sm_root_disk disk_type;
typedef sm_root value_type;
typedef NoOpRefCounter<sm_root> ref_counter;
static void unpack(disk_type const &disk, value_type &value) {
value.nr_blocks_ = to_cpu<uint64_t>(disk.nr_blocks_);
value.nr_allocated_ = to_cpu<uint64_t>(disk.nr_allocated_);
value.bitmap_root_ = to_cpu<uint64_t>(disk.bitmap_root_);
value.ref_count_root_ = to_cpu<uint64_t>(disk.ref_count_root_);
}
static void pack(value_type const &value, disk_type &disk) {
disk.nr_blocks_ = to_disk<__le64>(value.nr_blocks_);
disk.nr_allocated_ = to_disk<__le64>(value.nr_allocated_);
disk.bitmap_root_ = to_disk<__le64>(value.bitmap_root_);
disk.ref_count_root_ = to_disk<__le64>(value.ref_count_root_);
}
};
struct bitmap_header {
__le32 csum;
__le32 not_used;
__le64 blocknr;
} __attribute__ ((packed));
}
}
//----------------------------------------------------------------
#endif
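The packed structures above imply fixed on-disk sizes. The checks below are a sketch (assuming base::__le32/__le64 are 4- and 8-byte packed types, and that MD_BLOCK_SIZE is the 4096-byte metadata block size used elsewhere in the library); they also show that metadata_index fills exactly one metadata block.

#include "persistent-data/space-maps/disk_structures.h"	// path assumed

#include <boost/static_assert.hpp>

using namespace persistent_data::sm_disk_detail;

BOOST_STATIC_ASSERT(sizeof(index_entry_disk) == 16);		// 8 + 4 + 4
BOOST_STATIC_ASSERT(sizeof(sm_root_disk) == 32);		// 4 * 8
BOOST_STATIC_ASSERT(sizeof(bitmap_header) == 16);		// 4 + 4 + 8
BOOST_STATIC_ASSERT(sizeof(metadata_index) == 16 + 255 * 16);	// == 4096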


@@ -0,0 +1,242 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#include "persistent-data/space-maps/recursive.h"
using namespace persistent_data;
using namespace std;	// map, list, string, runtime_error are used unqualified
//----------------------------------------------------------------
namespace {
struct block_op {
enum op {
INC,
DEC,
SET
};
block_op(op o, block_address b)
: op_(o),
b_(b) {
if (o == SET)
throw runtime_error("SET must take an operand");
}
block_op(op o, block_address b, uint32_t rc)
: op_(o),
b_(b),
rc_(rc) {
if (o != SET)
throw runtime_error("only SET takes an operand");
}
op op_;
block_address b_;
uint32_t rc_;
};
class sm_recursive : public checked_space_map {
public:
sm_recursive(checked_space_map::ptr sm)
: sm_(sm),
depth_(0) {
}
virtual block_address get_nr_blocks() const {
return sm_->get_nr_blocks();
}
virtual block_address get_nr_free() const {
return sm_->get_nr_free();
}
virtual ref_t get_count(block_address b) const {
cant_recurse("get_count");
recursing_const_lock lock(*this);
return sm_->get_count(b);
}
virtual void set_count(block_address b, ref_t c) {
if (depth_)
add_op(block_op(block_op::SET, b, c));
else {
recursing_lock lock(*this);
return sm_->set_count(b, c);
}
}
virtual void commit() {
cant_recurse("commit");
sm_->commit();
}
virtual void inc(block_address b) {
if (depth_)
add_op(block_op(block_op::INC, b));
else {
recursing_lock lock(*this);
return sm_->inc(b);
}
}
virtual void dec(block_address b) {
if (depth_)
add_op(block_op(block_op::DEC, b));
else {
recursing_lock lock(*this);
return sm_->dec(b);
}
}
virtual maybe_block
new_block(span_iterator &it) {
cant_recurse("new_block()");
recursing_lock lock(*this);
return sm_->new_block(it);
}
virtual bool count_possibly_greater_than_one(block_address b) const {
if (depth_)
return true;
else {
recursing_const_lock lock(*this);
return sm_->count_possibly_greater_than_one(b);
}
}
virtual void extend(block_address extra_blocks) {
cant_recurse("extend");
recursing_lock lock(*this);
return sm_->extend(extra_blocks);
}
virtual void iterate(iterator &it) const {
sm_->iterate(it);
}
virtual size_t root_size() const {
cant_recurse("root_size");
recursing_const_lock lock(*this);
return sm_->root_size();
}
virtual void copy_root(void *dest, size_t len) const {
cant_recurse("copy_root");
recursing_const_lock lock(*this);
return sm_->copy_root(dest, len);
}
virtual void check(persistent_data::block_counter &counter) const {
cant_recurse("check");
recursing_const_lock lock(*this);
return sm_->check(counter);
}
virtual checked_space_map::ptr clone() const {
return checked_space_map::ptr(new sm_recursive(sm_->clone()));
}
void flush_ops() {
op_map::const_iterator it, end = ops_.end();
for (it = ops_.begin(); it != end; ++it) {
list<block_op> const &ops = it->second;
list<block_op>::const_iterator op_it, op_end = ops.end();
for (op_it = ops.begin(); op_it != op_end; ++op_it) {
recursing_lock lock(*this);
switch (op_it->op_) {
case block_op::INC:
sm_->inc(op_it->b_);
break;
case block_op::DEC:
sm_->dec(op_it->b_);
break;
case block_op::SET:
sm_->set_count(op_it->b_, op_it->rc_);
break;
}
}
}
ops_.clear();
}
private:
void add_op(block_op const &op) {
ops_[op.b_].push_back(op);
}
void cant_recurse(string const &method) const {
if (depth_)
throw runtime_error("recursive '" + method + "' not supported");
}
struct recursing_lock {
recursing_lock(sm_recursive &smr)
: smr_(smr) {
smr_.depth_++;
}
~recursing_lock() {
if (!--smr_.depth_)
smr_.flush_ops();
}
private:
sm_recursive &smr_;
};
struct recursing_const_lock {
recursing_const_lock(sm_recursive const &smr)
: smr_(smr) {
smr_.depth_++;
}
~recursing_const_lock() {
smr_.depth_--;
}
private:
sm_recursive const &smr_;
};
checked_space_map::ptr sm_;
mutable int depth_;
enum op {
BOP_INC,
BOP_DEC,
BOP_SET
};
typedef map<block_address, list<block_op> > op_map;
op_map ops_;
};
}
//----------------------------------------------------------------
checked_space_map::ptr
persistent_data::create_recursive_sm(checked_space_map::ptr sm)
{
return checked_space_map::ptr(new sm_recursive(sm));
}
//----------------------------------------------------------------


@@ -0,0 +1,32 @@
// Copyright (C) 2011 Red Hat, Inc. All rights reserved.
//
// This file is part of the thin-provisioning-tools source.
//
// thin-provisioning-tools is free software: you can redistribute it
// and/or modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation, either version 3 of
// the License, or (at your option) any later version.
//
// thin-provisioning-tools is distributed in the hope that it will be
// useful, but WITHOUT ANY WARRANTY; without even the implied warranty
// of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License along
// with thin-provisioning-tools. If not, see
// <http://www.gnu.org/licenses/>.
#ifndef SPACE_MAP_RECURSIVE_H
#define SPACE_MAP_RECURSIVE_H
#include "persistent-data/space_map.h"
//----------------------------------------------------------------
namespace persistent_data {
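// A wrapper for space maps whose own implementation allocates and frees
// blocks through the very map being updated (e.g. the btrees backing the
// disk space map). Nested inc/dec/set_count calls are queued and replayed
// once the outermost operation completes.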
checked_space_map::ptr create_recursive_sm(checked_space_map::ptr sm);
}
//----------------------------------------------------------------
#endif