/***************************************************************************
 *  Part of the STXXL library — http://stxxl.sourceforge.net
 *
 *  Authors: <dementiev@mpi-sb.mpg.de>
 *           <beckmann@cs.uni-frankfurt.de>
 *
 *  Distributed under the Boost Software License, Version 1.0.
 *  (See accompanying file LICENSE_1_0.txt or copy at
 *  http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/
#ifndef STXXL_STACK_HEADER
#define STXXL_STACK_HEADER
#include <stack>
#include <vector>
#include <stxxl/bits/deprecated.h>
#include <stxxl/bits/io/request_operations.h>
#include <stxxl/bits/mng/mng.h>
#include <stxxl/bits/mng/typed_block.h>
#include <stxxl/bits/common/simple_vector.h>
#include <stxxl/bits/common/tmeta.h>
#include <stxxl/bits/mng/read_write_pool.h>
#include <stxxl/bits/mng/write_pool.h>
#include <stxxl/bits/mng/prefetch_pool.h>
__STXXL_BEGIN_NAMESPACE
//! \brief Aggregates the compile-time configuration of an external stack.
//!
//! \tparam ValTp element type stored in the stack
//! \tparam BlocksPerPage number of blocks forming one cached page
//! \tparam BlkSz size of one external-memory block in bytes
//! \tparam AllocStr strategy used to allocate new blocks on the disks
//! \tparam SzTp integral type used to count the stored elements
template <class ValTp,
          unsigned BlocksPerPage = 4,
          unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),
          class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
          class SzTp = stxxl::int64>
struct stack_config_generator
{
    typedef ValTp value_type;        //!< stored element type
    typedef SzTp size_type;          //!< element counter type
    typedef AllocStr alloc_strategy; //!< block allocation strategy
    enum { blocks_per_page = BlocksPerPage }; //!< blocks per cached page
    enum { block_size = BlkSz };              //!< block size in bytes
};
//! \brief External stack container, conservative version.
//!
//! Keeps two pages (2 * blocks_per_page blocks) of elements cached in
//! internal memory; a full page is written to disk only when both cached
//! pages are full, and read back only when the cache runs down to a
//! single element.  Fits best if the access pattern consists of
//! irregularly mixed pushes and pops.  For the semantics of the methods
//! see the documentation of \c std::stack.
template <class Config_>
class normal_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    //! \brief type of the elements stored in the stack
    typedef typename cfg::value_type value_type;
    //! \brief strategy used to allocate new blocks on disk
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    //! \brief type used for sizes
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;              // number of elements currently in the stack
    unsigned_type cache_offset;   // offset of the first free element slot within the two cached pages
    value_type * current_element; // pointer to the top element (always inside the cache)
    simple_vector<block_type> cache; // 2 * blocks_per_page blocks of internal buffer
    // The cache is split into two pages: back_page holds the older cached
    // elements (linear offsets 0 .. page_size-1), front_page the newer ones
    // (offsets page_size .. 2*page_size-1); see element().
    typename simple_vector<block_type>::iterator front_page;
    typename simple_vector<block_type>::iterator back_page;
    std::vector<bid_type> bids; // blocks already written to disk, oldest first
    alloc_strategy_type alloc_strategy; // disk allocation strategy for new blocks

public:
    //! \brief Constructs an empty stack.
    normal_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }

    //! \brief Swaps the contents of two stacks in O(1) (no I/O).
    void swap(normal_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(front_page, obj.front_page);
        std::swap(back_page, obj.back_page);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
    }

    //! \brief Construction from a stack of any other type.
    //! \param stack_ stack to copy from; it is left unchanged — the
    //! elements are drained from a temporary duplicate.
    template <class stack_type>
    normal_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);
        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;
        // pop everything into a vector in bottom-to-top order,
        // then push it in that order to reproduce the stack
        std::vector<value_type> tmp(sz);
        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }
        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }

    //! \brief Destructor: frees all blocks the stack wrote to disk.
    virtual ~normal_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }

    //! \brief Returns the number of elements contained in the stack.
    size_type size() const
    {
        return size_;
    }

    //! \brief Returns true if the stack is empty.
    bool empty() const
    {
        return (!size_);
    }

    //! \brief Returns a mutable reference to the top element.
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }

    //! \brief Returns a const reference to the top element.
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }

    //! \brief Inserts a new element at the top of the stack.
    //! \param val value to push
    void push(const value_type & val)
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        // both cached pages full: spill one page to disk before storing val
        if (UNLIKELY(cache_offset == 2 * blocks_per_page * block_type::size))
        {
            STXXL_VERBOSE2("growing, size: " << size_);
            // allocate blocks_per_page fresh disk blocks at the end of bids
            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
            // write the older (back) page out
            simple_vector<request_ptr> requests(blocks_per_page);
            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                requests[i] = (back_page + i)->write(*cur_bid);
            }
            // the newer page becomes the back page; the page being written
            // becomes the front page and is reused once the writes finish
            std::swap(back_page, front_page);
            // reserve now so the next grow does not reallocate bids
            bids.reserve(bids.size() + blocks_per_page);
            // one full page remains cached, plus the element pushed below
            cache_offset = blocks_per_page * block_type::size + 1;
            current_element = &((*front_page)[0]);
            ++size_;
            // must wait: the front page buffers are still being written
            wait_all(requests.begin(), blocks_per_page);
            *current_element = val;
            return;
        }
        current_element = element(cache_offset);
        *current_element = val;
        ++size_;
        ++cache_offset;
    }

    //! \brief Removes the top element.
    void pop()
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        assert(cache_offset > 0);
        assert(size_ > 0);
        // last cached element is being popped and older data exists on
        // disk: read one page back in
        if (UNLIKELY(cache_offset == 1 && bids.size() >= blocks_per_page))
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);
            simple_vector<request_ptr> requests(blocks_per_page);
            {
                // read the most recently spilled page into the front page
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end();
                for (int i = blocks_per_page - 1; i >= 0; --i)
                {
                    requests[i] = (front_page + i)->read(*(--cur_bid));
                }
            }
            // the page being read becomes the back page (older elements)
            std::swap(front_page, back_page);
            cache_offset = blocks_per_page * block_type::size;
            --size_;
            // new top is the last element of the page just read
            current_element = &((*(back_page + (blocks_per_page - 1)))[block_type::size - 1]);
            wait_all(requests.begin(), blocks_per_page);
            // the page now lives in the cache again: free its disk blocks
            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);
            return;
        }
        --size_;
        // new top (if any) is one slot below the vacated one; if the stack
        // became empty this pointer is not dereferenced before the next push
        current_element = element((--cache_offset) - 1);
    }

private:
    //! \brief Maps a linear offset within the two cached pages to the
    //! address of the corresponding cached element.
    value_type * element(unsigned_type offset)
    {
        if (offset < blocks_per_page * block_type::size)
            return &((*(back_page + offset / block_type::size))[offset % block_type::size]);
        // offset falls into the front (newer) page
        unsigned_type unbiased_offset = offset - blocks_per_page * block_type::size;
        return &((*(front_page + unbiased_offset / block_type::size))[unbiased_offset % block_type::size]);
    }
};
//! \brief External stack container that overlaps I/O with computation.
//!
//! Uses two pages of internal buffers: while one page is being filled
//! (or emptied) by the application, the other page's blocks are written
//! to (or prefetched from) disk asynchronously via per-block requests.
//! Fits best if the access pattern consists of long runs of pushes
//! followed by long runs of pops.  For the semantics of the methods see
//! the documentation of \c std::stack.
template <class Config_>
class grow_shrink_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    //! \brief type of the elements stored in the stack
    typedef typename cfg::value_type value_type;
    //! \brief strategy used to allocate new blocks on disk
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    //! \brief type used for sizes
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size,
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;              // number of elements currently in the stack
    unsigned_type cache_offset;   // offset of the first free slot within the active page
    value_type * current_element; // pointer to the top element (in the active page)
    simple_vector<block_type> cache; // 2 * blocks_per_page blocks of internal buffer
    typename simple_vector<block_type>::iterator cache_buffers;   // active page being filled/emptied
    typename simple_vector<block_type>::iterator overlap_buffers; // page with I/O possibly in flight
    simple_vector<request_ptr> requests; // one pending request per overlap buffer (may be NULL)
    std::vector<bid_type> bids; // blocks already written to disk, oldest first
    alloc_strategy_type alloc_strategy; // disk allocation strategy for new blocks

public:
    //! \brief Constructs an empty stack.
    grow_shrink_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }

    //! \brief Swaps the contents of two stacks in O(1) (no I/O).
    void swap(grow_shrink_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(cache_buffers, obj.cache_buffers);
        std::swap(overlap_buffers, obj.overlap_buffers);
        std::swap(requests, obj.requests);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
    }

    //! \brief Construction from a stack of any other type.
    //! \param stack_ stack to copy from; it is left unchanged — the
    //! elements are drained from a temporary duplicate.
    template <class stack_type>
    grow_shrink_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);
        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;
        // pop everything into a vector in bottom-to-top order,
        // then push it in that order to reproduce the stack
        std::vector<value_type> tmp(sz);
        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }
        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }

    //! \brief Destructor: waits for outstanding I/O and frees all blocks
    //! the stack wrote to disk.
    virtual ~grow_shrink_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        try
        {
            // requests[0] is non-NULL iff a page I/O was ever issued
            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);
        }
        catch (const io_error & ex)
        { }  // best effort during destruction: ignore I/O failures
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }

    //! \brief Returns the number of elements contained in the stack.
    size_type size() const
    {
        return size_;
    }

    //! \brief Returns true if the stack is empty.
    bool empty() const
    {
        return (!size_);
    }

    //! \brief Returns a mutable reference to the top element.
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }

    //! \brief Returns a const reference to the top element.
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }

    //! \brief Inserts a new element at the top of the stack.
    //! \param val value to push
    void push(const value_type & val)
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        // active page is full: start writing it out asynchronously and
        // continue pushing into the other page
        if (UNLIKELY(cache_offset == blocks_per_page * block_type::size))
        {
            STXXL_VERBOSE2("growing, size: " << size_);
            // allocate blocks_per_page fresh disk blocks at the end of bids
            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                // wait for any earlier I/O still using this buffer
                if (requests[i].get())
                    requests[i]->wait();
                requests[i] = (cache_buffers + i)->write(*cur_bid);
            }
            // the page being written becomes the overlap page; its writes
            // proceed in the background while the other page is filled
            std::swap(cache_buffers, overlap_buffers);
            // reserve now so the next grow does not reallocate bids
            bids.reserve(bids.size() + blocks_per_page);
            // the element pushed below is the only one in the fresh page
            cache_offset = 1;
            current_element = &((*cache_buffers)[0]);
            ++size_;
            *current_element = val;
            return;
        }
        current_element = &((*(cache_buffers + cache_offset / block_type::size))[cache_offset % block_type::size]);
        *current_element = val;
        ++size_;
        ++cache_offset;
    }

    //! \brief Removes the top element.
    void pop()
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        assert(cache_offset > 0);
        assert(size_ > 0);
        // last element of the active page is popped and older data exists
        // on disk: switch back to the previously written page
        if (UNLIKELY(cache_offset == 1 && bids.size() >= blocks_per_page))
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);
            // finish any outstanding writes/prefetches before reusing buffers
            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);
            // the overlap page still holds the last spilled page in memory
            std::swap(cache_buffers, overlap_buffers);
            if (bids.size() > blocks_per_page)
            {
                STXXL_VERBOSE2("prefetching, size: " << size_);
                // asynchronously prefetch the next older page from disk
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end() - blocks_per_page;
                for (int i = blocks_per_page - 1; i >= 0; --i)
                    requests[i] = (overlap_buffers + i)->read(*(--cur_bid));
            }
            // the restored page lives in the cache again: free its disk blocks
            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);
            cache_offset = blocks_per_page * block_type::size;
            --size_;
            // new top is the last element of the restored page
            current_element = &((*(cache_buffers + (blocks_per_page - 1)))[block_type::size - 1]);
            return;
        }
        --size_;
        // new top (if any) is one slot below the vacated one
        unsigned_type cur_offset = (--cache_offset) - 1;
        current_element = &((*(cache_buffers + cur_offset / block_type::size))[cur_offset % block_type::size]);
    }
};
//! \brief External stack container that uses a shared read_write_pool of
//! blocks for overlapping I/O, with configurable prefetch aggressiveness.
//!
//! Caches a single block internally; full blocks are handed to the pool
//! for writing and popped blocks are read back through the pool, which
//! may already hold them due to prefetch hints.  For the semantics of
//! the methods see the documentation of \c std::stack.
template <class Config_>
class grow_shrink_stack2 : private noncopyable
{
public:
    typedef Config_ cfg;
    //! \brief type of the elements stored in the stack
    typedef typename cfg::value_type value_type;
    //! \brief strategy used to allocate new blocks on disk
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    //! \brief type used for sizes
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size,
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    typedef read_write_pool<block_type> pool_type;

    size_type size_;            // number of elements currently in the stack
    unsigned_type cache_offset; // number of valid elements in the cached block
    block_type * cache;         // the single internally cached block
    std::vector<bid_type> bids; // blocks written via the pool, oldest first
    alloc_strategy_type alloc_strategy; // disk allocation strategy for new blocks
    unsigned_type pref_aggr;    // how many of the newest on-disk blocks to hint for prefetching
    pool_type * owned_pool;     // pool constructed (and owned) by the deprecated constructor, else NULL
    pool_type * pool;           // pool actually used (owned_pool or an external one)

public:
    //! \brief Constructs an empty stack backed by an external pool.
    //! \param pool_ block pool used for writing and prefetching; must
    //! outlive this stack
    //! \param prefetch_aggressiveness number of on-disk blocks to keep
    //! hinted for prefetching (0 disables prefetching)
    grow_shrink_stack2(
        pool_type & pool_,
        unsigned_type prefetch_aggressiveness = 0) :
        size_(0),
        cache_offset(0),
        cache(new block_type),
        pref_aggr(prefetch_aggressiveness),
        owned_pool(NULL),
        pool(&pool_)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
    }

    //! \brief Deprecated constructor from separate prefetch and write
    //! pools; they are wrapped in an internally owned read_write_pool.
    _STXXL_DEPRECATED(grow_shrink_stack2(
                          prefetch_pool<block_type> & p_pool_,
                          write_pool<block_type> & w_pool_,
                          unsigned_type prefetch_aggressiveness = 0)) :
        size_(0),
        cache_offset(0),
        cache(new block_type),
        pref_aggr(prefetch_aggressiveness),
        owned_pool(new pool_type(p_pool_, w_pool_)),
        pool(owned_pool)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
    }

    //! \brief Swaps the contents of two stacks in O(1) (no I/O).
    void swap(grow_shrink_stack2 & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(cache, obj.cache);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
        std::swap(pref_aggr, obj.pref_aggr);
        std::swap(owned_pool, obj.owned_pool);
        std::swap(pool, obj.pool);
    }

    //! \brief Destructor: invalidates hinted blocks in the pool, frees
    //! the cache block, all disk blocks and (if any) the owned pool.
    virtual ~grow_shrink_stack2()
    {
        try
        {
            STXXL_VERBOSE2("grow_shrink_stack2::~grow_shrink_stack2()");
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            int_type i;
            // cancel pool copies of the blocks that were hinted for prefetch
            for (i = bids_size - 1; i >= last_pref; --i)
            {
                pool->invalidate(bids[i]);
            }
            typename std::vector<bid_type>::iterator cur = bids.begin();
            typename std::vector<bid_type>::const_iterator end = bids.end();
            for ( ; cur != end; ++cur)
            {
                // NOTE(review): b is always NULL here, so this loop body
                // never executes — it looks like a placeholder kept from an
                // older implementation (stealing write buffers back from the
                // pool); confirm against upstream before removing
                block_type * b = NULL;
                if (b)
                {
                    pool->add(cache);
                    cache = b;
                }
            }
            delete cache;
        }
        catch (const io_error & ex)
        { }  // best effort during destruction: ignore I/O failures
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
        delete owned_pool;
    }

    //! \brief Returns the number of elements contained in the stack.
    size_type size() const
    {
        return size_;
    }

    //! \brief Returns true if the stack is empty.
    bool empty() const
    {
        return (!size_);
    }

    //! \brief Inserts a new element at the top of the stack.
    //! \param val value to push
    void push(const value_type & val)
    {
        STXXL_VERBOSE3("grow_shrink_stack2::push(" << val << ")");
        assert(cache_offset <= block_type::size);
        // cached block is full: hand it to the pool for writing and
        // continue in a fresh buffer stolen from the pool
        if (UNLIKELY(cache_offset == block_type::size))
        {
            STXXL_VERBOSE2("grow_shrink_stack2::push(" << val << ") growing, size: " << size_);
            // allocate one fresh disk block at the end of bids
            bids.resize(bids.size() + 1);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - 1;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
            // asynchronous write through the pool; get a free buffer back
            pool->write(cache, bids.back());
            cache = pool->steal();
            // clean pool copies of blocks that fell out of the prefetch window
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr) - 1, (int_type)0);
            for (int_type i = bids_size - 2; i >= last_pref; --i)
            {
                pool->invalidate(bids[i]);
            }
            cache_offset = 0;
        }
        (*cache)[cache_offset] = val;
        ++size_;
        ++cache_offset;

        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
    }

    //! \brief Returns a mutable reference to the top element.
    value_type & top()
    {
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return (*cache)[cache_offset - 1];
    }

    //! \brief Returns a const reference to the top element.
    const value_type & top() const
    {
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return (*cache)[cache_offset - 1];
    }

    //! \brief Removes the top element.
    void pop()
    {
        STXXL_VERBOSE3("grow_shrink_stack2::pop()");
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        // last element of the cached block is popped and older data exists
        // on disk: read the most recent block back (may hit the prefetch pool)
        if (UNLIKELY(cache_offset == 1 && (!bids.empty())))
        {
            STXXL_VERBOSE2("grow_shrink_stack2::pop() shrinking, size = " << size_);

            bid_type last_block = bids.back();
            bids.pop_back();
            pool->read(cache, last_block)->wait();
            block_manager::get_instance()->delete_block(last_block);
            // re-issue prefetch hints for the (shrunken) tail of bids
            rehint();
            // set so the decrement below yields block_type::size (block is full)
            cache_offset = block_type::size + 1;
        }
        --cache_offset;
        --size_;
    }

    //! \brief Sets the number of blocks used for prefetching.
    //! \param new_p new prefetch aggressiveness; when decreased, pool
    //! copies of blocks outside the new window are invalidated
    void set_prefetch_aggr(unsigned_type new_p)
    {
        if (pref_aggr > new_p)
        {
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            // clean blocks that will not be prefetched any more
            for (int_type i = bids_size - new_p - 1; i >= last_pref; --i)
            {
                pool->invalidate(bids[i]);
            }
        }
        pref_aggr = new_p;
        rehint();
    }

    //! \brief Returns the number of blocks used for prefetching.
    unsigned_type get_prefetch_aggr() const
    {
        return pref_aggr;
    }

private:
    //! \brief Hints the newest pref_aggr on-disk blocks for prefetching.
    void rehint()
    {
        const int_type bids_size = bids.size();
        const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
        for (int_type i = bids_size - 1; i >= last_pref; --i)
        {
            pool->hint(bids[i]);
        }
    }
};
//! \brief A stack that starts in internal memory and migrates to
//! external memory once it grows beyond a critical size.
//!
//! While fewer than \c CritSize elements are stored, every operation is
//! delegated to an \c InternalStack instance.  The push that makes the
//! size reach \c CritSize copies the contents into an \c ExternalStack,
//! which then serves all further operations.
template <unsigned_type CritSize, class ExternalStack, class InternalStack>
class migrating_stack : private noncopyable
{
public:
    typedef typename ExternalStack::cfg cfg;
    //! \brief type of the elements stored in the stack
    typedef typename cfg::value_type value_type;
    //! \brief type used for sizes
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef InternalStack int_stack_type;
    typedef ExternalStack ext_stack_type;

private:
    enum { critical_size = CritSize }; // size at which migration happens

    int_stack_type * int_impl; // non-NULL while the stack is internal
    ext_stack_type * ext_impl; // non-NULL once the stack has migrated

    // intentionally declared but not implemented: copy construction
    // from an arbitrary stack type is not supported
    template <class stack_type>
    migrating_stack(const stack_type & stack_);

    //! \brief Invariant: exactly one of the two implementations exists.
    bool sane() const
    {
        return (int_impl != NULL) != (ext_impl != NULL);
    }

public:
    //! \brief Constructs an empty (internal) stack.
    migrating_stack() : int_impl(new int_stack_type()), ext_impl(NULL) { }

    //! \brief Swaps the contents of two migrating stacks in O(1).
    void swap(migrating_stack & obj)
    {
        std::swap(int_impl, obj.int_impl);
        std::swap(ext_impl, obj.ext_impl);
    }

    //! \brief Returns true if the stack is still held in internal memory.
    bool internal() const
    {
        assert(sane());
        return int_impl != NULL;
    }

    //! \brief Returns true if the stack has migrated to external memory.
    bool external() const
    {
        assert(sane());
        return ext_impl != NULL;
    }

    //! \brief Returns true if the stack contains no elements.
    bool empty() const
    {
        assert(sane());
        if (int_impl)
            return int_impl->empty();
        return ext_impl->empty();
    }

    //! \brief Returns the number of elements contained in the stack.
    size_type size() const
    {
        assert(sane());
        if (int_impl)
            return size_type(int_impl->size());
        return ext_impl->size();
    }

    //! \brief Returns a mutable reference to the top element.
    value_type & top()
    {
        assert(sane());
        if (int_impl)
            return int_impl->top();
        return ext_impl->top();
    }

    //! \brief Returns a const reference to the top element.
    const value_type & top() const
    {
        assert(sane());
        if (int_impl)
            return int_impl->top();
        return ext_impl->top();
    }

    //! \brief Inserts a new element at the top of the stack, migrating
    //! to external memory when the critical size is reached.
    //! \param val value to push
    void push(const value_type & val)
    {
        assert(sane());
        if (!int_impl)
        {
            ext_impl->push(val);
            return;
        }
        int_impl->push(val);
        if (UNLIKELY(int_impl->size() == critical_size))
        {
            // critical size reached: move contents to external memory
            ext_impl = new ext_stack_type(*int_impl);
            delete int_impl;
            int_impl = NULL;
        }
    }

    //! \brief Removes the top element.
    void pop()
    {
        assert(sane());
        if (int_impl)
            int_impl->pop();
        else
            ext_impl->pop();
    }

    //! \brief Destructor: releases whichever implementation is active
    //! (deleting the NULL one is a no-op).
    virtual ~migrating_stack()
    {
        delete int_impl;
        delete ext_impl;
    }
};
//! \brief Where the stack lives: fully in external memory (\c external),
//! migrating from internal to external memory when it grows large
//! (\c migrating), or purely internal (\c internal).
enum stack_externality { external, migrating, internal };
//! \brief Which external implementation to use: the conservative
//! \c normal one, the overlapping \c grow_shrink one, or the
//! pool-based \c grow_shrink2 one.
enum stack_behaviour { normal, grow_shrink, grow_shrink2 };
//! \brief Stack type generator: selects a stack implementation from the
//! template parameters; the chosen type is exported as \c result.
//!
//! \tparam ValTp element type
//! \tparam Externality \c external, \c migrating or \c internal
//! \tparam Behaviour for (partially) external stacks: \c normal
//! (normal_stack), \c grow_shrink (grow_shrink_stack) or
//! \c grow_shrink2 (grow_shrink_stack2)
//! \tparam BlocksPerPage number of blocks per cached page
//! \tparam BlkSz block size in bytes
//! \tparam IntStackTp internal stack type (used for \c internal and
//! \c migrating)
//! \tparam MigrCritSize size at which a migrating stack moves to
//! external memory
//! \tparam AllocStr block allocation strategy
//! \tparam SzTp size type
template <
    class ValTp,
    stack_externality Externality = external,
    stack_behaviour Behaviour = normal,
    unsigned BlocksPerPage = 4,
    unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),
    class IntStackTp = std::stack<ValTp>,
    unsigned_type MigrCritSize = (2 * BlocksPerPage * BlkSz),
    class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
    class SzTp = stxxl::int64
    >
class STACK_GENERATOR
{
    typedef stack_config_generator<ValTp, BlocksPerPage, BlkSz, AllocStr, SzTp> cfg;

    // pick the grow/shrink flavor first ...
    typedef typename IF<Behaviour == grow_shrink,
                        grow_shrink_stack<cfg>,
                        grow_shrink_stack2<cfg> >::result GrShrTp;
    // ... then decide between the normal and the grow/shrink flavor ...
    typedef typename IF<Behaviour == normal, normal_stack<cfg>, GrShrTp>::result ExtStackTp;
    // ... then wrap in a migrating_stack if requested
    typedef typename IF<Externality == migrating,
                        migrating_stack<MigrCritSize, ExtStackTp, IntStackTp>, ExtStackTp>::result MigrOrNotStackTp;

public:
    //! \brief the generated stack type
    typedef typename IF<Externality == internal, IntStackTp, MigrOrNotStackTp>::result result;
};
__STXXL_END_NAMESPACE
namespace std
{
template <class Config_>
void swap(stxxl::normal_stack<Config_> & a,
stxxl::normal_stack<Config_> & b)
{
a.swap(b);
}
template <class Config_>
void swap(stxxl::grow_shrink_stack<Config_> & a,
stxxl::grow_shrink_stack<Config_> & b)
{
a.swap(b);
}
template <class Config_>
void swap(stxxl::grow_shrink_stack2<Config_> & a,
stxxl::grow_shrink_stack2<Config_> & b)
{
a.swap(b);
}
template <stxxl::unsigned_type CritSize, class ExternalStack, class InternalStack>
void swap(stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & a,
stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & b)
{
a.swap(b);
}
}
#endif