/***************************************************************************
 *  include/stxxl/bits/containers/stack.h
 *
 *  Part of the STXXL. See http://stxxl.sourceforge.net
 *
 *  Copyright (C) 2003-2004 Roman Dementiev <dementiev@mpi-sb.mpg.de>
 *  Copyright (C) 2009, 2010 Andreas Beckmann <beckmann@cs.uni-frankfurt.de>
 *
 *  Distributed under the Boost Software License, Version 1.0.
 *  (See accompanying file LICENSE_1_0.txt or copy at
 *  http://www.boost.org/LICENSE_1_0.txt)
 **************************************************************************/

#ifndef STXXL_STACK_HEADER
#define STXXL_STACK_HEADER

#include <stack>
#include <vector>

#include <stxxl/bits/deprecated.h>
#include <stxxl/bits/io/request_operations.h>
#include <stxxl/bits/mng/mng.h>
#include <stxxl/bits/mng/typed_block.h>
#include <stxxl/bits/common/simple_vector.h>
#include <stxxl/bits/common/tmeta.h>
#include <stxxl/bits/mng/read_write_pool.h>
#include <stxxl/bits/mng/write_pool.h>
#include <stxxl/bits/mng/prefetch_pool.h>


__STXXL_BEGIN_NAMESPACE

//! \addtogroup stlcontinternals
//! \{

template <class ValTp,
          unsigned BlocksPerPage = 4,
          unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),
          class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
          class SzTp = stxxl::int64>
struct stack_config_generator
{
    typedef ValTp value_type;
    enum { blocks_per_page = BlocksPerPage };
    typedef AllocStr alloc_strategy;
    enum { block_size = BlkSz };
    typedef SzTp size_type;
};
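
// Example (illustrative sketch, not part of the library interface): a configuration
// produced by stack_config_generator is what the stack implementations below expect
// as their Config_ template parameter; the concrete types and sizes here are assumptions
// chosen only for illustration. Normally the configuration is assembled indirectly via
// STACK_GENERATOR (defined at the end of this file):
//
//     typedef stxxl::stack_config_generator<double, 4, 2 * 1024 * 1024> my_cfg;
//     typedef stxxl::normal_stack<my_cfg> my_stack;  // uses my_cfg::value_type, block_size, ...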


//! \brief External stack container

//! Conservative implementation. Fits best if your access pattern consists of irregularly
//! mixed pushes and pops.
//! For the semantics of the methods see the documentation of the STL \c std::stack. <BR>
//! To gain the full bandwidth of the disks, \c Config_::BlocksPerPage must be at least the number of disks. <BR>
//! \internal
template <class Config_>
class normal_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;
    unsigned_type cache_offset;
    value_type * current_element;
    simple_vector<block_type> cache;
    typename simple_vector<block_type>::iterator front_page;
    typename simple_vector<block_type>::iterator back_page;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;

public:
    normal_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }

    void swap(normal_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(front_page, obj.front_page);
        std::swap(back_page, obj.back_page);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
    }

    //! \brief Construction from a stack
    //! \param stack_ stack object (may be external or internal; it only has to provide
    //! a copy constructor and the \c size(), \c top() and \c pop() methods)
    template <class stack_type>
    normal_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        front_page(cache.begin() + blocks_per_page),
        back_page(cache.begin()),
        bids(0)
    {
        bids.reserve(blocks_per_page);

        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;

        std::vector<value_type> tmp(sz);

        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }
        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }
    virtual ~normal_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }
    void push(const value_type & val)
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        //assert(cache_offset >= 0);

        if (UNLIKELY(cache_offset == 2 * blocks_per_page * block_type::size))  // cache overflow
        {
            STXXL_VERBOSE2("growing, size: " << size_);

            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());

            simple_vector<request_ptr> requests(blocks_per_page);

            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                requests[i] = (back_page + i)->write(*cur_bid);
            }


            std::swap(back_page, front_page);

            bids.reserve(bids.size() + blocks_per_page);

            cache_offset = blocks_per_page * block_type::size + 1;
            current_element = &((*front_page)[0]);
            ++size_;

            wait_all(requests.begin(), blocks_per_page);

            *current_element = val;

            return;
        }

        current_element = element(cache_offset);
        *current_element = val;
        ++size_;
        ++cache_offset;
    }
    void pop()
    {
        assert(cache_offset <= 2 * blocks_per_page * block_type::size);
        assert(cache_offset > 0);
        assert(size_ > 0);

        if (UNLIKELY(cache_offset == 1 && bids.size() >= blocks_per_page))
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);

            simple_vector<request_ptr> requests(blocks_per_page);

            {
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end();
                for (int i = blocks_per_page - 1; i >= 0; --i)
                {
                    requests[i] = (front_page + i)->read(*(--cur_bid));
                }
            }

            std::swap(front_page, back_page);

            cache_offset = blocks_per_page * block_type::size;
            --size_;
            current_element = &((*(back_page + (blocks_per_page - 1)))[block_type::size - 1]);

            wait_all(requests.begin(), blocks_per_page);

            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);

            return;
        }

        --size_;

        current_element = element((--cache_offset) - 1);
    }

private:
    value_type * element(unsigned_type offset)
    {
        if (offset < blocks_per_page * block_type::size)
            return &((*(back_page + offset / block_type::size))[offset % block_type::size]);


        unsigned_type unbiased_offset = offset - blocks_per_page * block_type::size;
        return &((*(front_page + unbiased_offset / block_type::size))[unbiased_offset % block_type::size]);
    }
};
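
// Example (usage sketch): pushing and popping a large sequence with the conservative
// external stack. The stack type is obtained here via STACK_GENERATOR (defined below),
// which selects normal_stack by default:
//
//     typedef stxxl::STACK_GENERATOR<int>::result ext_stack;  // external, normal behaviour
//     ext_stack s;
//     for (int i = 0; i < 1000000; ++i)
//         s.push(i);
//     while (!s.empty())
//         s.pop();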


//! \brief Efficient implementation that uses prefetching and overlapping of I/O with internal buffers

//! Use it if your access pattern consists of long runs of pushes followed by long runs of pops.
//! For the semantics of the methods see the documentation of the STL \c std::stack.
//! \warning For an access pattern that alternates between pushes and pops at a page boundary,
//! the amortized complexity per operation is not O(1/DB) but rather O(DB).
template <class Config_>
class grow_shrink_stack : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size,
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    size_type size_;
    unsigned_type cache_offset;
    value_type * current_element;
    simple_vector<block_type> cache;
    typename simple_vector<block_type>::iterator cache_buffers;
    typename simple_vector<block_type>::iterator overlap_buffers;
    simple_vector<request_ptr> requests;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;

public:
    grow_shrink_stack() :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);
    }

    void swap(grow_shrink_stack & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(current_element, obj.current_element);
        std::swap(cache, obj.cache);
        std::swap(cache_buffers, obj.cache_buffers);
        std::swap(overlap_buffers, obj.overlap_buffers);
        std::swap(requests, obj.requests);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
    }

    //! \brief Construction from a stack
    //! \param stack_ stack object (may be external or internal; it only has to provide
    //! a copy constructor and the \c size(), \c top() and \c pop() methods)
    template <class stack_type>
    grow_shrink_stack(const stack_type & stack_) :
        size_(0),
        cache_offset(0),
        current_element(NULL),
        cache(blocks_per_page * 2),
        cache_buffers(cache.begin()),
        overlap_buffers(cache.begin() + blocks_per_page),
        requests(blocks_per_page),
        bids(0)
    {
        bids.reserve(blocks_per_page);

        stack_type stack_copy = stack_;
        const size_type sz = stack_copy.size();
        size_type i;

        std::vector<value_type> tmp(sz);

        for (i = 0; i < sz; ++i)
        {
            tmp[sz - i - 1] = stack_copy.top();
            stack_copy.pop();
        }
        for (i = 0; i < sz; ++i)
            this->push(tmp[i]);
    }
    virtual ~grow_shrink_stack()
    {
        STXXL_VERBOSE(STXXL_PRETTY_FUNCTION_NAME);
        try
        {
            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);
        }
        catch (const io_error & ex)
        { }
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
    }
    size_type size() const
    {
        return size_;
    }
    bool empty() const
    {
        return (!size_);
    }
    value_type & top()
    {
        assert(size_ > 0);
        return (*current_element);
    }
    const value_type & top() const
    {
        assert(size_ > 0);
        return (*current_element);
    }
    void push(const value_type & val)
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        //assert(cache_offset >= 0);

        if (UNLIKELY(cache_offset == blocks_per_page * block_type::size))  // cache overflow
        {
            STXXL_VERBOSE2("growing, size: " << size_);

            bids.resize(bids.size() + blocks_per_page);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - blocks_per_page;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());

            for (int i = 0; i < blocks_per_page; ++i, ++cur_bid)
            {
                if (requests[i].get())
                    requests[i]->wait();

                requests[i] = (cache_buffers + i)->write(*cur_bid);
            }

            std::swap(cache_buffers, overlap_buffers);

            bids.reserve(bids.size() + blocks_per_page);

            cache_offset = 1;
            current_element = &((*cache_buffers)[0]);
            ++size_;

            *current_element = val;

            return;
        }

        current_element = &((*(cache_buffers + cache_offset / block_type::size))[cache_offset % block_type::size]);
        *current_element = val;
        ++size_;
        ++cache_offset;
    }
    void pop()
    {
        assert(cache_offset <= blocks_per_page * block_type::size);
        assert(cache_offset > 0);
        assert(size_ > 0);

        if (UNLIKELY(cache_offset == 1 && bids.size() >= blocks_per_page))
        {
            STXXL_VERBOSE2("shrinking, size: " << size_);

            if (requests[0].get())
                wait_all(requests.begin(), blocks_per_page);


            std::swap(cache_buffers, overlap_buffers);

            if (bids.size() > blocks_per_page)
            {
                STXXL_VERBOSE2("prefetching, size: " << size_);
                typename std::vector<bid_type>::const_iterator cur_bid = bids.end() - blocks_per_page;
                for (int i = blocks_per_page - 1; i >= 0; --i)
                    requests[i] = (overlap_buffers + i)->read(*(--cur_bid));
            }

            block_manager::get_instance()->delete_blocks(bids.end() - blocks_per_page, bids.end());
            bids.resize(bids.size() - blocks_per_page);

            cache_offset = blocks_per_page * block_type::size;
            --size_;
            current_element = &((*(cache_buffers + (blocks_per_page - 1)))[block_type::size - 1]);

            return;
        }

        --size_;
        unsigned_type cur_offset = (--cache_offset) - 1;
        current_element = &((*(cache_buffers + cur_offset / block_type::size))[cur_offset % block_type::size]);
    }
};
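
// Example (usage sketch): grow_shrink_stack pays off when the stack first grows and
// then shrinks, because page writes overlap with further pushes and page reads are
// prefetched before the corresponding pops:
//
//     typedef stxxl::STACK_GENERATOR<int, stxxl::external, stxxl::grow_shrink>::result gs_stack;
//     gs_stack s;
//     for (int i = 0; i < 1000000; ++i)
//         s.push(i);   // full cache pages are written back asynchronously
//     while (!s.empty())
//         s.pop();     // previously written pages are read back page-wise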

//! \brief Efficient implementation that uses prefetching and overlapping of I/O with (shared) buffer pools
//! \warning This is a single-buffer stack! Each direction change (a push() followed by a pop() or vice versa) may cause one I/O.
template <class Config_>
class grow_shrink_stack2 : private noncopyable
{
public:
    typedef Config_ cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::alloc_strategy alloc_strategy_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,     // stack of this type has only one page
        block_size = cfg::block_size,
    };

    typedef typed_block<block_size, value_type> block_type;
    typedef BID<block_size> bid_type;

private:
    typedef read_write_pool<block_type> pool_type;

    size_type size_;
    unsigned_type cache_offset;
    block_type * cache;
    std::vector<bid_type> bids;
    alloc_strategy_type alloc_strategy;
    unsigned_type pref_aggr;
    pool_type * owned_pool;
    pool_type * pool;

public:
    //! \brief Constructs stack
    //! \param pool_ block write/prefetch pool
    //! \param prefetch_aggressiveness number of blocks that will be used from prefetch pool
    grow_shrink_stack2(
        pool_type & pool_,
        unsigned_type prefetch_aggressiveness = 0) :
        size_(0),
        cache_offset(0),
        cache(new block_type),
        pref_aggr(prefetch_aggressiveness),
        owned_pool(NULL),
        pool(&pool_)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
    }

    //! \brief Constructs stack
    //! \param p_pool_ prefetch pool that will be used for block prefetching
    //! \param w_pool_ write pool that will be used for block writing
    //! \param prefetch_aggressiveness number of blocks that will be used from the prefetch pool
    _STXXL_DEPRECATED(grow_shrink_stack2(
        prefetch_pool<block_type> & p_pool_,
        write_pool<block_type> & w_pool_,
        unsigned_type prefetch_aggressiveness = 0)) :
        size_(0),
        cache_offset(0),
        cache(new block_type),
        pref_aggr(prefetch_aggressiveness),
        owned_pool(new pool_type(p_pool_, w_pool_)),
        pool(owned_pool)
    {
        STXXL_VERBOSE2("grow_shrink_stack2::grow_shrink_stack2(...)");
    }

    void swap(grow_shrink_stack2 & obj)
    {
        std::swap(size_, obj.size_);
        std::swap(cache_offset, obj.cache_offset);
        std::swap(cache, obj.cache);
        std::swap(bids, obj.bids);
        std::swap(alloc_strategy, obj.alloc_strategy);
        std::swap(pref_aggr, obj.pref_aggr);
        std::swap(owned_pool, obj.owned_pool);
        std::swap(pool, obj.pool);
    }

    virtual ~grow_shrink_stack2()
    {
        try
        {
            STXXL_VERBOSE2("grow_shrink_stack2::~grow_shrink_stack2()");
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            int_type i;
            for (i = bids_size - 1; i >= last_pref; --i)
            {
                // clean the prefetch buffer
                pool->invalidate(bids[i]);
            }
            typename std::vector<bid_type>::iterator cur = bids.begin();
            typename std::vector<bid_type>::const_iterator end = bids.end();
            for ( ; cur != end; ++cur)
            {
                // FIXME: read_write_pool needs something like cancel_write(bid)
                block_type * b = NULL;  // w_pool.steal(*cur);
                if (b)
                {
                    pool->add(cache);   // return buffer
                    cache = b;
                }
            }
            delete cache;
        }
        catch (const io_error & ex)
        { }
        block_manager::get_instance()->delete_blocks(bids.begin(), bids.end());
        delete owned_pool;
    }

    size_type size() const
    {
        return size_;
    }

    bool empty() const
    {
        return (!size_);
    }

    void push(const value_type & val)
    {
        STXXL_VERBOSE3("grow_shrink_stack2::push(" << val << ")");
        assert(cache_offset <= block_type::size);

        if (UNLIKELY(cache_offset == block_type::size))
        {
            STXXL_VERBOSE2("grow_shrink_stack2::push(" << val << ") growing, size: " << size_);

            bids.resize(bids.size() + 1);
            typename std::vector<bid_type>::iterator cur_bid = bids.end() - 1;
            block_manager::get_instance()->new_blocks(alloc_strategy, cur_bid, bids.end(), cur_bid - bids.begin());
            pool->write(cache, bids.back());
            cache = pool->steal();
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr) - 1, (int_type)0);
            for (int_type i = bids_size - 2; i >= last_pref; --i)
            {
                // clean prefetch buffers
                pool->invalidate(bids[i]);
            }
            cache_offset = 0;
        }
        (*cache)[cache_offset] = val;
        ++size_;
        ++cache_offset;

        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
    }

    value_type & top()
    {
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return (*cache)[cache_offset - 1];
    }

    const value_type & top() const
    {
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        return (*cache)[cache_offset - 1];
    }

    void pop()
    {
        STXXL_VERBOSE3("grow_shrink_stack2::pop()");
        assert(size_ > 0);
        assert(cache_offset > 0);
        assert(cache_offset <= block_type::size);
        if (UNLIKELY(cache_offset == 1 && (!bids.empty())))
        {
            STXXL_VERBOSE2("grow_shrink_stack2::pop() shrinking, size = " << size_);

            bid_type last_block = bids.back();
            bids.pop_back();
            pool->read(cache, last_block)->wait();
            block_manager::get_instance()->delete_block(last_block);
            rehint();
            cache_offset = block_type::size + 1;
        }

        --cache_offset;
        --size_;
    }

    //! \brief Sets level of prefetch aggressiveness (number
    //! of blocks from the prefetch pool used for prefetching)
    //! \param new_p new value for the prefetch aggressiveness
    void set_prefetch_aggr(unsigned_type new_p)
    {
        if (pref_aggr > new_p)
        {
            const int_type bids_size = bids.size();
            const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
            for (int_type i = bids_size - new_p - 1; i >= last_pref; --i)
            {
                // clean prefetch buffers
                pool->invalidate(bids[i]);
            }
        }
        pref_aggr = new_p;
        rehint();
    }

    //! \brief Returns number of blocks used for prefetching
    unsigned_type get_prefetch_aggr() const
    {
        return pref_aggr;
    }

private:
    //! \brief hint the last pref_aggr external blocks
    void rehint()
    {
        const int_type bids_size = bids.size();
        const int_type last_pref = STXXL_MAX(int_type(bids_size) - int_type(pref_aggr), (int_type)0);
        for (int_type i = bids_size - 1; i >= last_pref; --i)
        {
            pool->hint(bids[i]);  // prefetch
        }
    }
};
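
// Example (usage sketch): grow_shrink_stack2 shares its buffers with an explicitly
// constructed read_write_pool. The pool sizes (prefetch and write buffers) and the
// prefetch aggressiveness below are assumptions chosen only for illustration:
//
//     typedef stxxl::STACK_GENERATOR<int, stxxl::external, stxxl::grow_shrink2>::result gs2_stack;
//     stxxl::read_write_pool<gs2_stack::block_type> pool(8, 8);  // prefetch/write buffer pool
//     gs2_stack s(pool, 4);  // use up to 4 blocks of the pool for prefetching
//     s.push(42);
//     s.pop();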


//! \brief A stack that migrates from internal to external memory when its size exceeds a certain threshold (\c CritSize)

//! For the semantics of the methods see the documentation of the STL \c std::stack.
template <unsigned_type CritSize, class ExternalStack, class InternalStack>
class migrating_stack : private noncopyable
{
public:
    typedef typename ExternalStack::cfg cfg;
    typedef typename cfg::value_type value_type;
    typedef typename cfg::size_type size_type;
    enum {
        blocks_per_page = cfg::blocks_per_page,
        block_size = cfg::block_size
    };


    typedef InternalStack int_stack_type;
    typedef ExternalStack ext_stack_type;

private:
    enum { critical_size = CritSize };

    int_stack_type * int_impl;
    ext_stack_type * ext_impl;

    // not implemented yet
    template <class stack_type>
    migrating_stack(const stack_type & stack_);

public:
    migrating_stack() : int_impl(new int_stack_type()), ext_impl(NULL) { }

    void swap(migrating_stack & obj)
    {
        std::swap(int_impl, obj.int_impl);
        std::swap(ext_impl, obj.ext_impl);
    }

    //! \brief Returns true if the current implementation is internal, otherwise false
    bool internal() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return int_impl;
    }
    //! \brief Returns true if the current implementation is external, otherwise false
    bool external() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return ext_impl;
    }

    bool empty() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->empty() : ext_impl->empty();
    }
    size_type size() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? size_type(int_impl->size()) : ext_impl->size();
    }
    value_type & top()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->top() : ext_impl->top();
    }
    const value_type & top() const
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));
        return (int_impl) ? int_impl->top() : ext_impl->top();
    }
    void push(const value_type & val)
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
        {
            int_impl->push(val);
            if (UNLIKELY(int_impl->size() == critical_size))
            {
                // migrate to external stack
                ext_impl = new ext_stack_type(*int_impl);
                delete int_impl;
                int_impl = NULL;
            }
        }
        else
            ext_impl->push(val);
    }
    void pop()
    {
        assert((int_impl && !ext_impl) || (!int_impl && ext_impl));

        if (int_impl)
            int_impl->pop();
        else
            ext_impl->pop();
    }
    virtual ~migrating_stack()
    {
        delete int_impl;
        delete ext_impl;
    }
};
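
// Example (usage sketch): a migrating stack starts out with the internal implementation
// (std::stack by default) and switches to the external implementation once critical_size
// elements have been pushed:
//
//     typedef stxxl::STACK_GENERATOR<int, stxxl::migrating, stxxl::normal>::result mig_stack;
//     mig_stack s;
//     s.push(1);  // still internal: s.internal() == true
//     // ... after enough pushes the contents are copied to the external stack
//     //     and s.external() becomes true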

//! \}


//! \addtogroup stlcont
//! \{

enum stack_externality { external, migrating, internal };
enum stack_behaviour { normal, grow_shrink, grow_shrink2 };

//! \brief Stack type generator

//!  \tparam ValTp type of contained objects (POD with no references to internal memory)
//!  \tparam Externality one of
//!    - \c external , \b external container; the implementation is chosen according
//!      to the \c Behaviour parameter (this is the default)
//!    - \c migrating , migrates from the internal implementation given by the \c IntStackTp parameter
//!      to the external implementation given by the \c Behaviour parameter when the size exceeds \c MigrCritSize
//!    - \c internal , chooses the \c IntStackTp implementation
//!  \tparam Behaviour chooses the \b external implementation, one of:
//!    - \c normal , conservative version, implemented in \c stxxl::normal_stack (this is the default)
//!    - \c grow_shrink , efficient version, implemented in \c stxxl::grow_shrink_stack
//!    - \c grow_shrink2 , efficient version, implemented in \c stxxl::grow_shrink_stack2
//!  \tparam BlocksPerPage defines how many blocks one page of the internal cache of an
//!       \b external implementation contains, the default is four. The \c normal and
//!       \c grow_shrink implementations use \b two such pages.
//!  \tparam BlkSz external block size in bytes, default is 2 MiB
//!  \tparam IntStackTp type of the internal stack used for some implementations
//!  \tparam MigrCritSize number of elements at which
//!    \c stxxl::migrating_stack migrates to external memory
//!  \tparam  AllocStr one of the allocation strategies: \c striping , \c RC , \c SR , or \c FR ;
//!    the default is \c RC
//!  \tparam SzTp size type, default is \c stxxl::int64
//!
//! The configured stack type is available as \c STACK_GENERATOR<>::result. <BR> <BR>
//! Examples:
//!    - \c STACK_GENERATOR<double>::result external stack of \c double ,
//!    - \c STACK_GENERATOR<double,internal>::result internal stack of \c double ,
//!    - \c STACK_GENERATOR<double,external,grow_shrink>::result external
//!      grow-shrink stack of \c double ,
//!    - \c STACK_GENERATOR<double,migrating,grow_shrink>::result migrating
//!      grow-shrink stack of \c double whose internal implementation is \c std::stack<double> ,
//!    - \c STACK_GENERATOR<double,migrating,grow_shrink,1,512*1024>::result migrating
//!      grow-shrink stack of \c double with 1 block per page and a block size of 512 KiB
//!      (total memory occupied = 1 MiB).
//! For the semantics of the methods of the configured stack see the documentation of the STL \c std::stack.
template <
    class ValTp,
    stack_externality Externality = external,
    stack_behaviour Behaviour = normal,
    unsigned BlocksPerPage = 4,
    unsigned BlkSz = STXXL_DEFAULT_BLOCK_SIZE(ValTp),

    class IntStackTp = std::stack<ValTp>,
    unsigned_type MigrCritSize = (2 * BlocksPerPage * BlkSz),

    class AllocStr = STXXL_DEFAULT_ALLOC_STRATEGY,
    class SzTp = stxxl::int64
    >
class STACK_GENERATOR
{
    typedef stack_config_generator<ValTp, BlocksPerPage, BlkSz, AllocStr, SzTp> cfg;

    typedef typename IF<Behaviour == grow_shrink,
                        grow_shrink_stack<cfg>,
                        grow_shrink_stack2<cfg> >::result GrShrTp;
    typedef typename IF<Behaviour == normal, normal_stack<cfg>, GrShrTp>::result ExtStackTp;
    typedef typename IF<Externality == migrating,
                        migrating_stack<MigrCritSize, ExtStackTp, IntStackTp>, ExtStackTp>::result MigrOrNotStackTp;

public:
    typedef typename IF<Externality == internal, IntStackTp, MigrOrNotStackTp>::result result;
};
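
// Example (usage sketch): selecting and using a configured stack type; the chosen
// parameters are illustrative, omitted parameters keep their defaults:
//
//     // external grow-shrink stack of double, 1 block per page, 512 KiB blocks
//     typedef stxxl::STACK_GENERATOR<double, stxxl::external, stxxl::grow_shrink,
//                                    1, 512 * 1024>::result my_stack;
//     my_stack s;
//     s.push(3.14);
//     double x = s.top();
//     s.pop();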

//! \}

__STXXL_END_NAMESPACE


namespace std
{
    template <class Config_>
    void swap(stxxl::normal_stack<Config_> & a,
              stxxl::normal_stack<Config_> & b)
    {
        a.swap(b);
    }

    template <class Config_>
    void swap(stxxl::grow_shrink_stack<Config_> & a,
              stxxl::grow_shrink_stack<Config_> & b)
    {
        a.swap(b);
    }

    template <class Config_>
    void swap(stxxl::grow_shrink_stack2<Config_> & a,
              stxxl::grow_shrink_stack2<Config_> & b)
    {
        a.swap(b);
    }

    template <stxxl::unsigned_type CritSize, class ExternalStack, class InternalStack>
    void swap(stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & a,
              stxxl::migrating_stack<CritSize, ExternalStack, InternalStack> & b)
    {
        a.swap(b);
    }
}

#endif // !STXXL_STACK_HEADER
// vim: et:ts=4:sw=4