flow_graph.h

/*
    Copyright 2005-2012 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

#ifndef __TBB_flow_graph_H
#define __TBB_flow_graph_H

#include "tbb_stddef.h"
#include "atomic.h"
#include "spin_mutex.h"
#include "null_mutex.h"
#include "spin_rw_mutex.h"
#include "null_rw_mutex.h"
#include "task.h"
#include "concurrent_vector.h"
#include "internal/_aggregator_impl.h"

// use the VC10 or gcc version of tuple if it is available.
#if __TBB_CPP11_TUPLE_PRESENT
    #include <tuple>
namespace tbb {
    namespace flow {
        using std::tuple;
        using std::tuple_size;
        using std::tuple_element;
        using std::get;
    }
}
#else
    #include "compat/tuple"
#endif

#include <list>
#include <queue>

namespace tbb {
namespace flow {

enum concurrency { unlimited = 0, serial = 1 };

namespace interface6 {

namespace internal {
    template<typename T, typename M> class successor_cache;
    template<typename T, typename M> class broadcast_cache;
    template<typename T, typename M> class round_robin_cache;
}

class continue_msg {};

template< typename T > class sender;
template< typename T > class receiver;
class continue_receiver;

template< typename T >
class sender {
public:
    typedef T output_type;

    typedef receiver<T> successor_type;

    virtual ~sender() {}

    virtual bool register_successor( successor_type &r ) = 0;

    virtual bool remove_successor( successor_type &r ) = 0;

    virtual bool try_get( T & ) { return false; }

    virtual bool try_reserve( T & ) { return false; }

    virtual bool try_release( ) { return false; }

    virtual bool try_consume( ) { return false; }
};

template< typename T > class limiter_node;  // needed for resetting decrementer
template< typename R, typename B > class run_and_put_task;

static tbb::task * const SUCCESSFULLY_ENQUEUED = (task *)-1;

// enqueue left task if necessary.  Returns the non-enqueued task if there is one.
static inline tbb::task *combine_tasks( tbb::task * left, tbb::task * right) {
    // if no RHS task, don't change left.
    if(right == NULL) return left;
    // right != NULL
    if(left == NULL) return right;
    if(left == SUCCESSFULLY_ENQUEUED) return right;
    // left contains a task
    if(right != SUCCESSFULLY_ENQUEUED) {
        // both are valid tasks
        tbb::task::enqueue(*left);
        return right;
    }
    return left;
}

template< typename T >
class receiver {
public:
    typedef T input_type;

    typedef sender<T> predecessor_type;

    virtual ~receiver() {}

    bool try_put( const T& t ) {
        task *res = try_put_task(t);
        if(!res) return false;
        if (res != SUCCESSFULLY_ENQUEUED) task::enqueue(*res);
        return true;
    }

protected:
    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    virtual task *try_put_task(const T& t) = 0;
public:

    virtual bool register_predecessor( predecessor_type & ) { return false; }

    virtual bool remove_predecessor( predecessor_type & ) { return false; }

protected:
    template<typename U> friend class limiter_node;
    virtual void reset_receiver() = 0;

    template<typename TT, typename M>
        friend class internal::successor_cache;
    virtual bool is_continue_receiver() { return false; }
};


class continue_receiver : public receiver< continue_msg > {
public:

    typedef continue_msg input_type;

    typedef sender< continue_msg > predecessor_type;

    continue_receiver( int number_of_predecessors = 0 ) {
        my_predecessor_count = my_initial_predecessor_count = number_of_predecessors;
        my_current_count = 0;
    }

    continue_receiver( const continue_receiver& src ) : receiver<continue_msg>() {
        my_predecessor_count = my_initial_predecessor_count = src.my_initial_predecessor_count;
        my_current_count = 0;
    }

    virtual ~continue_receiver() { }

    /* override */ bool register_predecessor( predecessor_type & ) {
        spin_mutex::scoped_lock l(my_mutex);
        ++my_predecessor_count;
        return true;
    }


    /* override */ bool remove_predecessor( predecessor_type & ) {
        spin_mutex::scoped_lock l(my_mutex);
        --my_predecessor_count;
        return true;
    }

protected:
    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    // The execute() body is assumed to be too small to be worth creating a task for.
    /* override */ task *try_put_task( const input_type & ) {
        {
            spin_mutex::scoped_lock l(my_mutex);
            if ( ++my_current_count < my_predecessor_count )
                return SUCCESSFULLY_ENQUEUED;
            else
                my_current_count = 0;
        }
        task * res = execute();
        return res;
    }

    spin_mutex my_mutex;
    int my_predecessor_count;
    int my_current_count;
    int my_initial_predecessor_count;
    // the friend declaration in the base class did not eliminate the "protected class"
    // error in gcc 4.1.2
    template<typename U> friend class limiter_node;
    /*override*/void reset_receiver() {
        my_current_count = 0;
    }


    virtual task * execute() = 0;
    template<typename TT, typename M>
        friend class internal::successor_cache;
    /*override*/ bool is_continue_receiver() { return true; }
};

#include "internal/_flow_graph_impl.h"
using namespace internal::graph_policy_namespace;

class graph;
class graph_node;

template <typename GraphContainerType, typename GraphNodeType>
class graph_iterator {
    friend class graph;
    friend class graph_node;
public:
    typedef size_t size_type;
    typedef GraphNodeType value_type;
    typedef GraphNodeType* pointer;
    typedef GraphNodeType& reference;
    typedef const GraphNodeType& const_reference;
    typedef std::forward_iterator_tag iterator_category;

    graph_iterator() : my_graph(NULL), current_node(NULL) {}

    graph_iterator(const graph_iterator& other) :
        my_graph(other.my_graph), current_node(other.current_node)
    {}

    graph_iterator& operator=(const graph_iterator& other) {
        if (this != &other) {
            my_graph = other.my_graph;
            current_node = other.current_node;
        }
        return *this;
    }

    reference operator*() const;

    pointer operator->() const;

    bool operator==(const graph_iterator& other) const {
        return ((my_graph == other.my_graph) && (current_node == other.current_node));
    }

    bool operator!=(const graph_iterator& other) const { return !(operator==(other)); }

    graph_iterator& operator++() {
        internal_forward();
        return *this;
    }

    graph_iterator operator++(int) {
        graph_iterator result = *this;
        operator++();
        return result;
    }

private:
    // the graph over which we are iterating
    GraphContainerType *my_graph;
    // pointer into my_graph's my_nodes list
    pointer current_node;

    graph_iterator(GraphContainerType *g, bool begin);
    void internal_forward();
};


class graph : tbb::internal::no_copy {
    friend class graph_node;

    template< typename Body >
    class run_task : public task {
    public:
        run_task( Body& body ) : my_body(body) {}
        task *execute() {
            my_body();
            return NULL;
        }
    private:
        Body my_body;
    };

    template< typename Receiver, typename Body >
    class run_and_put_task : public task {
    public:
        run_and_put_task( Receiver &r, Body& body ) : my_receiver(r), my_body(body) {}
        task *execute() {
            task *res = my_receiver.try_put_task( my_body() );
            if(res == SUCCESSFULLY_ENQUEUED) res = NULL;
            return res;
        }
    private:
        Receiver &my_receiver;
        Body my_body;
    };

public:
    explicit graph() : my_nodes(NULL), my_nodes_last(NULL)
    {
        own_context = true;
        cancelled = false;
        caught_exception = false;
        my_context = new task_group_context();
        my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task );
        my_root_task->set_ref_count(1);
    }

    explicit graph(task_group_context& use_this_context) :
    my_context(&use_this_context), my_nodes(NULL), my_nodes_last(NULL)
    {
        own_context = false;
        my_root_task = ( new ( task::allocate_root(*my_context) ) empty_task );
        my_root_task->set_ref_count(1);
    }


    ~graph() {
        wait_for_all();
        my_root_task->set_ref_count(0);
        task::destroy( *my_root_task );
        if (own_context) delete my_context;
    }


    void increment_wait_count() {
        if (my_root_task)
            my_root_task->increment_ref_count();
    }


    void decrement_wait_count() {
        if (my_root_task)
            my_root_task->decrement_ref_count();
    }


    template< typename Receiver, typename Body >
    void run( Receiver &r, Body body ) {
        task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) )
            run_and_put_task< Receiver, Body >( r, body ) );
    }


    template< typename Body >
    void run( Body body ) {
        task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) )
            run_task< Body >( body ) );
    }


    void wait_for_all() {
        cancelled = false;
        caught_exception = false;
        if (my_root_task) {
#if TBB_USE_EXCEPTIONS
            try {
#endif
                my_root_task->wait_for_all();
                cancelled = my_context->is_group_execution_cancelled();
#if TBB_USE_EXCEPTIONS
            }
            catch(...) {
                my_root_task->set_ref_count(1);
                my_context->reset();
                caught_exception = true;
                cancelled = true;
                throw;
            }
#endif
            my_context->reset();  // consistent with behavior in catch()
            my_root_task->set_ref_count(1);
        }
    }

    task * root_task() {
        return my_root_task;
    }

    // ITERATORS
    template<typename C, typename N>
    friend class graph_iterator;

    // Graph iterator typedefs
    typedef graph_iterator<graph,graph_node> iterator;
    typedef graph_iterator<const graph,const graph_node> const_iterator;

    // Graph iterator constructors
    iterator begin() { return iterator(this, true); }
    iterator end() { return iterator(this, false); }
    const_iterator begin() const { return const_iterator(this, true); }
    const_iterator end() const { return const_iterator(this, false); }
    const_iterator cbegin() const { return const_iterator(this, true); }
    const_iterator cend() const { return const_iterator(this, false); }

    bool is_cancelled() { return cancelled; }
    bool exception_thrown() { return caught_exception; }

    // state reset; not thread-safe.
    void reset();

private:
    task *my_root_task;
    task_group_context *my_context;
    bool own_context;
    bool cancelled;
    bool caught_exception;

    graph_node *my_nodes, *my_nodes_last;

    spin_mutex nodelist_mutex;
    void register_node(graph_node *n);
    void remove_node(graph_node *n);

};

template <typename C, typename N>
graph_iterator<C,N>::graph_iterator(C *g, bool begin) : my_graph(g), current_node(NULL)
{
    if (begin) current_node = my_graph->my_nodes;
    //else it is an end iterator by default
}

template <typename C, typename N>
typename graph_iterator<C,N>::reference graph_iterator<C,N>::operator*() const {
    __TBB_ASSERT(current_node, "graph_iterator at end");
    return *operator->();
}

template <typename C, typename N>
typename graph_iterator<C,N>::pointer graph_iterator<C,N>::operator->() const {
    return current_node;
}


template <typename C, typename N>
void graph_iterator<C,N>::internal_forward() {
    if (current_node) current_node = current_node->next;
}

class graph_node : tbb::internal::no_assign {
    friend class graph;
    template<typename C, typename N>
    friend class graph_iterator;
protected:
    graph& my_graph;
    graph_node *next, *prev;
public:
    graph_node(graph& g) : my_graph(g) {
        my_graph.register_node(this);
    }
    virtual ~graph_node() {
        my_graph.remove_node(this);
    }

protected:
    virtual void reset() = 0;
};

inline void graph::register_node(graph_node *n) {
    n->next = NULL;
    {
        spin_mutex::scoped_lock lock(nodelist_mutex);
        n->prev = my_nodes_last;
        if (my_nodes_last) my_nodes_last->next = n;
        my_nodes_last = n;
        if (!my_nodes) my_nodes = n;
    }
}

inline void graph::remove_node(graph_node *n) {
    {
        spin_mutex::scoped_lock lock(nodelist_mutex);
        __TBB_ASSERT(my_nodes && my_nodes_last, "graph::remove_node: Error: no registered nodes");
        if (n->prev) n->prev->next = n->next;
        if (n->next) n->next->prev = n->prev;
        if (my_nodes_last == n) my_nodes_last = n->prev;
        if (my_nodes == n) my_nodes = n->next;
    }
    n->prev = n->next = NULL;
}

inline void graph::reset() {
    // reset context
    if(my_context) my_context->reset();
    cancelled = false;
    caught_exception = false;
    // reset all the nodes comprising the graph
    for(iterator ii = begin(); ii != end(); ++ii) {
        graph_node *my_p = &(*ii);
        my_p->reset();
    }
}
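
// A minimal usage sketch for graph: run a body as a task tracked by the graph,
// then wait for all graph work to finish.  background_work is an illustrative
// placeholder functor, not part of this header.
//
//     struct background_work {
//         void operator()() { /* do some work */ }
//     };
//
//     tbb::flow::graph g;
//     g.run( background_work() );  // enqueued as a child of the graph's root task
//     g.wait_for_all();            // blocks until all tasks spawned by g are done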


#include "internal/_flow_graph_node_impl.h"

template < typename Output >
class source_node : public graph_node, public sender< Output > {
protected:
    using graph_node::my_graph;
public:
    typedef Output output_type;

    typedef receiver< Output > successor_type;

    template< typename Body >
    source_node( graph &g, Body body, bool is_active = true )
        : graph_node(g), my_root_task(g.root_task()), my_active(is_active), init_my_active(is_active),
        my_body( new internal::source_body_leaf< output_type, Body>(body) ),
        my_reserved(false), my_has_cached_item(false)
    {
        my_successors.set_owner(this);
    }

    source_node( const source_node& src ) :
        graph_node(src.my_graph), sender<Output>(),
        my_root_task( src.my_root_task), my_active(src.init_my_active),
        init_my_active(src.init_my_active), my_body( src.my_body->clone() ),
        my_reserved(false), my_has_cached_item(false)
    {
        my_successors.set_owner(this);
    }

    ~source_node() { delete my_body; }

    /* override */ bool register_successor( receiver<output_type> &r ) {
        spin_mutex::scoped_lock lock(my_mutex);
        my_successors.register_successor(r);
        if ( my_active )
            spawn_put();
        return true;
    }

    /* override */ bool remove_successor( receiver<output_type> &r ) {
        spin_mutex::scoped_lock lock(my_mutex);
        my_successors.remove_successor(r);
        return true;
    }

    /* override */ bool try_get( output_type &v ) {
        spin_mutex::scoped_lock lock(my_mutex);
        if ( my_reserved )
            return false;

        if ( my_has_cached_item ) {
            v = my_cached_item;
            my_has_cached_item = false;
            return true;
        }
        return false;
    }

    /* override */ bool try_reserve( output_type &v ) {
        spin_mutex::scoped_lock lock(my_mutex);
        if ( my_reserved ) {
            return false;
        }

        if ( my_has_cached_item ) {
            v = my_cached_item;
            my_reserved = true;
            return true;
        } else {
            return false;
        }
    }

    /* override */ bool try_release( ) {
        spin_mutex::scoped_lock lock(my_mutex);
        __TBB_ASSERT( my_reserved && my_has_cached_item, "releasing non-existent reservation" );
        my_reserved = false;
        if(!my_successors.empty())
            spawn_put();
        return true;
    }

    /* override */ bool try_consume( ) {
        spin_mutex::scoped_lock lock(my_mutex);
        __TBB_ASSERT( my_reserved && my_has_cached_item, "consuming non-existent reservation" );
        my_reserved = false;
        my_has_cached_item = false;
        if ( !my_successors.empty() ) {
            spawn_put();
        }
        return true;
    }

    void activate() {
        spin_mutex::scoped_lock lock(my_mutex);
        my_active = true;
        if ( !my_successors.empty() )
            spawn_put();
    }

    template<typename Body>
    Body copy_function_object() {
        internal::source_body<output_type> &body_ref = *this->my_body;
        return dynamic_cast< internal::source_body_leaf<output_type, Body> & >(body_ref).get_body();
    }

protected:

    void reset() {
        my_active = init_my_active;
        my_reserved = false;
        my_has_cached_item = false;
    }

private:
    task *my_root_task;
    spin_mutex my_mutex;
    bool my_active;
    bool init_my_active;
    internal::source_body<output_type> *my_body;
    internal::broadcast_cache< output_type > my_successors;
    bool my_reserved;
    bool my_has_cached_item;
    output_type my_cached_item;

    // used by apply_body, can invoke body of node.
    bool try_reserve_apply_body(output_type &v) {
        spin_mutex::scoped_lock lock(my_mutex);
        if ( my_reserved ) {
            return false;
        }
        if ( !my_has_cached_item && (*my_body)(my_cached_item) )
            my_has_cached_item = true;
        if ( my_has_cached_item ) {
            v = my_cached_item;
            my_reserved = true;
            return true;
        } else {
            return false;
        }
    }

    /* override */ void spawn_put( ) {
        task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) )
            internal::source_task_bypass< source_node< output_type > >( *this ) );
    }

    friend class internal::source_task_bypass< source_node< output_type > >;
    /* override */ task * apply_body_bypass( ) {
        output_type v;
        if ( !try_reserve_apply_body(v) )
            return NULL;

        task *last_task = my_successors.try_put_task(v);
        if ( last_task )
            try_consume();
        else
            try_release();
        return last_task;
    }
};  // source_node
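
// Usage sketch for source_node: the body is applied repeatedly to produce a
// stream of items and returns false when exhausted.  counter_body is an
// illustrative placeholder.
//
//     struct counter_body {
//         int my_i;
//         counter_body() : my_i(0) {}
//         bool operator()( int &out ) {
//             if ( my_i < 10 ) { out = my_i++; return true; }
//             return false;  // no more items; the node stops spawning
//         }
//     };
//
//     tbb::flow::graph g;
//     tbb::flow::source_node<int> src( g, counter_body(), false );  // start inactive
//     // connect successors (e.g. with make_edge, defined later in this header),
//     // then start the stream:
//     src.activate();
//     g.wait_for_all();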

template < typename Input, typename Output = continue_msg, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
class function_node : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {
protected:
    using graph_node::my_graph;
public:
    typedef Input input_type;
    typedef Output output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;
    typedef internal::function_input<input_type,output_type,Allocator> fInput_type;
    typedef internal::function_output<output_type> fOutput_type;

    template< typename Body >
    function_node( graph &g, size_t concurrency, Body body ) :
        graph_node(g), internal::function_input<input_type,output_type,Allocator>(g, concurrency, body)
    {}

    function_node( const function_node& src ) :
        graph_node(src.my_graph), internal::function_input<input_type,output_type,Allocator>( src ),
        fOutput_type()
    {}

protected:
    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    using fInput_type::try_put_task;

    // override of graph_node's reset.
    /*override*/void reset() { fInput_type::reset_function_input(); }

    /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
};

template < typename Input, typename Output, typename Allocator >
class function_node<Input,Output,queueing,Allocator> : public graph_node, public internal::function_input<Input,Output,Allocator>, public internal::function_output<Output> {
protected:
    using graph_node::my_graph;
public:
    typedef Input input_type;
    typedef Output output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;
    typedef internal::function_input<input_type,output_type,Allocator> fInput_type;
    typedef internal::function_input_queue<input_type, Allocator> queue_type;
    typedef internal::function_output<output_type> fOutput_type;

    template< typename Body >
    function_node( graph &g, size_t concurrency, Body body ) :
        graph_node(g), fInput_type( g, concurrency, body, new queue_type() )
    {}

    function_node( const function_node& src ) :
        graph_node(src.my_graph), fInput_type( src, new queue_type() ), fOutput_type()
    {}

protected:
    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    using fInput_type::try_put_task;

    /*override*/void reset() { fInput_type::reset_function_input(); }

    /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
};
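
// Usage sketch for function_node: a node with serial concurrency that maps an
// int to its square.  square_body is an illustrative placeholder.
//
//     struct square_body {
//         int operator()( int v ) const { return v * v; }
//     };
//
//     tbb::flow::graph g;
//     tbb::flow::function_node<int,int> squarer( g, tbb::flow::serial, square_body() );
//     squarer.try_put( 3 );  // queueing policy: inputs beyond the concurrency limit are buffered
//     g.wait_for_all();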

#include "tbb/internal/_flow_graph_types_impl.h"

// multifunction_node: a function node that may put to one or more of its output
// ports.  Output is a tuple of output types.
template < typename Input, typename Output, graph_buffer_policy = queueing, typename Allocator=cache_aligned_allocator<Input> >
class multifunction_node :
    public graph_node,
    public internal::multifunction_input
    <
        Input,
        typename internal::wrap_tuple_elements<
            tbb::flow::tuple_size<Output>::value,  // #elements in tuple
            internal::multifunction_output,  // wrap this around each element
            Output // the tuple providing the types
        >::type,
        Allocator
    > {
protected:
    using graph_node::my_graph;
private:
    static const int N = tbb::flow::tuple_size<Output>::value;
public:
    typedef Input input_type;
    typedef typename internal::wrap_tuple_elements<N,internal::multifunction_output, Output>::type output_ports_type;
private:
    typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type;
    typedef typename internal::function_input_queue<input_type,Allocator> queue_type;
public:
    template<typename Body>
    multifunction_node( graph &g, size_t concurrency, Body body ) :
        graph_node(g), base_type(g,concurrency, body)
    {}
    multifunction_node( const multifunction_node &other) :
        graph_node(other.my_graph), base_type(other)
    {}
    // all the guts are in multifunction_input...
protected:
    /*override*/void reset() { base_type::reset(); }
};  // multifunction_node

template < typename Input, typename Output, typename Allocator >
class multifunction_node<Input,Output,queueing,Allocator> : public graph_node, public internal::multifunction_input<Input,
    typename internal::wrap_tuple_elements<tbb::flow::tuple_size<Output>::value, internal::multifunction_output, Output>::type, Allocator> {
protected:
    using graph_node::my_graph;
    static const int N = tbb::flow::tuple_size<Output>::value;
public:
    typedef Input input_type;
    typedef typename internal::wrap_tuple_elements<N, internal::multifunction_output, Output>::type output_ports_type;
private:
    typedef typename internal::multifunction_input<input_type, output_ports_type, Allocator> base_type;
    typedef typename internal::function_input_queue<input_type,Allocator> queue_type;
public:
    template<typename Body>
    multifunction_node( graph &g, size_t concurrency, Body body) :
        graph_node(g), base_type(g,concurrency, body, new queue_type())
    {}
    multifunction_node( const multifunction_node &other) :
        graph_node(other.my_graph), base_type(other, new queue_type())
    {}
    // all the guts are in multifunction_input...
protected:
    /*override*/void reset() { base_type::reset(); }
};  // multifunction_node
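
// Usage sketch for multifunction_node: the body receives the input and the
// tuple of output ports and may put to any subset of them.  router_t and
// router_body are illustrative placeholders.
//
//     typedef tbb::flow::multifunction_node< int, tbb::flow::tuple<int,int> > router_t;
//
//     struct router_body {
//         void operator()( const int &v, router_t::output_ports_type &ports ) {
//             if ( v & 1 ) tbb::flow::get<0>( ports ).try_put( v );  // odd values to port 0
//             else         tbb::flow::get<1>( ports ).try_put( v );  // even values to port 1
//         }
//     };
//
//     tbb::flow::graph g;
//     router_t router( g, tbb::flow::unlimited, router_body() );
//     router.try_put( 7 );
//     g.wait_for_all();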

// split_node: accepts a tuple as input and forwards each element of the tuple to its
// corresponding successors.  The node has unlimited concurrency, so though it is marked as
// "rejecting" it does not reject inputs.
template<typename TupleType, typename Allocator=cache_aligned_allocator<TupleType> >
class split_node : public multifunction_node<TupleType, TupleType, rejecting, Allocator> {
    static const int N = tbb::flow::tuple_size<TupleType>::value;
    typedef multifunction_node<TupleType,TupleType,rejecting,Allocator> base_type;
public:
    typedef typename base_type::output_ports_type output_ports_type;
private:
    struct splitting_body {
        void operator()(const TupleType& t, output_ports_type &p) {
            internal::emit_element<N>::emit_this(t, p);
        }
    };
public:
    typedef TupleType input_type;
    typedef Allocator allocator_type;
    split_node(graph &g) : base_type(g, unlimited, splitting_body()) {}
    split_node( const split_node & other) : base_type(other) {}
};
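
// Usage sketch for split_node: element i of each incoming tuple is forwarded
// to the successors of output port i.
//
//     tbb::flow::graph g;
//     tbb::flow::split_node< tbb::flow::tuple<int,float> > s( g );
//     // port 0 emits the int, port 1 emits the float; attach successors to
//     // each port (e.g. via output_port<N>, provided later in this header).
//     s.try_put( tbb::flow::tuple<int,float>( 1, 2.0f ) );
//     g.wait_for_all();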

template <typename Output>
class continue_node : public graph_node, public internal::continue_input<Output>, public internal::function_output<Output> {
protected:
    using graph_node::my_graph;
public:
    typedef continue_msg input_type;
    typedef Output output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;
    typedef internal::continue_input<Output> fInput_type;
    typedef internal::function_output<output_type> fOutput_type;

    template <typename Body >
    continue_node( graph &g, Body body ) :
        graph_node(g), internal::continue_input<output_type>( g, body )
    {}

    template <typename Body >
    continue_node( graph &g, int number_of_predecessors, Body body ) :
        graph_node(g), internal::continue_input<output_type>( g, number_of_predecessors, body )
    {}

    continue_node( const continue_node& src ) :
        graph_node(src.my_graph), internal::continue_input<output_type>(src),
        internal::function_output<Output>()
    {}

protected:
    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    using fInput_type::try_put_task;

    /*override*/void reset() { internal::continue_input<Output>::reset_receiver(); }

    /* override */ internal::broadcast_cache<output_type> &successors () { return fOutput_type::my_successors; }
};
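
// Usage sketch for continue_node: the body runs once the node has received a
// continue_msg from each registered predecessor.  With no predecessors, a
// single try_put triggers it.  report_body is an illustrative placeholder.
//
//     struct report_body {
//         tbb::flow::continue_msg operator()( const tbb::flow::continue_msg & ) {
//             /* all predecessors completed */
//             return tbb::flow::continue_msg();
//         }
//     };
//
//     tbb::flow::graph g;
//     tbb::flow::continue_node<tbb::flow::continue_msg> done( g, report_body() );
//     done.try_put( tbb::flow::continue_msg() );
//     g.wait_for_all();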

template< typename T >
class overwrite_node : public graph_node, public receiver<T>, public sender<T> {
protected:
    using graph_node::my_graph;
public:
    typedef T input_type;
    typedef T output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;

    overwrite_node(graph &g) : graph_node(g), my_buffer_is_valid(false) {
        my_successors.set_owner( this );
    }

    // Copy constructor; doesn't take anything from src; default won't work
    overwrite_node( const overwrite_node& src ) :
        graph_node(src.my_graph), receiver<T>(), sender<T>(), my_buffer_is_valid(false)
    {
        my_successors.set_owner( this );
    }

    ~overwrite_node() {}

    /* override */ bool register_successor( successor_type &s ) {
        spin_mutex::scoped_lock l( my_mutex );
        if ( my_buffer_is_valid ) {
            // We have a valid value that must be forwarded immediately.
            if ( s.try_put( my_buffer ) || !s.register_predecessor( *this ) ) {
                // We add the successor: it accepted our put or it rejected it but won't let us become a predecessor
                my_successors.register_successor( s );
                return true;
            } else {
                // We don't add the successor: it rejected our put and we became its predecessor instead
                return false;
            }
        } else {
            // No valid value yet, just add as successor
            my_successors.register_successor( s );
            return true;
        }
    }

    /* override */ bool remove_successor( successor_type &s ) {
        spin_mutex::scoped_lock l( my_mutex );
        my_successors.remove_successor(s);
        return true;
    }

    /* override */ bool try_get( T &v ) {
        spin_mutex::scoped_lock l( my_mutex );
        if ( my_buffer_is_valid ) {
            v = my_buffer;
            return true;
        } else {
            return false;
        }
    }

    bool is_valid() {
        spin_mutex::scoped_lock l( my_mutex );
        return my_buffer_is_valid;
    }

    void clear() {
        spin_mutex::scoped_lock l( my_mutex );
        my_buffer_is_valid = false;
    }

protected:
    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    /* override */ task * try_put_task( const T &v ) {
        spin_mutex::scoped_lock l( my_mutex );
        my_buffer = v;
        my_buffer_is_valid = true;
        task * rtask = my_successors.try_put_task(v);
        if(!rtask) rtask = SUCCESSFULLY_ENQUEUED;
        return rtask;
    }

    /*override*/void reset() { my_buffer_is_valid = false; }

    spin_mutex my_mutex;
    internal::broadcast_cache< T, null_rw_mutex > my_successors;
    T my_buffer;
    bool my_buffer_is_valid;
    /*override*/void reset_receiver() {}
};
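
// Usage sketch for overwrite_node: holds a single item that later puts
// overwrite and that try_get reads without consuming.
//
//     tbb::flow::graph g;
//     tbb::flow::overwrite_node<int> last_value( g );
//     last_value.try_put( 1 );
//     last_value.try_put( 2 );            // overwrites the 1
//     int v = 0;
//     bool ok = last_value.try_get( v );  // ok == true, v == 2; buffer stays valid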

template< typename T >
class write_once_node : public overwrite_node<T> {
public:
    typedef T input_type;
    typedef T output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;

    write_once_node(graph& g) : overwrite_node<T>(g) {}

    write_once_node( const write_once_node& src ) : overwrite_node<T>(src) {}

protected:
    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    /* override */ task *try_put_task( const T &v ) {
        spin_mutex::scoped_lock l( this->my_mutex );
        if ( this->my_buffer_is_valid ) {
            return NULL;
        } else {
            this->my_buffer = v;
            this->my_buffer_is_valid = true;
            task *res = this->my_successors.try_put_task(v);
            if(!res) res = SUCCESSFULLY_ENQUEUED;
            return res;
        }
    }
};
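
// Usage sketch for write_once_node: like overwrite_node, but only the first
// put is accepted until clear() invalidates the buffer.
//
//     tbb::flow::graph g;
//     tbb::flow::write_once_node<int> first_value( g );
//     first_value.try_put( 1 );   // accepted
//     first_value.try_put( 2 );   // rejected; the buffer still holds 1
//     first_value.clear();
//     first_value.try_put( 3 );   // accepted again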

template <typename T>
class broadcast_node : public graph_node, public receiver<T>, public sender<T> {
protected:
    using graph_node::my_graph;
private:
    internal::broadcast_cache<T> my_successors;
public:
    typedef T input_type;
    typedef T output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;

    broadcast_node(graph& g) : graph_node(g) {
        my_successors.set_owner( this );
    }

    // Copy constructor
    broadcast_node( const broadcast_node& src ) :
        graph_node(src.my_graph), receiver<T>(), sender<T>()
    {
        my_successors.set_owner( this );
    }

    virtual bool register_successor( receiver<T> &r ) {
        my_successors.register_successor( r );
        return true;
    }

    virtual bool remove_successor( receiver<T> &r ) {
        my_successors.remove_successor( r );
        return true;
    }

protected:
    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    /*override*/ task *try_put_task(const T& t) {
        task *new_task = my_successors.try_put_task(t);
        if(!new_task) new_task = SUCCESSFULLY_ENQUEUED;
        return new_task;
    }

    /*override*/void reset() {}
    /*override*/void reset_receiver() {}
};  // broadcast_node
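
// Usage sketch for broadcast_node: every put is forwarded to all current
// successors; nothing is buffered for successors added later.
//
//     tbb::flow::graph g;
//     tbb::flow::broadcast_node<int> b( g );
//     // attach any number of successors (e.g. with make_edge, defined later
//     // in this header), then:
//     b.try_put( 42 );   // each registered successor is offered the value
//     g.wait_for_all();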

#include "internal/_flow_graph_item_buffer_impl.h"

template <typename T, typename A=cache_aligned_allocator<T> >
class buffer_node : public graph_node, public reservable_item_buffer<T, A>, public receiver<T>, public sender<T> {
protected:
    using graph_node::my_graph;
public:
    typedef T input_type;
    typedef T output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;
    typedef buffer_node<T, A> my_class;
protected:
    typedef size_t size_type;
    internal::round_robin_cache< T, null_rw_mutex > my_successors;

    task *my_parent;

    friend class internal::forward_task_bypass< buffer_node< T, A > >;

    enum op_type {reg_succ, rem_succ, req_item, res_item, rel_res, con_res, put_item, try_fwd_task };
    enum op_stat {WAIT=0, SUCCEEDED, FAILED};

    // implements the aggregator_operation concept
    class buffer_operation : public internal::aggregated_operation< buffer_operation > {
    public:
        char type;
        T *elem;
        task * ltask;
        successor_type *r;
        buffer_operation(const T& e, op_type t) : type(char(t)), elem(const_cast<T*>(&e)) , ltask(NULL) , r(NULL) {}
        buffer_operation(op_type t) : type(char(t)) , ltask(NULL) , r(NULL) {}
    };

    bool forwarder_busy;
    typedef internal::aggregating_functor<my_class, buffer_operation> my_handler;
    friend class internal::aggregating_functor<my_class, buffer_operation>;
    internal::aggregator< my_handler, buffer_operation> my_aggregator;

    virtual void handle_operations(buffer_operation *op_list) {
        buffer_operation *tmp = NULL;
        bool try_forwarding=false;
        while (op_list) {
            tmp = op_list;
            op_list = op_list->next;
            switch (tmp->type) {
            case reg_succ: internal_reg_succ(tmp);  try_forwarding = true; break;
            case rem_succ: internal_rem_succ(tmp); break;
            case req_item: internal_pop(tmp); break;
            case res_item: internal_reserve(tmp); break;
            case rel_res:  internal_release(tmp);  try_forwarding = true; break;
            case con_res:  internal_consume(tmp);  try_forwarding = true; break;
            case put_item: internal_push(tmp);  try_forwarding = true; break;
            case try_fwd_task: internal_forward_task(tmp); break;
            }
        }
        if (try_forwarding && !forwarder_busy) {
            forwarder_busy = true;
            task *new_task = new(task::allocate_additional_child_of(*my_parent))
                    internal::forward_task_bypass< buffer_node<input_type, A> >(*this);
            // tmp should point to the last item handled by the aggregator.  This is the operation
            // the handling thread enqueued.  So modifying that record will be okay.
            tbb::task *z = tmp->ltask;
            tmp->ltask = combine_tasks(z, new_task);  // in case the op generated a task
        }
    }

    inline task *grab_forwarding_task( buffer_operation &op_data) {
        return op_data.ltask;
    }

    inline bool enqueue_forwarding_task(buffer_operation &op_data) {
        task *ft = grab_forwarding_task(op_data);
        if(ft) {
            task::enqueue(*ft);
            return true;
        }
        return false;
    }

    virtual task *forward_task() {
        buffer_operation op_data(try_fwd_task);
        task *last_task = NULL;
        do {
            op_data.status = WAIT;
            op_data.ltask = NULL;
            my_aggregator.execute(&op_data);
            tbb::task *xtask = op_data.ltask;
            last_task = combine_tasks(last_task, xtask);
        } while (op_data.status == SUCCEEDED);
        return last_task;
    }

    virtual void internal_reg_succ(buffer_operation *op) {
        my_successors.register_successor(*(op->r));
        __TBB_store_with_release(op->status, SUCCEEDED);
    }

    virtual void internal_rem_succ(buffer_operation *op) {
        my_successors.remove_successor(*(op->r));
        __TBB_store_with_release(op->status, SUCCEEDED);
    }

    virtual void internal_forward_task(buffer_operation *op) {
        if (this->my_reserved || !this->item_valid(this->my_tail-1)) {
            __TBB_store_with_release(op->status, FAILED);
            this->forwarder_busy = false;
            return;
        }
        T i_copy;
        task * last_task = NULL;
        size_type counter = my_successors.size();
        // Try forwarding, giving each successor a chance
        while (counter>0 && !this->buffer_empty() && this->item_valid(this->my_tail-1)) {
            this->fetch_back(i_copy);
            task *new_task = my_successors.try_put_task(i_copy);
            last_task = combine_tasks(last_task, new_task);
            if(new_task) {
                this->invalidate_back();
                --(this->my_tail);
            }
            --counter;
        }
        op->ltask = last_task;  // return task
        if (last_task && !counter) {
            __TBB_store_with_release(op->status, SUCCEEDED);
        }
        else {
            __TBB_store_with_release(op->status, FAILED);
            forwarder_busy = false;
        }
    }

    virtual void internal_push(buffer_operation *op) {
        this->push_back(*(op->elem));
        __TBB_store_with_release(op->status, SUCCEEDED);
    }

    virtual void internal_pop(buffer_operation *op) {
        if(this->pop_back(*(op->elem))) {
            __TBB_store_with_release(op->status, SUCCEEDED);
        }
        else {
            __TBB_store_with_release(op->status, FAILED);
        }
    }

    virtual void internal_reserve(buffer_operation *op) {
        if(this->reserve_front(*(op->elem))) {
            __TBB_store_with_release(op->status, SUCCEEDED);
        }
        else {
            __TBB_store_with_release(op->status, FAILED);
        }
    }

    virtual void internal_consume(buffer_operation *op) {
        this->consume_front();
        __TBB_store_with_release(op->status, SUCCEEDED);
    }

    virtual void internal_release(buffer_operation *op) {
        this->release_front();
        __TBB_store_with_release(op->status, SUCCEEDED);
    }

public:
    buffer_node( graph &g ) : graph_node(g), reservable_item_buffer<T>(),
        my_parent( g.root_task() ), forwarder_busy(false) {
        my_successors.set_owner(this);
        my_aggregator.initialize_handler(my_handler(this));
    }

    buffer_node( const buffer_node& src ) : graph_node(src.my_graph),
        reservable_item_buffer<T>(), receiver<T>(), sender<T>(),
        my_parent( src.my_parent ) {
        forwarder_busy = false;
        my_successors.set_owner(this);
        my_aggregator.initialize_handler(my_handler(this));
    }

    virtual ~buffer_node() {}

    //
    // message sender implementation
    //

    /* override */ bool register_successor( receiver<output_type> &r ) {
        buffer_operation op_data(reg_succ);
        op_data.r = &r;
        my_aggregator.execute(&op_data);
        (void)enqueue_forwarding_task(op_data);
        return true;
    }

    /* override */ bool remove_successor( receiver<output_type> &r ) {
        r.remove_predecessor(*this);
        buffer_operation op_data(rem_succ);
        op_data.r = &r;
        my_aggregator.execute(&op_data);
        // even though this operation does not cause a forward, if we are the handler, and
        // a forward is scheduled, we may be the first to reach this point after the aggregator,
        // and so should check for the task.
        (void)enqueue_forwarding_task(op_data);
        return true;
    }

    /* override */ bool try_get( T &v ) {
        buffer_operation op_data(req_item);
        op_data.elem = &v;
        my_aggregator.execute(&op_data);
        (void)enqueue_forwarding_task(op_data);
        return (op_data.status==SUCCEEDED);
    }

    /* override */ bool try_reserve( T &v ) {
        buffer_operation op_data(res_item);
        op_data.elem = &v;
        my_aggregator.execute(&op_data);
        (void)enqueue_forwarding_task(op_data);
        return (op_data.status==SUCCEEDED);
    }

    /* override */ bool try_release() {
        buffer_operation op_data(rel_res);
        my_aggregator.execute(&op_data);
        (void)enqueue_forwarding_task(op_data);
        return true;
    }

    /* override */ bool try_consume() {
        buffer_operation op_data(con_res);
        my_aggregator.execute(&op_data);
        (void)enqueue_forwarding_task(op_data);
        return true;
    }

protected:

    template< typename R, typename B > friend class run_and_put_task;
    template<typename X, typename Y> friend class internal::broadcast_cache;
    template<typename X, typename Y> friend class internal::round_robin_cache;
    /* override */ task *try_put_task(const T &t) {
        buffer_operation op_data(t, put_item);
        my_aggregator.execute(&op_data);
        task *ft = grab_forwarding_task(op_data);
        if(!ft) {
            ft = SUCCESSFULLY_ENQUEUED;
        }
        return ft;
    }

    /*override*/void reset() {
        reservable_item_buffer<T, A>::reset();
        forwarder_busy = false;
    }

    /*override*/void reset_receiver() {
        // nothing to do; no predecessor_cache
    }

};  // buffer_node
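
// Usage sketch for buffer_node: items are buffered and offered to successors
// (or popped with try_get) in no particular order.
//
//     tbb::flow::graph g;
//     tbb::flow::buffer_node<int> buf( g );
//     buf.try_put( 1 );
//     buf.try_put( 2 );
//     int v = 0;
//     while ( buf.try_get( v ) ) { /* order of 1 and 2 is unspecified */ }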

template <typename T, typename A=cache_aligned_allocator<T> >
class queue_node : public buffer_node<T, A> {
protected:
    typedef typename buffer_node<T, A>::size_type size_type;
    typedef typename buffer_node<T, A>::buffer_operation queue_operation;

    enum op_stat {WAIT=0, SUCCEEDED, FAILED};

    /* override */ void internal_forward_task(queue_operation *op) {
        if (this->my_reserved || !this->item_valid(this->my_head)) {
            __TBB_store_with_release(op->status, FAILED);
            this->forwarder_busy = false;
            return;
        }
        T i_copy;
        task *last_task = NULL;
        size_type counter = this->my_successors.size();
        // Keep trying to send items while there is at least one accepting successor
        while (counter>0 && this->item_valid(this->my_head)) {
            this->fetch_front(i_copy);
            task *new_task = this->my_successors.try_put_task(i_copy);
            if(new_task) {
                this->invalidate_front();
                ++(this->my_head);
                last_task = combine_tasks(last_task, new_task);
            }
            --counter;
        }
        op->ltask = last_task;
        if (last_task && !counter)
            __TBB_store_with_release(op->status, SUCCEEDED);
        else {
            __TBB_store_with_release(op->status, FAILED);
            this->forwarder_busy = false;
        }
    }

    /* override */ void internal_pop(queue_operation *op) {
        if ( this->my_reserved || !this->item_valid(this->my_head)){
            __TBB_store_with_release(op->status, FAILED);
        }
        else {
            this->pop_front(*(op->elem));
            __TBB_store_with_release(op->status, SUCCEEDED);
        }
    }
    /* override */ void internal_reserve(queue_operation *op) {
        if (this->my_reserved || !this->item_valid(this->my_head)) {
            __TBB_store_with_release(op->status, FAILED);
        }
        else {
            this->my_reserved = true;
            this->fetch_front(*(op->elem));
            this->invalidate_front();
            __TBB_store_with_release(op->status, SUCCEEDED);
        }
    }
    /* override */ void internal_consume(queue_operation *op) {
        this->consume_front();
        __TBB_store_with_release(op->status, SUCCEEDED);
    }

public:
    typedef T input_type;
    typedef T output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;

    queue_node( graph &g ) : buffer_node<T, A>(g) {}

    queue_node( const queue_node& src) : buffer_node<T, A>(src) {}
};
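
// Usage sketch for queue_node: as buffer_node, but items are forwarded and
// popped in FIFO order.
//
//     tbb::flow::graph g;
//     tbb::flow::queue_node<int> q( g );
//     q.try_put( 1 );
//     q.try_put( 2 );
//     int v = 0;
//     q.try_get( v );   // v == 1: first in, first out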

template< typename T, typename A=cache_aligned_allocator<T> >
class sequencer_node : public queue_node<T, A> {
    internal::function_body< T, size_t > *my_sequencer;
public:
    typedef T input_type;
    typedef T output_type;
    typedef sender< input_type > predecessor_type;
    typedef receiver< output_type > successor_type;

    template< typename Sequencer >
    sequencer_node( graph &g, const Sequencer& s ) : queue_node<T, A>(g),
        my_sequencer(new internal::function_body_leaf< T, size_t, Sequencer>(s) ) {}

    sequencer_node( const sequencer_node& src ) : queue_node<T, A>(src),
        my_sequencer( src.my_sequencer->clone() ) {}

    ~sequencer_node() { delete my_sequencer; }
protected:
    typedef typename buffer_node<T, A>::size_type size_type;
    typedef typename buffer_node<T, A>::buffer_operation sequencer_operation;

    enum op_stat {WAIT=0, SUCCEEDED, FAILED};

private:
    /* override */ void internal_push(sequencer_operation *op) {
        size_type tag = (*my_sequencer)(*(op->elem));

        this->my_tail = (tag+1 > this->my_tail) ? tag+1 : this->my_tail;

        if(this->size() > this->capacity())
            this->grow_my_array(this->size());  // tail already has 1 added to it
        this->item(tag) = std::make_pair( *(op->elem), true );
        __TBB_store_with_release(op->status, SUCCEEDED);
    }
};
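
// Usage sketch for sequencer_node: the user-supplied sequencer maps each item
// to its sequence number, and items are forwarded in increasing sequence
// order starting from 0.  msg_t and its id field are illustrative placeholders.
//
//     struct msg_t { size_t id; /* payload */ };
//     struct sequence_of {
//         size_t operator()( const msg_t &m ) const { return m.id; }
//     };
//
//     tbb::flow::graph g;
//     tbb::flow::sequencer_node<msg_t> seq( g, sequence_of() );
//     // an item put out of order is held until every item with a smaller
//     // sequence number has been forwarded.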
01541 
01543 template< typename T, typename Compare = std::less<T>, typename A=cache_aligned_allocator<T> >
01544 class priority_queue_node : public buffer_node<T, A> {
01545 public:
01546     typedef T input_type;
01547     typedef T output_type;
01548     typedef buffer_node<T,A> base_type;
01549     typedef sender< input_type > predecessor_type;
01550     typedef receiver< output_type > successor_type;
01551 
01553     priority_queue_node( graph &g ) : buffer_node<T, A>(g), mark(0) {}
01554 
01556     priority_queue_node( const priority_queue_node &src ) : buffer_node<T, A>(src), mark(0) {}
01557 
01558 protected:
01559 
01560     /*override*/void reset() {
01561         mark = 0;
01562         base_type::reset();
01563     }
01564 
01565     typedef typename buffer_node<T, A>::size_type size_type;
01566     typedef typename buffer_node<T, A>::item_type item_type;
01567     typedef typename buffer_node<T, A>::buffer_operation prio_operation;
01568 
01569     enum op_stat {WAIT=0, SUCCEEDED, FAILED};
01570 
01571     /* override */ void handle_operations(prio_operation *op_list) {
01572         prio_operation *tmp = op_list /*, *pop_list*/ ;
01573         bool try_forwarding=false;
01574         while (op_list) {
01575             tmp = op_list;
01576             op_list = op_list->next;
01577             switch (tmp->type) {
01578             case buffer_node<T, A>::reg_succ: this->internal_reg_succ(tmp); try_forwarding = true; break;
01579             case buffer_node<T, A>::rem_succ: this->internal_rem_succ(tmp); break;
01580             case buffer_node<T, A>::put_item: internal_push(tmp); try_forwarding = true; break;
01581             case buffer_node<T, A>::try_fwd_task: internal_forward_task(tmp); break;
01582             case buffer_node<T, A>::rel_res: internal_release(tmp); try_forwarding = true; break;
01583             case buffer_node<T, A>::con_res: internal_consume(tmp); try_forwarding = true; break;
01584             case buffer_node<T, A>::req_item: internal_pop(tmp); break;
01585             case buffer_node<T, A>::res_item: internal_reserve(tmp); break;
01586             }
01587         }
01588         // process pops!  for now, no special pop processing
01589         if (mark<this->my_tail) heapify();
01590         if (try_forwarding && !this->forwarder_busy) {
01591             this->forwarder_busy = true;
01592             task *new_task = new(task::allocate_additional_child_of(*(this->my_parent))) internal::
01593                     forward_task_bypass
01594                     < buffer_node<input_type, A> >(*this);
01595             // tmp points to the last operation handled by the aggregator, i.e. the one
01596             // the handling thread itself enqueued, so it is safe to modify that record.
01597             tbb::task *tmp1 = tmp->ltask;
01598             tmp->ltask = combine_tasks(tmp1, new_task);
01599         }
01600     }
01601 
01603     /* override */ void internal_forward_task(prio_operation *op) {
01604         T i_copy;
01605         task * last_task = NULL; // set non-NULL when a successor accepts
01606         size_type counter = this->my_successors.size();
01607 
01608         if (this->my_reserved || this->my_tail == 0) {
01609             __TBB_store_with_release(op->status, FAILED);
01610             this->forwarder_busy = false;
01611             return;
01612         }
01613         // Keep trying to send while there exists an accepting successor
01614         while (counter>0 && this->my_tail > 0) {
01615             i_copy = this->my_array[0].first;
01616             task * new_task = this->my_successors.try_put_task(i_copy);
01617             last_task = combine_tasks(last_task, new_task);
01618             if ( new_task ) {
01619                 if (mark == this->my_tail) --mark;
01620                 --(this->my_tail);
01621                 this->my_array[0].first=this->my_array[this->my_tail].first;
01622                 if (this->my_tail > 1) // don't reheap for heap of size 1
01623                     reheap();
01624             }
01625             --counter;
01626         }
01627         op->ltask = last_task;
01628         if (last_task && !counter)
01629             __TBB_store_with_release(op->status, SUCCEEDED);
01630         else {
01631             __TBB_store_with_release(op->status, FAILED);
01632             this->forwarder_busy = false;
01633         }
01634     }
01635 
01636     /* override */ void internal_push(prio_operation *op) {
01637         if ( this->my_tail >= this->my_array_size )
01638             this->grow_my_array( this->my_tail + 1 );
01639         this->my_array[this->my_tail] = std::make_pair( *(op->elem), true );
01640         ++(this->my_tail);
01641         __TBB_store_with_release(op->status, SUCCEEDED);
01642     }
01643 
01644     /* override */ void internal_pop(prio_operation *op) {
01645         if ( this->my_reserved || this->my_tail == 0 ) {
01646             __TBB_store_with_release(op->status, FAILED);
01647         }
01648         else {
01649             if (mark<this->my_tail &&
01650                 compare(this->my_array[0].first,
01651                         this->my_array[this->my_tail-1].first)) {
01652                 // there are newly pushed elements; the last one has higher priority than the top
01653                 // copy the data
01654                 *(op->elem) = this->my_array[this->my_tail-1].first;
01655                 --(this->my_tail);
01656                 __TBB_store_with_release(op->status, SUCCEEDED);
01657             }
01658             else { // extract and push the last element down heap
01659                 *(op->elem) = this->my_array[0].first; // copy the data
01660                 if (mark == this->my_tail) --mark;
01661                 --(this->my_tail);
01662                 __TBB_store_with_release(op->status, SUCCEEDED);
01663                 this->my_array[0].first=this->my_array[this->my_tail].first;
01664                 if (this->my_tail > 1) // don't reheap for heap of size 1
01665                     reheap();
01666             }
01667         }
01668     }
01669     /* override */ void internal_reserve(prio_operation *op) {
01670         if (this->my_reserved || this->my_tail == 0) {
01671             __TBB_store_with_release(op->status, FAILED);
01672         }
01673         else {
01674             this->my_reserved = true;
01675             *(op->elem) = reserved_item = this->my_array[0].first;
01676             if (mark == this->my_tail) --mark;
01677             --(this->my_tail);
01678             __TBB_store_with_release(op->status, SUCCEEDED);
01679             this->my_array[0].first = this->my_array[this->my_tail].first;
01680             if (this->my_tail > 1) // don't reheap for heap of size 1
01681                 reheap();
01682         }
01683     }
01684     /* override */ void internal_consume(prio_operation *op) {
01685         this->my_reserved = false;
01686         __TBB_store_with_release(op->status, SUCCEEDED);
01687     }
01688     /* override */ void internal_release(prio_operation *op) {
01689         if (this->my_tail >= this->my_array_size)
01690             this->grow_my_array( this->my_tail + 1 );
01691         this->my_array[this->my_tail] = std::make_pair(reserved_item, true);
01692         ++(this->my_tail);
01693         this->my_reserved = false;
01694         __TBB_store_with_release(op->status, SUCCEEDED);
01695         heapify();
01696     }
01697 private:
01698     Compare compare;
01699     size_type mark;
01700     input_type reserved_item;
01701 
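    // Editorial note on the heap layout: elements [0, mark) form a binary heap
    // ordered by `compare`, while elements [mark, my_tail) are recently pushed
    // items not yet placed.  heapify() sifts each unplaced element up into the
    // heap; reheap() sifts the element moved to the root down after a pop.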
01702     void heapify() {
01703         if (!mark) mark = 1;
01704         for (; mark<this->my_tail; ++mark) { // for each unheaped element
01705             size_type cur_pos = mark;
01706             input_type to_place = this->my_array[mark].first;
01707             do { // push to_place up the heap
01708                 size_type parent = (cur_pos-1)>>1;
01709                 if (!compare(this->my_array[parent].first, to_place))
01710                     break;
01711                 this->my_array[cur_pos].first = this->my_array[parent].first;
01712                 cur_pos = parent;
01713             } while( cur_pos );
01714             this->my_array[cur_pos].first = to_place;
01715         }
01716     }
01717 
01718     void reheap() {
01719         size_type cur_pos=0, child=1;
01720         while (child < mark) {
01721             size_type target = child;
01722             if (child+1<mark &&
01723                 compare(this->my_array[child].first,
01724                         this->my_array[child+1].first))
01725                 ++target;
01726             // target now has the higher priority child
01727             if (compare(this->my_array[target].first,
01728                         this->my_array[this->my_tail].first))
01729                 break;
01730             this->my_array[cur_pos].first = this->my_array[target].first;
01731             cur_pos = target;
01732             child = (cur_pos<<1)+1;
01733         }
01734         this->my_array[cur_pos].first = this->my_array[this->my_tail].first;
01735     }
01736 };
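
// Usage sketch (editorial addition): a priority_queue_node buffers items and
// pops the highest-priority one first, as ordered by Compare (std::less<T> by
// default, so the largest value wins).  Note that when a successor is attached,
// an item may be forwarded as soon as it arrives, so strict priority order is
// only observed among items that are buffered together.
//
//   tbb::flow::graph g;
//   tbb::flow::priority_queue_node<int> pq(g);   // no successor attached
//   pq.try_put(3); pq.try_put(7); pq.try_put(5);
//   g.wait_for_all();
//   int v = 0;
//   pq.try_get(v);   // v == 7; subsequent calls yield 5, then 3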
01737 
01739 
01742 template< typename T >
01743 class limiter_node : public graph_node, public receiver< T >, public sender< T > {
01744 protected:
01745     using graph_node::my_graph;
01746 public:
01747     typedef T input_type;
01748     typedef T output_type;
01749     typedef sender< input_type > predecessor_type;
01750     typedef receiver< output_type > successor_type;
01751 
01752 private:
01753     task *my_root_task;
01754     size_t my_threshold;
01755     size_t my_count;
01756     internal::predecessor_cache< T > my_predecessors;
01757     spin_mutex my_mutex;
01758     internal::broadcast_cache< T > my_successors;
01759     int init_decrement_predecessors;
01760 
01761     friend class internal::forward_task_bypass< limiter_node<T> >;
01762 
01763     // Let decrementer call decrement_counter()
01764     friend class internal::decrementer< limiter_node<T> >;
01765 
01766     // only returns a valid task pointer or NULL, never SUCCESSFULLY_ENQUEUED
01767     task * decrement_counter() {
01768         input_type v;
01769         task *rval = NULL;
01770 
01771         // If we can't get or put an item immediately, then drop the count
01772         if ( my_predecessors.get_item( v ) == false
01773              || (rval = my_successors.try_put_task(v)) == NULL ) {
01774             spin_mutex::scoped_lock lock(my_mutex);
01775             --my_count;
01776             if ( !my_predecessors.empty() ) {
01777                 task *rtask = new ( task::allocate_additional_child_of( *my_root_task ) )
01778                     internal::forward_task_bypass< limiter_node<T> >( *this );
01779                 __TBB_ASSERT(!rval, "Have two tasks to handle");
01780                 return rtask;
01781             }
01782         }
01783         return rval;
01784     }
01785 
01786     void forward() {
01787         {
01788             spin_mutex::scoped_lock lock(my_mutex);
01789             if ( my_count < my_threshold )
01790                 ++my_count;
01791             else
01792                 return;
01793         }
01794         task * rtask = decrement_counter();
01795         if(rtask) task::enqueue(*rtask);
01796     }
01797 
01798     task *forward_task() {
01799         spin_mutex::scoped_lock lock(my_mutex);
01800         if ( my_count >= my_threshold )
01801             return NULL;
01802         ++my_count;
01803         task * rtask = decrement_counter();
01804         return rtask;
01805     }
01806 
01807 public:
01809     internal::decrementer< limiter_node<T> > decrement;
01810 
01812     limiter_node(graph &g, size_t threshold, int num_decrement_predecessors=0) :
01813         graph_node(g), my_root_task(g.root_task()), my_threshold(threshold), my_count(0),
01814         init_decrement_predecessors(num_decrement_predecessors),
01815         decrement(num_decrement_predecessors)
01816     {
01817         my_predecessors.set_owner(this);
01818         my_successors.set_owner(this);
01819         decrement.set_owner(this);
01820     }
01821 
01823     limiter_node( const limiter_node& src ) :
01824         graph_node(src.my_graph), receiver<T>(), sender<T>(),
01825         my_root_task(src.my_root_task), my_threshold(src.my_threshold), my_count(0),
01826         init_decrement_predecessors(src.init_decrement_predecessors),
01827         decrement(src.init_decrement_predecessors)
01828     {
01829         my_predecessors.set_owner(this);
01830         my_successors.set_owner(this);
01831         decrement.set_owner(this);
01832     }
01833 
01835     /* override */ bool register_successor( receiver<output_type> &r ) {
01836         my_successors.register_successor(r);
01837         return true;
01838     }
01839 
01841 
01842     /* override */ bool remove_successor( receiver<output_type> &r ) {
01843         r.remove_predecessor(*this);
01844         my_successors.remove_successor(r);
01845         return true;
01846     }
01847 
01849     /* override */ bool register_predecessor( predecessor_type &src ) {
01850         spin_mutex::scoped_lock lock(my_mutex);
01851         my_predecessors.add( src );
01852         if ( my_count < my_threshold && !my_successors.empty() ) {
01853             task::enqueue( * new ( task::allocate_additional_child_of( *my_root_task ) )
01854                            internal::
01855                            forward_task_bypass
01856                            < limiter_node<T> >( *this ) );
01857         }
01858         return true;
01859     }
01860 
01862     /* override */ bool remove_predecessor( predecessor_type &src ) {
01863         my_predecessors.remove( src );
01864         return true;
01865     }
01866 
01867 protected:
01868 
01869     template< typename R, typename B > friend class run_and_put_task;
01870     template<typename X, typename Y> friend class internal::broadcast_cache;
01871     template<typename X, typename Y> friend class internal::round_robin_cache;
01873     /* override */ task *try_put_task( const T &t ) {
01874         {
01875             spin_mutex::scoped_lock lock(my_mutex);
01876             if ( my_count >= my_threshold )
01877                 return NULL;
01878             else
01879                 ++my_count;
01880         }
01881 
01882         task * rtask = my_successors.try_put_task(t);
01883 
01884         if ( !rtask ) {  // try_put_task failed.
01885             spin_mutex::scoped_lock lock(my_mutex);
01886             --my_count;
01887             if ( !my_predecessors.empty() ) {
01888                 rtask = new ( task::allocate_additional_child_of( *my_root_task ) )
01889                     internal::forward_task_bypass< limiter_node<T> >( *this );
01890             }
01891         }
01892         return rtask;
01893     }
01894 
01895     /*override*/void reset() {
01896         my_count = 0;
01897         my_predecessors.reset();
01898         decrement.reset_receiver();
01899     }
01900 
01901     /*override*/void reset_receiver() { my_predecessors.reset(); }
01902 };  // limiter_node
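
// Usage sketch (editorial addition): a limiter_node passes at most `threshold`
// messages until its `decrement` port receives a continue_msg; connecting the
// end of a pipeline back to `decrement` is the usual way to bound the number
// of items in flight.  The Work/g/limit names below are illustrative only.
//
//   struct Work {
//       tbb::flow::continue_msg operator()(int /*item*/) const {
//           // ... process the item ...
//           return tbb::flow::continue_msg();
//       }
//   };
//
//   void example() {
//       tbb::flow::graph g;
//       tbb::flow::limiter_node<int> limit(g, 2);   // at most 2 items in flight
//       tbb::flow::function_node<int, tbb::flow::continue_msg>
//           work(g, tbb::flow::unlimited, Work());
//       tbb::flow::make_edge(limit, work);
//       tbb::flow::make_edge(work, limit.decrement);  // completion frees a slot
//       for (int i = 0; i < 10; ++i) limit.try_put(i);
//       g.wait_for_all();
//   }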
01903 
01904 #include "internal/_flow_graph_join_impl.h"
01905 
01906 using internal::reserving_port;
01907 using internal::queueing_port;
01908 using internal::tag_matching_port;
01909 using internal::input_port;
01910 using internal::tag_value;
01911 using internal::NO_TAG;
01912 
01913 template<typename OutputTuple, graph_buffer_policy JP=queueing> class join_node;
01914 
01915 template<typename OutputTuple>
01916 class join_node<OutputTuple,reserving>: public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value, reserving_port, OutputTuple, reserving> {
01917 private:
01918     static const int N = tbb::flow::tuple_size<OutputTuple>::value;
01919     typedef typename internal::unfolded_join_node<N, reserving_port, OutputTuple, reserving> unfolded_type;
01920 public:
01921     typedef OutputTuple output_type;
01922     typedef typename unfolded_type::input_ports_type input_ports_type;
01923     join_node(graph &g) : unfolded_type(g) { }
01924     join_node(const join_node &other) : unfolded_type(other) {}
01925 };
01926 
01927 template<typename OutputTuple>
01928 class join_node<OutputTuple,queueing>: public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value, queueing_port, OutputTuple, queueing> {
01929 private:
01930     static const int N = tbb::flow::tuple_size<OutputTuple>::value;
01931     typedef typename internal::unfolded_join_node<N, queueing_port, OutputTuple, queueing> unfolded_type;
01932 public:
01933     typedef OutputTuple output_type;
01934     typedef typename unfolded_type::input_ports_type input_ports_type;
01935     join_node(graph &g) : unfolded_type(g) { }
01936     join_node(const join_node &other) : unfolded_type(other) {}
01937 };
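
// Usage sketch (editorial addition): a queueing join_node queues items on each
// input port and emits a tuple as soon as every port has at least one item.
// Ports are reached through the input_port<N>() helper brought in above.
//
//   typedef tbb::flow::tuple<int, float> pair_t;
//   tbb::flow::graph g;
//   tbb::flow::join_node<pair_t, tbb::flow::queueing> j(g);
//   tbb::flow::queue_node<pair_t> sink(g);
//   tbb::flow::make_edge(j, sink);
//   tbb::flow::input_port<0>(j).try_put(1);
//   tbb::flow::input_port<1>(j).try_put(2.0f);
//   g.wait_for_all();
//   pair_t result;
//   sink.try_get(result);   // tbb::flow::get<0>(result) == 1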
01938 
01939 // partial specialization for the tag_matching join_node
01940 template<typename OutputTuple>
01941 class join_node<OutputTuple, tag_matching> : public internal::unfolded_join_node<tbb::flow::tuple_size<OutputTuple>::value,
01942       tag_matching_port, OutputTuple, tag_matching> {
01943 private:
01944     static const int N = tbb::flow::tuple_size<OutputTuple>::value;
01945     typedef typename internal::unfolded_join_node<N, tag_matching_port, OutputTuple, tag_matching> unfolded_type;
01946 public:
01947     typedef OutputTuple output_type;
01948     typedef typename unfolded_type::input_ports_type input_ports_type;
01949     template<typename B0, typename B1>
01950     join_node(graph &g, B0 b0, B1 b1) : unfolded_type(g, b0, b1) { }
01951     template<typename B0, typename B1, typename B2>
01952     join_node(graph &g, B0 b0, B1 b1, B2 b2) : unfolded_type(g, b0, b1, b2) { }
01953     template<typename B0, typename B1, typename B2, typename B3>
01954     join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3) : unfolded_type(g, b0, b1, b2, b3) { }
01955     template<typename B0, typename B1, typename B2, typename B3, typename B4>
01956     join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4) : unfolded_type(g, b0, b1, b2, b3, b4) { }
01957 #if __TBB_VARIADIC_MAX >= 6
01958     template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5>
01959     join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5) : unfolded_type(g, b0, b1, b2, b3, b4, b5) { }
01960 #endif
01961 #if __TBB_VARIADIC_MAX >= 7
01962     template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6>
01963     join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6) { }
01964 #endif
01965 #if __TBB_VARIADIC_MAX >= 8
01966     template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7>
01967     join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7) { }
01968 #endif
01969 #if __TBB_VARIADIC_MAX >= 9
01970     template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7, typename B8>
01971     join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8) { }
01972 #endif
01973 #if __TBB_VARIADIC_MAX >= 10
01974     template<typename B0, typename B1, typename B2, typename B3, typename B4, typename B5, typename B6, typename B7, typename B8, typename B9>
01975     join_node(graph &g, B0 b0, B1 b1, B2 b2, B3 b3, B4 b4, B5 b5, B6 b6, B7 b7, B8 b8, B9 b9) : unfolded_type(g, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9) { }
01976 #endif
01977     join_node(const join_node &other) : unfolded_type(other) {}
01978 };
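
// Usage sketch (editorial addition): a tag_matching join_node pairs inputs
// that carry the same tag; each constructor argument is a functor mapping the
// corresponding port's input to a tag_value.  TagA/TagB are illustrative only.
//
//   struct TagA { tbb::flow::tag_value operator()(const int &v) const
//       { return tbb::flow::tag_value(v); } };
//   struct TagB { tbb::flow::tag_value operator()(const float &v) const
//       { return tbb::flow::tag_value(v); } };
//
//   typedef tbb::flow::tuple<int, float> pair_t;
//   tbb::flow::graph g;
//   tbb::flow::join_node<pair_t, tbb::flow::tag_matching> j(g, TagA(), TagB());
//   tbb::flow::input_port<0>(j).try_put(3);      // tag 3
//   tbb::flow::input_port<1>(j).try_put(3.0f);   // tag 3 -- tuple (3, 3.0f) emitted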
01979 
01980 #if TBB_PREVIEW_GRAPH_NODES
01981 // or node
01982 #include "internal/_flow_graph_or_impl.h"
01983 
01984 template<typename InputTuple>
01985 class or_node : public internal::unfolded_or_node<InputTuple> {
01986 private:
01987     static const int N = tbb::flow::tuple_size<InputTuple>::value;
01988 public:
01989     typedef typename internal::or_output_type<InputTuple>::type output_type;
01990     typedef typename internal::unfolded_or_node<InputTuple> unfolded_type;
01991     or_node(graph& g) : unfolded_type(g) { }
01992     // Copy constructor
01993     or_node( const or_node& other ) : unfolded_type(other) { }
01994 };
01995 #endif  // TBB_PREVIEW_GRAPH_NODES
01996 
01998 template< typename T >
01999 inline void make_edge( sender<T> &p, receiver<T> &s ) {
02000     p.register_successor( s );
02001 }
02002 
02004 template< typename T >
02005 inline void remove_edge( sender<T> &p, receiver<T> &s ) {
02006     p.remove_successor( s );
02007 }
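
// Usage sketch (editorial addition): make_edge/remove_edge connect and
// disconnect any sender/receiver pair with a matching message type.
//
//   tbb::flow::graph g;
//   tbb::flow::broadcast_node<int> src(g);
//   tbb::flow::queue_node<int> dst(g);
//   tbb::flow::make_edge(src, dst);    // dst now receives what src broadcasts
//   src.try_put(42);
//   g.wait_for_all();
//   tbb::flow::remove_edge(src, dst);  // later puts to src no longer reach dst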
02008 
02010 template< typename Body, typename Node >
02011 Body copy_body( Node &n ) {
02012     return n.template copy_function_object<Body>();
02013 }
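
// Usage sketch (editorial addition): copy_body returns a copy of the function
// object held by a functional node, which is useful for reading state the body
// accumulated during a run.  `Counter` is an illustrative body type.
//
//   struct Counter {
//       int n;
//       Counter() : n(0) {}
//       int operator()(int v) { ++n; return v; }
//   };
//
//   tbb::flow::graph g;
//   tbb::flow::function_node<int, int> f(g, tbb::flow::serial, Counter());
//   f.try_put(1); f.try_put(2);
//   g.wait_for_all();
//   int calls = tbb::flow::copy_body<Counter>(f).n;   // == 2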
02014 
02015 } // interface6
02016 
02017     using interface6::graph;
02018     using interface6::graph_node;
02019     using interface6::continue_msg;
02020     using interface6::sender;
02021     using interface6::receiver;
02022     using interface6::continue_receiver;
02023 
02024     using interface6::source_node;
02025     using interface6::function_node;
02026     using interface6::multifunction_node;
02027     using interface6::split_node;
02028     using interface6::internal::output_port;
02029 #if TBB_PREVIEW_GRAPH_NODES
02030     using interface6::or_node;
02031 #endif
02032     using interface6::continue_node;
02033     using interface6::overwrite_node;
02034     using interface6::write_once_node;
02035     using interface6::broadcast_node;
02036     using interface6::buffer_node;
02037     using interface6::queue_node;
02038     using interface6::sequencer_node;
02039     using interface6::priority_queue_node;
02040     using interface6::limiter_node;
02041     using namespace interface6::internal::graph_policy_namespace;
02042     using interface6::join_node;
02043     using interface6::input_port;
02044     using interface6::copy_body; 
02045     using interface6::make_edge; 
02046     using interface6::remove_edge; 
02047     using interface6::internal::NO_TAG;
02048     using interface6::internal::tag_value;
02049 
02050 } // flow
02051 } // tbb
02052 
02053 #endif // __TBB_flow_graph_H
