task.h

/*
    Copyright 2005-2012 Intel Corporation.  All Rights Reserved.

    The source code contained or described herein and all documents related
    to the source code ("Material") are owned by Intel Corporation or its
    suppliers or licensors.  Title to the Material remains with Intel
    Corporation or its suppliers and licensors.  The Material is protected
    by worldwide copyright laws and treaty provisions.  No part of the
    Material may be used, copied, reproduced, modified, published, uploaded,
    posted, transmitted, distributed, or disclosed in any way without
    Intel's prior express written permission.

    No license under any patent, copyright, trade secret or other
    intellectual property right is granted to or conferred upon you by
    disclosure or delivery of the Materials, either expressly, by
    implication, inducement, estoppel or otherwise.  Any license under such
    intellectual property rights must be express and approved by Intel in
    writing.
*/

#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"
#include <climits>

typedef struct ___itt_caller *__itt_caller;

namespace tbb {

class task;
class task_list;

#if __TBB_TASK_GROUP_CONTEXT
class task_group_context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

// MSVC does not allow taking the address of a member that was defined
// privately in task_base and made public in class task via a using declaration
// (GCC 3.x prior to 3.3 has the same restriction).
#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
#define __TBB_TASK_BASE_ACCESS public
#else
#define __TBB_TASK_BASE_ACCESS private
#endif

namespace internal {

    //! Proxy object passed to the overloaded operator new when allocating an
    //! additional child of an existing task (see task::allocate_additional_child_of).
    class allocate_additional_child_of_proxy: no_assign {
        task* self;
        task& parent;
    public:
        explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

}

namespace interface5 {
    namespace internal {

        //! Base class for methods that became static in TBB 3.0.
        /** Several methods of class task no longer need a "this" argument, but for
            backward binary compatibility the new static forms need distinct names.
            They are therefore defined in this base class and exposed in class task
            via using declarations. */
        class task_base: tbb::internal::no_copy {
        __TBB_TASK_BASE_ACCESS:
            friend class tbb::task;

            //! Schedule task for execution when a worker becomes available.
            static void spawn( task& t );

            //! Spawn multiple tasks and clear list.
            static void spawn( task_list& list );

            //! Like allocate_child, except that the task's parent becomes "t", not this.
            /** Typically used in conjunction with schemes that add additional children
                to a task that is already running. */
            static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
                return tbb::internal::allocate_additional_child_of_proxy(t);
            }

            //! Destroy a task.
            /** Usually, calling this method is unnecessary, because a task is implicitly
                deleted after its execute() method runs.  However, sometimes a task needs
                to be explicitly deallocated, such as when a root task is used as the
                parent in spawn_and_wait_for_all. */
            static void __TBB_EXPORTED_FUNC destroy( task& victim );
        };
    } // internal
} // interface5

namespace internal {

    //! Interface to the internal task scheduler.
    class scheduler: no_copy {
    public:
        //! For internal use only
        virtual void spawn( task& first, task*& next ) = 0;

        //! For internal use only
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! For internal use only
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor;
        //  Have to have it just to shut up overzealous compilation warnings
        virtual ~scheduler() = 0;

        //! For internal use only
        virtual void enqueue( task& t, void* reserved ) = 0;
    };

    //! A reference count
    /** Signed so that underflow can be detected. */
    typedef intptr_t reference_count;

    //! An id as used for specifying affinity.
    /** Guaranteed to be an integral type.  Zero means no affinity. */
    typedef unsigned short affinity_id;

#if __TBB_TASK_GROUP_CONTEXT
    class generic_scheduler;

    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };

    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
#endif /* __TBB_TASK_GROUP_CONTEXT */

    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };

    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    //! Memory prefix to a task object.
    /** This class is internal to the library.  It holds the data that the scheduler
        needs in addition to what the user defines, and is laid out immediately
        before the task object in memory. */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::interface5::internal::task_base;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;

#if __TBB_TASK_GROUP_CONTEXT
        //! Shared context that is used to communicate asynchronous state changes
        /** Currently used to broadcast cancellation requests and (when enabled)
            priority changes to the tasks of a group. */
        task_group_context  *context;
#endif /* __TBB_TASK_GROUP_CONTEXT */

        //! The scheduler that allocated the task, or NULL if the task is big.
        /** Small tasks are pooled by the scheduler that allocated them; a task freed
            by another scheduler is returned to its originating pool. */
        scheduler* origin;

#if __TBB_TASK_PRIORITY
        union {
#endif /* __TBB_TASK_PRIORITY */
        //! Obsolete. The scheduler that owns the task.
        scheduler* owner;

#if __TBB_TASK_PRIORITY
        //! Pointer to the next offloaded lower priority task.
        task* next_offloaded;
        };
#endif /* __TBB_TASK_PRIORITY */

        //! The task whose reference count includes me.
        /** In the "blocking style" of programming, this field points to the parent task.
            In the "continuation-passing style", it points to the continuation of the parent. */
        tbb::task* parent;

        //! Reference count used for synchronization.
        /** Tracks the number of children that have not yet completed (plus one
            when a task is waiting on its children). */
        __TBB_atomic reference_count ref_count;

        //! Obsolete. Used to be the scheduling depth before TBB 2.2.
        /** Retained only for the sake of backward binary compatibility. */
        int depth;

        //! A task::state_type, stored as a byte for compactness.
        /** This state is exposed to users via method task::state(). */
        unsigned char state;

        //! Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
        /** 0x1 -> version >= 2.1 task; 0x80 -> a stolen task (see task::is_stolen_task). */
        unsigned char extra_state;

        affinity_id affinity;

        //! "next" field for list of task
        tbb::task* next;

        //! The task corresponding to this task_prefix.
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };

} // namespace internal

#if __TBB_TASK_GROUP_CONTEXT

#if __TBB_TASK_PRIORITY
namespace internal {
    static const int priority_stride_v4 = INT_MAX / 4;
}

enum priority_t {
    priority_normal = internal::priority_stride_v4 * 2,
    priority_low = priority_normal - internal::priority_stride_v4,
    priority_high = priority_normal + internal::priority_stride_v4
};

#endif /* __TBB_TASK_PRIORITY */

#if TBB_USE_CAPTURED_EXCEPTION
    class tbb_exception;
#else
    namespace internal {
        class tbb_exception_ptr;
    }
#endif /* !TBB_USE_CAPTURED_EXCEPTION */

class task_scheduler_init;

//! Used to form groups of tasks
/** A context is the vehicle through which cancellation requests, captured exceptions
    and (when __TBB_TASK_PRIORITY is enabled) priority changes are communicated to a
    group of tasks.  A context is either bound to the context of its parent task group,
    in which case it receives such state changes from the parent, or isolated. */
class task_group_context : internal::no_copy {
private:
    friend class internal::generic_scheduler;
    friend class task_scheduler_init;

#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif /* !TBB_USE_CAPTURED_EXCEPTION */
    };

private:
    enum state {
        may_have_children = 1
    };

    //! Flavor of this context: bound or isolated.
    /** The union with uintptr_t ensures pointer-size alignment of the field. */
    union {
        kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
    task_group_context *my_parent;

    //! Used to form the thread-specific list of contexts without additional memory allocation.
    internal::context_list_node_t my_node;

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    __itt_caller itt_caller;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    char _leading_padding[internal::NFS_MaxLineSize
                          - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)];

    //! Specifies whether cancellation was requested for this task group.
    uintptr_t my_cancellation_requested;

    //! Version for run-time checks and behavioral traits of the context.
    /** Version occupies the low 16 bits, traits the next 16 bits (see version_traits_word_layout). */
    uintptr_t  my_version_and_traits;

    //! Pointer to the container storing the exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler instance that registered this context in its thread-specific list.
    internal::generic_scheduler *my_owner;

    //! Internal state (combination of state flags).
    uintptr_t my_state;

#if __TBB_TASK_PRIORITY
    //! Priority level of the task group.
    intptr_t my_priority;
#endif /* __TBB_TASK_PRIORITY */

    //! Trailing padding protecting accesses to frequently used members from false sharing.
    char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
#if __TBB_TASK_PRIORITY
                            - sizeof(intptr_t)
#endif /* __TBB_TASK_PRIORITY */
                          ];

public:
    //! Default & binding constructor.
    /** By default a bound context is created, i.e. one that propagates cancellation
        requests (and, when __TBB_TASK_PRIORITY is enabled, priority changes) received
        by its parent group.  Pass isolated to opt out of this propagation.  The traits
        word selects optional behaviors such as exact_exception and concurrent_wait. */
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    {
        init();
    }

    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** The method assumes that all the tasks that used to be associated with this
        context have already finished; calling it while the context is still in use
        somewhere in the task hierarchy leads to undefined behavior. */
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** Returns false if cancellation has already been requested, true otherwise. */
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if the context received a cancellation request.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the pending exception, and cancels the task group.
    /** May be called only from inside a catch block. */
    void __TBB_EXPORTED_METHOD register_pending_exception ();

#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group
    void set_priority ( priority_t );

    //! Retrieves current priority of the task group
    priority_t priority () const;
#endif /* __TBB_TASK_PRIORITY */

protected:
    //! Out-of-line part of the constructor.
    /** Singled out to ensure backward binary compatibility of future versions. */
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    //! Propagates a state change (if any) from an ancestor context to this one.
    template <typename T>
    void propagate_state_from_ancestors ( T task_group_context::*mptr_state, T new_state );

    //! Finishes the initialization that could not be done in the constructor (requires the local scheduler).
    inline void finish_initialization ( internal::generic_scheduler *local_sched );

    //! Binds this context to the context of the currently executing task.
    void bind_to ( internal::generic_scheduler *local_sched );

    //! Registers this context in the thread-specific list of the given scheduler.
    void register_with ( internal::generic_scheduler *local_sched );

}; // class task_group_context

#endif /* __TBB_TASK_GROUP_CONTEXT */
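
// Usage sketch (illustrative only, not part of the original header): running a
// task tree in its own isolated cancellation group.  MyRootTask is a
// hypothetical user task type.
//
//     tbb::task_group_context ctx( tbb::task_group_context::isolated );
//     MyRootTask& root = *new( tbb::task::allocate_root(ctx) ) MyRootTask();
//     tbb::task::spawn_root_and_wait( root );      // blocks until the tree finishes
//
// While the tree runs, any thread holding a reference to ctx may call
// ctx.cancel_group_execution(); running tasks can poll
// tbb::task::self().is_cancelled() and return early.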

//! Base class for user-defined tasks.
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    //! Set reference count
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor.
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes.
    /** Returns the next task to execute, or NULL to let the scheduler choose. */
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running, and will be destroyed after method execute() completes.
        executing,
        //! task to be rescheduled.
        reexecute,
        //! task is in ready pool, or is going to be put there, or was just taken off.
        ready,
        //! task object is freshly allocated or recycled.
        allocated,
        //! task object is on free list, or is going to be put there, or was just taken off.
        freed,
        //! task to be recycled as continuation
        recycle
    };

    //------------------------------------------------------------------------
    // Allocating tasks
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! Returns proxy for overloaded new that allocates a root task associated with a user-supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The continuation's parent becomes the parent of *this. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Define recommended static form via import from base class.
    using task_base::allocate_additional_child_of;

#if __TBB_DEPRECATED_TASK_INTERFACE
    //! Destroy a task.
    /** Usually, calling this method is unnecessary, because a task is implicitly
        deleted after its execute() method runs.  However, sometimes a task needs
        to be explicitly deallocated, such as when a root task is used as the
        parent in spawn_and_wait_for_all. */
    void __TBB_EXPORTED_METHOD destroy( task& t );
#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
    //! Define recommended static form via import from base class.
    using task_base::destroy;
#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** The caller must guarantee that the task's refcount does not become zero until
        after the method execute() returns.  Typically, this is done by having
        method execute() return a pointer to a child of the task.  If the guarantee
        cannot be made, use method recycle_as_safe_continuation instead. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    //! Recommended to use, safe variant of recycle_as_continuation.
    /** For safety, it requires an additional increment of ref_count. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    //! Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        p.context = new_parent.prefix().context;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    }

    //! Schedule this for reexecution after the current execute() returns.
    /** Made obsolete by recycle_as_safe_continuation; may become deprecated. */
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }
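
    // Usage sketch (illustrative only, not part of the original header): the
    // continuation-passing idiom that the recycling methods support.  MyTask and
    // MyContinuation are hypothetical user types; MyContinuation combines the
    // children's results in its own execute().
    //
    //     tbb::task* MyTask::execute() {
    //         if( n < cutoff ) { do_serial_work(); return NULL; }
    //         MyContinuation& c = *new( allocate_continuation() ) MyContinuation(/*...*/);
    //         MyTask& a = *new( c.allocate_child() ) MyTask(/*...*/);
    //         MyTask& b = *new( c.allocate_child() ) MyTask(/*...*/);
    //         c.set_ref_count(2);      // exactly the number of children: nobody waits on c
    //         spawn( b );
    //         return &a;               // scheduler bypass: run the first child next
    //     }
    //
    // When a task can serve as its own continuation, recycle_as_continuation()
    // (or the safer recycle_as_safe_continuation()) avoids allocating MyContinuation.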

    // All depth-related methods are obsolete, and are retained for the sake
    // of backward source compatibility only
    intptr_t depth() const {return 0;}
    void set_depth( intptr_t ) {}
    void add_to_depth( int ) {}


    //------------------------------------------------------------------------
    // Spawning and blocking
    //------------------------------------------------------------------------

    //! Set reference count
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Atomically increment reference count.
    /** Has acquire semantics */
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    //! Atomically decrement reference count and return its new value.
    /** Has release semantics */
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
    }

    //! Define recommended static forms via import from base class.
    using task_base::spawn;

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn a task allocated by allocate_root, wait for it to complete, and deallocate it.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn root tasks on list and wait for all of them to finish.
    /** If there are more tasks than worker threads, the tasks are spawned in
        order of front to back. */
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for reference count to become one, and set reference count to zero.
    /** Works on tasks while waiting. */
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }

    //! Enqueue task for starvation-resistant execution.
#if __TBB_TASK_PRIORITY
    /** The task is enqueued on the normal priority level regardless of the priority
        of its task group: an enqueued task's priority is fixed at the moment of
        enqueuing, while a task group's priority is dynamic, so automatic inheritance
        would be subject to races. */
#endif /* __TBB_TASK_PRIORITY */
    static void enqueue( task& t ) {
        t.prefix().owner->enqueue( t, NULL );
    }

#if __TBB_TASK_PRIORITY
    //! Enqueue task for starvation-resistant execution on the specified priority level.
    static void enqueue( task& t, priority_t p ) {
        __TBB_ASSERT( p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value" );
        t.prefix().owner->enqueue( t, (void*)p );
    }
#endif /* __TBB_TASK_PRIORITY */
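
    // Usage sketch (illustrative only, not part of the original header):
    // fire-and-forget submission of a root task.  Unlike spawn, enqueue needs no
    // matching wait; a root task with no parent is destroyed automatically after
    // its execute() returns.  MyBackgroundTask is a hypothetical user type.
    //
    //     tbb::task& t = *new( tbb::task::allocate_root() ) MyBackgroundTask();
    //     tbb::task::enqueue( t );                 // starvation-resistant, roughly FIFO
    // #if __TBB_TASK_PRIORITY
    //     tbb::task::enqueue( *new( tbb::task::allocate_root() ) MyBackgroundTask(),
    //                         tbb::priority_high );
    // #endif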

    //! The innermost task being executed or destroyed by the current thread at the moment.
    static task& __TBB_EXPORTED_FUNC self();

    //! task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}

    //! Sets the parent task pointer to the specified value.
    void set_parent(task* p) {
#if __TBB_TASK_GROUP_CONTEXT
        __TBB_ASSERT(prefix().context == p->prefix().context, "The tasks must be in the same context");
#endif
        prefix().parent = p;
    }

#if __TBB_TASK_GROUP_CONTEXT
    //! This method is deprecated and will be removed in the future.
    /** Use method group() instead. */
    task_group_context* context() {return prefix().context;}

    //! Pointer to the task group descriptor.
    task_group_context* group () { return prefix().context; }
#endif /* __TBB_TASK_GROUP_CONTEXT */

    //! True if task was stolen from the task pool of another thread.
    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state
    state_type state() const {return state_type(prefix().state);}

    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count_ = prefix().ref_count;
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! Obsolete, and only retained for the sake of backward compatibility. Always returns true.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** Guaranteed to be an integral type.  Zero means no affinity. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by scheduler to notify task that it ran on an unexpected thread.
    /** Invoked before method execute() runs, if the task is stolen, or the task has
        affinity but will be executed on another thread.

        The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_TASK_GROUP_CONTEXT
    //! Moves this task from its current group into another one.
    /** Argument ctx specifies the new group. */
    void __TBB_EXPORTED_METHOD change_group ( task_group_context& ctx );

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** Returns false if cancellation has already been requested, true otherwise. */
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if the context has received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif /* __TBB_TASK_GROUP_CONTEXT */

#if __TBB_TASK_PRIORITY
    //! Changes priority of the task group this task belongs to.
    void set_group_priority ( priority_t p ) {  prefix().context->set_priority(p); }

    //! Retrieves current priority of the task group this task belongs to.
    priority_t group_priority () const { return prefix().context->priority(); }

#endif /* __TBB_TASK_PRIORITY */

private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif /* __TBB_TASK_GROUP_CONTEXT */
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    //! Get reference to corresponding task_prefix.
    /** Version tag prevents loader on Linux from using the wrong symbol in debug builds. */
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
}; // class task

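// Usage sketch (illustrative only, not part of the original header): a minimal
// blocking-style task tree.  FibTask and parallel_fib are hypothetical names.
//
//     #include "tbb/task.h"
//
//     struct FibTask: public tbb::task {
//         const long n;
//         long* const sum;
//         FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
//         tbb::task* execute() {                       // overrides task::execute
//             if( n<2 ) {
//                 *sum = n;
//             } else {
//                 long x, y;
//                 FibTask& a = *new( allocate_child() ) FibTask(n-1,&x);
//                 FibTask& b = *new( allocate_child() ) FibTask(n-2,&y);
//                 set_ref_count(3);                    // two children + one for the wait
//                 spawn( b );
//                 spawn_and_wait_for_all( a );         // runs a, waits for both
//                 *sum = x+y;
//             }
//             return NULL;
//         }
//     };
//
//     long parallel_fib( long n ) {
//         long sum;
//         FibTask& root = *new( tbb::task::allocate_root() ) FibTask(n,&sum);
//         tbb::task::spawn_root_and_wait(root);        // root is deallocated on return
//         return sum;
//     }
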
//! task that does nothing.  Useful for synchronization.
class empty_task: public task {
    /*override*/ task* execute() {
        return NULL;
    }
};
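
// Usage sketch (illustrative only, not part of the original header): using an
// empty_task as a dummy root so that an arbitrary task can be waited on.
// MyTask is a hypothetical user task type.
//
//     tbb::empty_task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
//     root.set_ref_count(2);                       // one child + one for wait_for_all
//     tbb::task& child = *new( root.allocate_child() ) MyTask();
//     tbb::task::spawn( child );
//     root.wait_for_all();                         // returns when MyTask finishes
//     tbb::task::destroy( root );                  // dummy roots must be destroyed explicitly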

//! A list of children.
/** Used by methods that spawn or wait for multiple tasks at once,
    e.g. task::spawn(task_list&). */
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
    friend class interface5::internal::task_base;
public:
    //! Construct empty list
    task_list() : first(NULL), next_ptr(&first) {}

    //! Destroys the list, but does not destroy the task objects.
    ~task_list() {}

    //! True if list is empty; false otherwise.
    bool empty() const {return !first;}

    //! Push task onto back of list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }

    //! Pop the front task from the list.
    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    //! Clear the list
    void clear() {
        first=NULL;
        next_ptr=&first;
    }
};
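
// Usage sketch (illustrative only, not part of the original header): spawning a
// batch of root tasks through a task_list.  MyTask is a hypothetical task type.
//
//     tbb::task_list list;
//     for( int i=0; i<4; ++i )
//         list.push_back( *new( tbb::task::allocate_root() ) MyTask(i) );
//     tbb::task::spawn_root_and_wait( list );      // runs all four, blocks until done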

inline void interface5::internal::task_base::spawn( task& t ) {
    t.prefix().owner->spawn( t, t.prefix().next );
}

inline void interface5::internal::task_base::spawn( task_list& list ) {
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}

inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb

inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}

#if __TBB_TASK_GROUP_CONTEXT
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

#endif /* __TBB_task_H */
