Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
Loading...
Searching...
No Matches
task.h
Go to the documentation of this file.
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
18
19#if !defined(__TBB_show_deprecation_message_task_H) && defined(__TBB_show_deprecated_header_message)
20#define __TBB_show_deprecation_message_task_H
21#pragma message("TBB Warning: tbb/task.h is deprecated. For details, please see Deprecated Features appendix in the TBB reference manual.")
22#endif
23
24#if defined(__TBB_show_deprecated_header_message)
25#undef __TBB_show_deprecated_header_message
26#endif
27
28#ifndef __TBB_task_H
29#define __TBB_task_H
30
31#define __TBB_task_H_include_area
33
34#include "tbb_stddef.h"
35#include "tbb_machine.h"
36#include "tbb_profiling.h"
37#include <climits>
38
39typedef struct ___itt_caller *__itt_caller;
40
41namespace tbb {
42
43class task;
44class task_list;
45class task_group_context;
46
47// MSVC does not allow taking the address of a member that was defined
48// privately in task_base and made public in class task via a using declaration.
49#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
50#define __TBB_TASK_BASE_ACCESS public
51#else
52#define __TBB_TASK_BASE_ACCESS private
53#endif
54
55namespace internal { //< @cond INTERNAL
56
61 public:
62 explicit allocate_additional_child_of_proxy( task& parent_ ) : self(NULL), parent(parent_) {
64 }
65 task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
66 void __TBB_EXPORTED_METHOD free( task& ) const;
67 };
68
69 struct cpu_ctl_env_space { int space[sizeof(internal::uint64_t)/sizeof(int)]; };
70} //< namespace internal @endcond
71
72namespace interface5 {
73 namespace internal {
75
82 friend class tbb::task;
83
85 static void spawn( task& t );
86
88 static void spawn( task_list& list );
89
91
95 }
96
98
102 static void __TBB_EXPORTED_FUNC destroy( task& victim );
103 };
104 } // internal
105} // interface5
106
108namespace internal {
109
111 public:
113 virtual void spawn( task& first, task*& next ) = 0;
114
116 virtual void wait_for_all( task& parent, task* child ) = 0;
117
119 virtual void spawn_root_and_wait( task& first, task*& next ) = 0;
120
122 // Have to have it just to shut up overzealous compilation warnings
123 virtual ~scheduler() = 0;
124
126 virtual void enqueue( task& t, void* reserved ) = 0;
127 };
128
130
131 typedef intptr_t reference_count;
132
133#if __TBB_PREVIEW_RESUMABLE_TASKS
135 static const reference_count abandon_flag = reference_count(1) << (sizeof(reference_count)*CHAR_BIT - 2);
136#endif
137
139 typedef unsigned short affinity_id;
140
141#if __TBB_TASK_ISOLATION
143 typedef intptr_t isolation_tag;
145#endif /* __TBB_TASK_ISOLATION */
146
147#if __TBB_TASK_GROUP_CONTEXT
148 class generic_scheduler;
149
153 };
154
157 public:
161 };
162#endif /* __TBB_TASK_GROUP_CONTEXT */
163
165 public:
166 static task& __TBB_EXPORTED_FUNC allocate( size_t size );
167 static void __TBB_EXPORTED_FUNC free( task& );
168 };
169
171 public:
172 task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
173 void __TBB_EXPORTED_METHOD free( task& ) const;
174 };
175
177 public:
178 task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
179 void __TBB_EXPORTED_METHOD free( task& ) const;
180 };
181
182#if __TBB_PREVIEW_CRITICAL_TASKS
183 // TODO: move to class methods when critical task API becomes public
184 void make_critical( task& t );
185 bool is_critical( task& t );
186#endif
187
189
204 private:
205 friend class tbb::task;
207 friend class tbb::task_list;
213#if __TBB_PREVIEW_CRITICAL_TASKS
214 friend void make_critical( task& );
215 friend bool is_critical( task& );
216#endif
217
218#if __TBB_TASK_ISOLATION
221#else
222 intptr_t reserved_space_for_task_isolation_tag;
223#endif /* __TBB_TASK_ISOLATION */
224
225#if __TBB_TASK_GROUP_CONTEXT
227
231#endif /* __TBB_TASK_GROUP_CONTEXT */
232
234
240
241#if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS
242 union {
243#endif /* __TBB_TASK_PRIORITY */
245
248
249#if __TBB_TASK_PRIORITY
251
253#endif
254
255#if __TBB_PREVIEW_RESUMABLE_TASKS
257 scheduler* abandoned_scheduler;
258#endif
259#if __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS
260 };
261#endif /* __TBB_TASK_PRIORITY || __TBB_PREVIEW_RESUMABLE_TASKS */
262
264
268
270
275
277
279 int depth;
280
282
283 unsigned char state;
284
286
292 unsigned char extra_state;
293
295
298
300 tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
301 };
302
303} // namespace internal
305
306#if __TBB_TASK_GROUP_CONTEXT
307
308#if __TBB_TASK_PRIORITY
309namespace internal {
310 static const int priority_stride_v4 = INT_MAX / 4;
311#if __TBB_PREVIEW_CRITICAL_TASKS
312 // TODO: move into priority_t enum when critical tasks become public feature
314#endif
315}
316
322
323#endif /* __TBB_TASK_PRIORITY */
324
325#if TBB_USE_CAPTURED_EXCEPTION
326 class tbb_exception;
327#else
328 namespace internal {
329 class tbb_exception_ptr;
330 }
331#endif /* !TBB_USE_CAPTURED_EXCEPTION */
332
333class task_scheduler_init;
334namespace interface7 { class task_arena; }
335using interface7::task_arena;
336
338
359private:
362 friend class task_arena;
363
364#if TBB_USE_CAPTURED_EXCEPTION
366#else
368#endif
369
372 version_mask = 0xFFFF,
373 traits_mask = 0xFFFFul << traits_offset
374 };
375
376public:
379 bound
380 };
381
384#if __TBB_FP_CONTEXT
386#endif
388#if TBB_USE_CAPTURED_EXCEPTION
390#else
392#endif /* !TBB_USE_CAPTURED_EXCEPTION */
393 };
394
395private:
396 enum state {
398 // the following enumerations must be the last, new 2^x values must go above
400 };
401
402 union {
404 // TODO: describe asynchronous use, and whether any memory semantics are needed
407 };
408
411
413
416
419
421
425 - 2 * sizeof(uintptr_t)- sizeof(void*) - sizeof(internal::context_list_node_t)
426 - sizeof(__itt_caller)
427#if __TBB_FP_CONTEXT
429#endif
430 ];
431
432#if __TBB_FP_CONTEXT
434
437#endif
438
441
443
447
450
453
455 uintptr_t my_state;
456
457#if __TBB_TASK_PRIORITY
459 intptr_t my_priority;
460#endif /* __TBB_TASK_PRIORITY */
461
464
466
467 char _trailing_padding[internal::NFS_MaxLineSize - 2 * sizeof(uintptr_t) - 2 * sizeof(void*)
468#if __TBB_TASK_PRIORITY
469 - sizeof(intptr_t)
470#endif /* __TBB_TASK_PRIORITY */
471 - sizeof(internal::string_index)
472 ];
473
474public:
476
504 task_group_context ( kind_type relation_with_parent = bound,
505 uintptr_t t = default_traits )
506 : my_kind(relation_with_parent)
507 , my_version_and_traits(3 | t)
508 , my_name(internal::CUSTOM_CTX)
509 {
510 init();
511 }
512
513 // Custom constructor for instrumentation of TBB algorithm
515 : my_kind(bound)
517 , my_name(name)
518 {
519 init();
520 }
521
522 // Do not introduce standalone unbind method since it will break state propagation assumptions
524
526
534
536
544
547
549
556
557#if __TBB_FP_CONTEXT
559
567#endif
568
569#if __TBB_TASK_PRIORITY
572
575#endif /* __TBB_TASK_PRIORITY */
576
578 uintptr_t traits() const { return my_version_and_traits & traits_mask; }
579
580protected:
582
584
585private:
586 friend class task;
588
593
595 template <typename T>
596 void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );
597
600
603
604#if __TBB_FP_CONTEXT
606 // TODO: Consider adding #else stub in order to omit #if sections in other code
608#endif /* __TBB_FP_CONTEXT */
609}; // class task_group_context
610
611#endif /* __TBB_TASK_GROUP_CONTEXT */
612
614
616
618 void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );
619
621 internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();
622
623protected:
625 task() {prefix().extra_state=1;}
626
627public:
629 virtual ~task() {}
630
632 virtual task* execute() = 0;
633
647 recycle
648#if __TBB_RECYCLE_TO_ENQUEUE
650 ,to_enqueue
651#endif
652#if __TBB_PREVIEW_RESUMABLE_TASKS
654 ,to_resume
655#endif
656 };
657
658 //------------------------------------------------------------------------
659 // Allocating tasks
660 //------------------------------------------------------------------------
661
665 }
666
667#if __TBB_TASK_GROUP_CONTEXT
671 }
672#endif /* __TBB_TASK_GROUP_CONTEXT */
673
675
677 return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
678 }
679
682 return *reinterpret_cast<internal::allocate_child_proxy*>(this);
683 }
684
686 using task_base::allocate_additional_child_of;
687
688#if __TBB_DEPRECATED_TASK_INTERFACE
690
694 void __TBB_EXPORTED_METHOD destroy( task& t );
695#else /* !__TBB_DEPRECATED_TASK_INTERFACE */
697 using task_base::destroy;
698#endif /* !__TBB_DEPRECATED_TASK_INTERFACE */
699
700 //------------------------------------------------------------------------
701 // Recycling of tasks
702 //------------------------------------------------------------------------
703
705
712 __TBB_ASSERT( prefix().state==executing, "execute not running?" );
713 prefix().state = allocated;
714 }
715
717
720 __TBB_ASSERT( prefix().state==executing, "execute not running?" );
721 prefix().state = recycle;
722 }
723
725 void recycle_as_child_of( task& new_parent ) {
726 internal::task_prefix& p = prefix();
727 __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
728 __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
729 __TBB_ASSERT( p.parent==NULL, "parent must be null" );
730 __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
731 __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
732 p.state = allocated;
733 p.parent = &new_parent;
734#if __TBB_TASK_GROUP_CONTEXT
735 p.context = new_parent.prefix().context;
736#endif /* __TBB_TASK_GROUP_CONTEXT */
737 }
738
740
742 __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
743 __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
744 prefix().state = reexecute;
745 }
746
#if __TBB_RECYCLE_TO_ENQUEUE
    //! Mark this task for re-submission via enqueue after the current execute() returns.
    void recycle_to_enqueue() {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( p.state==executing, "execute not running, or already recycled" );
        p.state = to_enqueue;
    }
#endif /* __TBB_RECYCLE_TO_ENQUEUE */
755
756 //------------------------------------------------------------------------
757 // Spawning and blocking
758 //------------------------------------------------------------------------
759
761 void set_ref_count( int count ) {
762#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
763 internal_set_ref_count(count);
764#else
765 prefix().ref_count = count;
766#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
767 }
768
770
772 __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
773 }
774
776
777 int add_ref_count( int count ) {
778 internal::call_itt_notify( internal::releasing, &prefix().ref_count );
779 internal::reference_count k = count+__TBB_FetchAndAddW( &prefix().ref_count, count );
780 __TBB_ASSERT( k>=0, "task's reference count underflowed" );
781 if( k==0 )
782 internal::call_itt_notify( internal::acquired, &prefix().ref_count );
783 return int(k);
784 }
785
787
789#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
790 return int(internal_decrement_ref_count());
791#else
792 return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
793#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
794 }
795
797 using task_base::spawn;
798
801 prefix().owner->wait_for_all( *this, &child );
802 }
803
805 void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );
806
808 static void spawn_root_and_wait( task& root ) {
809 root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
810 }
811
813
815 static void spawn_root_and_wait( task_list& root_list );
816
818
820 prefix().owner->wait_for_all( *this, NULL );
821 }
822
824#if __TBB_TASK_PRIORITY
835#endif /* __TBB_TASK_PRIORITY */
836 static void enqueue( task& t ) {
837 t.prefix().owner->enqueue( t, NULL );
838 }
839
840#if __TBB_TASK_PRIORITY
842 static void enqueue( task& t, priority_t p ) {
843#if __TBB_PREVIEW_CRITICAL_TASKS
845 || p == internal::priority_critical, "Invalid priority level value");
846#else
847 __TBB_ASSERT(p == priority_low || p == priority_normal || p == priority_high, "Invalid priority level value");
848#endif
849 t.prefix().owner->enqueue( t, (void*)p );
850 }
851#endif /* __TBB_TASK_PRIORITY */
852
855#if __TBB_TASK_PRIORITY
856 inline static void enqueue( task& t, task_arena& arena, priority_t p = priority_t(0) );
857#else
858 inline static void enqueue( task& t, task_arena& arena);
859#endif
860
862 static task& __TBB_EXPORTED_FUNC self();
863
865 task* parent() const {return prefix().parent;}
866
869#if __TBB_TASK_GROUP_CONTEXT
870 __TBB_ASSERT(!p || prefix().context == p->prefix().context, "The tasks must be in the same context");
871#endif
872 prefix().parent = p;
873 }
874
875#if __TBB_TASK_GROUP_CONTEXT
877
878 task_group_context* context() {return prefix().context;}
879
881 task_group_context* group () { return prefix().context; }
882#endif /* __TBB_TASK_GROUP_CONTEXT */
883
885 bool is_stolen_task() const {
886 return (prefix().extra_state & 0x80)!=0;
887 }
888
890 bool is_enqueued_task() const {
891 // es_task_enqueued = 0x10
892 return (prefix().extra_state & 0x10)!=0;
893 }
894
895#if __TBB_PREVIEW_RESUMABLE_TASKS
897 typedef void* suspend_point;
898
900 template <typename F>
901 static void suspend(F f);
902
904 static void resume(suspend_point tag);
905#endif
906
907 //------------------------------------------------------------------------
908 // Debugging
909 //------------------------------------------------------------------------
910
912 state_type state() const {return state_type(prefix().state);}
913
915 int ref_count() const {
916#if TBB_USE_ASSERT
917#if __TBB_PREVIEW_RESUMABLE_TASKS
918 internal::reference_count ref_count_ = prefix().ref_count & ~internal::abandon_flag;
919#else
920 internal::reference_count ref_count_ = prefix().ref_count;
921#endif
922 __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
923#endif
924#if __TBB_PREVIEW_RESUMABLE_TASKS
925 return int(prefix().ref_count & ~internal::abandon_flag);
926#else
927 return int(prefix().ref_count);
928#endif
929 }
930
932 bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;
933
934 //------------------------------------------------------------------------
935 // Affinity
936 //------------------------------------------------------------------------
937
939
941
943 void set_affinity( affinity_id id ) {prefix().affinity = id;}
944
946 affinity_id affinity() const {return prefix().affinity;}
947
949
953 virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );
954
955#if __TBB_TASK_GROUP_CONTEXT
957
968
970
971 bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }
972
974 bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
975#else
976 bool is_cancelled () const { return false; }
977#endif /* __TBB_TASK_GROUP_CONTEXT */
978
979#if __TBB_TASK_PRIORITY
981 __TBB_DEPRECATED void set_group_priority ( priority_t p ) { prefix().context->set_priority(p); }
982
984 __TBB_DEPRECATED priority_t group_priority () const { return prefix().context->priority(); }
985
986#endif /* __TBB_TASK_PRIORITY */
987
988private:
990 friend class task_list;
993#if __TBB_TASK_GROUP_CONTEXT
995#endif /* __TBB_TASK_GROUP_CONTEXT */
999
1001
1003 return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
1004 }
1005#if __TBB_PREVIEW_CRITICAL_TASKS
1006 friend void internal::make_critical( task& );
1007 friend bool internal::is_critical( task& );
1008#endif
1009}; // class task
1010
#if __TBB_PREVIEW_CRITICAL_TASKS
namespace internal {
// Critical tasks are flagged via bit 0x8 of the task prefix's extra_state byte.
inline void make_critical( task& t ) { t.prefix().extra_state |= 0x8; }
inline bool is_critical( task& t ) { return (t.prefix().extra_state & 0x8) != 0; }
} // namespace internal
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
1017
#if __TBB_PREVIEW_RESUMABLE_TASKS
namespace internal {
    //! Trampoline handed to the runtime; forwards the suspend point to the user functor.
    template <typename F>
    static void suspend_callback(void* user_callback, task::suspend_point tag) {
        // Copy user function to a new stack to avoid a race when the previous scheduler is resumed.
        F functor_copy = *static_cast<F*>(user_callback);
        functor_copy(tag);
    }
    void __TBB_EXPORTED_FUNC internal_suspend(void* suspend_callback, void* user_callback);
    void __TBB_EXPORTED_FUNC internal_resume(task::suspend_point);
    task::suspend_point __TBB_EXPORTED_FUNC internal_current_suspend_point();
}

//! Suspend the current task, passing its suspend point to functor f.
template <typename F>
inline void task::suspend(F f) {
    internal::internal_suspend((void*)internal::suspend_callback<F>, &f);
}

//! Resume execution at a previously captured suspend point.
inline void task::resume(suspend_point tag) {
    internal::internal_resume(tag);
}
#endif
1039
1041
1043#if __has_cpp_attribute(gnu::used)
1044 [[gnu::used]]
1045#endif
1047 return NULL;
1048 }
1049};
1050
1052namespace internal {
1053 template<typename F>
1054 class function_task : public task {
1055#if __TBB_ALLOW_MUTABLE_FUNCTORS
1056 // TODO: deprecated behavior, remove
1057 F my_func;
1058#else
1059 const F my_func;
1060#endif
1062 my_func();
1063 return NULL;
1064 }
1065 public:
1066 function_task( const F& f ) : my_func(f) {}
1067#if __TBB_CPP11_RVALUE_REF_PRESENT
1068 function_task( F&& f ) : my_func( std::move(f) ) {}
1069#endif
1070 };
1071} // namespace internal
1073
1075
1078private:
1081 friend class task;
1083public:
1085 task_list() : first(NULL), next_ptr(&first) {}
1086
1089
1091 bool empty() const {return !first;}
1092
1095 task.prefix().next = NULL;
1096 *next_ptr = &task;
1097 next_ptr = &task.prefix().next;
1098 }
#if __TBB_TODO
    // TODO: add this method and implement&document the local execution ordering. See more in generic_scheduler::local_spawn
    //! Push task onto the front of the list.
    void push_front( task& task ) {
        if( !empty() ) {
            task.prefix().next = first;
            first = &task;
        } else {
            // First element: delegate so next_ptr is set up correctly.
            push_back(task);
        }
    }
#endif
1113 __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
1114 task* result = first;
1115 first = result->prefix().next;
1116 if( !first ) next_ptr = &first;
1117 return *result;
1118 }
1119
1121 void clear() {
1122 first=NULL;
1123 next_ptr=&first;
1124 }
1125};
1126
1128 t.prefix().owner->spawn( t, t.prefix().next );
1129}
1130
1132 if( task* t = list.first ) {
1133 t->prefix().owner->spawn( *t, *list.next_ptr );
1134 list.clear();
1135 }
1136}
1137
1138inline void task::spawn_root_and_wait( task_list& root_list ) {
1139 if( task* t = root_list.first ) {
1140 t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
1141 root_list.clear();
1142 }
1143}
1144
1145} // namespace tbb
1146
1147inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
1149}
1150
1151inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
1153}
1154
#if __TBB_TASK_GROUP_CONTEXT
//! Placement new used by allocate_root(ctx): memory comes from the proxy.
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    tbb::task& t = p.allocate(bytes);
    return &t;
}

//! Matching placement delete; invoked only if the task constructor throws.
inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif /* __TBB_TASK_GROUP_CONTEXT */
1164
1165inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
1166 return &p.allocate(bytes);
1167}
1168
1169inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
1170 p.free( *static_cast<tbb::task*>(task) );
1171}
1172
1173inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
1174 return &p.allocate(bytes);
1175}
1176
1177inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
1178 p.free( *static_cast<tbb::task*>(task) );
1179}
1180
1181inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
1182 return &p.allocate(bytes);
1183}
1184
1185inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
1186 p.free( *static_cast<tbb::task*>(task) );
1187}
1188
1190#undef __TBB_task_H_include_area
1191
1192#endif /* __TBB_task_H */
#define __TBB_TASK_BASE_ACCESS
Definition: task.h:52
struct ___itt_caller * __itt_caller
Definition: task.h:39
#define __TBB_atomic
Definition: tbb_stddef.h:237
#define __TBB_EXPORTED_METHOD
Definition: tbb_stddef.h:98
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
#define __TBB_override
Definition: tbb_stddef.h:240
#define __TBB_FetchAndDecrementWrelease(P)
Definition: tbb_machine.h:311
#define __TBB_FetchAndIncrementWacquire(P)
Definition: tbb_machine.h:310
#define __TBB_DEPRECATED_IN_VERBOSE_MODE
Definition: tbb_config.h:647
#define __TBB_DEPRECATED
Definition: tbb_config.h:636
#define __TBB_EXPORTED_FUNC
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task * task
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t count
void const char const char int ITT_FORMAT __itt_group_sync x void const char * name
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id parent
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t size
void const char const char int ITT_FORMAT __itt_group_sync p
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id id
void const char const char int ITT_FORMAT __itt_group_sync x void const char ITT_FORMAT __itt_group_sync s void ITT_FORMAT __itt_group_sync p void ITT_FORMAT p void ITT_FORMAT p no args __itt_suppress_mode_t unsigned int void size_t ITT_FORMAT d void ITT_FORMAT p void ITT_FORMAT p __itt_model_site __itt_model_site_instance ITT_FORMAT p __itt_model_task __itt_model_task_instance ITT_FORMAT p void ITT_FORMAT p void ITT_FORMAT p void size_t ITT_FORMAT d void ITT_FORMAT p const wchar_t ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s const char ITT_FORMAT s no args void ITT_FORMAT p size_t ITT_FORMAT d no args const wchar_t const wchar_t ITT_FORMAT s __itt_heap_function void size_t int ITT_FORMAT d __itt_heap_function void ITT_FORMAT p __itt_heap_function void void size_t int ITT_FORMAT d no args no args unsigned int ITT_FORMAT u const __itt_domain __itt_id ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain __itt_id ITT_FORMAT p const __itt_domain __itt_id __itt_timestamp __itt_timestamp ITT_FORMAT lu const __itt_domain __itt_id __itt_id __itt_string_handle ITT_FORMAT p const __itt_domain ITT_FORMAT p const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_string_handle unsigned long long ITT_FORMAT lu const __itt_domain __itt_id __itt_string_handle __itt_metadata_type size_t void ITT_FORMAT p const __itt_domain __itt_id __itt_string_handle const wchar_t size_t ITT_FORMAT lu const __itt_domain __itt_id __itt_relation __itt_id ITT_FORMAT p const wchar_t int ITT_FORMAT __itt_group_mark d int
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
Definition: tbb_stddef.h:216
STL namespace.
The graph class.
priority_t
Definition: task.h:317
@ priority_normal
Definition: task.h:318
@ priority_high
Definition: task.h:320
@ priority_low
Definition: task.h:319
void move(tbb_thread &t1, tbb_thread &t2)
Definition: tbb_thread.h:319
intptr_t isolation_tag
A tag for task isolation.
Definition: task.h:143
intptr_t reference_count
A reference count.
Definition: task.h:131
bool is_critical(task &t)
Definition: task.h:1014
unsigned short affinity_id
An id as used for specifying affinity.
Definition: task.h:139
static const int priority_stride_v4
Definition: task.h:310
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
Definition: tbb_stddef.h:398
static const int priority_critical
Definition: task.h:313
void make_critical(task &t)
Definition: task.h:1013
const isolation_tag no_isolation
Definition: task.h:144
auto first(Container &c) -> decltype(begin(c))
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:134
task * self
No longer used, but retained for binary layout compatibility. Always NULL.
Definition: task.h:59
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:128
int space[sizeof(internal::uint64_t)/sizeof(int)]
Definition: task.h:69
Base class for methods that became static in TBB 3.0.
Definition: task.h:80
static void __TBB_EXPORTED_FUNC destroy(task &victim)
Destroy a task.
Definition: task.cpp:212
static void spawn(task &t)
Schedule task for execution when a worker becomes available.
Definition: task.h:1127
static tbb::internal::allocate_additional_child_of_proxy allocate_additional_child_of(task &t)
Like allocate_child, except that task's parent becomes "t", not this.
Definition: task.h:93
virtual void wait_for_all(task &parent, task *child)=0
For internal use only.
virtual void spawn_root_and_wait(task &first, task *&next)=0
For internal use only.
virtual void spawn(task &first, task *&next)=0
For internal use only.
virtual void enqueue(task &t, void *reserved)=0
For internal use only.
virtual ~scheduler()=0
Pure virtual destructor;.
Definition: scheduler.cpp:72
context_list_node_t * my_next
Definition: task.h:152
context_list_node_t * my_prev
Definition: task.h:151
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
allocate_root_with_context_proxy(task_group_context &ctx)
Definition: task.h:158
void __TBB_EXPORTED_METHOD free(task &) const
static void __TBB_EXPORTED_FUNC free(task &)
Definition: task.cpp:47
static task &__TBB_EXPORTED_FUNC allocate(size_t size)
Definition: task.cpp:35
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:96
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:105
void __TBB_EXPORTED_METHOD free(task &) const
Definition: task.cpp:121
task &__TBB_EXPORTED_METHOD allocate(size_t size) const
Definition: task.cpp:114
Memory prefix to a task object.
Definition: task.h:203
tbb::task * next
"next" field for list of task
Definition: task.h:297
scheduler * origin
The scheduler that allocated the task, or NULL if the task is big.
Definition: task.h:239
scheduler * owner
Obsolete. The scheduler that owns the task.
Definition: task.h:247
unsigned char extra_state
Miscellaneous state that is not directly visible to users, stored as a byte for compactness.
Definition: task.h:292
int depth
Obsolete. Used to be scheduling depth before TBB 2.2.
Definition: task.h:279
isolation_tag isolation
The tag used for task isolation.
Definition: task.h:220
__TBB_atomic reference_count ref_count
Reference count used for synchronization.
Definition: task.h:274
tbb::task & task()
The task corresponding to this task_prefix.
Definition: task.h:300
unsigned char state
A task::state_type, stored as a byte for compactness.
Definition: task.h:283
task * next_offloaded
Pointer to the next offloaded lower priority task.
Definition: task.h:252
task_group_context * context
Shared context that is used to communicate asynchronous state changes.
Definition: task.h:230
friend void make_critical(task &)
Definition: task.h:1013
friend bool is_critical(task &)
Definition: task.h:1014
tbb::task * parent
The task whose reference count includes me.
Definition: task.h:267
affinity_id affinity
Definition: task.h:294
Used to form groups of tasks.
Definition: task.h:358
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
Definition: task.h:405
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
Definition: task.h:449
bool __TBB_EXPORTED_METHOD cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
bool __TBB_EXPORTED_METHOD is_group_execution_cancelled() const
Returns true if the context received cancellation request.
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
Definition: task.h:410
internal::string_index my_name
Description of algorithm for scheduler based instrumentation.
Definition: task.h:463
internal::tbb_exception_ptr exception_container_type
Definition: task.h:367
void propagate_task_group_state(T task_group_context::*mptr_state, task_group_context &src, T new_state)
Propagates any state change detected to *this, and as an optimisation possibly also upward along the ...
task_group_context(internal::string_index name)
Definition: task.h:514
static const kind_type binding_completed
Definition: task.h:590
intptr_t my_priority
Priority level of the task group (in normalized representation)
Definition: task.h:459
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
void register_with(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler.
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
Definition: task.h:440
static const kind_type detached
Definition: task.h:591
__TBB_DEPRECATED_IN_VERBOSE_MODE void set_priority(priority_t)
Changes priority of the task group.
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
task_group_context(kind_type relation_with_parent=bound, uintptr_t t=default_traits)
Default & binding constructor.
Definition: task.h:504
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
Definition: task.h:418
char _trailing_padding[internal::NFS_MaxLineSize - 2 *sizeof(uintptr_t) - 2 *sizeof(void *) - sizeof(intptr_t) - sizeof(internal::string_index)]
Trailing padding protecting accesses to frequently used members from false sharing.
Definition: task.h:472
__TBB_DEPRECATED_IN_VERBOSE_MODE priority_t priority() const
Retrieves current priority of the current task group.
void __TBB_EXPORTED_METHOD init()
Out-of-line part of the constructor.
uintptr_t _my_kind_aligner
Definition: task.h:406
void bind_to(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler and binds it to its parent context.
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
Definition: task.h:455
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
Definition: task.h:415
void __TBB_EXPORTED_METHOD capture_fp_settings()
Captures the current FPU control settings to the context.
__TBB_EXPORTED_METHOD ~task_group_context()
char _leading_padding[internal::NFS_MaxLineSize - 2 *sizeof(uintptr_t) - sizeof(void *) - sizeof(internal::context_list_node_t) - sizeof(__itt_caller) - sizeof(internal::cpu_ctl_env_space)]
Leading padding protecting accesses to frequently used members from false sharing.
Definition: task.h:430
uintptr_t traits() const
Returns the context's trait.
Definition: task.h:578
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
Definition: task.h:436
static const kind_type dying
Definition: task.h:592
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
Definition: task.h:452
void __TBB_EXPORTED_METHOD reset()
Forcefully reinitializes the context after the task tree it was associated with is completed.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
Definition: task.h:446
static const kind_type binding_required
Definition: task.h:589
Base class for user-defined tasks.
Definition: task.h:615
void __TBB_EXPORTED_METHOD change_group(task_group_context &ctx)
Moves this task from its current group into another one.
virtual task * execute()=0
Should be overridden by derived classes.
bool is_enqueued_task() const
True if the task was enqueued.
Definition: task.h:890
affinity_id affinity() const
Current affinity of this task.
Definition: task.h:946
internal::allocate_child_proxy & allocate_child()
Returns proxy for overloaded new that allocates a child task of *this.
Definition: task.h:681
bool is_stolen_task() const
True if task was stolen from the task pool of another thread.
Definition: task.h:885
task * parent() const
task on whose behalf this task is working, or NULL if this is a root.
Definition: task.h:865
static void enqueue(task &t)
Enqueue task for starvation-resistant execution.
Definition: task.h:836
state_type
Enumeration of task states that the scheduler considers.
Definition: task.h:635
@ allocated
task object is freshly allocated or recycled.
Definition: task.h:643
@ reexecute
task to be rescheduled.
Definition: task.h:639
@ ready
task is in ready pool, or is going to be put there, or was just taken off.
Definition: task.h:641
@ freed
task object is on free list, or is going to be put there, or was just taken off.
Definition: task.h:645
@ executing
task is running, and will be destroyed after method execute() completes.
Definition: task.h:637
void recycle_to_reexecute()
Schedule this for reexecution after current execute() returns.
Definition: task.h:741
internal::allocate_continuation_proxy & allocate_continuation()
Returns proxy for overloaded new that allocates a continuation task of *this.
Definition: task.h:676
int decrement_ref_count()
Atomically decrement reference count and returns its new value.
Definition: task.h:788
__TBB_DEPRECATED priority_t group_priority() const
Retrieves current priority of the task group this task belongs to.
Definition: task.h:984
state_type state() const
Current execution state.
Definition: task.h:912
void recycle_as_child_of(task &new_parent)
Change this to be a child of new_parent.
Definition: task.h:725
void set_parent(task *p)
sets parent task pointer to specified value
Definition: task.h:868
bool cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
Definition: task.h:971
bool is_cancelled() const
Returns true if the context has received a cancellation request.
Definition: task.h:974
void recycle_as_continuation()
Change this to be a continuation of its former self.
Definition: task.h:711
int ref_count() const
The internal reference count.
Definition: task.h:915
void spawn_and_wait_for_all(task &child)
Similar to spawn followed by wait_for_all, but more efficient.
Definition: task.h:800
task_group_context * group()
Pointer to the task group descriptor.
Definition: task.h:881
void wait_for_all()
Wait for reference count to become one, and set reference count to zero.
Definition: task.h:819
void recycle_as_safe_continuation()
Recommended to use, safe variant of recycle_as_continuation.
Definition: task.h:719
void set_affinity(affinity_id id)
Set affinity for this task.
Definition: task.h:943
virtual ~task()
Destructor.
Definition: task.h:629
static internal::allocate_root_proxy allocate_root()
Returns proxy for overloaded new that allocates a root task.
Definition: task.h:663
void set_ref_count(int count)
Set reference count.
Definition: task.h:761
void increment_ref_count()
Atomically increment reference count.
Definition: task.h:771
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
Definition: task.h:1002
task()
Default constructor.
Definition: task.h:625
task_group_context * context()
This method is deprecated and will be removed in the future.
Definition: task.h:878
internal::affinity_id affinity_id
An id as used for specifying affinity.
Definition: task.h:940
static internal::allocate_root_with_context_proxy allocate_root(task_group_context &ctx)
Returns proxy for overloaded new that allocates a root task associated with user supplied context.
Definition: task.h:669
__TBB_DEPRECATED void set_group_priority(priority_t p)
Changes priority of the task group this task belongs to.
Definition: task.h:981
static void enqueue(task &t, priority_t p)
Enqueue task for starvation-resistant execution on the specified priority level.
Definition: task.h:842
static void spawn_root_and_wait(task &root)
Spawn task allocated by allocate_root, wait for it to complete, and deallocate it.
Definition: task.h:808
int add_ref_count(int count)
Atomically adds to reference count and returns its new value.
Definition: task.h:777
task that does nothing. Useful for synchronization.
Definition: task.h:1042
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task.h:1046
function_task(const F &f)
Definition: task.h:1066
task * execute() __TBB_override
Should be overridden by derived classes.
Definition: task.h:1061
A list of children.
Definition: task.h:1077
task & pop_front()
Pop the front task from the list.
Definition: task.h:1112
~task_list()
Destroys the list, but does not destroy the task objects.
Definition: task.h:1088
void push_back(task &task)
Push task onto back of list.
Definition: task.h:1094
task ** next_ptr
Definition: task.h:1080
bool empty() const
True if list is empty; false otherwise.
Definition: task.h:1091
task * first
Definition: task.h:1079
task_list()
Construct empty list.
Definition: task.h:1085
void clear()
Clear the list.
Definition: task.h:1121
Class delimiting the scope of task scheduler activity.
Interface to be implemented by all exceptions TBB recognizes and propagates across the threads.
Exception container that preserves the exact copy of the original exception.
Base class for types that should not be assigned.
Definition: tbb_stddef.h:322
Base class for types that should not be copied or assigned.
Definition: tbb_stddef.h:330
Work stealing task scheduler.
Definition: scheduler.h:140

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.