µOS++ IIIe Reference 7.0.0
The third edition of µOS++, a POSIX-inspired open-source framework written in C++
os-thread.cpp
1/*
2 * This file is part of the µOS++ distribution.
3 * (https://github.com/micro-os-plus)
4 * Copyright (c) 2016 Liviu Ionescu.
5 *
6 * Permission is hereby granted, free of charge, to any person
7 * obtaining a copy of this software and associated documentation
8 * files (the "Software"), to deal in the Software without
9 * restriction, including without limitation the rights to use,
10 * copy, modify, merge, publish, distribute, sublicense, and/or
11 * sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following
13 * conditions:
14 *
15 * The above copyright notice and this permission notice shall be
16 * included in all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
20 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
22 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
23 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
26 */
27
28#include <cmsis-plus/rtos/os.h>
29
30#include <memory>
31#include <stdexcept>
32
33// ----------------------------------------------------------------------------
34
35#if defined(__clang__)
36#pragma clang diagnostic ignored "-Wc++98-compat"
37#endif
38
39// ----------------------------------------------------------------------------
40
41namespace os
42{
43 namespace rtos
44 {
45 // ------------------------------------------------------------------------
46
51 std::size_t thread::stack::min_size_bytes_ = port::stack::min_size_bytes;
52
53 std::size_t thread::stack::default_size_bytes_ =
54 port::stack::default_size_bytes;
55
60 // ------------------------------------------------------------------------
61 using mutexes_list = utils::intrusive_list<
62 mutex, utils::double_list_links, &mutex::owner_links_>;
63
64 // ========================================================================
112 const thread::attributes thread::initializer;
113
163 void
164 thread::stack::initialize (void)
165 {
166 // Align the bottom of the stack.
167 void* pa = bottom_address_;
168 bottom_address_ = static_cast<stack::element_t*> (std::align (
169 alignof(allocation_element_t), stack::min_size (), pa,
170 size_bytes_));
171
172 // If there is not enough space for the minimal stack, fail.
173 os_assert_throw(bottom_address_ != nullptr, ENOMEM);
174
175 element_t* p = bottom_address_;
176 element_t* pend = top ();
177
178 // Initialise the entire stack with the magic word.
179 for (; p < pend; ++p)
180 {
181 *p = magic;
182 }
183
184 // Compute the actual size. The -1 is to leave space for the magic.
185 size_bytes_ = ((static_cast<std::size_t> (p - bottom_address_) - 1)
186 * sizeof(element_t));
187 }
188
201 std::size_t
202 thread::stack::available (void)
203 {
204 element_t* p = bottom_address_;
205 std::size_t count = 0;
206 while (*p == magic)
207 {
208 count += sizeof(element_t);
209 ++p;
210 }
211
212 return count;
213 }
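// Note: initialize() above fills the stack with the `magic` word and
// available() counts how many of those words near the stack bottom were
// never overwritten; size () - available () is the worst-case usage
// reported later by internal_check_stack_().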
214
227 void
228 thread::internal_invoke_with_exit_ (thread* thread)
229 {
230#if defined(OS_TRACE_RTOS_THREAD)
231 trace::printf ("%s() @%p %s\n", __func__, thread, thread->name ());
232#endif
233
234 void* exit_ptr;
235#if defined(__EXCEPTIONS)
236 try
237 {
238 exit_ptr = thread->func_ (thread->func_args_);
239 }
240 catch (std::exception& e)
241 {
242 trace::printf ("%s() @%p %s top exception \"%s\".\n", __func__,
243 thread, thread->name (), e.what ());
244 exit_ptr = nullptr;
245 }
246 catch (...)
247 {
248 trace::printf ("%s() @%p %s top exception.\n", __func__, thread,
249 thread->name ());
250 exit_ptr = nullptr;
251 }
252#else
253 exit_ptr = thread->func_ (thread->func_args_);
254#endif
255 thread->internal_exit_ (exit_ptr);
256 }
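// Note: internal_invoke_with_exit_() is the entry trampoline passed when
// the thread context is created; it runs the user function and, when
// exceptions are enabled, catches anything thrown so the thread still
// terminates cleanly through internal_exit_().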
257
258 thread::thread ()
259 {
260#if defined(OS_TRACE_RTOS_THREAD)
261 trace::printf ("%s() @%p %s\n", __func__, this, this->name ());
262#endif
263 // Must be explicit here, since they are not done in the member
264 // declarations, to allow th_enable_assert_reuse.
265 state_ = state::initializing;
266 func_ = nullptr;
267 }
268
269 thread::thread (const char* name) :
270 object_named_system
271 { name }
272 {
273#if defined(OS_TRACE_RTOS_THREAD)
274 trace::printf ("%s() @%p %s\n", __func__, this, this->name ());
275#endif
276 // Must be explicit here, since they are not done in the member
277 // declarations, to allow th_enable_assert_reuse.
278 state_ = state::initializing;
279 }
280
303 bool
304 thread::is_constructed (const thread& thread)
305 {
306 return ((thread.state_ == state::ready ||
307 thread.state_ == state::running ||
308 thread.state_ == state::suspended ||
309 thread.state_ == state::terminated));
310 }
311
356 thread::thread (func_t function, func_args_t args, const attributes& attr,
357 const allocator_type& allocator) :
358 thread
359 { nullptr, function, args, attr, allocator }
360 {
361 ;
362 }
363
408 thread::thread (const char* name, func_t function, func_args_t args,
409 const attributes& attr, const allocator_type& allocator) :
410 object_named_system
411 { name }
412 {
413#if defined(OS_TRACE_RTOS_THREAD)
414 trace::printf ("%s() @%p %s\n", __func__, this, this->name ());
415#endif
416
417#if defined(DEBUG)
418 if (attr.th_enable_assert_reuse) {
419 // Expect either statically initialised (undefined), or destroyed.
420 assert((state_ == state::undefined) || (state_ == state::destroyed));
421 }
422#endif /* DEBUG */
423
424 state_ = state::initializing;
425
426 allocator_ = &allocator;
427
428 if (attr.th_stack_address != nullptr
429 && attr.th_stack_size_bytes > stack::min_size ())
430 {
431 internal_construct_ (function, args, attr, nullptr, 0);
432 }
433 else
434 {
435 using allocator_type2 = memory::allocator<stack::allocation_element_t>;
436
437 if (attr.th_stack_size_bytes > stack::min_size ())
438 {
439 allocated_stack_size_elements_ = (attr.th_stack_size_bytes
440 + sizeof(stack::allocation_element_t) - 1)
441 / sizeof(stack::allocation_element_t);
442 }
443 else
444 {
445 allocated_stack_size_elements_ = (stack::default_size ()
446 + sizeof(stack::allocation_element_t) - 1)
447 / sizeof(stack::allocation_element_t);
448 }
449
450 allocated_stack_address_ =
451 reinterpret_cast<stack::element_t*> (const_cast<allocator_type2&> (allocator).allocate (
452 allocated_stack_size_elements_));
453
454 // Stack allocation failed.
455 assert(allocated_stack_address_ != nullptr);
456
457 internal_construct_ (
458 function,
459 args,
460 attr,
461 allocated_stack_address_,
462 allocated_stack_size_elements_
463 * sizeof(stack::allocation_element_t));
464 }
465 }
466
471 void
472 thread::internal_construct_ (func_t function, func_args_t args,
473 const attributes& attr, void* stack_address,
474 std::size_t stack_size_bytes)
475 {
476 // Don't call this from interrupt handlers.
477 os_assert_throw(!interrupts::in_handler_mode (), EPERM);
478
479 // The thread function must be real.
480 assert(function != nullptr);
481 // Don't forget to set the thread priority.
482 assert(attr.th_priority != priority::none);
483
484 clock_ = attr.clock != nullptr ? attr.clock : &sysclock;
485
486 if (stack_address != nullptr)
487 {
488 // The attributes should not define any storage in this case.
489 if (attr.th_stack_size_bytes > stack::min_size ())
490 {
491 // The attributes must not also supply a stack address.
492 assert(attr.th_stack_address == nullptr);
493 }
494
495 stack ().set (static_cast<stack::element_t*> (stack_address),
496 stack_size_bytes);
497 }
498 else
499 {
500 stack ().set (static_cast<stack::element_t*> (attr.th_stack_address),
501 attr.th_stack_size_bytes);
502 }
503
504#if defined(OS_TRACE_RTOS_THREAD)
505 trace::printf ("%s() @%p %s p%u stack{%p,%u}\n", __func__, this, name (),
506 attr.th_priority, stack ().bottom_address_,
507 stack ().size_bytes_);
508#endif
509
510 {
511 // Prevent the new thread from executing before all members are set.
512 // ----- Enter critical section -------------------------------------
513 scheduler::critical_section scs;
514
515 // Get attributes from user structure.
516 prio_assigned_ = attr.th_priority;
517
518 func_ = function;
519 func_args_ = args;
520
521 parent_ = this_thread::_thread ();
522 if (scheduler::started () && (parent_ != nullptr))
523 {
524 parent_->children_.link (*this);
525 }
526 else
527 {
528 scheduler::top_threads_list_.link (*this);
529 }
530
531 stack ().initialize ();
532
533#if defined(OS_USE_RTOS_PORT_SCHEDULER)
534
535 port::thread::create (this);
536 state_ = state::ready;
537
538#else
539
540#pragma GCC diagnostic push
541#if defined(__clang__)
542#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
543#endif
544 // Create the port specific context.
545 port::context::create (
546 &context_, reinterpret_cast<void*> (internal_invoke_with_exit_),
547 this);
548#pragma GCC diagnostic pop
549
550 if (!scheduler::started ())
551 {
552 scheduler::current_thread_ = this;
553 }
554
555 // Add to ready list, but do not yield yet.
556 resume ();
557
558#endif
559 // ----- Exit critical section --------------------------------------
560 }
561 // Just in case the new thread has higher priority.
562 this_thread::yield ();
563 }
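// Note: internal_construct_() does the member set-up inside a scheduler
// critical section, so the new thread cannot start running before its
// priority, parent link and stack have been recorded.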
564
583 thread::~thread ()
584 {
585#if defined(OS_TRACE_RTOS_THREAD)
586 trace::printf ("%s() @%p %s \n", __func__, this, name ());
587#endif
588
589 // Prevent the thread from destroying itself while running
590 // the exit cleanup code.
591 if (this != &this_thread::thread ())
592 {
593 // Extra test to avoid the 'already gone' message.
594 if (state_ != state::destroyed)
595 {
596 kill ();
597 }
598 }
599 else
600 {
601#if defined(OS_TRACE_RTOS_THREAD)
602 trace::printf ("%s() @%p %s nop, cannot commit suicide\n", __func__,
603 this, name ());
604#endif
605 }
606 }
607
617 void
618 thread::resume (void)
619 {
620#if defined(OS_TRACE_RTOS_THREAD_CONTEXT)
621 trace::printf ("%s() @%p %s %u\n", __func__, this, name (),
622 prio_assigned_);
623#endif
624
625#if defined(OS_USE_RTOS_PORT_SCHEDULER)
626
627 {
628 // ----- Enter critical section -------------------------------------
629 interrupts::critical_section ics;
630
631 state_ = state::ready;
632 port::thread::resume (this);
633 // ----- Exit critical section --------------------------------------
634 }
635
636#else
637
638 // Don't call this from high priority interrupts.
639 assert(port::interrupts::is_priority_valid ());
640
641 {
642 // ----- Enter critical section -------------------------------------
643 interrupts::critical_section ics;
644
645 // If the thread is not already in the ready list, enqueue it.
646 if (ready_node_.next () == nullptr)
647 {
648 scheduler::ready_threads_list_.link (ready_node_);
649 // state::ready set in above link().
650 }
651 // ----- Exit critical section --------------------------------------
652 }
653
654 port::scheduler::reschedule ();
655
656#endif
657
658 }
659
666 thread::priority_t
667 thread::priority (void)
668 {
669 // trace::printf ("%s() @%p %s\n", __func__, this, name ());
670
671 if (prio_inherited_ == priority::none)
672 {
673 // The common case is to have no inherited priority;
674 // return the assigned one.
675 return prio_assigned_;
676 }
677 else
678 {
679 // Return the maximum between inherited and assigned.
680 return
681 (prio_inherited_ >= prio_assigned_) ?
682 prio_inherited_ : prio_assigned_;
683 }
684 }
685
692 thread::priority_t
693 thread::priority_inherited (void)
694 {
695 // Don't call this from interrupt handlers.
696 os_assert_err(!interrupts::in_handler_mode (), priority::error);
697
698 return prio_inherited_;
699 }
700
720 result_t
721 thread::priority (priority_t prio)
722 {
723#if defined(OS_TRACE_RTOS_THREAD)
724 trace::printf ("%s(%u) @%p %s\n", __func__, prio, this, name ());
725#endif
726
727 // Don't call this from interrupt handlers.
728 os_assert_err(!interrupts::in_handler_mode (), EPERM);
729 // Fail if the priority is not in the allowed range.
730 os_assert_err(prio < priority::error, EINVAL);
731 os_assert_err(prio != priority::none, EINVAL);
732
733 if (prio_assigned_ == prio)
734 {
735 // Optimise, if priority did not change.
736 return result::ok;
737 }
738
739 prio_assigned_ = prio;
740
741 result_t res = result::ok;
742
743#if defined(OS_USE_RTOS_PORT_SCHEDULER)
744
745 // The port must perform a context switch.
746 res = port::thread::priority (this, prio);
747
748#else
749
750 if (state_ == state::ready)
751 {
752 // ----- Enter critical section -------------------------------------
753 interrupts::critical_section ics;
754
755 // Remove from initial location and reinsert according
756 // to new priority.
757 ready_node_.unlink ();
758 scheduler::ready_threads_list_.link (ready_node_);
759 // ----- Exit critical section --------------------------------------
760 }
761
762 // Mandatory, the priority might have been raised, the
763 // task must be scheduled to run.
764 port::scheduler::reschedule ();
765
766#endif
767
768 return res;
769 }
770
788 result_t
789 thread::priority_inherited (priority_t prio)
790 {
791#if defined(OS_TRACE_RTOS_THREAD)
792 trace::printf ("%s(%u) @%p %s\n", __func__, prio, this, name ());
793#endif
794
795 // Don't call this from interrupt handlers.
796 os_assert_err(!interrupts::in_handler_mode (), EPERM);
797 // Fail if the priority is not in the allowed range.
798 os_assert_err(prio < priority::error, EINVAL);
799
800 // Warning: do not check for `priority::none`, since
801 // `mutex::unlock()` sets it when the list of mutexes owned
802 // by a thread is empty.
803
804 if (prio == prio_inherited_)
805 {
806 // Optimise, if priority did not change.
807 return result::ok;
808 }
809
810 prio_inherited_ = prio;
811
812 if (prio_inherited_ < prio_assigned_)
813 {
814 // Optimise, no need to reschedule.
815 return result::ok;
816 }
817
818 result_t res = result::ok;
819
820#if defined(OS_USE_RTOS_PORT_SCHEDULER)
821
822 // The port must perform a context switch.
823 res = port::thread::priority (this, prio);
824
825#else
826
827 if (state_ == state::ready)
828 {
829 // ----- Enter critical section -------------------------------------
830 interrupts::critical_section ics;
831
832 // Remove from initial location and reinsert according
833 // to new priority.
834 ready_node_.unlink ();
835 scheduler::ready_threads_list_.link (ready_node_);
836 // ----- Exit critical section --------------------------------------
837 }
838
839 // Mandatory, the priority might have been raised, the
840 // task must be scheduled to run.
841 port::scheduler::reschedule ();
842
843#endif
844
845 return res;
846 }
847
866 result_t
867 thread::detach (void)
868 {
869#if defined(OS_TRACE_RTOS_THREAD)
870 trace::printf ("%s() @%p %s\n", __func__, this, name ());
871#endif
872
873 // Don't call this from interrupt handlers.
874 os_assert_err(!interrupts::in_handler_mode (), EPERM);
875
876#if defined(OS_USE_RTOS_PORT_SCHEDULER)
877
878 result_t res = port::thread::detach (this);
879 if (res != result::ok)
880 {
881 return res;
882 }
883
884#else
885
886 // TODO: implement
887
888#endif
889
890 return result::ok;
891 }
892
921 result_t
922 thread::join (void** exit_ptr)
923 {
924#if defined(OS_TRACE_RTOS_THREAD)
925 trace::printf ("%s() @%p %s\n", __func__, this, name ());
926#endif
927
928 // Don't call this from interrupt handlers.
929 os_assert_err(!interrupts::in_handler_mode (), EPERM);
930 // Don't call this from critical regions.
931 os_assert_err(!scheduler::locked (), EPERM);
932
933 // Fail if called on the current thread; a thread cannot join itself.
934 assert(this != this_thread::_thread ());
935
936 while (state_ != state::destroyed)
937 {
938 joiner_ = this_thread::_thread ();
939 this_thread::_thread ()->internal_suspend_ ();
940 }
941
942#if defined(OS_TRACE_RTOS_THREAD)
943 trace::printf ("%s() @%p %s joined\n", __func__, this, name ());
944#endif
945
946 if (exit_ptr != nullptr)
947 {
948 *exit_ptr = func_result_;
949 }
950
951 return result::ok;
952 }
953
972 result_t
973 thread::cancel (void)
974 {
975#if defined(OS_TRACE_RTOS_THREAD)
976 trace::printf ("%s() @%p %s\n", __func__, this, name ());
977#endif
978
979 // Don't call this from interrupt handlers.
980 os_assert_err(!interrupts::in_handler_mode (), EPERM);
981
982 // TODO: implement according to POSIX specs.
983 return result::ok;
984 }
985
997 bool
998 thread::interrupt (bool interrupt)
999 {
1000#if defined(OS_TRACE_RTOS_THREAD)
1001 trace::printf ("%s() @%p %s\n", __func__, this, name ());
1002#endif
1003
1004 bool tmp = interrupted_;
1005 interrupted_ = interrupt;
1006
1007 resume ();
1008 return tmp;
1009 }
1010
1019 void
1020 thread::internal_suspend_ (void)
1021 {
1022#if defined(OS_TRACE_RTOS_THREAD)
1023 trace::printf ("%s() @%p %s\n", __func__, this, name ());
1024#endif
1025
1026 {
1027 // ----- Enter critical section -------------------------------------
1028 interrupts::critical_section ics;
1029
1030 // Remove this thread from the ready list, if there.
1031 ready_node_.unlink ();
1032
1033 state_ = state::suspended;
1034 // ----- Exit critical section --------------------------------------
1035 }
1036
1037 port::scheduler::reschedule ();
1038 }
1039
1040 void
1041 thread::internal_exit_ (void* exit_ptr)
1042 {
1043#if defined(OS_TRACE_RTOS_THREAD)
1044 trace::printf ("%s() @%p %s\n", __func__, this, name ());
1045#endif
1046
1047 // Don't call this from interrupt handlers.
1048 assert(!interrupts::in_handler_mode ());
1049
1050 {
1051 // ----- Enter critical section -------------------------------------
1052 scheduler::critical_section scs;
1053
1054 {
1055 // ----- Enter critical section ---------------------------------
1056 interrupts::critical_section ics;
1057
1058 ready_node_.unlink ();
1059
1060 child_links_.unlink ();
1061 // ----- Exit critical section ----------------------------------
1062 }
1063
1064 // There must be no child threads still alive.
1065 assert(children_.empty ());
1066 parent_ = nullptr;
1067
1068 func_ = nullptr;
1069 func_args_ = nullptr;
1070
1071 // There must be no more mutexes locked by this thread.
1072 assert(mutexes_.empty ());
1073 assert(acquired_mutexes_ == 0);
1074
1075 func_result_ = exit_ptr;
1076 // ----- Exit critical section --------------------------------------
1077 }
1078
1079 {
1080 // ----- Enter critical section -------------------------------------
1081 interrupts::critical_section ics;
1082
1083 // Add to a list of threads to be destroyed by the idle thread.
1084 // Also set state::terminated.
1085 scheduler::terminated_threads_list_.link (ready_node_);
1086 // ----- Exit critical section --------------------------------------
1087 }
1088
1089#if defined(OS_USE_RTOS_PORT_SCHEDULER)
1090
1091 port::thread::destroy_this (this);
1092 // Does not return if the current thread.
1093
1094#else
1095
1096 // At this point, since the thread state is no longer 'running',
1097 // the thread is no longer linked in the READY list.
1098 port::scheduler::reschedule ();
1099
1100#endif
1101
1102 assert(true);
1103 while (true)
1104 ;
1105
1106 // Definitely does not return.
1107 }
1108
1109 void
1110 thread::internal_check_stack_ (void)
1111 {
1112 if (stack ().size () > 0)
1113 {
1114 if (!stack ().check_bottom_magic () || !stack ().check_top_magic ())
1115 {
1116 trace::printf("%s() @%p %s\n", __func__, this, name ());
1117 assert(stack ().check_bottom_magic ());
1118 assert(stack ().check_top_magic ());
1119 }
1120
1121#if defined(OS_TRACE_RTOS_THREAD)
1122 trace::printf ("%s() @%p %s stack: %u/%u bytes used\n", __func__,
1123 this, name (),
1124 stack ().size () - stack ().available (),
1125 stack ().size ());
1126#endif
1127
1128 // Clear stack to avoid further checks
1129 stack ().clear ();
1130 }
1131 }
1132
1133 // Called from kill() and from idle thread.
1134 void
1135 thread::internal_destroy_ (void)
1136 {
1137#if defined(OS_TRACE_RTOS_THREAD)
1138 trace::printf ("%s() @%p %s\n", __func__, this, name ());
1139#endif
1140
1141 internal_check_stack_ ();
1142
1143 if (allocated_stack_address_ != nullptr)
1144 {
1145 typedef typename std::allocator_traits<allocator_type>::pointer pointer;
1146
1147 static_cast<allocator_type*> (const_cast<void*> (allocator_))->deallocate (
1148 reinterpret_cast<pointer> (allocated_stack_address_),
1149 allocated_stack_size_elements_);
1150
1151 allocated_stack_address_ = nullptr;
1152 }
1153
1154 {
1155 // ----- Enter critical section -------------------------------------
1156 scheduler::critical_section scs;
1157
1158 mutexes_list& mx_list = reinterpret_cast<mutexes_list&> (mutexes_);
1159 while (not mx_list.empty ())
1160 {
1161 auto* mx = mx_list.unlink_head ();
1162
1163 mx->internal_mark_owner_dead_ ();
1164
1165 // Unlock the mutex as owned by the thread itself.
1166 mx->internal_unlock_ (this);
1167 }
1168 // ----- Exit critical section --------------------------------------
1169 }
1170
1171 state_ = state::destroyed;
1172
1173 if (joiner_ != nullptr)
1174 {
1175 joiner_->resume ();
1176 }
1177 }
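// Note: internal_destroy_() checks the stack watermark, returns an
// allocator-provided stack, marks any mutexes still owned by the dying
// thread as having a dead owner and unlocks them, then wakes a thread
// blocked in join(), if any.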
1178
1191 result_t
1192 thread::kill (void)
1193 {
1194#if defined(OS_TRACE_RTOS_THREAD)
1195 trace::printf ("%s() @%p %s\n", __func__, this, name ());
1196#endif
1197
1198 // Don't call this from interrupt handlers.
1199 os_assert_err(!interrupts::in_handler_mode (), EPERM);
1200
1201 {
1202 // ----- Enter critical section -------------------------------------
1203 scheduler::critical_section scs;
1204
1205 if (state_ == state::destroyed)
1206 {
1207#if defined(OS_TRACE_RTOS_THREAD)
1208 trace::printf ("%s() @%p %s already gone\n", __func__, this,
1209 name ());
1210#endif
1211 return result::ok; // Already exited itself
1212 }
1213
1214 {
1215 // ----- Enter critical section ---------------------------------
1216 interrupts::critical_section ics;
1217
1218 // Remove thread from the funeral list and kill it here.
1219 ready_node_.unlink ();
1220
1221 // If the thread is waiting on an event, remove it from the list.
1222 if (waiting_node_ != nullptr)
1223 {
1224 waiting_node_->unlink ();
1225 }
1226
1227 // If the thread is waiting on a timeout, remove it from the list.
1228 if (clock_node_ != nullptr)
1229 {
1230 clock_node_->unlink ();
1231 }
1232
1233 child_links_.unlink ();
1234 // ----- Exit critical section ----------------------------------
1235 }
1236
1237 // There must be no more child threads alive.
1238 assert(children_.empty ());
1239 parent_ = nullptr;
1240
1241#if defined(OS_USE_RTOS_PORT_SCHEDULER)
1242
1243 port::thread::destroy_other (this);
1244
1245#endif
1246
1247 func_result_ = nullptr;
1248
1249 func_ = nullptr;
1250 func_args_ = nullptr;
1251
1252 internal_destroy_ ();
1253
1254 // There must be no mutexes locked by this thread.
1255 // Must have been cleaned before.
1256 assert(mutexes_.empty ());
1257 assert(acquired_mutexes_ == 0);
1258
1259 // ----- Exit critical section --------------------------------------
1260 }
1261
1262 return result::ok;
1263 }
1264
1273 result_t
1274 thread::flags_raise (flags::mask_t mask, flags::mask_t* oflags)
1275 {
1276#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1277 trace::printf ("%s(0x%X) @%p %s <0x%X\n", __func__, mask, this, name (),
1278 event_flags_.mask ());
1279#endif
1280
1281 result_t res = event_flags_.raise (mask, oflags);
1282
1283 this->resume ();
1284
1285#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1286 trace::printf ("%s(0x%X) @%p %s >0x%X\n", __func__, mask, this, name (),
1287 event_flags_.mask ());
1288#endif
1289
1290 return res;
1291 }
1292
1297 result_t
1298 thread::internal_flags_wait_ (flags::mask_t mask, flags::mask_t* oflags,
1299 flags::mode_t mode)
1300 {
1301#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1302 trace::printf ("%s(0x%X,%u) @%p %s <0x%X\n", __func__, mask, mode, this,
1303 name (), event_flags_.mask ());
1304#endif
1305
1306 // Don't call this from interrupt handlers.
1307 os_assert_err(!interrupts::in_handler_mode (), EPERM);
1308 // Don't call this from critical regions.
1309 os_assert_err(!scheduler::locked (), EPERM);
1310
1311 {
1312 // ----- Enter critical section ---------------------------------
1313 interrupts::critical_section ics;
1314
1315 if (event_flags_.check_raised (mask, oflags, mode))
1316 {
1317#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1318 trace::printf ("%s(0x%X,%u) @%p %s >0x%X\n", __func__, mask, mode,
1319 this, name (), event_flags_.mask ());
1320#endif
1321 return result::ok;
1322 }
1323 // ----- Exit critical section ----------------------------------
1324 }
1325
1326#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1327 clock::timestamp_t begin_timestamp = clock_->now ();
1328#endif
1329 for (;;)
1330 {
1331 {
1332 // ----- Enter critical section ---------------------------------
1333 interrupts::critical_section ics;
1334
1335 if (event_flags_.check_raised (mask, oflags, mode))
1336 {
1337#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1338 clock::duration_t slept_ticks =
1339 static_cast<clock::duration_t> (clock_->now ()
1340 - begin_timestamp);
1341 trace::printf ("%s(0x%X,%u) in %d @%p %s >0x%X\n", __func__,
1342 mask, mode, slept_ticks, this, name (),
1343 event_flags_.mask ());
1344#endif
1345 return result::ok;
1346 }
1347 // ----- Exit critical section ----------------------------------
1348 }
1349
1350 internal_suspend_ ();
1351
1352 if (interrupted ())
1353 {
1354#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1355 trace::printf ("%s(0x%X,%u) EINTR @%p %s\n", __func__, mask, mode,
1356 this, name ());
1357#endif
1358 return EINTR;
1359 }
1360 }
1361
1362 /* NOTREACHED */
1363 return ENOTRECOVERABLE;
1364 }
1365
1366 result_t
1367 thread::internal_flags_try_wait_ (flags::mask_t mask, flags::mask_t* oflags,
1368 flags::mode_t mode)
1369 {
1370#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1371 trace::printf ("%s(0x%X,%u) @%p %s <0x%X\n", __func__, mask, mode, this,
1372 name (), event_flags_.mask ());
1373#endif
1374
1375 // Don't call this from interrupt handlers.
1376 os_assert_err(!interrupts::in_handler_mode (), EPERM);
1377
1378 {
1379 // ----- Enter critical section -------------------------------------
1380 interrupts::critical_section ics;
1381
1382 if (event_flags_.check_raised (mask, oflags, mode))
1383 {
1384#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1385 trace::printf ("%s(0x%X,%u) @%p %s >0x%X\n", __func__, mask, mode,
1386 this, name (), event_flags_.mask ());
1387#endif
1388 return result::ok;
1389 }
1390 else
1391 {
1392#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1393 trace::printf ("%s(0x%X,%u) EWOULDBLOCK @%p %s \n", __func__,
1394 mask, mode, this, name ());
1395#endif
1396 return EWOULDBLOCK;
1397 }
1398 // ----- Exit critical section --------------------------------------
1399 }
1400 }
1401
1402 result_t
1403 thread::internal_flags_timed_wait_ (flags::mask_t mask,
1404 clock::duration_t timeout,
1405 flags::mask_t* oflags,
1406 flags::mode_t mode)
1407 {
1408#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1409 trace::printf ("%s(0x%X,%u,%u) @%p %s <0x%X\n", __func__, mask, timeout,
1410 mode, this, name (), event_flags_.mask ());
1411#endif
1412
1413 // Don't call this from interrupt handlers.
1414 os_assert_err(!interrupts::in_handler_mode (), EPERM);
1415 // Don't call this from critical regions.
1416 os_assert_err(!scheduler::locked (), EPERM);
1417
1418 {
1419 // ----- Enter critical section -------------------------------------
1420 interrupts::critical_section ics;
1421
1422 if (event_flags_.check_raised (mask, oflags, mode))
1423 {
1424#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1425 trace::printf ("%s(0x%X,%u,%u) @%p %s >0x%X\n", __func__, mask,
1426 timeout, mode, this, name (),
1427 event_flags_.mask ());
1428#endif
1429 return result::ok;
1430 }
1431 // ----- Exit critical section --------------------------------------
1432 }
1433
1434 internal::clock_timestamps_list& clock_list = clock_->steady_list ();
1435 clock::timestamp_t timeout_timestamp = clock_->steady_now () + timeout;
1436
1437#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1438 clock::timestamp_t begin_timestamp = clock_->steady_now ();
1439#endif
1440
1441 // Prepare a timeout node pointing to the current thread.
1442 internal::timeout_thread_node timeout_node
1443 { timeout_timestamp, *this };
1444
1445 for (;;)
1446 {
1447 {
1448 // ----- Enter critical section ---------------------------------
1449 interrupts::critical_section ics;
1450
1451 if (event_flags_.check_raised (mask, oflags, mode))
1452 {
1453#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1454 clock::duration_t slept_ticks =
1455 static_cast<clock::duration_t> (clock_->steady_now ()
1456 - begin_timestamp);
1457 trace::printf ("%s(0x%X,%u,%u) in %u @%p %s >0x%X\n",
1458 __func__, mask, timeout, mode,
1459 static_cast<unsigned int> (slept_ticks), this,
1460 name (), event_flags_.mask ());
1461#endif
1462 return result::ok;
1463 }
1464
1465 // Remove this thread from the ready list, if there.
1466 ready_node_.unlink ();
1467
1468 // Add this thread to the clock timeout list.
1469 clock_list.link (timeout_node);
1470 timeout_node.thread.clock_node_ = &timeout_node;
1471
1472 state_ = state::suspended;
1473 // ----- Exit critical section ----------------------------------
1474 }
1475
1476 port::scheduler::reschedule ();
1477
1478 {
1479 // ----- Enter critical section ---------------------------------
1480 interrupts::critical_section ics;
1481
1482 // Remove the thread from the clock timeout list,
1483 // if not already removed by the timer.
1484 timeout_node.thread.clock_node_ = nullptr;
1485 timeout_node.unlink ();
1486 // ----- Exit critical section ----------------------------------
1487 }
1488
1489 if (interrupted ())
1490 {
1491#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1492 trace::printf ("%s(0x%X,%u,%u) EINTR @%p %s\n", __func__, mask,
1493 timeout, mode, this, name ());
1494#endif
1495 return EINTR;
1496 }
1497
1498 if (clock_->steady_now () >= timeout_timestamp)
1499 {
1500#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1501 trace::printf ("%s(0x%X,%u,%u) ETIMEDOUT @%p %s\n", __func__,
1502 mask, timeout, mode, this, name ());
1503#endif
1504 return ETIMEDOUT;
1505 }
1506 }
1507
1508 return ENOTRECOVERABLE;
1509 }
1510
1522 flags::mask_t
1523 thread::internal_flags_get_ (flags::mask_t mask, flags::mode_t mode)
1524 {
1525#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1526 trace::printf ("%s(0x%X) @%p %s\n", __func__, mask, this, name ());
1527#endif
1528
1529 // Don't call this from interrupt handlers.
1531
1532 flags::mask_t ret = event_flags_.get (mask, mode);
1533
1534#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1535 trace::printf ("%s(0x%X)=0x%X @%p %s\n", __func__, mask,
1536 event_flags_.mask (), this, name ());
1537#endif
1538 // Return the selected bits.
1539 return ret;
1540 }
1541
1545 result_t
1546 thread::internal_flags_clear_ (flags::mask_t mask, flags::mask_t* oflags)
1547 {
1548#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1549 trace::printf ("%s(0x%X) @%p %s <0x%X\n", __func__, mask, this, name (),
1550 event_flags_.mask ());
1551#endif
1552
1553 // Don't call this from interrupt handlers.
1554 os_assert_err(!interrupts::in_handler_mode (), EPERM);
1555
1556 result_t res = event_flags_.clear (mask, oflags);
1557
1558#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
1559 trace::printf ("%s(0x%X) @%p %s >0x%X\n", __func__, mask, this, name (),
1560 event_flags_.mask ());
1561#endif
1562 return res;
1563 }
1564
1569 // ------------------------------------------------------------------------
1575 namespace this_thread
1576 {
1577
1582 rtos::thread*
1583 _thread (void)
1584 {
1585 rtos::thread* th;
1586
1587#if defined(OS_USE_RTOS_PORT_SCHEDULER)
1588
1589 th = port::this_thread::thread ();
1590
1591#else
1592
1593 th = scheduler::current_thread_;
1594
1595#endif
1596 return th;
1597 }
1598
1606 rtos::thread&
1607 thread (void)
1608 {
1609 // Don't call this from interrupt handlers.
1610 os_assert_throw(!interrupts::in_handler_mode (), EPERM);
1611
1612 rtos::thread* th;
1613
1614 th = _thread ();
1615
1616 // Could not get the current thread.
1617 assert(th != nullptr);
1618 return (*th);
1619 }
1620
1627 void
1628 yield (void)
1629 {
1630 // Don't call this from interrupt handlers.
1631 os_assert_throw(!interrupts::in_handler_mode (), EPERM);
1632
1633 if (!scheduler::started ())
1634 {
1635#if defined(OS_TRACE_RTOS_THREAD_CONTEXT)
1636 trace::printf ("%s() nop %s \n", __func__, _thread ()->name ());
1637#endif
1638 return;
1639 }
1640
1641#if defined(OS_TRACE_RTOS_THREAD_CONTEXT)
1642 trace::printf ("%s() from %s\n", __func__, _thread ()->name ());
1643#endif
1644
1645#if defined(OS_USE_RTOS_PORT_SCHEDULER)
1646
1647 port::this_thread::yield ();
1648
1649#else
1650
1651 port::scheduler::reschedule ();
1652
1653#endif
1654
1655#if defined(OS_TRACE_RTOS_THREAD_CONTEXT)
1656 trace::printf ("%s() to %s\n", __func__, _thread ()->name ());
1657#endif
1658 }
1659
1660 } /* namespace this_thread */
1661
1662 // --------------------------------------------------------------------------
1663 } /* namespace rtos */
1664} /* namespace os */
1665
1666// ----------------------------------------------------------------------------