#pragma clang diagnostic ignored "-Wc++98-compat"

std::size_t thread::stack::min_size_bytes_ = port::stack::min_size_bytes;

std::size_t thread::stack::default_size_bytes_ =
    port::stack::default_size_bytes;

using mutexes_list = utils::intrusive_list<
    mutex, utils::double_list_links, &mutex::owner_links_>;
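// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: the two static members
// above back the thread::stack::min_size () / default_size () accessors.
// The setter overloads used here are assumptions inferred from those
// members; check the os-thread.h declarations before relying on them.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

void
configure_default_stack (void) // hypothetical helper, run before any thread is created
{
  // Keep the default stack comfortably above the port minimum.
  if (thread::stack::default_size () < 2 * thread::stack::min_size ())
    {
      thread::stack::default_size (2 * thread::stack::min_size ());
    }
}
// --------------------------------------------------------------------------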
// thread::stack::initialize () — align the pointers and fill the area with
// the magic pattern.
void* pa = bottom_address_;
// …
for (; p < pend; ++p)
// …
size_bytes_ = ((static_cast<std::size_t> (p - bottom_address_) - 1)
// …

// thread::stack::available () — compute how much available stack remains.
std::size_t count = 0;
// Thread trampoline: run the thread function, catch any top-level exception,
// then pass the result to internal_exit_ ().
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
#if defined(__EXCEPTIONS)
  // …
  catch (std::exception& e)
    {
      trace::printf ("%s() @%p %s top exception \"%s\".\n", __func__,
      // …
    }
#endif
  // …
  thread->internal_exit_ (exit_ptr);
  // …

#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
#if defined(OS_TRACE_RTOS_THREAD)
    { nullptr, function, args, attr, allocator }
  // …
#if defined(OS_TRACE_RTOS_THREAD)
  trace::printf ("%s() @%p %s\n", __func__, this, this->name ());
#endif

  if (attr.th_enable_assert_reuse)
    {
      // …
      assert((state_ == state::undefined) || (state_ == state::destroyed));
      // …
    }

  state_ = state::initializing;

  allocator_ = &allocator;

  if (attr.th_stack_address != nullptr
      && attr.th_stack_size_bytes > stack::min_size ())
    {
      // The stack is supplied through the attributes; nothing to allocate.
      internal_construct_ (function, args, attr, nullptr, 0);
    }
  else
    {
      using allocator_type2 = memory::allocator<stack::allocation_element_t>;

      if (attr.th_stack_size_bytes > stack::min_size ())
        {
          // Round the requested size up to a multiple of allocation elements.
          allocated_stack_size_elements_ = (attr.th_stack_size_bytes
              + sizeof(stack::allocation_element_t) - 1)
              / sizeof(stack::allocation_element_t);
        }
      else
        {
          allocated_stack_size_elements_ = (stack::default_size ()
              + sizeof(stack::allocation_element_t) - 1)
              / sizeof(stack::allocation_element_t);
        }

      allocated_stack_address_ = reinterpret_cast<stack::element_t*> (
          const_cast<allocator_type2&> (allocator).allocate (
              allocated_stack_size_elements_));
      assert(allocated_stack_address_ != nullptr);

      internal_construct_ (function, args, attr,
                           allocated_stack_address_,
                           allocated_stack_size_elements_
                               * sizeof(stack::allocation_element_t));
    }
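// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: the two construction paths
// above (stack supplied via attributes vs. stack taken from the allocator)
// as seen from application code. blink_task, app_stack and the thread names
// are hypothetical; the attribute members match those used above.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

void*
blink_task (void* args)
{
  // …do the work…
  return nullptr;
}

// Storage for the statically configured thread stack.
static thread::stack::allocation_element_t app_stack[400];

void
create_threads (void)
{
  // 1) Default attributes: the stack comes from the RTOS allocator, its size
  //    rounded up to allocation elements as in the code above.
  static thread th1
    { "blink", blink_task, nullptr };

  // 2) Explicit attributes: the caller provides the stack storage, so
  //    internal_construct_ () is called with a null allocated stack.
  thread::attributes attr;
  attr.th_stack_address = app_stack;
  attr.th_stack_size_bytes = sizeof(app_stack);
  attr.th_priority = thread::priority::high;

  static thread th2
    { "worker", blink_task, nullptr, attr };
}
// --------------------------------------------------------------------------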
void
thread::internal_construct_ (func_t function, func_args_t args,
                             const attributes& attr, void* stack_address,
                             std::size_t stack_size_bytes)
{
  // …
  assert(function != nullptr);
  // …
  clock_ = attr.clock != nullptr ? attr.clock : &sysclock;

  if (stack_address != nullptr)
    {
      // The attributes should not also define a stack in this case.
      assert(attr.th_stack_address == nullptr);
      // …
    }
  // …
      attr.th_stack_size_bytes);
  // …

#if defined(OS_TRACE_RTOS_THREAD)
  trace::printf (/* … */
      attr.th_priority, stack ().bottom_address_,
      stack ().size_bytes_);
#endif
  // …
  // Prevent the new thread from running before all members are set.
  scheduler::critical_section scs;
  // …
  prio_assigned_ = attr.th_priority;
  // …
  parent_ = this_thread::_thread ();
  // …
  parent_->children_.link (*this);
  // …
  // Make the thread visible to the scheduler.
  scheduler::top_threads_list_.link (*this);
  // …

#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  // …
  port::thread::create (this);
  // …

#pragma GCC diagnostic push
#if defined(__clang__)
#pragma clang diagnostic ignored "-Wc++98-compat-pedantic"
#endif
  // Create the port context that will start execution in the trampoline.
  port::context::create (
      &context_, reinterpret_cast<void*> (internal_invoke_with_exit_),
      /* … */);
#pragma GCC diagnostic pop
  // …
  scheduler::current_thread_ = this;
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …

#if defined(OS_TRACE_RTOS_THREAD)
  trace::printf ("%s() @%p %s nop, cannot commit suicide\n", __func__,
  // …
#endif
// thread::resume () — make the thread eligible to run again.
#if defined(OS_TRACE_RTOS_THREAD_CONTEXT)
  // …
#endif

#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  // …
  port::thread::resume (this);
#else
  // …
  // Check that the interrupt priority allows RTOS calls.
  assert(port::interrupts::is_priority_valid ());
  // …
  // Link to the READY list only if not already linked.
  if (ready_node_.next () == nullptr)
    {
      scheduler::ready_threads_list_.link (ready_node_);
      // …
    }
// thread::priority () — with no inheritance, report the assigned value.
return prio_assigned_;
// …
// Otherwise the effective priority is the higher of the two.
return (prio_inherited_ >= prio_assigned_) ? prio_inherited_ : prio_assigned_;

// thread::priority_inherited ()
return prio_inherited_;
// thread::priority (prio) — change the assigned priority.
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
  // Assigning the same priority again is a no-op.
  if (prio_assigned_ == prio)
  // …
  prio_assigned_ = prio;
  // …

#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  // …
  res = port::thread::priority (this, prio);
  // …

  // Re-insert the thread, so the READY list stays ordered by priority.
  ready_node_.unlink ();
  scheduler::ready_threads_list_.link (ready_node_);
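// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: reading and changing
// priorities from application code. The thread reference th is hypothetical;
// the getter/setter names and result::ok follow the functions above.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

void
boost_priority (thread& th)
{
  // priority () reports the effective value: the inherited priority wins
  // when it is higher than the assigned one (see the getters above).
  thread::priority_t before = th.priority ();

  // The setter stores the new assigned priority and repositions the thread
  // in the READY list.
  if (th.priority (thread::priority::high) == result::ok)
    {
      trace::printf ("priority raised from %u to %u\n", before, th.priority ());
    }
}
// --------------------------------------------------------------------------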
// thread::priority_inherited (prio) — used by the mutex priority-inheritance
// protocol to boost or restore the thread priority.
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
  if (prio == prio_inherited_)
  // …
  prio_inherited_ = prio;

  // An inherited priority below the assigned one does not change the
  // effective priority.
  if (prio_inherited_ < prio_assigned_)
  // …

#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  // …
  res = port::thread::priority (this, prio);
  // …

  // Reposition the thread in the READY list.
  ready_node_.unlink ();
  scheduler::ready_threads_list_.link (ready_node_);
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …

#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  // …
  result_t res = port::thread::detach (this);
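// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: a detached thread is not
// expected to be join ()ed; its resources are reclaimed when it terminates.
// logger_task and the thread name are hypothetical.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

void*
logger_task (void* args)
{
  // …run until done…
  return nullptr;
}

void
start_detached_logger (void)
{
  // The thread object must outlive the thread, hence 'static' here.
  static thread th
    { "logger", logger_task, nullptr };

  // After detach (), nobody has to call join () on this thread.
  if (th.detach () != result::ok)
    {
      trace::printf ("detach() failed\n");
    }
}
// --------------------------------------------------------------------------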
// thread::join () — suspend the caller until this thread terminates, then
// optionally return the thread function result through exit_ptr.
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
  // A thread cannot join itself.
  assert(this != this_thread::_thread ());
  // …
  joiner_ = this_thread::_thread ();
  this_thread::_thread ()->internal_suspend_ ();
  // …

#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
  if (exit_ptr != nullptr)
    {
      *exit_ptr = func_result_;
    }
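// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: join () suspends the caller
// until the thread terminates and, as shown above, copies func_result_ into
// *exit_ptr. compute_task and the thread name are hypothetical.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

void*
compute_task (void* args)
{
  static int answer = 42;
  return &answer; // becomes the join () result
}

void
wait_for_result (void)
{
  thread th
    { "compute", compute_task, nullptr };

  void* exit_ptr = nullptr;
  if (th.join (&exit_ptr) == result::ok)
    {
      int result_value = *static_cast<int*> (exit_ptr);
      trace::printf ("worker returned %d\n", result_value);
    }
}
// --------------------------------------------------------------------------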
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …

// thread::interrupt () — remember the previous flag before updating it.
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
  bool tmp = interrupted_;
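// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: interrupt () sets the flag
// saved above and may wake the thread, so a blocking call (for example the
// flag waits further below) returns EINTR instead of blocking forever. The
// returned bool is presumably the previous state of the flag.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

void
stop_worker (thread& th) // th is a hypothetical worker thread
{
  bool was_interrupted = th.interrupt (true);
  (void) was_interrupted;

  // The worker is expected to notice the EINTR return code and exit cleanly.
}
// --------------------------------------------------------------------------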
void
thread::internal_suspend_ (void)
{
#if defined(OS_TRACE_RTOS_THREAD)
void
thread::internal_exit_ (void* exit_ptr)
{
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
  scheduler::critical_section scs;
  // …
    {
      interrupts::critical_section ics;
      // …
      ready_node_.unlink ();
      // …
      // Remove this thread from the parent's list of children.
      child_links_.unlink ();
      // …
    }

  assert(children_.empty ());
  // …
  func_args_ = nullptr;
  // …
  // By now the thread should not own any mutexes.
  assert(mutexes_.empty ());
  assert(acquired_mutexes_ == 0);
  // …
  // The thread function result, later returned by join ().
  func_result_ = exit_ptr;
  // …
    {
      interrupts::critical_section ics;
      // …
      // Move the thread to the list of terminated threads.
      scheduler::terminated_threads_list_.link (ready_node_);
      // …
    }

#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  // …
  port::thread::destroy_this (this);
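// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: internal_exit_ () above is
// reached either when the thread function returns (via the trampoline shown
// earlier) or through an explicit exit. this_thread::exit () is assumed from
// the usual µOS++ public API; returning from the function is always enough.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

void*
short_task (void* args)
{
  static int status = 0;

  if (args == nullptr)
    {
      // Explicit early termination; the pointer becomes the join () result.
      this_thread::exit (&status);
    }

  // Normal path: the returned pointer is passed to internal_exit_ ().
  return &status;
}
// --------------------------------------------------------------------------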
void
thread::internal_check_stack_ (void)
{
  if (stack ().size () > 0)
    {
      // The unused part of the stack should still hold the magic pattern.
      if (!stack ().check_bottom_magic () || !stack ().check_top_magic ())
        // …
      assert(stack ().check_bottom_magic ());
      assert(stack ().check_top_magic ());

#if defined(OS_TRACE_RTOS_THREAD)
      trace::printf ("%s() @%p %s stack: %u/%u bytes used\n", __func__,
                     // …
                     stack ().size () - stack ().available (),
                     // …
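// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: the same watermark that
// internal_check_stack_ () prints can be queried from application code,
// because the unused part of the stack still holds the magic pattern.

#include <cmsis-plus/rtos/os.h>

#include <cstddef>

using namespace os;
using namespace os::rtos;

void
report_stack_usage (thread& th) // hypothetical monitoring helper
{
  auto& stk = th.stack ();

  std::size_t total = stk.size ();
  std::size_t unused = stk.available ();

  trace::printf ("%s stack: %u/%u bytes used\n", th.name (),
                 static_cast<unsigned int> (total - unused),
                 static_cast<unsigned int> (total));
}
// --------------------------------------------------------------------------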
void
thread::internal_destroy_ (void)
{
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …
  internal_check_stack_ ();
  // …
  if (allocated_stack_address_ != nullptr)
    {
      // Return the dynamically allocated stack to its allocator.
      typedef typename std::allocator_traits<allocator_type>::pointer pointer;

      static_cast<allocator_type*> (const_cast<void*> (allocator_))->deallocate (
          reinterpret_cast<pointer> (allocated_stack_address_),
          allocated_stack_size_elements_);
      // …
      allocated_stack_address_ = nullptr;
    }
  // …
  scheduler::critical_section scs;
  // …
  // Mutexes still owned by the dying thread are marked 'owner dead' and
  // unlocked, so threads waiting on them are not blocked forever.
  while (not mx_list.empty ())
    {
      auto* mx = mx_list.unlink_head ();
      // …
      mx->internal_mark_owner_dead_ ();
      // …
      mx->internal_unlock_ (this);
    }
  // …
  // Wake up the thread (if any) blocked in join ().
  if (joiner_ != nullptr)
// thread::kill () — forced termination of another thread.
#if defined(OS_TRACE_RTOS_THREAD)
  // …
#endif
  // …

#if defined(OS_TRACE_RTOS_THREAD)
  trace::printf ("%s() @%p %s already gone\n", __func__, this,
                 name ());
#endif
  // …

  // Remove the thread from all the lists it may still be linked to.
  ready_node_.unlink ();
  // …
  if (waiting_node_ != nullptr)
    {
      waiting_node_->unlink ();
    }
  // …
  if (clock_node_ != nullptr)
    {
      clock_node_->unlink ();
    }
  // …
  child_links_.unlink ();
  // …
  assert(children_.empty ());
  // …

#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  // …
  port::thread::destroy_other (this);
  // …
  func_result_ = nullptr;
  // …
  func_args_ = nullptr;
  // …
  internal_destroy_ ();
  // …
  assert(mutexes_.empty ());
  assert(acquired_mutexes_ == 0);
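// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: kill () forcibly terminates
// another thread; as the code above shows, the victim is unlinked from all
// lists and internal_destroy_ () releases its stack and handles any mutexes
// it still owns. Use it only as a last resort, after a cooperative shutdown.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

void
force_stop (thread& misbehaving) // hypothetical supervisor code
{
  // Prefer a cooperative shutdown first.
  misbehaving.interrupt (true);

  // …give it a chance to exit, then terminate it unconditionally.
  if (misbehaving.kill () != result::ok)
    {
      trace::printf ("kill() failed for %s\n", misbehaving.name ());
    }
}
// --------------------------------------------------------------------------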
// thread::flags_raise () — raise bits in the thread event flags.
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  // …
      event_flags_.mask ());
#endif
  // …
  result_t res = event_flags_.raise (mask, oflags);

#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  // …
      event_flags_.mask ());
#endif
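// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: flags_raise () is the
// producer side of the thread event flags; it can be called from another
// thread (and, in the usual µOS++ model, from interrupt handlers) to wake a
// thread blocked in one of the waits below. The flag value is arbitrary.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

constexpr flags::mask_t data_ready_flag = 0x01;

void
notify_consumer (thread& consumer)
{
  if (consumer.flags_raise (data_ready_flag) != result::ok)
    {
      trace::printf ("failed to raise flags\n");
    }
}
// --------------------------------------------------------------------------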
// Wait, potentially forever, until the requested flags are raised.
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  trace::printf ("%s(0x%X,%u) @%p %s <0x%X\n", __func__, mask, mode, this,
                 name (), event_flags_.mask ());
#endif
  // …

  // Fast check: the condition may already be satisfied.
  if (event_flags_.check_raised (mask, oflags, mode))
    {
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
      trace::printf ("%s(0x%X,%u) @%p %s >0x%X\n", __func__, mask, mode,
                     this, name (), event_flags_.mask ());
#endif
      // …
    }
  // …

#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  // …
#endif
  // …

  // Slow path: re-check with interrupts disabled, then suspend and retry.
    {
      interrupts::critical_section ics;
      // …
      if (event_flags_.check_raised (mask, oflags, mode))
        {
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
          // …
          trace::printf ("%s(0x%X,%u) in %d @%p %s >0x%X\n", __func__,
                         mask, mode, slept_ticks, this, name (),
                         event_flags_.mask ());
#endif
          // …
        }
      // …
    }

  internal_suspend_ ();
  // …

  // If the thread was interrupted while suspended, report EINTR.
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  trace::printf ("%s(0x%X,%u) EINTR @%p %s\n", __func__, mask, mode,
                 this, name ());
#endif
  // …

  // Safety net; normally a result is returned from inside the loop.
  return ENOTRECOVERABLE;
// Non-blocking variant: either the flags are already raised, or the call
// fails immediately with EWOULDBLOCK.
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  trace::printf ("%s(0x%X,%u) @%p %s <0x%X\n", __func__, mask, mode, this,
                 name (), event_flags_.mask ());
#endif
  // …
    {
      interrupts::critical_section ics;
      // …
      if (event_flags_.check_raised (mask, oflags, mode))
        {
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
          trace::printf ("%s(0x%X,%u) @%p %s >0x%X\n", __func__, mask, mode,
                         this, name (), event_flags_.mask ());
#endif
          // …
        }
      // …
    }

#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  trace::printf ("%s(0x%X,%u) EWOULDBLOCK @%p %s \n", __func__,
                 mask, mode, this, name ());
#endif
// Timed variant: wait until the flags are raised or the timeout expires.
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  trace::printf ("%s(0x%X,%u,%u) @%p %s <0x%X\n", __func__, mask, timeout,
                 mode, this, name (), event_flags_.mask ());
#endif
  // …
    {
      interrupts::critical_section ics;
      // …
      if (event_flags_.check_raised (mask, oflags, mode))
        {
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
          trace::printf ("%s(0x%X,%u,%u) @%p %s >0x%X\n", __func__, mask,
                         timeout, mode, this, name (),
                         event_flags_.mask ());
#endif
          // …
        }
      // …
    }

  // Timeouts are managed on the steady list of the thread's clock.
  internal::clock_timestamps_list& clock_list = clock_->steady_list ();
  // …

#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  // …
#endif
  // …

  // The timeout node lives on the stack of the waiting thread.
  internal::timeout_thread_node timeout_node
    { timeout_timestamp, *this };
  // …
    {
      interrupts::critical_section ics;
      // …
      if (event_flags_.check_raised (mask, oflags, mode))
        {
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
          trace::printf (/* … */
              __func__, mask, timeout, mode,
              static_cast<unsigned int> (slept_ticks),
              this, name (), event_flags_.mask ());
#endif
          // …
        }
      // …

      // Arm the timeout before suspending.
      clock_list.link (timeout_node);
      timeout_node.thread.clock_node_ = &timeout_node;
      // …
    }
  // …
    {
      interrupts::critical_section ics;
      // …
      // Disarm the timeout after waking up.
      timeout_node.thread.clock_node_ = nullptr;
      timeout_node.unlink ();
      // …
    }
  // …

#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  trace::printf ("%s(0x%X,%u,%u) EINTR @%p %s\n", __func__, mask,
                 timeout, mode, this, name ());
#endif
  // …

  if (clock_->steady_now () >= timeout_timestamp)
    {
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
      trace::printf ("%s(0x%X,%u,%u) ETIMEDOUT @%p %s\n", __func__,
                     mask, timeout, mode, this, name ());
#endif
      // …
    }
  // …

  // Safety net; normally a result is returned from inside the loop.
  return ENOTRECOVERABLE;
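// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: the three internal waits
// above back the public this_thread::flags_wait () / flags_try_wait () /
// flags_timed_wait () calls; the exact public names and the mode constants
// are assumptions based on the usual µOS++ API.

#include <cmsis-plus/rtos/os.h>

#include <cerrno>

using namespace os;
using namespace os::rtos;

constexpr flags::mask_t data_ready_flag = 0x01;

void*
consumer_task (void* args)
{
  for (;;)
    {
      flags::mask_t received = 0;

      // Block until the flag is raised (cleared automatically on return).
      result_t res = this_thread::flags_wait (data_ready_flag, &received,
                                              flags::mode::all
                                                  | flags::mode::clear);
      if (res == result::ok)
        {
          // …consume the data…
        }
      else if (res == EINTR)
        {
          // Interrupted via thread::interrupt (); leave the loop.
          break;
        }

      // A bounded wait: give up after ~1000 clock ticks.
      res = this_thread::flags_timed_wait (data_ready_flag, 1000);
      if (res == ETIMEDOUT)
        {
          trace::printf ("no data within the timeout\n");
        }
    }
  return nullptr;
}
// --------------------------------------------------------------------------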
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  // …
#endif
  // …

#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  // …
      event_flags_.mask (), this, name ());
#endif
  // …

// Clear bits in the thread event flags.
#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  // …
      event_flags_.mask ());
#endif
  // …
  result_t res = event_flags_.clear (mask, oflags);

#if defined(OS_TRACE_RTOS_THREAD_FLAGS)
  // …
      event_flags_.mask ());
#endif
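// --------------------------------------------------------------------------
// Illustrative sketch, not part of this listing: clearing flags explicitly is
// only needed when a wait was issued without the 'clear' mode. The
// this_thread::flags_clear () name and the meaning of its second argument
// (the mask before clearing) are assumptions based on the usual µOS++ API.

#include <cmsis-plus/rtos/os.h>

using namespace os;
using namespace os::rtos;

constexpr flags::mask_t data_ready_flag = 0x01;

void
acknowledge_event (void)
{
  flags::mask_t previous = 0;
  if (this_thread::flags_clear (data_ready_flag, &previous) == result::ok)
    {
      trace::printf ("flags before clearing: 0x%X\n", previous);
    }
}
// --------------------------------------------------------------------------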
// this_thread::_thread () — the current thread, taken either from the port
// layer or from the portable scheduler.
#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  th = port::this_thread::thread ();
#else
  // …
  th = scheduler::current_thread_;
#endif
  // …
  assert(th != nullptr);
// this_thread::yield () — give the CPU to the next ready thread.
#if defined(OS_TRACE_RTOS_THREAD_CONTEXT)
  trace::printf ("%s() nop %s \n", __func__, _thread ()->name ());
#endif
  // …

#if defined(OS_TRACE_RTOS_THREAD_CONTEXT)
  trace::printf ("%s() from %s\n", __func__, _thread ()->name ());
#endif
  // …

#if defined(OS_USE_RTOS_PORT_SCHEDULER)
  // …

#if defined(OS_TRACE_RTOS_THREAD_CONTEXT)
  trace::printf ("%s() to %s\n", __func__, _thread ()->name ());
#endif