µOS++ IIIe Reference v6.3.15
“Perfect is not good enough”
The third edition of µOS++, a POSIX-inspired open-source system written in C++.
os-mutex.cpp
1 /*
2  * This file is part of the µOS++ distribution.
3  * (https://github.com/micro-os-plus)
4  * Copyright (c) 2016 Liviu Ionescu.
5  *
6  * Permission is hereby granted, free of charge, to any person
7  * obtaining a copy of this software and associated documentation
8  * files (the "Software"), to deal in the Software without
9  * restriction, including without limitation the rights to use,
10  * copy, modify, merge, publish, distribute, sublicense, and/or
11  * sell copies of the Software, and to permit persons to whom
12  * the Software is furnished to do so, subject to the following
13  * conditions:
14  *
15  * The above copyright notice and this permission notice shall be
16  * included in all copies or substantial portions of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
20  * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
21  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
22  * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
23  * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25  * OTHER DEALINGS IN THE SOFTWARE.
26  */
27 
28 #include <cmsis-plus/rtos/os.h>
29 
30 // ----------------------------------------------------------------------------
31 
32 namespace os
33 {
34  namespace rtos
35  {
36  // ------------------------------------------------------------------------
37 
287  const mutex::attributes mutex::initializer_normal;
288 
293  const mutex::attributes_recursive mutex::initializer_recursive;
294 
295  // ------------------------------------------------------------------------
296 
297  using mutexes_list = utils::intrusive_list<
298  mutex, utils::double_list_links, &mutex::owner_links_>;
299 
300  // ------------------------------------------------------------------------
301 
431  mutex::mutex (const attributes& attr) :
432  mutex
433  { nullptr, attr }
434  {
435  ;
436  }
437 
465  mutex::mutex (const char* name, const attributes& attr) :
466  object_named_system
467  { name }, //
468  type_ (attr.mx_type), //
469  protocol_ (attr.mx_protocol), //
470  robustness_ (attr.mx_robustness), //
471  max_count_ ((attr.mx_type == type::recursive) ? attr.mx_max_count : 1)
472  {
473 #if defined(OS_TRACE_RTOS_MUTEX)
474  trace::printf ("%s() @%p %s\n", __func__, this, this->name ());
475 #endif
476 
477  // Don't call this from interrupt handlers.
478  os_assert_throw(!interrupts::in_handler_mode (), EPERM);
479 
480  os_assert_throw(type_ <= type::max_, EINVAL);
481  os_assert_throw(protocol_ <= protocol::max_, EINVAL);
482  os_assert_throw(robustness_ <= robustness::max_, EINVAL);
483 
484 #if !defined(OS_USE_RTOS_PORT_MUTEX)
485  clock_ = attr.clock != nullptr ? attr.clock : &sysclock;
486 #endif
487 
488  os_assert_throw(attr.mx_priority_ceiling >= thread::priority::lowest,
489  EINVAL);
490  os_assert_throw(attr.mx_priority_ceiling <= thread::priority::highest,
491  EINVAL);
492 
493  initial_prio_ceiling_ = attr.mx_priority_ceiling;
494  prio_ceiling_ = attr.mx_priority_ceiling;
495 
496 #if defined(OS_USE_RTOS_PORT_MUTEX)
497 
498  count_ = 0;
499  port::mutex::create (this);
500 
501 #else
502 
503  internal_init_ ();
504 
505 #endif
506  }
507 
525  mutex::~mutex ()
526  {
527 #if defined(OS_TRACE_RTOS_MUTEX)
528  trace::printf ("%s() @%p %s\n", __func__, this, name ());
529 #endif
530 
531 #if defined(OS_USE_RTOS_PORT_MUTEX)
532 
533  port::mutex::destroy (this);
534 
535 #else
536 
537  // The mutex must have no owner (must have been unlocked).
538  assert(owner_ == nullptr);
539  // There must be no threads waiting for this mutex.
540  assert(list_.empty ());
541 
542 #endif
543  }
544 
549  void
550  mutex::internal_init_ (void)
551  {
552  owner_ = nullptr;
553  owner_links_.unlink ();
554  count_ = 0;
555  prio_ceiling_ = initial_prio_ceiling_;
556  boosted_prio_ = thread::priority::none;
557  owner_dead_ = false;
558  consistent_ = true;
559  recoverable_ = true;
560 
561 #if !defined(OS_USE_RTOS_PORT_MUTEX)
562 
563  // Wake-up all threads, if any.
564  // Need not be inside the critical section,
565  // the list is protected by inner `resume_one()`.
566  list_.resume_all ();
567 
568 #endif
569  }
570 
571  /*
572  * Internal function.
573  * Should be called from a scheduler critical section.
574  */
575  result_t
576  mutex::internal_try_lock_ (class thread* th)
577  {
578  // Save the initial owner for later protocol tests.
579  thread* saved_owner = owner_;
580 
581  // First lock.
582  if (owner_ == nullptr)
583  {
584  // If the mutex has no owner, own it.
585  owner_ = th;
586 
587  // For recursive mutexes, initialise counter.
588  count_ = 1;
589 
590  // Add mutex to the thread list.
591  mutexes_list* th_list =
592  reinterpret_cast<mutexes_list*> (&owner_->mutexes_);
593  th_list->link (*this);
594 
595  // Count the number of mutexes acquired by the thread.
596  ++(owner_->acquired_mutexes_);
597 
598  if (protocol_ == protocol::protect)
599  {
600  if (th->priority () > prio_ceiling_)
601  {
602  // No need to keep the lock.
603  owner_ = nullptr;
604 
605  // Prio ceiling must be at least the priority of the
606  // highest priority thread.
607  return EINVAL;
608  }
609 
610  // POSIX: When a thread owns one or more mutexes
611  // initialised with the mutex::protocol::protect protocol,
612  // it shall execute at the higher of its priority or the
613  // highest of the priority ceilings of all the mutexes
614  // owned by this thread and initialised with this
615  // attribute, regardless of whether other threads are
616  // blocked on any of these robust mutexes or not.
617 
618  // Boost priority.
619  boosted_prio_ = prio_ceiling_;
620  if (boosted_prio_ > owner_->priority_inherited ())
621  {
622  // ----- Enter uncritical section ---------------------------
623  scheduler::uncritical_section sucs;
624 
625  owner_->priority_inherited (boosted_prio_);
626  // ----- Exit uncritical section ----------------------------
627  }
628  }
629 
630 #if defined(OS_TRACE_RTOS_MUTEX)
631  trace::printf ("%s() @%p %s by %p %s LCK\n", __func__, this, name (),
632  th, th->name ());
633 #endif
634  // If the owning thread of a robust mutex terminates while
635  // holding the mutex lock, the next thread that acquires the
636  // mutex may be notified about the termination by the return
637  // value EOWNERDEAD.
638  if (owner_dead_)
639  {
640  // TODO: decide if the lock must be preserved.
641  return EOWNERDEAD;
642  }
643  return result::ok;
644  }
645 
646  // Relock? (lock by the same thread).
647  if (saved_owner == th)
648  {
649  // The mutex was requested again by the same thread.
650  if (type_ == type::recursive)
651  {
652  if (count_ >= max_count_)
653  {
654  // The recursive mutex reached its limit.
655 #if defined(OS_TRACE_RTOS_MUTEX)
656  trace::printf ("%s() @%p %s EAGAIN\n", __func__, this,
657  name ());
658 #endif
659  return EAGAIN;
660  }
661 
662  // Increment the recursion depth counter.
663  ++count_;
664 
665 #if defined(OS_TRACE_RTOS_MUTEX)
666  trace::printf ("%s() @%p %s by %p %s >%u\n", __func__, this,
667  name (), th, th->name (), count_);
668 #endif
669  return result::ok;
670  }
671  else if (type_ == type::errorcheck)
672  {
673  // Errorcheck mutexes do not block, but return an error.
674 #if defined(OS_TRACE_RTOS_MUTEX)
675  trace::printf ("%s() @%p %s EDEADLK\n", __func__, this, name ());
676 #endif
677  return EDEADLK;
678  }
679  else if (type_ == type::normal)
680  {
681 #if defined(OS_TRACE_RTOS_MUTEX)
682  trace::printf ("%s() @%p %s deadlock\n", __func__, this, name ());
683 #endif
684  return EWOULDBLOCK;
685  }
686 
687  return EWOULDBLOCK;
688  }
689  else
690  {
691  // Try to lock when not owner (another thread requested the mutex).
692 
693  // POSIX: When a thread makes a call to mutex::lock(), the mutex was
694  // initialised with the protocol attribute having the value
695  // mutex::protocol::inherit, when the calling thread is blocked
696  // because the mutex is owned by another thread, that owner thread
697  // shall inherit the priority level of the calling thread as long
698  // as it continues to own the mutex. The implementation shall
699  // update its execution priority to the maximum of its assigned
700  // priority and all its inherited priorities.
701  // Furthermore, if this owner thread itself becomes blocked on
702  // another mutex with the protocol attribute having the value
703  // mutex::protocol::inherit, the same priority inheritance effect
704  // shall be propagated to this other owner thread, in a recursive
705  // manner.
706  if (protocol_ == protocol::inherit)
707  {
708  thread::priority_t prio = th->priority ();
709  boosted_prio_ = prio;
710 
711  if (owner_links_.unlinked ())
712  {
713  mutexes_list* th_list =
714  reinterpret_cast<mutexes_list*> (&owner_->mutexes_);
715  th_list->link (*this);
716  }
717 
718  // Boost owner priority.
719  if ((boosted_prio_ > owner_->priority_inherited ()))
720  {
721  // ----- Enter uncritical section ---------------------------
722  scheduler::uncritical_section sucs;
723 
724  owner_->priority_inherited (boosted_prio_);
725  // ----- Exit uncritical section ----------------------------
726  }
727 
728 #if defined(OS_TRACE_RTOS_MUTEX)
729  trace::printf ("%s() @%p %s boost %u by %p %s \n", __func__, this,
730  name (), boosted_prio_, th, th->name ());
731 #endif
732 
733  return EWOULDBLOCK;
734  }
735  }
736 
737  // Block anyway.
738  return EWOULDBLOCK;
739  }
740 
741  result_t
742  mutex::internal_unlock_ (thread* th)
743  {
744  if (!recoverable_)
745  {
746  return ENOTRECOVERABLE;
747  }
748 
749  {
750  // ----- Enter critical section -------------------------------------
751  scheduler::critical_section scs;
752 
753  // Is the rightful owner?
754  if (owner_ == th)
755  {
756  if ((type_ == type::recursive) && (count_ > 1))
757  {
758  --count_;
759 #if defined(OS_TRACE_RTOS_MUTEX)
760  trace::printf ("%s() @%p %s >%u\n", __func__, this, name (),
761  count_);
762 #endif
763  return result::ok;
764  }
765 
766  --(owner_->acquired_mutexes_);
767 
768  // Remove this mutex from the thread list; ineffective if
769  // not linked.
770  owner_links_.unlink ();
771 
772  if (boosted_prio_ != thread::priority::none)
773  {
774  mutexes_list* thread_mutexes =
775  reinterpret_cast<mutexes_list*> (&owner_->mutexes_);
776 
777  if (thread_mutexes->empty ())
778  {
779  // If the owner thread has no more mutexes,
780  // clear the inherited priority,
781  // and the assigned priority will take precedence.
782  boosted_prio_ = thread::priority::none;
783  }
784  else
785  {
786  // If the owner thread acquired other mutexes too,
787  // compute the maximum boosted priority.
788  thread::priority_t max_prio = 0;
789  for (auto&& mx : *thread_mutexes)
790  {
791  if (mx.boosted_prio_ > max_prio)
792  {
793  max_prio = mx.boosted_prio_;
794  }
795  }
796  boosted_prio_ = max_prio;
797  }
798  // Delayed until end of critical section.
799  owner_->priority_inherited (boosted_prio_);
800  }
801 
802  // Delayed until end of critical section.
803  list_.resume_one ();
804 
805  // Finally release the mutex.
806  owner_ = nullptr;
807  count_ = 0;
808 
809 #if defined(OS_TRACE_RTOS_MUTEX)
810  trace::printf ("%s() @%p %s ULCK\n", __func__, this, name ());
811 #endif
812 
813  // POSIX: If a robust mutex whose owner died is unlocked without
814  // a call to consistent(), it shall be in a permanently
815  // unusable state and all attempts to lock the mutex
816  // shall fail with the error ENOTRECOVERABLE.
817 
818  if (owner_dead_)
819  {
820  owner_dead_ = false;
821 
822  if (!consistent_)
823  {
824  recoverable_ = false;
825  return ENOTRECOVERABLE;
826  }
827  }
828 
829  return result::ok;
830  }
831 
832  // Not owner, or not locked.
833  if (type_ == type::errorcheck || type_ == type::recursive
834  || robustness_ == robustness::robust)
835  {
836 #if defined(OS_TRACE_RTOS_MUTEX)
837  trace::printf ("%s() EPERM @%p %s \n", __func__, this, name ());
838 #endif
839  return EPERM;
840  }
841 
842  // Unlocking a normal non-robust mutex owned by another
843  // thread has undefined behaviour.
844 
845 #if defined(OS_TRACE_RTOS_MUTEX)
846  trace::printf ("%s() ENOTRECOVERABLE @%p %s \n", __func__, this,
847  name ());
848 #endif
849  return ENOTRECOVERABLE;
850  // ----- Exit critical section --------------------------------------
851  }
852  }
853 
854  // Called from thread termination, in a critical section.
855  void
856  mutex::internal_mark_owner_dead_ (void)
857  {
858  // May return error if not the rightful owner.
859  trace::printf ("%s() @%p %s\n", __func__, this, name ());
860 
861  if (robustness_ == mutex::robustness::robust)
862  {
863  // If the owning thread of a robust mutex terminates
864  // while holding the mutex lock, the next thread that
865  // acquires the mutex may be notified about the termination
866  // by the return value EOWNERDEAD.
867  owner_dead_ = true;
868  consistent_ = false;
869  }
870  }
871 
917  result_t
918  mutex::lock (void)
919  {
920 #if defined(OS_TRACE_RTOS_MUTEX)
921  trace::printf ("%s() @%p %s by %p %s\n", __func__, this, name (),
922  &this_thread::thread (), this_thread::thread ().name ());
923 #endif
924 
925  // Don't call this from interrupt handlers.
926  os_assert_err(!interrupts::in_handler_mode (), EPERM);
927  // Don't call this while the scheduler is locked.
928  os_assert_err(!scheduler::locked (), EPERM);
929 
930  if (!recoverable_)
931  {
932  return ENOTRECOVERABLE;
933  }
934 
935 #if defined(OS_USE_RTOS_PORT_MUTEX)
936 
937  return port::mutex::lock (this);
938 
939 #else
940 
941  thread& crt_thread = this_thread::thread ();
942 
943  result_t res;
944  {
945  // ----- Enter critical section -------------------------------------
946  scheduler::critical_section scs;
947 
948  res = internal_try_lock_ (&crt_thread);
949  if (res != EWOULDBLOCK)
950  {
951  return res;
952  }
953  // ----- Exit critical section --------------------------------------
954  }
955 
956  // Prepare a list node pointing to the current thread.
957  // Do not worry for being on stack, it is temporarily linked to the
958  // list and guaranteed to be removed before this function returns.
959  internal::waiting_thread_node node
960  { crt_thread };
961 
962  for (;;)
963  {
964  {
965  // ----- Enter critical section ---------------------------------
966  scheduler::critical_section scs;
967 
968  res = internal_try_lock_ (&crt_thread);
969  if (res != EWOULDBLOCK)
970  {
971  return res;
972  }
973 
974  {
975  // ----- Enter critical section -----------------------------
976  interrupts::critical_section ics;
977 
978  // Add this thread to the mutex waiting list.
979  scheduler::internal_link_node (list_, node);
980  // state::suspended set in above link().
981  // ----- Exit critical section ------------------------------
982  }
983  // ----- Exit critical section ----------------------------------
984  }
985 
986  port::scheduler::reschedule ();
987 
988  // Remove the thread from the mutex waiting list,
989  // if not already removed by unlock().
990  scheduler::internal_unlink_node (node);
991 
992  if (crt_thread.interrupted ())
993  {
994 #if defined(OS_TRACE_RTOS_MUTEX)
995  trace::printf ("%s() EINTR @%p %s\n", __func__, this, name ());
996 #endif
997  return EINTR;
998  }
999  }
1000 
1001  /* NOTREACHED */
1002  return ENOTRECOVERABLE;
1003 
1004 #endif
1005  }
1006 
1038  result_t
1039  mutex::try_lock (void)
1040  {
1041 #if defined(OS_TRACE_RTOS_MUTEX)
1042  trace::printf ("%s() @%p %s by %p %s\n", __func__, this, name (),
1043  &this_thread::thread (), this_thread::thread ().name ());
1044 #endif
1045 
1046  // Don't call this from interrupt handlers.
1047  os_assert_err(!interrupts::in_handler_mode (), EPERM);
1048 
1049  if (!recoverable_)
1050  {
1051  return ENOTRECOVERABLE;
1052  }
1053 
1054 #if defined(OS_USE_RTOS_PORT_MUTEX)
1055 
1056  return port::mutex::try_lock (this);
1057 
1058 #else
1059 
1060  thread& crt_thread = this_thread::thread ();
1061 
1062  {
1063  // ----- Enter critical section -------------------------------------
1064  scheduler::critical_section scs;
1065 
1066  return internal_try_lock_ (&crt_thread);
1067  // ----- Exit critical section --------------------------------------
1068  }
1069 
1070 #endif
1071  }
1072 
1114  result_t
1115  mutex::timed_lock (clock::duration_t timeout)
1116  {
1117 #if defined(OS_TRACE_RTOS_MUTEX)
1118  trace::printf ("%s(%u) @%p %s by %p %s\n", __func__,
1119  static_cast<unsigned int> (timeout), this, name (),
1120  &this_thread::thread (), this_thread::thread ().name ());
1121 #endif
1122 
1123  // Don't call this from interrupt handlers.
1124  os_assert_err(!interrupts::in_handler_mode (), EPERM);
1125  // Don't call this while the scheduler is locked.
1126  os_assert_err(!scheduler::locked (), EPERM);
1127 
1128  if (!recoverable_)
1129  {
1130  return ENOTRECOVERABLE;
1131  }
1132 
1133 #if defined(OS_USE_RTOS_PORT_MUTEX)
1134 
1135  return port::mutex::timed_lock (this, timeout);
1136 
1137 #else
1138 
1139  thread& crt_thread = this_thread::thread ();
1140 
1141  result_t res;
1142 
1143  // Extra test before entering the loop, with its inherent weight.
1144  // Trade size for speed.
1145  {
1146  // ----- Enter critical section -------------------------------------
1147  scheduler::critical_section scs;
1148 
1149  res = internal_try_lock_ (&crt_thread);
1150  if (res != EWOULDBLOCK)
1151  {
1152  return res;
1153  }
1154  // ----- Exit critical section --------------------------------------
1155  }
1156 
1157  // Prepare a list node pointing to the current thread.
1158  // Do not worry for being on stack, it is temporarily linked to the
1159  // list and guaranteed to be removed before this function returns.
1160  internal::waiting_thread_node node
1161  { crt_thread };
1162 
1163  internal::clock_timestamps_list& clock_list = clock_->steady_list ();
1164  clock::timestamp_t timeout_timestamp = clock_->steady_now () + timeout;
1165 
1166  // Prepare a timeout node pointing to the current thread.
1167  internal::timeout_thread_node timeout_node
1168  { timeout_timestamp, crt_thread };
1169 
1170  for (;;)
1171  {
1172  {
1173  // ----- Enter critical section ---------------------------------
1174  scheduler::critical_section scs;
1175 
1176  res = internal_try_lock_ (&crt_thread);
1177  if (res != EWOULDBLOCK)
1178  {
1179  return res;
1180  }
1181 
1182  {
1183  // ----- Enter critical section -----------------------------
1184  interrupts::critical_section ics;
1185 
1186  // Add this thread to the mutex waiting list,
1187  // and the clock timeout list.
1188  scheduler::internal_link_node (list_, node, clock_list,
1189  timeout_node);
1190  // state::suspended set in above link().
1191  // ----- Exit critical section ------------------------------
1192  }
1193  // ----- Exit critical section ----------------------------------
1194  }
1195 
1196  port::scheduler::reschedule ();
1197 
1198  // Remove the thread from the mutex waiting list,
1199  // if not already removed by unlock() and from the clock
1200  // timeout list, if not already removed by the timer.
1201  scheduler::internal_unlink_node (node, timeout_node);
1202 
1203  res = result::ok;
1204 
1205  if (crt_thread.interrupted ())
1206  {
1207 #if defined(OS_TRACE_RTOS_MUTEX)
1208  trace::printf ("%s() EINTR @%p %s \n", __func__, this, name ());
1209 #endif
1210  res = EINTR;
1211  }
1212  else if (clock_->steady_now () >= timeout_timestamp)
1213  {
1214 #if defined(OS_TRACE_RTOS_MUTEX)
1215  trace::printf ("%s() ETIMEDOUT @%p %s \n", __func__, this,
1216  name ());
1217 #endif
1218  res = ETIMEDOUT;
1219  }
1220  if (res != result::ok)
1221  {
1222  if (boosted_prio_ != thread::priority::none)
1223  {
1224  // If the priority was boosted, it must be restored
1225  // to the highest priority of the waiting threads, if any.
1226 
1227  thread::priority_t max_prio = thread::priority::none;
1228 
1229  for (auto&& th : list_)
1230  {
1231  thread::priority_t prio = th.priority ();
1232  if (prio > max_prio)
1233  {
1234  max_prio = prio;
1235  }
1236  }
1237 
1238  if (max_prio != thread::priority::none)
1239  {
1240  boosted_prio_ = max_prio;
1241  owner_->priority (boosted_prio_);
1242  }
1243  }
1244  return res;
1245  }
1246  }
1247 
1248  /* NOTREACHED */
1249  return ENOTRECOVERABLE;
1250 
1251 #endif
1252  }
1253 
1274  result_t
1275  mutex::unlock (void)
1276  {
1277 #if defined(OS_TRACE_RTOS_MUTEX)
1278  trace::printf ("%s() @%p %s by %p %s\n", __func__, this, name (),
1279  &this_thread::thread (), this_thread::thread ().name ());
1280 #endif
1281 
1282  // Don't call this from interrupt handlers.
1283  os_assert_err(!interrupts::in_handler_mode (), EPERM);
1284 
1285 #if defined(OS_USE_RTOS_PORT_MUTEX)
1286 
1287  return port::mutex::unlock (this);
1288 
1289 #else
1290 
1291  thread* crt_thread = &this_thread::thread ();
1292 
1293  return internal_unlock_ (crt_thread);
1294 
1295 #endif
1296  }
1297 
1309  thread::priority_t
1310  mutex::prio_ceiling (void) const
1311  {
1312 #if defined(OS_TRACE_RTOS_MUTEX)
1313  trace::printf ("%s() @%p %s\n", __func__, this, name ());
1314 #endif
1315 
1316  // Don't call this from interrupt handlers.
1317  assert(!interrupts::in_handler_mode ());
1318 
1319 #if defined(OS_USE_RTOS_PORT_MUTEX)
1320 
1321  return port::mutex::prio_ceiling (this);
1322 
1323 #else
1324 
1325  return prio_ceiling_;
1326 
1327 #endif
1328  }
1329 
1350  result_t
1351  mutex::prio_ceiling (thread::priority_t prio_ceiling,
1352  thread::priority_t* old_prio_ceiling)
1353  {
1354 #if defined(OS_TRACE_RTOS_MUTEX)
1355  trace::printf ("%s() @%p %s\n", __func__, this, name ());
1356 #endif
1357 
1358  // Don't call this from interrupt handlers.
1359  os_assert_err(!interrupts::in_handler_mode (), EPERM);
1360 
1361 #if defined(OS_USE_RTOS_PORT_MUTEX)
1362 
1363  return port::mutex::prio_ceiling (this, prio_ceiling, old_prio_ceiling);
1364 
1365 #else
1366 
1367  // TODO: lock() must not adhere to the priority protocol.
1368  result_t res = lock ();
1369  if (res != result::ok)
1370  {
1371  return res;
1372  }
1373 
1374  if (old_prio_ceiling != nullptr)
1375  {
1376  *old_prio_ceiling = prio_ceiling_;
1377  }
1378 
1379  prio_ceiling_ = prio_ceiling;
1380 
1381  unlock ();
1382 
1383  return result::ok;
1384 
1385 #endif
1386  }
1387 
1416  result_t
1417  mutex::consistent (void)
1418  {
1419 #if defined(OS_TRACE_RTOS_MUTEX)
1420  trace::printf ("%s() @%p %s\n", __func__, this, name ());
1421 #endif
1422 
1423  // Don't call this from interrupt handlers.
1424  os_assert_err(!interrupts::in_handler_mode (), EPERM);
1425  // Don't call this for non-robust mutexes.
1426  os_assert_err(robustness_ == robustness::robust, EINVAL);
1427  // Don't call it if already consistent.
1428  os_assert_err(!consistent_, EINVAL);
1429 
1430 #if defined(OS_USE_RTOS_PORT_MUTEX)
1431 
1432  return port::mutex::consistent (this);
1433 
1434 #else
1435 
1436  // Update status to consistent.
1437  consistent_ = true;
1438  return result::ok;
1439 
1440 #endif
1441  }
1442 
1453  result_t
1454  mutex::reset (void)
1455  {
1456 #if defined(OS_TRACE_RTOS_MUTEX)
1457  trace::printf ("%s() @%p %s\n", __func__, this, name ());
1458 #endif
1459 
1460  // Don't call this from interrupt handlers.
1461  os_assert_err(!interrupts::in_handler_mode (), EPERM);
1462 
1463  {
1464  // ----- Enter critical section -------------------------------------
1465  scheduler::critical_section scs;
1466 
1467  internal_init_ ();
1468  return result::ok;
1469  // ----- Exit critical section --------------------------------------
1470  }
1471 
1472  }
1473 
1474  // ==========================================================================
1475 
1491  // --------------------------------------------------------------------------
1492  } /* namespace rtos */
1493 } /* namespace os */
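
The listing above closes the implementation. As a usage illustration only (not part of os-mutex.cpp), the sketch below shows how the public API defined in this file (the attribute initialisers, lock(), timed_lock(), unlock()) might be used from thread context with a recursive mutex; the function name, mutex name and timeout value are hypothetical.

#include <cmsis-plus/rtos/os.h>

using namespace os::rtos;

// A recursive mutex; the owning thread may lock it repeatedly,
// up to the mx_max_count configured in the attributes.
static mutex nested_mx { "nested", mutex::initializer_recursive };

// Hypothetical helper, called from a thread (not from an interrupt
// handler, as the asserts in lock()/timed_lock() require).
void
update_shared_state (void)
{
  // Wait at most 100 clock ticks for the mutex.
  if (nested_mx.timed_lock (100) != result::ok)
    {
      return; // ETIMEDOUT, EINTR or a robustness related error
    }

  nested_mx.lock (); // same owner: only the recursion counter increases

  // ... update the data protected by the mutex ...

  nested_mx.unlock (); // decrements the recursion counter
  nested_mx.unlock (); // releases the mutex and resumes one waiting thread
}

// A priority protocol is configured the same way, through the attributes
// fields validated by the constructor, for example:
//   mutex::attributes attr;
//   attr.mx_protocol = mutex::protocol::inherit;   // or protocol::protect
//   mutex inherited_mx { "inherited", attr };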
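
The robust-mutex path documented above (internal_mark_owner_dead_(), the EOWNERDEAD result from the lock functions, and the consistent()/ENOTRECOVERABLE handling in internal_unlock_()) can be summarised with the recovery sketch below; it is an assumption of typical usage, the helper name is invented, and it is not part of the original file.

#include <cerrno>

#include <cmsis-plus/rtos/os.h>

using namespace os::rtos;

// Hypothetical helper: acquire a robust mutex shared between several
// threads and recover if the previous owner terminated while holding it.
// On success the caller owns the mutex and must unlock() it later.
result_t
acquire_or_recover (mutex& mx)
{
  result_t res = mx.lock ();

  if (res == EOWNERDEAD)
    {
      // The previous owner died while holding the lock; the data
      // protected by the mutex may be inconsistent.
      // ... repair the protected data here ...

      // Mark the mutex consistent again; otherwise the next unlock()
      // leaves it permanently unusable and all further lock() calls
      // fail with ENOTRECOVERABLE.
      mx.consistent ();
      res = result::ok;
    }

  return res;
}

// The shared mutex itself would be created with robustness enabled, e.g.:
//   mutex::attributes attr;
//   attr.mx_robustness = mutex::robustness::robust;
//   mutex shared_mx { "shared", attr };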