tlk: 6/19 update
[3rdparty/ote_partner/tlk.git] / kernel/thread.c
/*
 * Copyright (c) 2008-2009 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 * @brief  Kernel threading
 *
 * This file is the core kernel threading interface.
 *
 * @defgroup thread Threads
 * @{
 */
#include <debug.h>
#include <assert.h>
#include <list.h>
#include <malloc.h>
#include <string.h>
#include <err.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/dpc.h>
#include <platform.h>
#include <target.h>
#include <kernel/task_unload.h>

#if DEBUGLEVEL > 1
#define THREAD_CHECKS 1
#endif

#if THREAD_STATS
struct thread_stats thread_stats;
#endif

/* global thread list */
static struct list_node thread_list;

/* the current thread */
thread_t *current_thread;

/* the global critical section count */
int critical_section_count;

/* the run queue */
static struct list_node run_queue[NUM_PRIORITIES];
static uint32_t run_queue_bitmap;

/* the bootstrap thread (statically allocated) */
static thread_t bootstrap_thread;

/* the idle thread */
thread_t *idle_thread;

/* local routines */
static void thread_resched(void);
static void idle_thread_routine(void) __NO_RETURN;

#if PLATFORM_HAS_DYNAMIC_TIMER
/* preemption timer */
static timer_t preempt_timer;
#endif

/* run queue manipulation */
static void insert_in_run_queue_head(thread_t *t)
{
#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(t->state == THREAD_READY);
        ASSERT(!list_in_list(&t->queue_node));
        ASSERT(in_critical_section());
#endif

        list_add_head(&run_queue[t->priority], &t->queue_node);
        run_queue_bitmap |= (1<<t->priority);
}

static void insert_in_run_queue_tail(thread_t *t)
{
#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(t->state == THREAD_READY);
        ASSERT(!list_in_list(&t->queue_node));
        ASSERT(in_critical_section());
#endif

        list_add_tail(&run_queue[t->priority], &t->queue_node);
        run_queue_bitmap |= (1<<t->priority);
}

static void init_thread_struct(thread_t *t, const char *name)
{
        memset(t, 0, sizeof(thread_t));
        t->magic = THREAD_MAGIC;
        strlcpy(t->name, name, sizeof(t->name));
}

/**
 * @brief  Create a new thread
 *
 * This function creates a new thread.  The thread is initially suspended, so you
 * need to call thread_resume() to execute it.
 *
 * @param  name        Name of thread
 * @param  entry       Entry point of thread
 * @param  arg         Arbitrary argument passed to entry()
 * @param  priority    Execution priority for the thread.
 * @param  stack_size  Stack size for the thread.
 *
 * Thread priority is an integer from 0 (lowest) to 31 (highest).  Some standard
 * priorities are defined in <kernel/thread.h>:
 *
 *      HIGHEST_PRIORITY
 *      DPC_PRIORITY
 *      HIGH_PRIORITY
 *      DEFAULT_PRIORITY
 *      LOW_PRIORITY
 *      IDLE_PRIORITY
 *      LOWEST_PRIORITY
 *
 * Stack size is typically set to DEFAULT_STACK_SIZE
 *
 * @return  Pointer to thread object, or NULL on failure.
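 *
 * Usage sketch (illustrative only; worker_entry() is a hypothetical entry
 * routine, not part of this file):
 * @code
 *      static int worker_entry(void *arg)
 *      {
 *              // ... do work ...
 *              return 0;
 *      }
 *
 *      thread_t *t = thread_create("worker", &worker_entry, NULL,
 *                                  DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *      if (t)
 *              thread_resume(t);
 * @endcode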
 */
thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
{
        thread_t *t;

        t = malloc(sizeof(thread_t));
        if (!t)
                return NULL;

        init_thread_struct(t, name);

        t->entry = entry;
        t->arg = arg;
        t->priority = priority;
        t->saved_critical_section_count = 1; /* we always start inside a critical section */
        t->state = THREAD_SUSPENDED;
        t->blocking_wait_queue = NULL;
        t->wait_queue_block_ret = NO_ERROR;

        /* create the stack */
        t->stack = malloc(stack_size);
        if (!t->stack) {
                free(t);
                return NULL;
        }

        t->stack_size = stack_size;

        /* inherit thread local storage from the parent */
        int i;
        for (i=0; i < MAX_TLS_ENTRY; i++)
                t->tls[i] = current_thread->tls[i];

        /* set up the initial stack frame */
        arch_thread_initialize(t);

        /* add it to the global thread list */
        enter_critical_section();
        list_add_head(&thread_list, &t->thread_list_node);
        exit_critical_section();

        return t;
}

/**
 * @brief  Make a suspended thread executable.
 *
 * This function is typically called to start a thread which has just been
 * created with thread_create()
 *
 * @param t  Thread to resume
 *
 * @return NO_ERROR on success, ERR_NOT_SUSPENDED if thread was not suspended.
 */
status_t thread_resume(thread_t *t)
{
#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(t->state != THREAD_DEATH);
#endif

        if (t->state == THREAD_READY || t->state == THREAD_RUNNING)
                return ERR_NOT_SUSPENDED;

        enter_critical_section();
        t->state = THREAD_READY;
        insert_in_run_queue_head(t);
        thread_yield();
        exit_critical_section();

        return NO_ERROR;
}

static void thread_cleanup_dpc(void *thread)
{
        thread_t *t = (thread_t *)thread;

//      dprintf(SPEW, "thread_cleanup_dpc: thread %p (%s)\n", t, t->name);

#if THREAD_CHECKS
        ASSERT(t->state == THREAD_DEATH);
        ASSERT(t->blocking_wait_queue == NULL);
        ASSERT(!list_in_list(&t->queue_node));
#endif

        /* remove it from the master thread list */
        enter_critical_section();
        list_delete(&t->thread_list_node);
        exit_critical_section();

#if HAVE_UNLOAD_TASKS != 0
        /* If this thread is associated with a task, deal with it */
        if (t->arch.task)
                task_thread_killed(t);
#endif /* HAVE_UNLOAD_TASKS != 0 */

        /* free its stack and the thread structure itself */
        if (t->stack)
                free(t->stack);

        free(t);
}

/* Remove thread from queue if it is there. */
static void thread_remove_from_queue(thread_t *t)
{
#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(in_critical_section());
#endif

        /* kick thread out of wait queue (if there)... */
        (void)thread_unblock_from_wait_queue(t, false, NO_ERROR);

        if (list_in_list(&t->queue_node)) {
                list_delete(&t->queue_node);

                if (list_is_empty(&run_queue[t->priority]))
                        run_queue_bitmap &= ~(1<<t->priority);
        }
}

/**
 * @brief  Terminate a thread
 *
 * Specified thread is terminated.
 */
void thread_kill(thread_t *thread)
{
        if (!thread)
                return;

        if (thread == current_thread) {
                thread_exit(0);
                panic("somehow fell through thread_kill(self)\n");
        }

#if THREAD_CHECKS
        ASSERT(thread->magic == THREAD_MAGIC);
#endif

        enter_critical_section();

        /* Kick thread out of any wait/run/priority queue (if there)... */
        thread_remove_from_queue(thread);

        /* ...and enter the dead state... */
        thread->state   = THREAD_DEATH;
        thread->retcode = 0;
        thread->remaining_quantum = 0;

        exit_critical_section();

        /* ...and finally cleanup the thread resources */
        thread_cleanup_dpc(thread);
}

/**
 * @brief  Suspend the current thread
 *
 * The current thread suspends and is made runnable again by thread_resume(),
 * similar to an initial thread_create() / thread_resume() sequence.
 *
 * This function does not return.
 */
void thread_suspend()
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

//      dprintf("thread_suspend: current %p\n", current_thread);

        enter_critical_section();
        current_thread->state = THREAD_SUSPENDED;
        current_thread->remaining_quantum = 0;
        thread_resched();

        panic("somehow fell through thread_suspend()\n");
}

/**
 * @brief  Terminate the current thread
 *
 * Current thread exits with the specified return code.
 *
 * This function does not return.
 */
void thread_exit(int retcode)
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

//      dprintf("thread_exit: current %p\n", current_thread);

        enter_critical_section();

        /* enter the dead state */
        current_thread->state = THREAD_DEATH;
        current_thread->retcode = retcode;

        /* schedule a dpc to clean ourselves up */
        dpc_queue(thread_cleanup_dpc, (void *)current_thread, DPC_FLAG_NORESCHED);

        /* reschedule */
        thread_resched();

        panic("somehow fell through thread_exit()\n");
}

static void idle_thread_routine(void)
{
        for(;;)
#if WITH_PLATFORM_IDLE
                platform_idle();
#else
                arch_idle();
#endif
}

/**
 * @brief  Cause another thread to be executed.
 *
 * Internal reschedule routine. The current thread needs to already be in whatever
 * state and queues it needs to be in. This routine simply picks the next thread and
 * switches to it.
 *
 * This is probably not the function you're looking for. See
 * thread_yield() instead.
 */
void thread_resched(void)
{
        thread_t *oldthread;
        thread_t *newthread;

//      printf("thread_resched: current %p: ", current_thread);
//      dump_thread(current_thread);

#if THREAD_CHECKS
        ASSERT(in_critical_section());
#endif

        THREAD_STATS_INC(reschedules);

        oldthread = current_thread;

        // at the moment, can't deal with more than 32 priority levels
        ASSERT(NUM_PRIORITIES <= 32);

        // should at least find the idle thread
#if THREAD_CHECKS
        ASSERT(run_queue_bitmap != 0);
#endif

        int next_queue = HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
        //dprintf(SPEW, "bitmap 0x%x, next %d\n", run_queue_bitmap, next_queue);

        newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);

#if THREAD_CHECKS
        ASSERT(newthread);
#endif

        if (list_is_empty(&run_queue[next_queue]))
                run_queue_bitmap &= ~(1<<next_queue);

#if 0
        // XXX make this more efficient
        newthread = NULL;
        for (i=HIGHEST_PRIORITY; i >= LOWEST_PRIORITY; i--) {
                newthread = list_remove_head_type(&run_queue[i], thread_t, queue_node);
                if (newthread)
                        break;
        }
#endif

//      printf("newthread: ");
//      dump_thread(newthread);

        newthread->state = THREAD_RUNNING;

        if (newthread == oldthread)
                return;

        /* set up quantum for the new thread if it was consumed */
        if (newthread->remaining_quantum <= 0) {
                newthread->remaining_quantum = 5; // XXX make this smarter
        }

#if THREAD_STATS
        THREAD_STATS_INC(context_switches);

        if (oldthread == idle_thread) {
                lk_bigtime_t now = current_time_hires();
                thread_stats.idle_time += now - thread_stats.last_idle_timestamp;
        }
        if (newthread == idle_thread) {
                thread_stats.last_idle_timestamp = current_time_hires();
        }
#endif

#if THREAD_CHECKS
        ASSERT(critical_section_count > 0);
        ASSERT(newthread->saved_critical_section_count > 0);
#endif

#if PLATFORM_HAS_DYNAMIC_TIMER
        /* if we're switching from idle to a real thread, set up a periodic
         * timer to run our preemption tick.
         */
        if (oldthread == idle_thread) {
                timer_set_periodic(&preempt_timer, 10, (timer_callback)thread_timer_tick, NULL);
        } else if (newthread == idle_thread) {
                timer_cancel(&preempt_timer);
        }
#endif

        /* set some optional target debug leds */
        target_set_debug_led(0, newthread != idle_thread);

        /* do the switch */
        oldthread->saved_critical_section_count = critical_section_count;
        current_thread = newthread;
        critical_section_count = newthread->saved_critical_section_count;
        arch_context_switch(oldthread, newthread);
}

/**
 * @brief Yield the cpu to another thread
 *
 * This function places the current thread at the end of the run queue
 * and yields the cpu to another waiting thread (if any).
 *
 * This function will return at some later time, possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_yield(void)
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

        enter_critical_section();

        THREAD_STATS_INC(yields);

        /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
        current_thread->state = THREAD_READY;
        current_thread->remaining_quantum = 0;
        insert_in_run_queue_tail(current_thread);
        thread_resched();

        exit_critical_section();
}

/**
 * @brief  Briefly yield cpu to another thread
 *
 * This function is similar to thread_yield(), except that it will
 * restart more quickly.
 *
 * This function places the current thread at the head of the run
 * queue and then yields the cpu to another thread.
 *
 * Exception:  If the time slice for this thread has expired, then
 * the thread goes to the end of the run queue.
 *
 * This function will return at some later time, possibly immediately if
 * no other threads are waiting to execute.
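 *
 * Relationship to the timer tick (illustrative sketch only; the exact
 * platform hook-up varies, and platform_tick_handler() is hypothetical,
 * not part of this file):
 * @code
 *      static enum handler_return platform_tick_handler(void *arg)
 *      {
 *              // thread_timer_tick() burns one unit of the running thread's
 *              // quantum and returns INT_RESCHEDULE once it is used up; the
 *              // interrupt return path typically reacts by calling
 *              // thread_preempt().
 *              return thread_timer_tick();
 *      }
 * @endcode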
 */
void thread_preempt(void)
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

        enter_critical_section();

#if THREAD_STATS
        if (current_thread != idle_thread)
                THREAD_STATS_INC(preempts); /* only track when a meaningful preempt happens */
#endif

        /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
        current_thread->state = THREAD_READY;
        if (current_thread->remaining_quantum > 0)
                insert_in_run_queue_head(current_thread);
        else
                insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
        thread_resched();

        exit_critical_section();
}

/**
 * @brief  Suspend thread until woken.
 *
 * This function schedules another thread to execute.  This function does not
 * return until the thread is made runnable again by some other module.
 *
 * You probably don't want to call this function directly; it's meant to be called
 * from other modules, such as mutex, which will presumably set the thread's
 * state to blocked and add it to some queue or another.
 */
void thread_block(void)
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_BLOCKED);
#endif

        enter_critical_section();

        /* we are blocking on something. the blocking code should have already stuck us on a queue */
        thread_resched();

        exit_critical_section();
}

enum handler_return thread_timer_tick(void)
{
        if (current_thread == idle_thread)
                return INT_NO_RESCHEDULE;

        current_thread->remaining_quantum--;
        if (current_thread->remaining_quantum <= 0)
                return INT_RESCHEDULE;
        else
                return INT_NO_RESCHEDULE;
}

/* timer callback to wake up a sleeping thread */
static enum handler_return thread_sleep_handler(timer_t *timer, lk_time_t now, void *arg)
{
        thread_t *t = (thread_t *)arg;

#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(t->state == THREAD_SLEEPING);
#endif

        t->state = THREAD_READY;
        insert_in_run_queue_head(t);

        return INT_RESCHEDULE;
}

/**
 * @brief  Put thread to sleep; delay specified in ms
 *
 * This function puts the current thread to sleep until the specified
 * delay in ms has expired.
 *
 * Note that this function could sleep for longer than the specified delay if
 * other threads are running.  When the timer expires, this thread will
 * be placed at the head of the run queue.
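 *
 * Usage sketch (illustrative only):
 * @code
 *      // block the calling thread for roughly 250 ms
 *      thread_sleep(250);
 * @endcode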
 */
void thread_sleep(lk_time_t delay)
{
        timer_t timer;

#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

        timer_initialize(&timer);

        enter_critical_section();
        timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
        current_thread->state = THREAD_SLEEPING;
        thread_resched();
        exit_critical_section();
}

/**
 * @brief  Initialize threading system
 *
 * This function is called once, from kmain()
 */
void thread_init_early(void)
{
        int i;

        /* initialize the run queues */
        for (i=0; i < NUM_PRIORITIES; i++)
                list_initialize(&run_queue[i]);

        /* initialize the thread list */
        list_initialize(&thread_list);

        /* create a thread to cover the current running state */
        thread_t *t = &bootstrap_thread;
        init_thread_struct(t, "bootstrap");

        /* half construct this thread, since we're already running */
        t->priority = HIGHEST_PRIORITY;
        t->state = THREAD_RUNNING;
        t->saved_critical_section_count = 1;
        list_add_head(&thread_list, &t->thread_list_node);
        current_thread = t;
}

/**
 * @brief Complete thread initialization
 *
 * This function is called once at boot time
 */
void thread_init(void)
{
        arch_thread_initialize(current_thread);
#if PLATFORM_HAS_DYNAMIC_TIMER
        timer_initialize(&preempt_timer);
#endif
}

/**
 * @brief Change name of current thread
 */
void thread_set_name(const char *name)
{
        strlcpy(current_thread->name, name, sizeof(current_thread->name));
}

/**
 * @brief Change priority of current thread
 *
 * See thread_create() for a discussion of priority values.
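 *
 * Usage sketch (illustrative only): out-of-range values are clamped to the
 * [LOWEST_PRIORITY, HIGHEST_PRIORITY] range.
 * @code
 *      thread_set_priority(HIGH_PRIORITY);     // raise the current thread
 *      thread_set_priority(DEFAULT_PRIORITY);  // and drop it back down
 * @endcode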
 */
void thread_set_priority(int priority)
{
        if (priority < LOWEST_PRIORITY)
                priority = LOWEST_PRIORITY;
        if (priority > HIGHEST_PRIORITY)
                priority = HIGHEST_PRIORITY;
        current_thread->priority = priority;
}

/**
 * @brief  Become an idle thread
 *
 * This function marks the current thread as the idle thread -- the one which
 * executes when there is nothing else to do.  This function does not return.
 * This function is called once at boot time.
 */
void thread_become_idle(void)
{
        thread_set_name("idle");
        thread_set_priority(IDLE_PRIORITY);
        idle_thread = current_thread;

        /* release the implicit boot critical section and yield to the scheduler */
        exit_critical_section();
        thread_yield();

        idle_thread_routine();
}

/**
 * @brief  Dump debugging info about the specified thread.
 */
void dump_thread(thread_t *t)
{
        dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
        dprintf(INFO, "\tstate %d, priority %d, remaining quantum %d, critical section %d\n", t->state, t->priority, t->remaining_quantum, t->saved_critical_section_count);
        dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
        dprintf(INFO, "\tentry %p, arg %p\n", t->entry, t->arg);
        dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
        dprintf(INFO, "\ttls:");
        int i;
        for (i=0; i < MAX_TLS_ENTRY; i++) {
                dprintf(INFO, " 0x%x", t->tls[i]);
        }
        dprintf(INFO, "\n");
}

/**
 * @brief  Dump debugging info about all threads
 */
void dump_all_threads(void)
{
        thread_t *t;

        enter_critical_section();
        list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
                dump_thread(t);
        }
        exit_critical_section();
}

/** @} */


/**
 * @defgroup  wait  Wait Queue
 * @{
 */

/**
 * @brief  Initialize a wait queue
 */
void wait_queue_init(wait_queue_t *wait)
{
        wait->magic = WAIT_QUEUE_MAGIC;
        list_initialize(&wait->list);
        wait->count = 0;
}

static enum handler_return wait_queue_timeout_handler(timer_t *timer, lk_time_t now, void *arg)
{
        thread_t *thread = (thread_t *)arg;

#if THREAD_CHECKS
        ASSERT(thread->magic == THREAD_MAGIC);
#endif

        if (thread_unblock_from_wait_queue(thread, false, ERR_TIMED_OUT) >= NO_ERROR)
                return INT_RESCHEDULE;

        return INT_NO_RESCHEDULE;
}

/**
 * @brief  Block until a wait queue is notified.
 *
 * This function puts the current thread at the end of a wait
 * queue and then blocks until some other thread wakes the queue
 * up again.
 *
 * @param  wait     The wait queue to enter
 * @param  timeout  The maximum time, in ms, to wait
 *
 * If the timeout is zero, this function returns immediately with
 * ERR_TIMED_OUT.  If the timeout is INFINITE_TIME, this function
 * waits indefinitely.  Otherwise, this function returns with
 * ERR_TIMED_OUT at the end of the timeout period.
 *
 * @return ERR_TIMED_OUT on timeout, else returns the return
 * value specified when the queue was woken by wait_queue_wake_one().
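 *
 * Usage sketch (illustrative only; my_queue is a hypothetical wait queue set
 * up elsewhere with wait_queue_init(), and the call must be made inside a
 * critical section):
 * @code
 *      enter_critical_section();
 *      status_t err = wait_queue_block(&my_queue, 1000);
 *      exit_critical_section();
 *
 *      if (err == ERR_TIMED_OUT) {
 *              // nobody woke us within roughly 1000 ms
 *      }
 * @endcode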
 */
status_t wait_queue_block(wait_queue_t *wait, lk_time_t timeout)
{
        timer_t timer;

#if THREAD_CHECKS
        ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
        ASSERT(in_critical_section());
#endif

        if (timeout == 0)
                return ERR_TIMED_OUT;

        list_add_tail(&wait->list, &current_thread->queue_node);
        wait->count++;
        current_thread->state = THREAD_BLOCKED;
        current_thread->blocking_wait_queue = wait;
        current_thread->wait_queue_block_ret = NO_ERROR;

        /* if the timeout is finite, set a callback to yank us out of the queue */
        if (timeout != INFINITE_TIME) {
                timer_initialize(&timer);
                timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
        }

        thread_block();

        /* we don't really know if the timer fired or not, so it's safer to just try to cancel it */
        if (timeout != INFINITE_TIME) {
                timer_cancel(&timer);
        }

        return current_thread->wait_queue_block_ret;
}

/**
 * @brief  Wake up one thread sleeping on a wait queue
 *
 * This function removes one thread (if any) from the head of the wait queue and
 * makes it executable.  The new thread will be placed at the head of the
 * run queue.
 *
 * @param wait  The wait queue to wake
 * @param reschedule  If true, the newly-woken thread will run immediately.
 * @param wait_queue_error  The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return  The number of threads woken (zero or one)
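 *
 * Usage sketch (illustrative only; my_queue is hypothetical, and the wait
 * queue must be manipulated inside a critical section):
 * @code
 *      enter_critical_section();
 *      wait_queue_wake_one(&my_queue, true, NO_ERROR);
 *      exit_critical_section();
 * @endcode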
 */
int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
        thread_t *t;
        int ret = 0;

#if THREAD_CHECKS
        ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
        ASSERT(in_critical_section());
#endif

        t = list_remove_head_type(&wait->list, thread_t, queue_node);
        if (t) {
                wait->count--;
#if THREAD_CHECKS
                ASSERT(t->state == THREAD_BLOCKED);
#endif
                t->state = THREAD_READY;
                t->wait_queue_block_ret = wait_queue_error;
                t->blocking_wait_queue = NULL;

                /* if we're instructed to reschedule, stick the current thread on the head
                 * of the run queue first, so that the newly awakened thread gets a chance to run
                 * before the current one, but the current one doesn't get unnecessarily punished.
                 */
                if (reschedule) {
                        current_thread->state = THREAD_READY;
                        insert_in_run_queue_head(current_thread);
                }
                insert_in_run_queue_head(t);
                if (reschedule)
                        thread_resched();
                ret = 1;
        }

        return ret;
}


/**
 * @brief  Wake all threads sleeping on a wait queue
 *
 * This function removes all threads (if any) from the wait queue and
 * makes them executable.  The new threads will be placed at the head of the
 * run queue.
 *
 * @param wait  The wait queue to wake
 * @param reschedule  If true, the newly-woken threads will run immediately.
 * @param wait_queue_error  The return value which the new threads will receive
 * from wait_queue_block().
 *
 * @return  The number of threads woken
 */
int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
        thread_t *t;
        int ret = 0;

#if THREAD_CHECKS
        ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
        ASSERT(in_critical_section());
#endif

        if (reschedule && wait->count > 0) {
                /* if we're instructed to reschedule, stick the current thread on the head
                 * of the run queue first, so that the newly awakened threads get a chance to run
                 * before the current one, but the current one doesn't get unnecessarily punished.
                 */
                current_thread->state = THREAD_READY;
                insert_in_run_queue_head(current_thread);
        }

        /* pop all the threads off the wait queue into the run queue */
        while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
                wait->count--;
#if THREAD_CHECKS
                ASSERT(t->state == THREAD_BLOCKED);
#endif
                t->state = THREAD_READY;
                t->wait_queue_block_ret = wait_queue_error;
                t->blocking_wait_queue = NULL;

                insert_in_run_queue_head(t);
                ret++;
        }

#if THREAD_CHECKS
        ASSERT(wait->count == 0);
#endif

        if (reschedule && ret > 0)
                thread_resched();

        return ret;
}

/**
 * @brief  Free all resources allocated in wait_queue_init()
 *
 * If any threads were waiting on this queue, they are all woken.
 */
void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
#if THREAD_CHECKS
        ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
        ASSERT(in_critical_section());
#endif
        wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
        wait->magic = 0;
}

/**
 * @brief  Wake a specific thread in a wait queue
 *
 * This function extracts a specific thread from a wait queue, wakes it, and
 * puts it at the head of the run queue.
 *
 * @param t  The thread to wake
 * @param reschedule  If true, the newly-woken thread will run immediately.
 * @param wait_queue_error  The return value which the new thread will receive
 *   from wait_queue_block().
 *
 * @return NO_ERROR on success, ERR_NOT_BLOCKED if the thread was not in any wait queue.
 */
status_t thread_unblock_from_wait_queue(thread_t *t, bool reschedule, status_t wait_queue_error)
{
#if THREAD_CHECKS
        ASSERT(in_critical_section());
        ASSERT(t->magic == THREAD_MAGIC);
#endif

        if (t->state != THREAD_BLOCKED)
                return ERR_NOT_BLOCKED;

#if THREAD_CHECKS
        ASSERT(t->blocking_wait_queue != NULL);
        ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
        ASSERT(list_in_list(&t->queue_node));
#endif

        list_delete(&t->queue_node);
        t->blocking_wait_queue->count--;
        t->blocking_wait_queue = NULL;
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        insert_in_run_queue_head(t);

        if (reschedule)
                thread_resched();

        return NO_ERROR;
}