/*
 * Copyright (c) 2008-2009 Travis Geiselbrecht
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 * @brief  Kernel threading
 *
 * This file is the core kernel threading interface.
 *
 * @defgroup thread Threads
 * @{
 */
#include <debug.h>
#include <assert.h>
#include <list.h>
#include <malloc.h>
#include <string.h>
#include <err.h>
#include <kernel/thread.h>
#include <kernel/timer.h>
#include <kernel/dpc.h>
#include <platform.h>
#include <target.h>

#if DEBUGLEVEL > 1
#define THREAD_CHECKS 1
#endif

#if THREAD_STATS
struct thread_stats thread_stats;
#endif

/* global thread list */
static struct list_node thread_list;

/* the current thread */
thread_t *current_thread;

/* the global critical section count */
int critical_section_count;

/* the run queue */
static struct list_node run_queue[NUM_PRIORITIES];
static uint32_t run_queue_bitmap;

/* the bootstrap thread (statically allocated) */
static thread_t bootstrap_thread;

/* the idle thread */
thread_t *idle_thread;

/* local routines */
static void thread_resched(void);
static void idle_thread_routine(void) __NO_RETURN;

#if PLATFORM_HAS_DYNAMIC_TIMER
/* preemption timer */
static timer_t preempt_timer;
#endif

/* run queue manipulation */
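/*
 * run_queue[i] holds THREAD_READY threads of priority i.  Bit i of
 * run_queue_bitmap is kept set exactly while run_queue[i] is non-empty:
 * the insert routines below set it, and thread_resched() clears it when
 * it drains a queue.
 */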
static void insert_in_run_queue_head(thread_t *t)
{
#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(t->state == THREAD_READY);
        ASSERT(!list_in_list(&t->queue_node));
        ASSERT(in_critical_section());
#endif

        list_add_head(&run_queue[t->priority], &t->queue_node);
        run_queue_bitmap |= (1<<t->priority);
}

static void insert_in_run_queue_tail(thread_t *t)
{
#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(t->state == THREAD_READY);
        ASSERT(!list_in_list(&t->queue_node));
        ASSERT(in_critical_section());
#endif

        list_add_tail(&run_queue[t->priority], &t->queue_node);
        run_queue_bitmap |= (1<<t->priority);
}

static void init_thread_struct(thread_t *t, const char *name)
{
        memset(t, 0, sizeof(thread_t));
        t->magic = THREAD_MAGIC;
        strlcpy(t->name, name, sizeof(t->name));
}

/**
 * @brief  Create a new thread
 *
 * This function creates a new thread.  The thread is initially suspended, so you
 * need to call thread_resume() to execute it.
 *
 * @param  name        Name of thread
 * @param  entry       Entry point of thread
 * @param  arg         Arbitrary argument passed to entry()
 * @param  priority    Execution priority for the thread.
 * @param  stack_size  Stack size for the thread.
 *
 * Thread priority is an integer from 0 (lowest) to 31 (highest).  Some standard
 * priorities are defined in <kernel/thread.h>:
 *
 *      HIGHEST_PRIORITY
 *      DPC_PRIORITY
 *      HIGH_PRIORITY
 *      DEFAULT_PRIORITY
 *      LOW_PRIORITY
 *      IDLE_PRIORITY
 *      LOWEST_PRIORITY
 *
 * Stack size is typically set to DEFAULT_STACK_SIZE.
 *
 * @return  Pointer to thread object, or NULL on failure.
 */
thread_t *thread_create(const char *name, thread_start_routine entry, void *arg, int priority, size_t stack_size)
{
        thread_t *t;

        t = malloc(sizeof(thread_t));
        if (!t)
                return NULL;

        init_thread_struct(t, name);

        t->entry = entry;
        t->arg = arg;
        t->priority = priority;
        t->saved_critical_section_count = 1; /* we always start inside a critical section */
        t->state = THREAD_SUSPENDED;
        t->blocking_wait_queue = NULL;
        t->wait_queue_block_ret = NO_ERROR;

        /* create the stack */
        t->stack = malloc(stack_size);
        if (!t->stack) {
                free(t);
                return NULL;
        }

        t->stack_size = stack_size;

        /* inherit thread local storage from the parent */
        int i;
        for (i=0; i < MAX_TLS_ENTRY; i++)
                t->tls[i] = current_thread->tls[i];

        /* set up the initial stack frame */
        arch_thread_initialize(t);

        /* add it to the global thread list */
        enter_critical_section();
        list_add_head(&thread_list, &t->thread_list_node);
        exit_critical_section();

        return t;
}
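
/*
 * Example (illustrative sketch; 'worker_entry' and 'do_work' are hypothetical,
 * not part of this file, and the int (*)(void *) entry signature is assumed
 * for thread_start_routine): create a thread at the default priority and
 * start it with thread_resume().  The entry point terminates itself with
 * thread_exit().
 *
 *      static int worker_entry(void *arg)
 *      {
 *              do_work(arg);           // hypothetical work function
 *              thread_exit(0);         // does not return
 *      }
 *
 *      thread_t *worker = thread_create("worker", &worker_entry, NULL,
 *                                       DEFAULT_PRIORITY, DEFAULT_STACK_SIZE);
 *      if (worker)
 *              thread_resume(worker);
 */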

/**
 * @brief  Make a suspended thread executable.
 *
 * This function is typically called to start a thread which has just been
 * created with thread_create().
 *
 * @param t  Thread to resume
 *
 * @return NO_ERROR on success, ERR_NOT_SUSPENDED if thread was not suspended.
 */
status_t thread_resume(thread_t *t)
{
#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(t->state != THREAD_DEATH);
#endif

        if (t->state == THREAD_READY || t->state == THREAD_RUNNING)
                return ERR_NOT_SUSPENDED;

        enter_critical_section();
        t->state = THREAD_READY;
        insert_in_run_queue_head(t);
        thread_yield();
        exit_critical_section();

        return NO_ERROR;
}

static void thread_cleanup_dpc(void *thread)
{
        thread_t *t = (thread_t *)thread;

//      dprintf(SPEW, "thread_cleanup_dpc: thread %p (%s)\n", t, t->name);

#if THREAD_CHECKS
        ASSERT(t->state == THREAD_DEATH);
        ASSERT(t->blocking_wait_queue == NULL);
        ASSERT(!list_in_list(&t->queue_node));
#endif

        /* remove it from the master thread list */
        enter_critical_section();
        list_delete(&t->thread_list_node);
        exit_critical_section();

        /* free its stack and the thread structure itself */
        if (t->stack)
                free(t->stack);

        free(t);
}

/**
 * @brief  Suspend the current thread
 *
 * The current thread is suspended and will be made runnable again by
 * thread_resume(), just as a newly created thread is started by the
 * thread_create() / thread_resume() sequence.
 *
 * This function does not return.
 */
void thread_suspend()
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

//      dprintf("thread_suspend: current %p\n", current_thread);

        enter_critical_section();
        current_thread->state = THREAD_SUSPENDED;
        current_thread->remaining_quantum = 0;
        thread_resched();

        panic("somehow fell through thread_suspend()\n");
}

/**
 * @brief  Terminate the current thread
 *
 * The current thread exits with the specified return code.
 *
 * This function does not return.
 */
void thread_exit(int retcode)
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

//      dprintf("thread_exit: current %p\n", current_thread);

        enter_critical_section();

        /* enter the dead state */
        current_thread->state = THREAD_DEATH;
        current_thread->retcode = retcode;

        /* schedule a dpc to clean ourselves up */
        dpc_queue(thread_cleanup_dpc, (void *)current_thread, DPC_FLAG_NORESCHED);

        /* reschedule */
        thread_resched();

        panic("somehow fell through thread_exit()\n");
}

static void idle_thread_routine(void)
{
        for(;;)
#if WITH_PLATFORM_IDLE
                platform_idle();
#else
                arch_idle();
#endif
}

/**
 * @brief  Cause another thread to be executed.
 *
 * Internal reschedule routine. The caller must already have placed the
 * current thread in the proper state and on the proper queue(s); this
 * routine simply picks the next thread and switches to it.
 *
 * This is probably not the function you're looking for. See
 * thread_yield() instead.
 */
void thread_resched(void)
{
        thread_t *oldthread;
        thread_t *newthread;

//      printf("thread_resched: current %p: ", current_thread);
//      dump_thread(current_thread);

#if THREAD_CHECKS
        ASSERT(in_critical_section());
#endif

        THREAD_STATS_INC(reschedules);

        oldthread = current_thread;

        // at the moment, can't deal with more than 32 priority levels
        ASSERT(NUM_PRIORITIES <= 32);

        // should at least find the idle thread
#if THREAD_CHECKS
        ASSERT(run_queue_bitmap != 0);
#endif

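        /*
         * Pick the highest-priority non-empty run queue: the highest set bit
         * of run_queue_bitmap is the best runnable priority.  Worked example
         * (assuming NUM_PRIORITIES == 32 and HIGHEST_PRIORITY == 31): if
         * run_queue_bitmap == 0x90 (queues 7 and 4 non-empty), then
         * __builtin_clz(0x90) == 24 and next_queue == 31 - 24 - 0 == 7.
         */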
        int next_queue = HIGHEST_PRIORITY - __builtin_clz(run_queue_bitmap) - (32 - NUM_PRIORITIES);
        //dprintf(SPEW, "bitmap 0x%x, next %d\n", run_queue_bitmap, next_queue);

        newthread = list_remove_head_type(&run_queue[next_queue], thread_t, queue_node);

#if THREAD_CHECKS
        ASSERT(newthread);
#endif

        if (list_is_empty(&run_queue[next_queue]))
                run_queue_bitmap &= ~(1<<next_queue);

#if 0
        // XXX make this more efficient
        newthread = NULL;
        for (i=HIGHEST_PRIORITY; i >= LOWEST_PRIORITY; i--) {
                newthread = list_remove_head_type(&run_queue[i], thread_t, queue_node);
                if (newthread)
                        break;
        }
#endif

//      printf("newthread: ");
//      dump_thread(newthread);

        newthread->state = THREAD_RUNNING;

        if (newthread == oldthread)
                return;

        /* set up quantum for the new thread if it was consumed */
        if (newthread->remaining_quantum <= 0) {
                newthread->remaining_quantum = 5; // XXX make this smarter
        }

#if THREAD_STATS
        THREAD_STATS_INC(context_switches);

        if (oldthread == idle_thread) {
                lk_bigtime_t now = current_time_hires();
                thread_stats.idle_time += now - thread_stats.last_idle_timestamp;
        }
        if (newthread == idle_thread) {
                thread_stats.last_idle_timestamp = current_time_hires();
        }
#endif

#if THREAD_CHECKS
        ASSERT(critical_section_count > 0);
        ASSERT(newthread->saved_critical_section_count > 0);
#endif

#if PLATFORM_HAS_DYNAMIC_TIMER
        /* if we're switching from idle to a real thread, set up a periodic
         * timer to run our preemption tick.
         */
        if (oldthread == idle_thread) {
                timer_set_periodic(&preempt_timer, 10, (timer_callback)thread_timer_tick, NULL);
        } else if (newthread == idle_thread) {
                timer_cancel(&preempt_timer);
        }
#endif

        /* set some optional target debug leds */
        target_set_debug_led(0, newthread != idle_thread);

        /* do the switch */
        oldthread->saved_critical_section_count = critical_section_count;
        current_thread = newthread;
        critical_section_count = newthread->saved_critical_section_count;
        arch_context_switch(oldthread, newthread);
}

/**
 * @brief Yield the cpu to another thread
 *
 * This function places the current thread at the end of the run queue
 * and yields the cpu to another waiting thread (if any).
 *
 * This function will return at some later time, possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_yield(void)
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

        enter_critical_section();

        THREAD_STATS_INC(yields);

        /* we are yielding the cpu, so stick ourselves into the tail of the run queue and reschedule */
        current_thread->state = THREAD_READY;
        current_thread->remaining_quantum = 0;
        insert_in_run_queue_tail(current_thread);
        thread_resched();

        exit_critical_section();
}
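
/*
 * Example (illustrative sketch; 'data_ready' is a hypothetical flag): a
 * cooperative polling loop that hands the cpu to other runnable threads
 * between checks instead of spinning.  A wait queue (see below) is usually
 * the better tool when the producer can signal explicitly.
 *
 *      while (!data_ready)
 *              thread_yield();
 */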

/**
 * @brief  Briefly yield cpu to another thread
 *
 * This function is similar to thread_yield(), except that it will
 * restart more quickly.
 *
 * This function places the current thread at the head of the run
 * queue and then yields the cpu to another thread.
 *
 * Exception:  If the time slice for this thread has expired, then
 * the thread goes to the end of the run queue.
 *
 * This function will return at some later time, possibly immediately if
 * no other threads are waiting to execute.
 */
void thread_preempt(void)
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

        enter_critical_section();

#if THREAD_STATS
        if (current_thread != idle_thread)
                THREAD_STATS_INC(preempts); /* only track when a meaningful preempt happens */
#endif

        /* we are being preempted, so we get to go back into the front of the run queue if we have quantum left */
        current_thread->state = THREAD_READY;
        if (current_thread->remaining_quantum > 0)
                insert_in_run_queue_head(current_thread);
        else
                insert_in_run_queue_tail(current_thread); /* if we're out of quantum, go to the tail of the queue */
        thread_resched();

        exit_critical_section();
}

/**
 * @brief  Suspend thread until woken.
 *
 * This function schedules another thread to execute.  This function does not
 * return until the thread is made runnable again by some other module.
 *
 * You probably don't want to call this function directly; it's meant to be
 * called from other modules, such as mutex, which set the thread's state to
 * blocked and add it to a wait queue first (see wait_queue_block() below for
 * the canonical pattern).
 */
void thread_block(void)
{
#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_BLOCKED);
#endif

        enter_critical_section();

        /* we are blocking on something. the blocking code should have already stuck us on a queue */
        thread_resched();

        exit_critical_section();
}

enum handler_return thread_timer_tick(void)
{
        if (current_thread == idle_thread)
                return INT_NO_RESCHEDULE;

        current_thread->remaining_quantum--;
        if (current_thread->remaining_quantum <= 0)
                return INT_RESCHEDULE;
        else
                return INT_NO_RESCHEDULE;
}

/* timer callback to wake up a sleeping thread */
static enum handler_return thread_sleep_handler(timer_t *timer, lk_time_t now, void *arg)
{
        thread_t *t = (thread_t *)arg;

#if THREAD_CHECKS
        ASSERT(t->magic == THREAD_MAGIC);
        ASSERT(t->state == THREAD_SLEEPING);
#endif

        t->state = THREAD_READY;
        insert_in_run_queue_head(t);

        return INT_RESCHEDULE;
}

/**
 * @brief  Put thread to sleep; delay specified in ms
 *
 * This function puts the current thread to sleep until the specified
 * delay in ms has expired.
 *
 * Note that this function could sleep for longer than the specified delay if
 * other threads are running.  When the timer expires, this thread will
 * be placed at the head of the run queue.
 */
void thread_sleep(lk_time_t delay)
{
        timer_t timer;

#if THREAD_CHECKS
        ASSERT(current_thread->magic == THREAD_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
#endif

        timer_initialize(&timer);

        enter_critical_section();
        timer_set_oneshot(&timer, delay, thread_sleep_handler, (void *)current_thread);
        current_thread->state = THREAD_SLEEPING;
        thread_resched();
        exit_critical_section();
}

/**
 * @brief  Initialize threading system
 *
 * This function is called once, from kmain()
 */
void thread_init_early(void)
{
        int i;

        /* initialize the run queues */
        for (i=0; i < NUM_PRIORITIES; i++)
                list_initialize(&run_queue[i]);

        /* initialize the thread list */
        list_initialize(&thread_list);

        /* create a thread to cover the current running state */
        thread_t *t = &bootstrap_thread;
        init_thread_struct(t, "bootstrap");

        /* half construct this thread, since we're already running */
        t->priority = HIGHEST_PRIORITY;
        t->state = THREAD_RUNNING;
        t->saved_critical_section_count = 1;
        list_add_head(&thread_list, &t->thread_list_node);
        current_thread = t;
}

/**
 * @brief Complete thread initialization
 *
 * This function is called once at boot time
 */
void thread_init(void)
{
        arch_thread_initialize(current_thread);
#if PLATFORM_HAS_DYNAMIC_TIMER
        timer_initialize(&preempt_timer);
#endif
}

/**
 * @brief Change name of current thread
 */
void thread_set_name(const char *name)
{
        strlcpy(current_thread->name, name, sizeof(current_thread->name));
}

/**
 * @brief Change priority of current thread
 *
 * See thread_create() for a discussion of priority values.
 */
void thread_set_priority(int priority)
{
        if (priority < LOWEST_PRIORITY)
                priority = LOWEST_PRIORITY;
        if (priority > HIGHEST_PRIORITY)
                priority = HIGHEST_PRIORITY;
        current_thread->priority = priority;
}

/**
 * @brief  Become an idle thread
 *
 * This function marks the current thread as the idle thread -- the one which
 * executes when there is nothing else to do.  This function does not return.
 * This function is called once at boot time.
 */
void thread_become_idle(void)
{
        thread_set_name("idle");
        thread_set_priority(IDLE_PRIORITY);
        idle_thread = current_thread;

        /* release the implicit boot critical section and yield to the scheduler */
        exit_critical_section();
        thread_yield();

        idle_thread_routine();
}

/**
 * @brief  Dump debugging info about the specified thread.
 */
void dump_thread(thread_t *t)
{
        dprintf(INFO, "dump_thread: t %p (%s)\n", t, t->name);
        dprintf(INFO, "\tstate %d, priority %d, remaining quantum %d, critical section %d\n", t->state, t->priority, t->remaining_quantum, t->saved_critical_section_count);
        dprintf(INFO, "\tstack %p, stack_size %zd\n", t->stack, t->stack_size);
        dprintf(INFO, "\tentry %p, arg %p\n", t->entry, t->arg);
        dprintf(INFO, "\twait queue %p, wait queue ret %d\n", t->blocking_wait_queue, t->wait_queue_block_ret);
        dprintf(INFO, "\ttls:");
        int i;
        for (i=0; i < MAX_TLS_ENTRY; i++) {
                dprintf(INFO, " 0x%x", t->tls[i]);
        }
        dprintf(INFO, "\n");
}

/**
 * @brief  Dump debugging info about all threads
 */
void dump_all_threads(void)
{
        thread_t *t;

        enter_critical_section();
        list_for_every_entry(&thread_list, t, thread_t, thread_list_node) {
                dump_thread(t);
        }
        exit_critical_section();
}

/** @} */


/**
 * @defgroup  wait  Wait Queue
 * @{
 */

/**
 * @brief  Initialize a wait queue
 */
void wait_queue_init(wait_queue_t *wait)
{
        wait->magic = WAIT_QUEUE_MAGIC;
        list_initialize(&wait->list);
        wait->count = 0;
}

static enum handler_return wait_queue_timeout_handler(timer_t *timer, lk_time_t now, void *arg)
{
        thread_t *thread = (thread_t *)arg;

#if THREAD_CHECKS
        ASSERT(thread->magic == THREAD_MAGIC);
#endif

        if (thread_unblock_from_wait_queue(thread, false, ERR_TIMED_OUT) >= NO_ERROR)
                return INT_RESCHEDULE;

        return INT_NO_RESCHEDULE;
}

/**
 * @brief  Block until a wait queue is notified.
 *
 * This function puts the current thread at the end of a wait
 * queue and then blocks until some other thread wakes the queue
 * up again.
 *
 * @param  wait     The wait queue to enter
 * @param  timeout  The maximum time, in ms, to wait
 *
 * If the timeout is zero, this function returns immediately with
 * ERR_TIMED_OUT.  If the timeout is INFINITE_TIME, this function
 * waits indefinitely.  Otherwise, this function returns with
 * ERR_TIMED_OUT at the end of the timeout period.
 *
 * @return ERR_TIMED_OUT on timeout, else returns the return
 * value specified when the queue was woken by wait_queue_wake_one().
 */
status_t wait_queue_block(wait_queue_t *wait, lk_time_t timeout)
{
        timer_t timer;

#if THREAD_CHECKS
        ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
        ASSERT(current_thread->state == THREAD_RUNNING);
        ASSERT(in_critical_section());
#endif

        if (timeout == 0)
                return ERR_TIMED_OUT;

        list_add_tail(&wait->list, &current_thread->queue_node);
        wait->count++;
        current_thread->state = THREAD_BLOCKED;
        current_thread->blocking_wait_queue = wait;
        current_thread->wait_queue_block_ret = NO_ERROR;

        /* if the timeout is not infinite, set a callback to yank us out of the queue */
        if (timeout != INFINITE_TIME) {
                timer_initialize(&timer);
                timer_set_oneshot(&timer, timeout, wait_queue_timeout_handler, (void *)current_thread);
        }

        thread_block();

        /* we don't really know if the timer fired or not, so to be safe, try to cancel it */
        if (timeout != INFINITE_TIME) {
                timer_cancel(&timer);
        }

        return current_thread->wait_queue_block_ret;
}
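
/*
 * Example (illustrative sketch; 'event', 'signaled' and the three routines
 * are hypothetical, not part of this file): a minimal one-shot event built
 * on a wait queue.  The waiter blocks inside a critical section; the
 * signaler wakes one waiter and lets it run immediately.
 *
 *      static wait_queue_t event;
 *      static volatile bool signaled;
 *
 *      void event_init(void)
 *      {
 *              wait_queue_init(&event);
 *              signaled = false;
 *      }
 *
 *      status_t event_wait(lk_time_t timeout)
 *      {
 *              status_t err = NO_ERROR;
 *
 *              enter_critical_section();
 *              if (!signaled)
 *                      err = wait_queue_block(&event, timeout);
 *              exit_critical_section();
 *              return err;             // NO_ERROR or ERR_TIMED_OUT
 *      }
 *
 *      void event_signal(void)
 *      {
 *              enter_critical_section();
 *              signaled = true;
 *              wait_queue_wake_one(&event, true, NO_ERROR);
 *              exit_critical_section();
 *      }
 */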

/**
 * @brief  Wake up one thread sleeping on a wait queue
 *
 * This function removes one thread (if any) from the head of the wait queue and
 * makes it executable.  The new thread will be placed at the head of the
 * run queue.
 *
 * @param wait  The wait queue to wake
 * @param reschedule  If true, the newly-woken thread will run immediately.
 * @param wait_queue_error  The return value which the new thread will receive
 * from wait_queue_block().
 *
 * @return  The number of threads woken (zero or one)
 */
int wait_queue_wake_one(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
        thread_t *t;
        int ret = 0;

#if THREAD_CHECKS
        ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
        ASSERT(in_critical_section());
#endif

        t = list_remove_head_type(&wait->list, thread_t, queue_node);
        if (t) {
                wait->count--;
#if THREAD_CHECKS
                ASSERT(t->state == THREAD_BLOCKED);
#endif
                t->state = THREAD_READY;
                t->wait_queue_block_ret = wait_queue_error;
                t->blocking_wait_queue = NULL;

                /* if we're instructed to reschedule, stick the current thread on the head
                 * of the run queue first, so that the newly awakened thread gets a chance to run
                 * before the current one, but the current one doesn't get unnecessarily punished.
                 */
                if (reschedule) {
                        current_thread->state = THREAD_READY;
                        insert_in_run_queue_head(current_thread);
                }
                insert_in_run_queue_head(t);
                if (reschedule)
                        thread_resched();
                ret = 1;
        }

        return ret;
}


/**
 * @brief  Wake all threads sleeping on a wait queue
 *
 * This function removes all threads (if any) from the wait queue and
 * makes them executable.  The new threads will be placed at the head of the
 * run queue.
 *
 * @param wait  The wait queue to wake
 * @param reschedule  If true, the newly-woken threads will run immediately.
 * @param wait_queue_error  The return value which the woken threads will receive
 * from wait_queue_block().
 *
 * @return  The number of threads woken
 */
int wait_queue_wake_all(wait_queue_t *wait, bool reschedule, status_t wait_queue_error)
{
        thread_t *t;
        int ret = 0;

#if THREAD_CHECKS
        ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
        ASSERT(in_critical_section());
#endif

        if (reschedule && wait->count > 0) {
                /* if we're instructed to reschedule, stick the current thread on the head
                 * of the run queue first, so that the newly awakened threads get a chance to run
                 * before the current one, but the current one doesn't get unnecessarily punished.
                 */
                current_thread->state = THREAD_READY;
                insert_in_run_queue_head(current_thread);
        }

        /* pop all the threads off the wait queue into the run queue */
        while ((t = list_remove_head_type(&wait->list, thread_t, queue_node))) {
                wait->count--;
#if THREAD_CHECKS
                ASSERT(t->state == THREAD_BLOCKED);
#endif
                t->state = THREAD_READY;
                t->wait_queue_block_ret = wait_queue_error;
                t->blocking_wait_queue = NULL;

                insert_in_run_queue_head(t);
                ret++;
        }

#if THREAD_CHECKS
        ASSERT(wait->count == 0);
#endif

        if (reschedule && ret > 0)
                thread_resched();

        return ret;
}

/**
 * @brief  Free all resources allocated in wait_queue_init()
 *
 * If any threads were waiting on this queue, they are all woken.
 */
void wait_queue_destroy(wait_queue_t *wait, bool reschedule)
{
#if THREAD_CHECKS
        ASSERT(wait->magic == WAIT_QUEUE_MAGIC);
        ASSERT(in_critical_section());
#endif
        wait_queue_wake_all(wait, reschedule, ERR_OBJECT_DESTROYED);
        wait->magic = 0;
}

/**
 * @brief  Wake a specific thread in a wait queue
 *
 * This function extracts a specific thread from a wait queue, wakes it, and
 * puts it at the head of the run queue.
 *
 * @param t  The thread to wake
 * @param reschedule  If true, the newly-woken thread will run immediately.
 * @param wait_queue_error  The return value which the new thread will receive
 *   from wait_queue_block().
 *
 * @return NO_ERROR on success, ERR_NOT_BLOCKED if thread was not in any wait queue.
 */
status_t thread_unblock_from_wait_queue(thread_t *t, bool reschedule, status_t wait_queue_error)
{
#if THREAD_CHECKS
        ASSERT(in_critical_section());
        ASSERT(t->magic == THREAD_MAGIC);
#endif

        if (t->state != THREAD_BLOCKED)
                return ERR_NOT_BLOCKED;

#if THREAD_CHECKS
        ASSERT(t->blocking_wait_queue != NULL);
        ASSERT(t->blocking_wait_queue->magic == WAIT_QUEUE_MAGIC);
        ASSERT(list_in_list(&t->queue_node));
#endif

        list_delete(&t->queue_node);
        t->blocking_wait_queue->count--;
        t->blocking_wait_queue = NULL;
        t->state = THREAD_READY;
        t->wait_queue_block_ret = wait_queue_error;
        insert_in_run_queue_head(t);

        if (reschedule)
                thread_resched();

        return NO_ERROR;
}
