/* rts/Schedule.h (ghc.git) — commit: "rts: drop unused global 'blackhole_queue'" */
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2005
4 *
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
7 *
8 * -------------------------------------------------------------------------*/
9
10 #ifndef SCHEDULE_H
11 #define SCHEDULE_H
12
13 #include "rts/OSThreads.h"
14 #include "Capability.h"
15 #include "Trace.h"
16
17 #include "BeginPrivate.h"
18
19 /* initScheduler(), exitScheduler()
20 * Called from STG : no
21 * Locks assumed : none
22 */
23 void initScheduler (void);
24 void exitScheduler (rtsBool wait_foreign);
25 void freeScheduler (void);
26 void markScheduler (evac_fn evac, void *user);
27
28 // Place a new thread on the run queue of the current Capability
29 void scheduleThread (Capability *cap, StgTSO *tso);
30
31 // Place a new thread on the run queue of a specified Capability
32 // (cap is the currently owned Capability, cpu is the number of
33 // the desired Capability).
34 void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
35
36 /* wakeUpRts()
37 *
38 * Causes an OS thread to wake up and run the scheduler, if necessary.
39 */
40 #if defined(THREADED_RTS)
41 void wakeUpRts(void);
42 #endif
43
44 /* raiseExceptionHelper */
45 StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);
46
47 /* findRetryFrameHelper */
48 StgWord findRetryFrameHelper (Capability *cap, StgTSO *tso);
49
50 /* Entry point for a new worker */
51 void scheduleWorker (Capability *cap, Task *task);
52
53 /* The state of the scheduler. This is used to control the sequence
54 * of events during shutdown, and when the runtime is interrupted
55 * using ^C.
56 */
57 #define SCHED_RUNNING 0 /* running as normal */
58 #define SCHED_INTERRUPTING 1 /* ^C detected, before threads are deleted */
59 #define SCHED_SHUTTING_DOWN 2 /* final shutdown */
60
61 extern volatile StgWord sched_state;
62
63 /*
64 * flag that tracks whether we have done any execution in this time
65 * slice, and controls the disabling of the interval timer.
66 *
67 * The timer interrupt transitions ACTIVITY_YES into
68 * ACTIVITY_MAYBE_NO, waits for RtsFlags.GcFlags.idleGCDelayTime,
69 * and then:
 *    - if idle GC is on, set ACTIVITY_INACTIVE and wakeUpRts()
71 * - if idle GC is off, set ACTIVITY_DONE_GC and stopTimer()
72 *
73 * If the scheduler finds ACTIVITY_INACTIVE, then it sets
74 * ACTIVITY_DONE_GC, performs the GC and calls stopTimer().
75 *
76 * If the scheduler finds ACTIVITY_DONE_GC and it has a thread to run,
77 * it enables the timer again with startTimer().
78 */
79 #define ACTIVITY_YES 0
80 // the RTS is active
81 #define ACTIVITY_MAYBE_NO 1
82 // no activity since the last timer signal
83 #define ACTIVITY_INACTIVE 2
84 // RtsFlags.GcFlags.idleGCDelayTime has passed with no activity
85 #define ACTIVITY_DONE_GC 3
86 // like ACTIVITY_INACTIVE, but we've done a GC too (if idle GC is
87 // enabled) and the interval timer is now turned off.
88
89 /* Recent activity flag.
90 * Locks required : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic.  Transition from
92 * INACTIVE to DONE_GC happens under sched_mutex. No lock required
93 * to set it to ACTIVITY_YES.
94 */
95 extern volatile StgWord recent_activity;
96
97 /* Thread queues.
98 * Locks required : sched_mutex
99 */
100 #if !defined(THREADED_RTS)
101 extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
102 extern StgTSO *sleeping_queue;
103 #endif
104
105 extern rtsBool heap_overflow;
106
107 #if defined(THREADED_RTS)
108 extern Mutex sched_mutex;
109 #endif
110
111 /* Called by shutdown_handler(). */
112 void interruptStgRts (void);
113
114 void resurrectThreads (StgTSO *);
115
116 /* -----------------------------------------------------------------------------
117 * Some convenient macros/inline functions...
118 */
119
120 #if !IN_STG_CODE
121
122 /* END_TSO_QUEUE and friends now defined in includes/stg/MiscClosures.h */
123
/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
 * ASSUMES: cap->running_task is the current task.
 */
EXTERN_INLINE void
appendToRunQueue (Capability *cap, StgTSO *tso);

EXTERN_INLINE void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
    // The thread must not already be linked into any queue.
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        // Queue was empty: tso becomes the sole element and therefore
        // has no predecessor.
        cap->run_queue_hd = tso;
        tso->block_info.prev = END_TSO_QUEUE;
    } else {
        // Link tso after the current tail, maintaining both the forward
        // (_link) and backward (block_info.prev) chains.  setTSOLink /
        // setTSOPrev are used instead of raw assignment — presumably to
        // perform the GC write barrier; confirm in Threads.c.
        setTSOLink(cap, cap->run_queue_tl, tso);
        setTSOPrev(cap, tso, cap->run_queue_tl);
    }
    cap->run_queue_tl = tso;
}
144
/* Push a thread on the beginning of the run queue.
 * ASSUMES: cap->running_task is the current task.
 */
EXTERN_INLINE void
pushOnRunQueue (Capability *cap, StgTSO *tso);

EXTERN_INLINE void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
    // New head links forward to the old head (which may be the
    // END_TSO_QUEUE sentinel) and has no predecessor.
    setTSOLink(cap, tso, cap->run_queue_hd);
    tso->block_info.prev = END_TSO_QUEUE;
    if (cap->run_queue_hd != END_TSO_QUEUE) {
        // The old head's back-pointer must now refer to the new head.
        setTSOPrev(cap, cap->run_queue_hd, tso);
    }
    cap->run_queue_hd = tso;
    if (cap->run_queue_tl == END_TSO_QUEUE) {
        // Queue was empty: the new element is also the tail.
        cap->run_queue_tl = tso;
    }
}
164
165 /* Pop the first thread off the runnable queue.
166 */
167 INLINE_HEADER StgTSO *
168 popRunQueue (Capability *cap)
169 {
170 StgTSO *t = cap->run_queue_hd;
171 ASSERT(t != END_TSO_QUEUE);
172 cap->run_queue_hd = t->_link;
173 if (t->_link != END_TSO_QUEUE) {
174 t->_link->block_info.prev = END_TSO_QUEUE;
175 }
176 t->_link = END_TSO_QUEUE; // no write barrier req'd
177 if (cap->run_queue_hd == END_TSO_QUEUE) {
178 cap->run_queue_tl = END_TSO_QUEUE;
179 }
180 return t;
181 }
182
183 INLINE_HEADER StgTSO *
184 peekRunQueue (Capability *cap)
185 {
186 return cap->run_queue_hd;
187 }
188
189 void promoteInRunQueue (Capability *cap, StgTSO *tso);
190
/* Add a thread to the end of the blocked queue.
 * Non-threaded RTS only: blocked_queue_hd/tl are global (see above) and
 * are protected by sched_mutex.
 */
#if !defined(THREADED_RTS)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
    // The thread must not already be linked into any queue.
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (blocked_queue_hd == END_TSO_QUEUE) {
        blocked_queue_hd = tso;
    } else {
        // setTSOLink needs a Capability; in the non-threaded RTS the
        // only one is MainCapability.
        setTSOLink(&MainCapability, blocked_queue_tl, tso);
    }
    blocked_queue_tl = tso;
}
#endif
206
207 /* Check whether various thread queues are empty
208 */
209 INLINE_HEADER rtsBool
210 emptyQueue (StgTSO *q)
211 {
212 return (q == END_TSO_QUEUE);
213 }
214
215 INLINE_HEADER rtsBool
216 emptyRunQueue(Capability *cap)
217 {
218 return emptyQueue(cap->run_queue_hd);
219 }
220
221 /* assumes that the queue is not empty; so combine this with
222 * an emptyRunQueue check! */
223 INLINE_HEADER rtsBool
224 singletonRunQueue(Capability *cap)
225 {
226 ASSERT(!emptyRunQueue(cap));
227 return cap->run_queue_hd->_link == END_TSO_QUEUE;
228 }
229
230 INLINE_HEADER void
231 truncateRunQueue(Capability *cap)
232 {
233 cap->run_queue_hd = END_TSO_QUEUE;
234 cap->run_queue_tl = END_TSO_QUEUE;
235 }
236
237 #if !defined(THREADED_RTS)
238 #define EMPTY_BLOCKED_QUEUE() (emptyQueue(blocked_queue_hd))
239 #define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
240 #endif
241
242 INLINE_HEADER rtsBool
243 emptyThreadQueues(Capability *cap)
244 {
245 return emptyRunQueue(cap)
246 #if !defined(THREADED_RTS)
247 && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
248 #endif
249 ;
250 }
251
252 #endif /* !IN_STG_CODE */
253
254 #include "EndPrivate.h"
255
256 #endif /* SCHEDULE_H */