/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2005
 *
 * Prototypes for functions in Schedule.c
 * (RTS internal scheduler interface)
 *
 * -------------------------------------------------------------------------*/

#ifndef SCHEDULE_H
#define SCHEDULE_H

#include "rts/OSThreads.h"
#include "Capability.h"
#include "Trace.h"

#include "BeginPrivate.h"

/* initScheduler(), exitScheduler()
 * Called from STG : no
 * Locks assumed   : none
 */
void initScheduler (void);
void exitScheduler (rtsBool wait_foreign);
void freeScheduler (void);
void markScheduler (evac_fn evac, void *user);

// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
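
/* Illustrative sketch only (not part of this interface): how a caller
 * holding a Capability might choose between the two entry points above.
 * The TSO is assumed to have been built elsewhere, e.g. with
 * createThread() from Threads.h; 'exampleScheduleOnCpu' is a
 * hypothetical helper. */
#if 0
static void exampleScheduleOnCpu (Capability *cap, StgTSO *tso, StgWord target_cpu)
{
    if (target_cpu == cap->no) {
        scheduleThread(cap, tso);               // stays on the current capability
    } else {
        scheduleThreadOn(cap, target_cpu, tso); // migrates to capability 'target_cpu'
    }
}
#endif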

/* wakeUpRts()
 *
 * Causes an OS thread to wake up and run the scheduler, if necessary.
 */
#if defined(THREADED_RTS)
void wakeUpRts(void);
#endif

/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
StgWord findRetryFrameHelper (Capability *cap, StgTSO *tso);

/* Entry point for a new worker */
void scheduleWorker (Capability *cap, Task *task);

/* The state of the scheduler.  This is used to control the sequence
 * of events during shutdown, and when the runtime is interrupted
 * using ^C.
 */
#define SCHED_RUNNING       0  /* running as normal */
#define SCHED_INTERRUPTING  1  /* ^C detected, before threads are deleted */
#define SCHED_SHUTTING_DOWN 2  /* final shutdown */

extern volatile StgWord sched_state;
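
/* Illustrative sketch only: the intended progression of sched_state.
 * The real transitions live in Schedule.c (e.g. interruptStgRts() and
 * exitScheduler()); this merely spells out the ordering described above. */
#if 0
static void exampleShutdownSequence (void)
{
    ASSERT(sched_state == SCHED_RUNNING);   // normal execution
    sched_state = SCHED_INTERRUPTING;       // ^C seen: stop taking on work, delete threads
    sched_state = SCHED_SHUTTING_DOWN;      // threads gone: final shutdown
}
#endif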

/*
 * flag that tracks whether we have done any execution in this time
 * slice, and controls the disabling of the interval timer.
 *
 * The timer interrupt transitions ACTIVITY_YES into
 * ACTIVITY_MAYBE_NO, waits for RtsFlags.GcFlags.idleGCDelayTime,
 * and then:
 *   - if idle GC is on,  set ACTIVITY_INACTIVE and wakeUpRts()
 *   - if idle GC is off, set ACTIVITY_DONE_GC and stopTimer()
 *
 * If the scheduler finds ACTIVITY_INACTIVE, then it sets
 * ACTIVITY_DONE_GC, performs the GC and calls stopTimer().
 *
 * If the scheduler finds ACTIVITY_DONE_GC and it has a thread to run,
 * it enables the timer again with startTimer().
 */
#define ACTIVITY_YES      0
  // the RTS is active
#define ACTIVITY_MAYBE_NO 1
  // no activity since the last timer signal
#define ACTIVITY_INACTIVE 2
  // RtsFlags.GcFlags.idleGCDelayTime has passed with no activity
#define ACTIVITY_DONE_GC  3
  // like ACTIVITY_INACTIVE, but we've done a GC too (if idle GC is
  // enabled) and the interval timer is now turned off.

/* Recent activity flag.
 * Locks required  : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic.  Transition from
 * INACTIVE to DONE_GC happens under sched_mutex.  No lock required
 * to set it to ACTIVITY_YES.
 */
extern volatile StgWord recent_activity;
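
/* Illustrative sketch only, loosely modelled on the transition described
 * above (the real handler lives in Timer.c).  RtsFlags.GcFlags.doIdleGC,
 * wakeUpRts() and stopTimer() are assumed to be in scope here. */
#if 0
static void exampleTimerTick (void)
{
    switch (recent_activity) {
    case ACTIVITY_YES:
        recent_activity = ACTIVITY_MAYBE_NO;   // arm the idle detector
        break;
    case ACTIVITY_MAYBE_NO:
        // idleGCDelayTime has elapsed with no activity in between
        if (RtsFlags.GcFlags.doIdleGC) {
            recent_activity = ACTIVITY_INACTIVE;
            wakeUpRts();                       // let the scheduler perform the idle GC
        } else {
            recent_activity = ACTIVITY_DONE_GC;
            stopTimer();                       // nothing to do; silence the timer
        }
        break;
    default:
        break;                                 // INACTIVE / DONE_GC: nothing to do here
    }
}
#endif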

/* Thread queues.
 * Locks required  : sched_mutex
 */
extern StgTSO *blackhole_queue;
#if !defined(THREADED_RTS)
extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
extern StgTSO *sleeping_queue;
#endif

extern rtsBool heap_overflow;

#if defined(THREADED_RTS)
extern Mutex sched_mutex;
#endif

/* Called by shutdown_handler(). */
void interruptStgRts (void);

void resurrectThreads (StgTSO *);

/* -----------------------------------------------------------------------------
 * Some convenient macros/inline functions...
 */

#if !IN_STG_CODE

/* END_TSO_QUEUE and friends now defined in includes/stg/MiscClosures.h */

/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this function.
 * ASSUMES: cap->running_task is the current task.
 */
EXTERN_INLINE void
appendToRunQueue (Capability *cap, StgTSO *tso);

EXTERN_INLINE void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_hd = tso;
        tso->block_info.prev = END_TSO_QUEUE;
    } else {
        setTSOLink(cap, cap->run_queue_tl, tso);
        setTSOPrev(cap, tso, cap->run_queue_tl);
    }
    cap->run_queue_tl = tso;
}

/* Push a thread on the beginning of the run queue.
 * ASSUMES: cap->running_task is the current task.
 */
EXTERN_INLINE void
pushOnRunQueue (Capability *cap, StgTSO *tso);

EXTERN_INLINE void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
    setTSOLink(cap, tso, cap->run_queue_hd);
    tso->block_info.prev = END_TSO_QUEUE;
    if (cap->run_queue_hd != END_TSO_QUEUE) {
        setTSOPrev(cap, cap->run_queue_hd, tso);
    }
    cap->run_queue_hd = tso;
    if (cap->run_queue_tl == END_TSO_QUEUE) {
        cap->run_queue_tl = tso;
    }
}

/* Pop the first thread off the runnable queue.
 */
INLINE_HEADER StgTSO *
popRunQueue (Capability *cap)
{
    StgTSO *t = cap->run_queue_hd;
    ASSERT(t != END_TSO_QUEUE);
    cap->run_queue_hd = t->_link;
    if (t->_link != END_TSO_QUEUE) {
        t->_link->block_info.prev = END_TSO_QUEUE;
    }
    t->_link = END_TSO_QUEUE; // no write barrier req'd
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_tl = END_TSO_QUEUE;
    }
    return t;
}

INLINE_HEADER StgTSO *
peekRunQueue (Capability *cap)
{
    return cap->run_queue_hd;
}
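
/* Illustrative sketch only: a typical enqueue/dequeue round trip using
 * the helpers above.  'cap' must be owned by the calling task and 'tso'
 * must not already be on a queue. */
#if 0
static void exampleRunQueueRoundTrip (Capability *cap, StgTSO *tso)
{
    appendToRunQueue(cap, tso);          // add at the tail
    ASSERT(peekRunQueue(cap) != END_TSO_QUEUE);
    StgTSO *next = popRunQueue(cap);     // take from the head
    pushOnRunQueue(cap, next);           // or put it back at the front
}
#endif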

void removeFromRunQueue (Capability *cap, StgTSO *tso);
extern void promoteInRunQueue (Capability *cap, StgTSO *tso);

/* Add a thread to the end of the blocked queue.
 */
#if !defined(THREADED_RTS)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (blocked_queue_hd == END_TSO_QUEUE) {
        blocked_queue_hd = tso;
    } else {
        setTSOLink(&MainCapability, blocked_queue_tl, tso);
    }
    blocked_queue_tl = tso;
}
#endif

/* Check whether various thread queues are empty
 */
INLINE_HEADER rtsBool
emptyQueue (StgTSO *q)
{
    return (q == END_TSO_QUEUE);
}

INLINE_HEADER rtsBool
emptyRunQueue(Capability *cap)
{
    return emptyQueue(cap->run_queue_hd);
}

/* assumes that the queue is not empty; so combine this with
 * an emptyRunQueue check! */
INLINE_HEADER rtsBool
singletonRunQueue(Capability *cap)
{
    ASSERT(!emptyRunQueue(cap));
    return cap->run_queue_hd->_link == END_TSO_QUEUE;
}

INLINE_HEADER void
truncateRunQueue(Capability *cap)
{
    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;
}

#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif

INLINE_HEADER rtsBool
emptyThreadQueues(Capability *cap)
{
    return emptyRunQueue(cap)
#if !defined(THREADED_RTS)
        && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
#endif
    ;
}
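
/* Illustrative sketch only: combining the emptiness checks above in a
 * scheduler-style decision.  Note that singletonRunQueue() must be
 * guarded by an emptyRunQueue() check, as its comment says. */
#if 0
static rtsBool exampleShouldGoIdle (Capability *cap)
{
    if (emptyThreadQueues(cap)) {
        return rtsTrue;                  // no local work at all: capability can go idle
    }
    if (!emptyRunQueue(cap) && singletonRunQueue(cap)) {
        return rtsFalse;                 // exactly one runnable thread: keep running it
    }
    return rtsFalse;                     // several runnable threads: keep running
}
#endif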

#endif /* !IN_STG_CODE */

#include "EndPrivate.h"

#endif /* SCHEDULE_H */