edbe246ed326be74915798735dfcd7d924c613a2
[ghc.git] / rts / Schedule.h
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2005
4 *
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
7 *
8 * -------------------------------------------------------------------------*/
9
10 #ifndef SCHEDULE_H
11 #define SCHEDULE_H
12
13 #include "OSThreads.h"
14 #include "Capability.h"
15
16 /* initScheduler(), exitScheduler()
17 * Called from STG : no
18 * Locks assumed : none
19 */
20 void initScheduler (void);
21 void exitScheduler (void);
22
23 // Place a new thread on the run queue of the current Capability
24 void scheduleThread (Capability *cap, StgTSO *tso);
25
26 // Place a new thread on the run queue of a specified Capability
27 // (cap is the currently owned Capability, cpu is the number of
28 // the desired Capability).
29 void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
30
31 /* awakenBlockedQueue()
32 *
33 * Takes a pointer to the beginning of a blocked TSO queue, and
34 * wakes up the entire queue.
35 * Called from STG : yes
36 * Locks assumed : none
37 */
38 #if defined(GRAN)
39 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
40 #elif defined(PAR)
41 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
42 #else
43 void awakenBlockedQueue (Capability *cap, StgTSO *tso);
44 #endif
45
46 /* unblockOne()
47 *
48 * Put the specified thread on the run queue of the given Capability.
49 * Called from STG : yes
50 * Locks assumed : we own the Capability.
51 */
52 StgTSO * unblockOne(Capability *cap, StgTSO *tso);
53
54 /* raiseAsync()
55 *
56 * Raises an exception asynchronously in the specified thread.
57 *
58 * Called from STG : yes
59 * Locks assumed : none
60 */
61 void raiseAsync(Capability *cap, StgTSO *tso, StgClosure *exception);
62
63 /* suspendComputation()
64 *
65 * A variant of raiseAsync(), this strips the stack of the specified
66 * thread down to the stop_here point, leaving a current closure on
67 * top of the stack at [stop_here - 1].
68 */
69 void suspendComputation(Capability *cap, StgTSO *tso, StgPtr stop_here);
70
71 /* raiseExceptionHelper */
72 StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);
73
74 /* findRetryFrameHelper */
75 StgWord findRetryFrameHelper (StgTSO *tso);
76
77 /* GetRoots(evac_fn f)
78 *
79 * Call f() for each root known to the scheduler.
80 *
81 * Called from STG : NO
82 * Locks assumed : ????
83 */
84 void GetRoots(evac_fn);
85
86 /* workerStart()
87 *
88 * Entry point for a new worker task.
89 * Called from STG : NO
90 * Locks assumed : none
91 */
92 void workerStart(Task *task);
93
94 #if defined(GRAN)
95 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
96 void unlink_from_bq(StgTSO* tso, StgClosure* node);
97 void initThread(StgTSO *tso, nat stack_size, StgInt pri);
98 #elif defined(PAR)
99 nat run_queue_len(void);
100 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
101 void initThread(StgTSO *tso, nat stack_size);
102 #else
103 char *info_type(StgClosure *closure); // dummy
104 char *info_type_by_ip(StgInfoTable *ip); // dummy
105 void awaken_blocked_queue(StgTSO *q);
106 void initThread(StgTSO *tso, nat stack_size);
107 #endif
108
109 /* Context switch flag.
110 * Locks required : none (conflicts are harmless)
111 */
112 extern int RTS_VAR(context_switch);
113
114 /* The state of the scheduler. This is used to control the sequence
115 * of events during shutdown, and when the runtime is interrupted
116 * using ^C.
117 */
118 #define SCHED_RUNNING 0 /* running as normal */
119 #define SCHED_INTERRUPTING 1 /* ^C detected, before threads are deleted */
120 #define SCHED_SHUTTING_DOWN 2 /* final shutdown */
121
122 extern rtsBool RTS_VAR(sched_state);
123
124 /*
125 * flag that tracks whether we have done any execution in this time slice.
126 */
127 #define ACTIVITY_YES 0 /* there has been activity in the current slice */
128 #define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
129 #define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
130 #define ACTIVITY_DONE_GC 3 /* like 2, but we've done a GC too */
131
132 /* Recent activity flag.
133 * Locks required : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic. Transition from
135 * INACTIVE to DONE_GC happens under sched_mutex. No lock required
136 * to set it to ACTIVITY_YES.
137 */
138 extern nat recent_activity;
139
140 /* Thread queues.
141 * Locks required : sched_mutex
142 *
143 * In GranSim we have one run/blocked_queue per PE.
144 */
145 #if defined(GRAN)
146 // run_queue_hds defined in GranSim.h
147 #else
148 extern StgTSO *RTS_VAR(blackhole_queue);
149 #if !defined(THREADED_RTS)
150 extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
151 extern StgTSO *RTS_VAR(sleeping_queue);
152 #endif
153 #endif
154
155 /* Linked list of all threads.
156 * Locks required : sched_mutex
157 */
158 extern StgTSO *RTS_VAR(all_threads);
159
160 /* Set to rtsTrue if there are threads on the blackhole_queue, and
161 * it is possible that one or more of them may be available to run.
162 * This flag is set to rtsFalse after we've checked the queue, and
163 * set to rtsTrue just before we run some Haskell code. It is used
164 * to decide whether we should yield the Capability or not.
165 * Locks required : none (see scheduleCheckBlackHoles()).
166 */
167 extern rtsBool blackholes_need_checking;
168
169 #if defined(THREADED_RTS)
170 extern Mutex RTS_VAR(sched_mutex);
171 #endif
172
173 StgBool isThreadBound(StgTSO *tso);
174
175 SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
176
177 /* Called by shutdown_handler(). */
178 void interruptStgRts (void);
179
180 nat run_queue_len (void);
181
182 void resurrectThreads (StgTSO *);
183
184 void printAllThreads(void);
185
186 /* debugging only
187 */
188 #ifdef DEBUG
189 void print_bq (StgClosure *node);
190 #endif
191 #if defined(PAR)
192 void print_bqe (StgBlockingQueueElement *bqe);
193 #endif
194
195 void labelThread(StgPtr tso, char *label);
196
197 /* -----------------------------------------------------------------------------
198 * Some convenient macros/inline functions...
199 */
200
201 #if !IN_STG_CODE
202
203 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
204
205 /* Add a thread to the end of the run queue.
206 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
207 * ASSUMES: cap->running_task is the current task.
208 */
209 STATIC_INLINE void
210 appendToRunQueue (Capability *cap, StgTSO *tso)
211 {
212 ASSERT(tso->link == END_TSO_QUEUE);
213 if (cap->run_queue_hd == END_TSO_QUEUE) {
214 cap->run_queue_hd = tso;
215 } else {
216 cap->run_queue_tl->link = tso;
217 }
218 cap->run_queue_tl = tso;
219 }
220
221 /* Push a thread on the beginning of the run queue. Used for
222 * newly awakened threads, so they get run as soon as possible.
223 * ASSUMES: cap->running_task is the current task.
224 */
225 STATIC_INLINE void
226 pushOnRunQueue (Capability *cap, StgTSO *tso)
227 {
228 tso->link = cap->run_queue_hd;
229 cap->run_queue_hd = tso;
230 if (cap->run_queue_tl == END_TSO_QUEUE) {
231 cap->run_queue_tl = tso;
232 }
233 }
234
235 /* Pop the first thread off the runnable queue.
236 */
237 STATIC_INLINE StgTSO *
238 popRunQueue (Capability *cap)
239 {
240 StgTSO *t = cap->run_queue_hd;
241 ASSERT(t != END_TSO_QUEUE);
242 cap->run_queue_hd = t->link;
243 t->link = END_TSO_QUEUE;
244 if (cap->run_queue_hd == END_TSO_QUEUE) {
245 cap->run_queue_tl = END_TSO_QUEUE;
246 }
247 return t;
248 }
249
250 /* Add a thread to the end of the blocked queue.
251 */
252 #if !defined(THREADED_RTS)
253 STATIC_INLINE void
254 appendToBlockedQueue(StgTSO *tso)
255 {
256 ASSERT(tso->link == END_TSO_QUEUE);
257 if (blocked_queue_hd == END_TSO_QUEUE) {
258 blocked_queue_hd = tso;
259 } else {
260 blocked_queue_tl->link = tso;
261 }
262 blocked_queue_tl = tso;
263 }
264 #endif
265
266 #if defined(THREADED_RTS)
267 STATIC_INLINE void
268 appendToWakeupQueue (Capability *cap, StgTSO *tso)
269 {
270 ASSERT(tso->link == END_TSO_QUEUE);
271 if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
272 cap->wakeup_queue_hd = tso;
273 } else {
274 cap->wakeup_queue_tl->link = tso;
275 }
276 cap->wakeup_queue_tl = tso;
277 }
278 #endif
279
280 /* Check whether various thread queues are empty
281 */
282 STATIC_INLINE rtsBool
283 emptyQueue (StgTSO *q)
284 {
285 return (q == END_TSO_QUEUE);
286 }
287
288 STATIC_INLINE rtsBool
289 emptyRunQueue(Capability *cap)
290 {
291 return emptyQueue(cap->run_queue_hd);
292 }
293
294 #if defined(THREADED_RTS)
295 STATIC_INLINE rtsBool
296 emptyWakeupQueue(Capability *cap)
297 {
298 return emptyQueue(cap->wakeup_queue_hd);
299 }
300 #endif
301
302 #if !defined(THREADED_RTS)
303 #define EMPTY_BLOCKED_QUEUE() (emptyQueue(blocked_queue_hd))
304 #define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
305 #endif
306
307 STATIC_INLINE rtsBool
308 emptyThreadQueues(Capability *cap)
309 {
310 return emptyRunQueue(cap)
311 #if !defined(THREADED_RTS)
312 && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
313 #endif
314 ;
315 }
316
317 #ifdef DEBUG
318 void sched_belch(char *s, ...)
319 GNU_ATTRIBUTE(format (printf, 1, 2));
320 #endif
321
322 #endif /* !IN_STG_CODE */
323
324 STATIC_INLINE void
325 dirtyTSO (StgTSO *tso)
326 {
327 tso->flags |= TSO_DIRTY;
328 }
329
330 #endif /* SCHEDULE_H */
331