/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2005
 *
 * Prototypes for functions in Schedule.c
 * (RTS internal scheduler interface)
 *
 * -------------------------------------------------------------------------*/

#ifndef SCHEDULE_H
#define SCHEDULE_H

#include "OSThreads.h"
#include "Capability.h"

/* initScheduler(), exitScheduler()
 * Called from STG : no
 * Locks assumed   : none
 */
void initScheduler (void);
void exitScheduler (void);

// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
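
/* Illustrative sketch (not part of this header): a thread is typically
 * created and then handed to the scheduler.  This assumes createThread()
 * as provided elsewhere in the RTS:
 *
 *     StgTSO *tso = createThread(cap, stack_size);
 *     scheduleThread(cap, tso);
 *
 * scheduleThreadOn() behaves the same, except that the thread is placed
 * on (and tied to) the Capability numbered 'cpu'.
 */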

/* awakenBlockedQueue()
 *
 * Takes a pointer to the beginning of a blocked TSO queue, and
 * wakes up the entire queue.
 * Called from STG : yes
 * Locks assumed   : none
 */
#if defined(GRAN)
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#elif defined(PAR)
void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
#else
void awakenBlockedQueue (Capability *cap, StgTSO *tso);
#endif

/* wakeUpRts()
 *
 * Causes an OS thread to wake up and run the scheduler, if necessary.
 */
void wakeUpRts(void);

/* unblockOne()
 *
 * Put the specified thread on the run queue of the given Capability.
 * Called from STG : yes
 * Locks assumed   : we own the Capability.
 */
StgTSO * unblockOne (Capability *cap, StgTSO *tso);

/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
StgWord findRetryFrameHelper (StgTSO *tso);

/* GetRoots(evac_fn f)
 *
 * Call f() for each root known to the scheduler.
 *
 * Called from STG : NO
 * Locks assumed   : ????
 */
void GetRoots(evac_fn);

/* workerStart()
 *
 * Entry point for a new worker task.
 * Called from STG : NO
 * Locks assumed   : none
 */
void workerStart(Task *task);

#if defined(GRAN)
void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void unlink_from_bq(StgTSO* tso, StgClosure* node);
void initThread(StgTSO *tso, nat stack_size, StgInt pri);
#elif defined(PAR)
nat  run_queue_len(void);
void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
void initThread(StgTSO *tso, nat stack_size);
#else
char *info_type(StgClosure *closure);    // dummy
char *info_type_by_ip(StgInfoTable *ip); // dummy
void awaken_blocked_queue(StgTSO *q);
void initThread(StgTSO *tso, nat stack_size);
#endif

/* Context switch flag.
 * Locks required : none (conflicts are harmless)
 */
extern int RTS_VAR(context_switch);

/* The state of the scheduler.  This is used to control the sequence
 * of events during shutdown, and when the runtime is interrupted
 * using ^C.
 */
#define SCHED_RUNNING       0  /* running as normal */
#define SCHED_INTERRUPTING  1  /* ^C detected, before threads are deleted */
#define SCHED_SHUTTING_DOWN 2  /* final shutdown */

extern rtsBool RTS_VAR(sched_state);
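
/* Illustrative sketch: the scheduler's main loop consults sched_state on
 * each iteration, roughly along these lines (the real code is in
 * Schedule.c):
 *
 *     if (sched_state >= SCHED_INTERRUPTING) {
 *         ... delete runnable threads, then move to SCHED_SHUTTING_DOWN ...
 *     }
 *
 * The state only ever advances; SCHED_SHUTTING_DOWN is terminal.
 */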

/*
 * Flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES      0 /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC  3 /* like 2, but we've done a GC too */
/* Recent activity flag.
 * Locks required : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic.  Transition from
 * INACTIVE to DONE_GC happens under sched_mutex.  No lock required
 * to set it to ACTIVITY_YES.
 */
extern nat recent_activity;
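
/* Illustrative sequence (sketch): successive timer ticks with no
 * intervening work move the flag ACTIVITY_YES -> ACTIVITY_MAYBE_NO ->
 * ACTIVITY_INACTIVE, at which point the scheduler is woken to GC and
 * sets ACTIVITY_DONE_GC; any real execution resets it to ACTIVITY_YES.
 */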

/* Thread queues.
 * Locks required : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
#if defined(GRAN)
// run_queue_hds defined in GranSim.h
#else
extern StgTSO *RTS_VAR(blackhole_queue);
#if !defined(THREADED_RTS)
extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
extern StgTSO *RTS_VAR(sleeping_queue);
#endif
#endif

/* Linked list of all threads.
 * Locks required : sched_mutex
 */
extern StgTSO *RTS_VAR(all_threads);

/* Set to rtsTrue if there are threads on the blackhole_queue, and
 * it is possible that one or more of them may be available to run.
 * This flag is set to rtsFalse after we've checked the queue, and
 * set to rtsTrue just before we run some Haskell code.  It is used
 * to decide whether we should yield the Capability or not.
 * Locks required : none (see scheduleCheckBlackHoles()).
 */
extern rtsBool blackholes_need_checking;

#if defined(THREADED_RTS)
extern Mutex RTS_VAR(sched_mutex);
#endif

SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);

/* Called by shutdown_handler(). */
void interruptStgRts (void);

nat  run_queue_len (void);

void resurrectThreads (StgTSO *);

void printAllThreads(void);

/* debugging only
 */
#ifdef DEBUG
void print_bq (StgClosure *node);
#endif
#if defined(PAR)
void print_bqe (StgBlockingQueueElement *bqe);
#endif

/* -----------------------------------------------------------------------------
 * Some convenient macros/inline functions...
 */

#if !IN_STG_CODE

/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */

/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this function.
 * ASSUMES: cap->running_task is the current task.
 */
INLINE_HEADER void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
    ASSERT(tso->link == END_TSO_QUEUE);
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_hd = tso;
    } else {
        cap->run_queue_tl->link = tso;
    }
    cap->run_queue_tl = tso;
}

/* Push a thread on the beginning of the run queue.  Used for
 * newly awakened threads, so they get run as soon as possible.
 * ASSUMES: cap->running_task is the current task.
 */
INLINE_HEADER void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
    tso->link = cap->run_queue_hd;
    cap->run_queue_hd = tso;
    if (cap->run_queue_tl == END_TSO_QUEUE) {
        cap->run_queue_tl = tso;
    }
}

/* Pop the first thread off the runnable queue.
 */
INLINE_HEADER StgTSO *
popRunQueue (Capability *cap)
{
    StgTSO *t = cap->run_queue_hd;
    ASSERT(t != END_TSO_QUEUE);
    cap->run_queue_hd = t->link;
    t->link = END_TSO_QUEUE;
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_tl = END_TSO_QUEUE;
    }
    return t;
}
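
/* Illustrative sketch of how these operations combine in the scheduler's
 * main loop (simplified; the real loop is in Schedule.c):
 *
 *     while (!emptyRunQueue(cap)) {
 *         StgTSO *t = popRunQueue(cap);
 *         ... run t until it yields or blocks ...
 *         // if t is still runnable, put it at the back of the queue:
 *         appendToRunQueue(cap, t);
 *     }
 *
 * pushOnRunQueue() is used instead of appendToRunQueue() when a thread
 * should run as soon as possible, e.g. one that has just been woken.
 */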

/* Add a thread to the end of the blocked queue.
 */
#if !defined(THREADED_RTS)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
    ASSERT(tso->link == END_TSO_QUEUE);
    if (blocked_queue_hd == END_TSO_QUEUE) {
        blocked_queue_hd = tso;
    } else {
        blocked_queue_tl->link = tso;
    }
    blocked_queue_tl = tso;
}
#endif

#if defined(THREADED_RTS)
INLINE_HEADER void
appendToWakeupQueue (Capability *cap, StgTSO *tso)
{
    ASSERT(tso->link == END_TSO_QUEUE);
    if (cap->wakeup_queue_hd == END_TSO_QUEUE) {
        cap->wakeup_queue_hd = tso;
    } else {
        cap->wakeup_queue_tl->link = tso;
    }
    cap->wakeup_queue_tl = tso;
}
#endif

/* Check whether various thread queues are empty
 */
INLINE_HEADER rtsBool
emptyQueue (StgTSO *q)
{
    return (q == END_TSO_QUEUE);
}

INLINE_HEADER rtsBool
emptyRunQueue(Capability *cap)
{
    return emptyQueue(cap->run_queue_hd);
}

#if defined(THREADED_RTS)
INLINE_HEADER rtsBool
emptyWakeupQueue(Capability *cap)
{
    return emptyQueue(cap->wakeup_queue_hd);
}
#endif

#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif

INLINE_HEADER rtsBool
emptyThreadQueues(Capability *cap)
{
    return emptyRunQueue(cap)
#if !defined(THREADED_RTS)
        && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
#endif
    ;
}
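
/* Note (sketch): an empty result from emptyThreadQueues() is one of the
 * conditions under which the scheduler suspects a deadlock; see
 * scheduleDetectDeadlock() in Schedule.c.
 */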

#endif /* !IN_STG_CODE */

/* Mark a TSO as having been modified since the last GC, so that the
 * garbage collector knows it needs to be scanned again.
 */
INLINE_HEADER void
dirtyTSO (StgTSO *tso)
{
    tso->flags |= TSO_DIRTY;
}
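
/* Illustrative sketch: dirtyTSO() should accompany any mutation of a TSO
 * from C code, e.g.
 *
 *     tso->why_blocked = NotBlocked;
 *     dirtyTSO(tso);
 *
 * (why_blocked and NotBlocked are existing TSO fields/constants; the
 * pairing shown here is a sketch of the intended discipline, not a quote
 * from Schedule.c.)
 */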

#endif /* SCHEDULE_H */