Use OSThreadProcAttr for workerStart
[ghc.git] / rts / Schedule.h
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2005
4 *
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
7 *
8 * -------------------------------------------------------------------------*/
9
10 #ifndef SCHEDULE_H
11 #define SCHEDULE_H
12
13 #include "OSThreads.h"
14 #include "Capability.h"
15
16 /* initScheduler(), exitScheduler()
17 * Called from STG : no
18 * Locks assumed : none
19 */
20 void initScheduler (void);
21 void exitScheduler (rtsBool wait_foreign);
22 void freeScheduler (void);
23
24 // Place a new thread on the run queue of the current Capability
25 void scheduleThread (Capability *cap, StgTSO *tso);
26
27 // Place a new thread on the run queue of a specified Capability
28 // (cap is the currently owned Capability, cpu is the number of
29 // the desired Capability).
30 void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
31
32 /* awakenBlockedQueue()
33 *
34 * Takes a pointer to the beginning of a blocked TSO queue, and
35 * wakes up the entire queue.
36 * Called from STG : yes
37 * Locks assumed : none
38 */
39 #if defined(GRAN)
40 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
41 #elif defined(PAR)
42 void awakenBlockedQueue(StgBlockingQueueElement *q, StgClosure *node);
43 #else
44 void awakenBlockedQueue (Capability *cap, StgTSO *tso);
45 #endif
46
47 /* wakeUpRts()
48 *
49 * Causes an OS thread to wake up and run the scheduler, if necessary.
50 */
51 void wakeUpRts(void);
52
53 /* unblockOne()
54 *
55 * Put the specified thread on the run queue of the given Capability.
56 * Called from STG : yes
57 * Locks assumed : we own the Capability.
58 */
59 StgTSO * unblockOne (Capability *cap, StgTSO *tso);
60
61 /* raiseExceptionHelper */
62 StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);
63
64 /* findRetryFrameHelper */
65 StgWord findRetryFrameHelper (StgTSO *tso);
66
67 /* workerStart()
68 *
69 * Entry point for a new worker task.
70 * Called from STG : NO
71 * Locks assumed : none
72 */
73 #if defined(THREADED_RTS)
74 void OSThreadProcAttr workerStart(Task *task);
75 #endif
76
77 #if defined(GRAN)
78 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
79 void unlink_from_bq(StgTSO* tso, StgClosure* node);
80 void initThread(StgTSO *tso, nat stack_size, StgInt pri);
81 #elif defined(PAR)
82 nat run_queue_len(void);
83 void awaken_blocked_queue(StgBlockingQueueElement *q, StgClosure *node);
84 void initThread(StgTSO *tso, nat stack_size);
85 #else
86 char *info_type(StgClosure *closure); // dummy
87 char *info_type_by_ip(StgInfoTable *ip); // dummy
88 void awaken_blocked_queue(StgTSO *q);
89 void initThread(StgTSO *tso, nat stack_size);
90 #endif
91
92 /* Context switch flag.
93 * Locks required : none (conflicts are harmless)
94 */
95 extern int RTS_VAR(context_switch);
96
97 /* The state of the scheduler. This is used to control the sequence
98 * of events during shutdown, and when the runtime is interrupted
99 * using ^C.
100 */
101 #define SCHED_RUNNING 0 /* running as normal */
102 #define SCHED_INTERRUPTING 1 /* ^C detected, before threads are deleted */
103 #define SCHED_SHUTTING_DOWN 2 /* final shutdown */
104
105 extern rtsBool RTS_VAR(sched_state);
106
107 /*
108 * flag that tracks whether we have done any execution in this time slice.
109 */
110 #define ACTIVITY_YES 0 /* there has been activity in the current slice */
111 #define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
112 #define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
113 #define ACTIVITY_DONE_GC 3 /* like 2, but we've done a GC too */
114
115 /* Recent activity flag.
116 * Locks required : Transition from MAYBE_NO to INACTIVE
117  * happens in the timer signal, so it is atomic.  Transition from
118 * INACTIVE to DONE_GC happens under sched_mutex. No lock required
119 * to set it to ACTIVITY_YES.
120 */
121 extern nat recent_activity;
122
123 /* Thread queues.
124 * Locks required : sched_mutex
125 *
126 * In GranSim we have one run/blocked_queue per PE.
127 */
128 #if defined(GRAN)
129 // run_queue_hds defined in GranSim.h
130 #else
131 extern StgTSO *RTS_VAR(blackhole_queue);
132 #if !defined(THREADED_RTS)
133 extern StgTSO *RTS_VAR(blocked_queue_hd), *RTS_VAR(blocked_queue_tl);
134 extern StgTSO *RTS_VAR(sleeping_queue);
135 #endif
136 #endif
137
138 /* Set to rtsTrue if there are threads on the blackhole_queue, and
139 * it is possible that one or more of them may be available to run.
140 * This flag is set to rtsFalse after we've checked the queue, and
141 * set to rtsTrue just before we run some Haskell code. It is used
142 * to decide whether we should yield the Capability or not.
143 * Locks required : none (see scheduleCheckBlackHoles()).
144 */
145 extern rtsBool blackholes_need_checking;
146
147 #if defined(THREADED_RTS)
148 extern Mutex RTS_VAR(sched_mutex);
149 #endif
150
151 SchedulerStatus rts_mainLazyIO(HaskellObj p, /*out*/HaskellObj *ret);
152
153 /* Called by shutdown_handler(). */
154 void interruptStgRts (void);
155
156 nat run_queue_len (void);
157
158 void resurrectThreads (StgTSO *);
159 void performPendingThrowTos (StgTSO *);
160
161 void printAllThreads(void);
162
163 /* debugging only
164 */
165 #ifdef DEBUG
166 void print_bq (StgClosure *node);
167 #endif
168 #if defined(PAR)
169 void print_bqe (StgBlockingQueueElement *bqe);
170 #endif
171
172 /* -----------------------------------------------------------------------------
173 * Some convenient macros/inline functions...
174 */
175
176 #if !IN_STG_CODE
177
178 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
179
180 /* Add a thread to the end of the run queue.
181 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
182 * ASSUMES: cap->running_task is the current task.
183 */
184 INLINE_HEADER void
185 appendToRunQueue (Capability *cap, StgTSO *tso)
186 {
187 ASSERT(tso->_link == END_TSO_QUEUE);
188 if (cap->run_queue_hd == END_TSO_QUEUE) {
189 cap->run_queue_hd = tso;
190 } else {
191 setTSOLink(cap, cap->run_queue_tl, tso);
192 }
193 cap->run_queue_tl = tso;
194 }
195
196 /* Push a thread on the beginning of the run queue. Used for
197 * newly awakened threads, so they get run as soon as possible.
198 * ASSUMES: cap->running_task is the current task.
199 */
200 INLINE_HEADER void
201 pushOnRunQueue (Capability *cap, StgTSO *tso)
202 {
203 setTSOLink(cap, tso, cap->run_queue_hd);
204 cap->run_queue_hd = tso;
205 if (cap->run_queue_tl == END_TSO_QUEUE) {
206 cap->run_queue_tl = tso;
207 }
208 }
209
210 /* Pop the first thread off the runnable queue.
211 */
212 INLINE_HEADER StgTSO *
213 popRunQueue (Capability *cap)
214 {
215 StgTSO *t = cap->run_queue_hd;
216 ASSERT(t != END_TSO_QUEUE);
217 cap->run_queue_hd = t->_link;
218 t->_link = END_TSO_QUEUE; // no write barrier req'd
219 if (cap->run_queue_hd == END_TSO_QUEUE) {
220 cap->run_queue_tl = END_TSO_QUEUE;
221 }
222 return t;
223 }
224
225 /* Add a thread to the end of the blocked queue.
226 */
227 #if !defined(THREADED_RTS)
228 INLINE_HEADER void
229 appendToBlockedQueue(StgTSO *tso)
230 {
231 ASSERT(tso->_link == END_TSO_QUEUE);
232 if (blocked_queue_hd == END_TSO_QUEUE) {
233 blocked_queue_hd = tso;
234 } else {
235 setTSOLink(&MainCapability, blocked_queue_tl, tso);
236 }
237 blocked_queue_tl = tso;
238 }
239 #endif
240
#if defined(THREADED_RTS)
/* Append tso to other_cap's wakeup queue.
 * Assumes: my_cap is owned by the current Task, and we hold
 * other_cap->lock; we do not necessarily own other_cap — another
 * Task may be running on it.
 */
INLINE_HEADER void
appendToWakeupQueue (Capability *my_cap, Capability *other_cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (other_cap->wakeup_queue_hd != END_TSO_QUEUE) {
        /* setTSOLink() takes my_cap because it may need to write to
         * my_cap's mutable list */
        setTSOLink(my_cap, other_cap->wakeup_queue_tl, tso);
    } else {
        /* queue was empty: tso becomes the head */
        other_cap->wakeup_queue_hd = tso;
    }
    other_cap->wakeup_queue_tl = tso;
}
#endif
259
260 /* Check whether various thread queues are empty
261 */
262 INLINE_HEADER rtsBool
263 emptyQueue (StgTSO *q)
264 {
265 return (q == END_TSO_QUEUE);
266 }
267
268 INLINE_HEADER rtsBool
269 emptyRunQueue(Capability *cap)
270 {
271 return emptyQueue(cap->run_queue_hd);
272 }
273
#if defined(THREADED_RTS)
/* Is cap's wakeup queue empty? (threaded RTS only) */
INLINE_HEADER rtsBool
emptyWakeupQueue (Capability *cap)
{
    return (cap->wakeup_queue_hd == END_TSO_QUEUE);
}
#endif
281
282 #if !defined(THREADED_RTS)
283 #define EMPTY_BLOCKED_QUEUE() (emptyQueue(blocked_queue_hd))
284 #define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
285 #endif
286
/* True iff there is no runnable work for this Capability.  In the
 * non-threaded RTS the global blocked and sleeping queues are also
 * consulted (the #if splices extra conjuncts into the expression);
 * in the threaded RTS only the run queue matters here.
 */
INLINE_HEADER rtsBool
emptyThreadQueues(Capability *cap)
{
    return emptyRunQueue(cap)
#if !defined(THREADED_RTS)
	&& EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
#endif
    ;
}
296
297 #endif /* !IN_STG_CODE */
298
299 #endif /* SCHEDULE_H */
300