/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2005
 *
 * Prototypes for functions in Schedule.c
 * (RTS internal scheduler interface)
 *
 * -------------------------------------------------------------------------*/

#ifndef SCHEDULE_H
#define SCHEDULE_H

#include "rts/OSThreads.h"
#include "Capability.h"
#include "Trace.h"

#include "BeginPrivate.h"

/* initScheduler(), exitScheduler()
 * Called from STG    :  no
 * Locks assumed      :  none
 */
void initScheduler (void);
void exitScheduler (bool wait_foreign);
void freeScheduler (void);
void markScheduler (evac_fn evac, void *user);
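
/* A rough sketch (illustrative only) of how the RTS startup/shutdown path
 * is expected to use these entry points; the authoritative sequence is in
 * RtsStartup.c:
 *
 *   initScheduler();        // once, during RTS initialisation
 *   ... run Haskell code ...
 *   exitScheduler(true);    // true = wait for outstanding foreign calls
 *   freeScheduler();        // release scheduler data structures
 *
 * markScheduler() is called during GC to evacuate the scheduler's roots
 * via the supplied evac_fn and user pointer.
 */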

// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
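
/* Illustrative sketch only: pinning a freshly created thread to a chosen
 * capability.  createThread() is from Threads.h; target_cpu and
 * want_pinned are stand-ins for the caller's own bookkeeping.
 *
 *   StgTSO *tso = createThread(cap, stack_size);
 *   if (want_pinned) {
 *       scheduleThreadOn(cap, target_cpu, tso);   // run on capability target_cpu
 *   } else {
 *       scheduleThread(cap, tso);                 // run on the current capability
 *   }
 */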

/* wakeUpRts()
 *
 * Causes an OS thread to wake up and run the scheduler, if necessary.
 */
#if defined(THREADED_RTS)
void wakeUpRts(void);
#endif

/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
StgWord findRetryFrameHelper (Capability *cap, StgTSO *tso);

/* Entry point for a new worker */
void scheduleWorker (Capability *cap, Task *task);

/* The state of the scheduler.  This is used to control the sequence
 * of events during shutdown.  See Note [shutdown] in Schedule.c.
 */
#define SCHED_RUNNING       0  /* running as normal */
#define SCHED_INTERRUPTING  1  /* before threads are deleted */
#define SCHED_SHUTTING_DOWN 2  /* final shutdown */

extern volatile StgWord sched_state;
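
/* Shutdown moves monotonically through the states above.  A hedged sketch
 * of the kind of check the scheduler loop makes (the real logic is in
 * Schedule.c; see Note [shutdown]):
 *
 *   if (sched_state >= SCHED_INTERRUPTING) {
 *       // stop taking on new work; existing threads are being torn down
 *   }
 *   if (sched_state == SCHED_SHUTTING_DOWN) {
 *       // only the shutdown path should still be driving the scheduler
 *   }
 */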

/*
 * flag that tracks whether we have done any execution in this time
 * slice, and controls the disabling of the interval timer.
 *
 * The timer interrupt transitions ACTIVITY_YES into
 * ACTIVITY_MAYBE_NO, waits for RtsFlags.GcFlags.idleGCDelayTime,
 * and then:
 *   - if idle GC is on,  set ACTIVITY_INACTIVE and wakeUpRts()
 *   - if idle GC is off, set ACTIVITY_DONE_GC and stopTimer()
 *
 * If the scheduler finds ACTIVITY_INACTIVE, then it sets
 * ACTIVITY_DONE_GC, performs the GC and calls stopTimer().
 *
 * If the scheduler finds ACTIVITY_DONE_GC and it has a thread to run,
 * it enables the timer again with startTimer().
 */
#define ACTIVITY_YES      0
  // the RTS is active
#define ACTIVITY_MAYBE_NO 1
  // no activity since the last timer signal
#define ACTIVITY_INACTIVE 2
  // RtsFlags.GcFlags.idleGCDelayTime has passed with no activity
#define ACTIVITY_DONE_GC  3
  // like ACTIVITY_INACTIVE, but we've done a GC too (if idle GC is
  // enabled) and the interval timer is now turned off.

/* Recent activity flag.
 * Locks required  : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic.  Transition from
 * INACTIVE to DONE_GC happens under sched_mutex.  No lock required
 * to set it to ACTIVITY_YES.
 */
extern volatile StgWord recent_activity;
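
/* A condensed sketch of the state machine described above, roughly as the
 * timer handler drives it (illustrative; the real logic is split between
 * Timer.c and Schedule.c, and idle_ticks / idle_gc_enabled are stand-ins
 * for the real bookkeeping):
 *
 *   switch (recent_activity) {
 *   case ACTIVITY_YES:
 *       recent_activity = ACTIVITY_MAYBE_NO;   // start counting idle ticks
 *       break;
 *   case ACTIVITY_MAYBE_NO:
 *       if (idle_ticks >= RtsFlags.GcFlags.idleGCDelayTime) {
 *           if (idle_gc_enabled) {
 *               recent_activity = ACTIVITY_INACTIVE;
 *               wakeUpRts();                   // let the scheduler do the idle GC
 *           } else {
 *               recent_activity = ACTIVITY_DONE_GC;
 *               stopTimer();
 *           }
 *       }
 *       break;
 *   }
 */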

/* Thread queues.
 * Locks required  : sched_mutex
 */
#if !defined(THREADED_RTS)
extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
extern StgTSO *sleeping_queue;
#endif

extern bool heap_overflow;

#if defined(THREADED_RTS)
extern Mutex sched_mutex;
#endif

/* Called by shutdown_handler(). */
void interruptStgRts (void);

void resurrectThreads (StgTSO *);

/* -----------------------------------------------------------------------------
 * Some convenient macros/inline functions...
 */

#if !IN_STG_CODE

/* END_TSO_QUEUE and friends now defined in includes/stg/MiscClosures.h */

/* Add a thread to the end of the run queue.
 * NOTE: tso->_link should be END_TSO_QUEUE before calling this function.
 * ASSUMES: cap->running_task is the current task.
 */
EXTERN_INLINE void
appendToRunQueue (Capability *cap, StgTSO *tso);

EXTERN_INLINE void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_hd = tso;
        tso->block_info.prev = END_TSO_QUEUE;
    } else {
        setTSOLink(cap, cap->run_queue_tl, tso);
        setTSOPrev(cap, tso, cap->run_queue_tl);
    }
    cap->run_queue_tl = tso;
    cap->n_run_queue++;
}

/* Push a thread on the beginning of the run queue.
 * ASSUMES: cap->running_task is the current task.
 */
EXTERN_INLINE void
pushOnRunQueue (Capability *cap, StgTSO *tso);

EXTERN_INLINE void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
    setTSOLink(cap, tso, cap->run_queue_hd);
    tso->block_info.prev = END_TSO_QUEUE;
    if (cap->run_queue_hd != END_TSO_QUEUE) {
        setTSOPrev(cap, cap->run_queue_hd, tso);
    }
    cap->run_queue_hd = tso;
    if (cap->run_queue_tl == END_TSO_QUEUE) {
        cap->run_queue_tl = tso;
    }
    cap->n_run_queue++;
}

/* Pop the first thread off the runnable queue.
 */
INLINE_HEADER StgTSO *
popRunQueue (Capability *cap)
{
    StgTSO *t = cap->run_queue_hd;
    ASSERT(t != END_TSO_QUEUE);
    cap->run_queue_hd = t->_link;
    if (t->_link != END_TSO_QUEUE) {
        t->_link->block_info.prev = END_TSO_QUEUE;
    }
    t->_link = END_TSO_QUEUE; // no write barrier req'd
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_tl = END_TSO_QUEUE;
    }
    cap->n_run_queue--;
    return t;
}

INLINE_HEADER StgTSO *
peekRunQueue (Capability *cap)
{
    return cap->run_queue_hd;
}
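
/* Together these maintain a doubly-linked run queue: _link points towards
 * the tail, block_info.prev towards the head, and n_run_queue tracks the
 * length.  A hedged usage sketch (cap must be owned by the current task):
 *
 *   appendToRunQueue(cap, tso);            // schedule last
 *   pushOnRunQueue(cap, urgent_tso);       // schedule first
 *   if (!emptyRunQueue(cap)) {
 *       StgTSO *next = popRunQueue(cap);   // next == urgent_tso here
 *       ...
 *   }
 */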

void promoteInRunQueue (Capability *cap, StgTSO *tso);

/* Add a thread to the end of the blocked queue.
 */
#if !defined(THREADED_RTS)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (blocked_queue_hd == END_TSO_QUEUE) {
        blocked_queue_hd = tso;
    } else {
        setTSOLink(&MainCapability, blocked_queue_tl, tso);
    }
    blocked_queue_tl = tso;
}
#endif

/* Check whether various thread queues are empty
 */
INLINE_HEADER bool
emptyQueue (StgTSO *q)
{
    return (q == END_TSO_QUEUE);
}

INLINE_HEADER bool
emptyRunQueue(Capability *cap)
{
    return cap->n_run_queue == 0;
}

INLINE_HEADER void
truncateRunQueue(Capability *cap)
{
    cap->run_queue_hd = END_TSO_QUEUE;
    cap->run_queue_tl = END_TSO_QUEUE;
    cap->n_run_queue = 0;
}

#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif

INLINE_HEADER bool
emptyThreadQueues(Capability *cap)
{
    return emptyRunQueue(cap)
#if !defined(THREADED_RTS)
        && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
#endif
    ;
}
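
/* Hedged sketch of the kind of idleness check the scheduler makes with the
 * predicate above (the real idle/deadlock detection in Schedule.c consults
 * more state than just the thread queues):
 *
 *   if (emptyThreadQueues(cap)) {
 *       // nothing runnable here: consider GC, deadlock detection,
 *       // or putting the capability to sleep
 *   }
 */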

#endif /* !IN_STG_CODE */

#include "EndPrivate.h"

#endif /* SCHEDULE_H */