Unify event logging and debug tracing.
[ghc.git] / rts / Schedule.h
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team 1998-2005
4 *
5 * Prototypes for functions in Schedule.c
6 * (RTS internal scheduler interface)
7 *
8 * -------------------------------------------------------------------------*/
9
10 #ifndef SCHEDULE_H
11 #define SCHEDULE_H
12
13 #include "rts/OSThreads.h"
14 #include "Capability.h"
15 #include "Trace.h"
16
17 #pragma GCC visibility push(hidden)
18
/* initScheduler(), exitScheduler(), freeScheduler()
 * Lifecycle of the scheduler: initialise, shut down, release resources.
 * Called from STG : no
 * Locks assumed : none
 */
void initScheduler (void);
void exitScheduler (rtsBool wait_foreign);
void freeScheduler (void);

// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);
34
/* wakeUpRts()
 *
 * Causes an OS thread to wake up and run the scheduler, if necessary.
 */
#if defined(THREADED_RTS)
void wakeUpRts(void);
#endif

/* raiseExceptionHelper
 * Helper used when raising an exception in a thread; returns a StgWord
 * result to the STG code (see Schedule.c for the exact contract).
 */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper
 * Helper for STM: searches tso's stack for a retry-related frame
 * (implementation in Schedule.c).
 */
StgWord findRetryFrameHelper (StgTSO *tso);

/* workerStart()
 *
 * Entry point for a new worker task.
 * Called from STG : NO
 * Locks assumed : none
 */
#if defined(THREADED_RTS)
void OSThreadProcAttr workerStart(Task *task);
#endif
58
/* The state of the scheduler. This is used to control the sequence
 * of events during shutdown, and when the runtime is interrupted
 * using ^C.
 */
#define SCHED_RUNNING 0 /* running as normal */
#define SCHED_INTERRUPTING 1 /* ^C detected, before threads are deleted */
#define SCHED_SHUTTING_DOWN 2 /* final shutdown */

extern volatile StgWord sched_state;

/*
 * flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES 0 /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO 1 /* no activity in the current slice */
#define ACTIVITY_INACTIVE 2 /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC 3 /* like 2, but we've done a GC too */

/* Recent activity flag.
 * Locks required : Transition from MAYBE_NO to INACTIVE
 * happens in the timer signal, so it is atomic. Transition from
 * INACTIVE to DONE_GC happens under sched_mutex. No lock required
 * to set it to ACTIVITY_YES.
 */
extern volatile StgWord recent_activity;
84
/* Thread queues.
 * Locks required : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
extern StgTSO *blackhole_queue;
#if !defined(THREADED_RTS)
extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
extern StgTSO *sleeping_queue;
#endif

/* Set to rtsTrue if there are threads on the blackhole_queue, and
 * it is possible that one or more of them may be available to run.
 * This flag is set to rtsFalse after we've checked the queue, and
 * set to rtsTrue just before we run some Haskell code. It is used
 * to decide whether we should yield the Capability or not.
 * Locks required : none (see scheduleCheckBlackHoles()).
 */
extern rtsBool blackholes_need_checking;

/* NOTE(review): undocumented here; presumably set when the heap limit
 * is exceeded so the scheduler can initiate shutdown — confirm against
 * its uses in Schedule.c. */
extern rtsBool heap_overflow;

/* Protects the global scheduler state (threaded RTS only). */
#if defined(THREADED_RTS)
extern Mutex sched_mutex;
#endif

/* Called by shutdown_handler(). */
void interruptStgRts (void);

/* Walk a list of threads and bring back any that can still make progress;
 * see Schedule.c for the exact semantics. */
void resurrectThreads (StgTSO *);
void performPendingThrowTos (StgTSO *);
116
117 /* -----------------------------------------------------------------------------
118 * Some convenient macros/inline functions...
119 */
120
121 #if !IN_STG_CODE
122
123 /* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */
124
125 /* Add a thread to the end of the run queue.
126 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
127 * ASSUMES: cap->running_task is the current task.
128 */
129 INLINE_HEADER void
130 appendToRunQueue (Capability *cap, StgTSO *tso)
131 {
132 ASSERT(tso->_link == END_TSO_QUEUE);
133 if (cap->run_queue_hd == END_TSO_QUEUE) {
134 cap->run_queue_hd = tso;
135 } else {
136 setTSOLink(cap, cap->run_queue_tl, tso);
137 }
138 cap->run_queue_tl = tso;
139 traceSchedEvent (cap, EVENT_THREAD_RUNNABLE, tso, 0);
140 }
141
142 /* Push a thread on the beginning of the run queue.
143 * ASSUMES: cap->running_task is the current task.
144 */
145 INLINE_HEADER void
146 pushOnRunQueue (Capability *cap, StgTSO *tso)
147 {
148 setTSOLink(cap, tso, cap->run_queue_hd);
149 cap->run_queue_hd = tso;
150 if (cap->run_queue_tl == END_TSO_QUEUE) {
151 cap->run_queue_tl = tso;
152 }
153 }
154
155 /* Pop the first thread off the runnable queue.
156 */
157 INLINE_HEADER StgTSO *
158 popRunQueue (Capability *cap)
159 {
160 StgTSO *t = cap->run_queue_hd;
161 ASSERT(t != END_TSO_QUEUE);
162 cap->run_queue_hd = t->_link;
163 t->_link = END_TSO_QUEUE; // no write barrier req'd
164 if (cap->run_queue_hd == END_TSO_QUEUE) {
165 cap->run_queue_tl = END_TSO_QUEUE;
166 }
167 return t;
168 }
169
170 /* Add a thread to the end of the blocked queue.
171 */
172 #if !defined(THREADED_RTS)
173 INLINE_HEADER void
174 appendToBlockedQueue(StgTSO *tso)
175 {
176 ASSERT(tso->_link == END_TSO_QUEUE);
177 if (blocked_queue_hd == END_TSO_QUEUE) {
178 blocked_queue_hd = tso;
179 } else {
180 setTSOLink(&MainCapability, blocked_queue_tl, tso);
181 }
182 blocked_queue_tl = tso;
183 }
184 #endif
185
#if defined(THREADED_RTS)
/* Append a thread to the tail of another Capability's wakeup queue.
 * Assumes: my_cap is owned by the current Task.  We hold
 * other_cap->lock, but we do not necessarily own other_cap; another
 * Task may be running on it.
 */
INLINE_HEADER void
appendToWakeupQueue (Capability *my_cap, Capability *other_cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (other_cap->wakeup_queue_hd != END_TSO_QUEUE) {
        // my_cap is passed to setTSOLink() because it may need to
        // write to the mutable list.
        setTSOLink(my_cap, other_cap->wakeup_queue_tl, tso);
    } else {
        other_cap->wakeup_queue_hd = tso;
    }
    other_cap->wakeup_queue_tl = tso;
}
#endif
204
205 /* Check whether various thread queues are empty
206 */
207 INLINE_HEADER rtsBool
208 emptyQueue (StgTSO *q)
209 {
210 return (q == END_TSO_QUEUE);
211 }
212
213 INLINE_HEADER rtsBool
214 emptyRunQueue(Capability *cap)
215 {
216 return emptyQueue(cap->run_queue_hd);
217 }
218
#if defined(THREADED_RTS)
/* True when cap's wakeup queue holds no threads. */
INLINE_HEADER rtsBool
emptyWakeupQueue(Capability *cap)
{
    return cap->wakeup_queue_hd == END_TSO_QUEUE;
}
#endif
226
#if !defined(THREADED_RTS)
/* Emptiness tests for the global blocked/sleeping queues
 * (non-threaded RTS only). */
#define EMPTY_BLOCKED_QUEUE() (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif
231
232 INLINE_HEADER rtsBool
233 emptyThreadQueues(Capability *cap)
234 {
235 return emptyRunQueue(cap)
236 #if !defined(THREADED_RTS)
237 && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
238 #endif
239 ;
240 }
241
242 #endif /* !IN_STG_CODE */
243
244 #pragma GCC visibility pop
245
246 #endif /* SCHEDULE_H */
247