/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2005
 *
 * Prototypes for functions in Schedule.c
 * (RTS internal scheduler interface)
 *
 * -------------------------------------------------------------------------*/

#ifndef SCHEDULE_H
#define SCHEDULE_H

#include "rts/OSThreads.h"
#include "Capability.h"
#include "Trace.h"

#include "BeginPrivate.h"

/* initScheduler(), exitScheduler()
 * Called from STG  : no
 * Locks assumed    : none
 */
void initScheduler (void);
void exitScheduler (rtsBool wait_foreign);
void freeScheduler (void);
void markScheduler (evac_fn evac, void *user);

// Place a new thread on the run queue of the current Capability
void scheduleThread (Capability *cap, StgTSO *tso);

// Place a new thread on the run queue of a specified Capability
// (cap is the currently owned Capability, cpu is the number of
// the desired Capability).
void scheduleThreadOn(Capability *cap, StgWord cpu, StgTSO *tso);

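/* Illustrative usage sketch only (not part of this interface): a caller
 * typically creates a TSO and then makes it runnable on a Capability.
 * createThread() and RtsFlags.GcFlags.initialStkSize are assumed here as
 * the usual RTS entry points; see RtsAPI.c / Threads.c for the real call
 * sites:
 *
 *   StgTSO *tso = createThread(cap, RtsFlags.GcFlags.initialStkSize);
 *   scheduleThread(cap, tso);        // run on the current Capability
 *   scheduleThreadOn(cap, 1, tso);   // or pin it to Capability 1 instead
 */
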
/* wakeUpRts()
 *
 * Causes an OS thread to wake up and run the scheduler, if necessary.
 */
#if defined(THREADED_RTS)
void wakeUpRts(void);
#endif

/* raiseExceptionHelper */
StgWord raiseExceptionHelper (StgRegTable *reg, StgTSO *tso, StgClosure *exception);

/* findRetryFrameHelper */
StgWord findRetryFrameHelper (Capability *cap, StgTSO *tso);

/* Entry point for a new worker */
void scheduleWorker (Capability *cap, Task *task);

/* The state of the scheduler.  This is used to control the sequence
 * of events during shutdown, and when the runtime is interrupted
 * using ^C.
 */
#define SCHED_RUNNING       0  /* running as normal */
#define SCHED_INTERRUPTING  1  /* ^C detected, before threads are deleted */
#define SCHED_SHUTTING_DOWN 2  /* final shutdown */

extern volatile StgWord sched_state;

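/* Illustrative sketch only (the real logic is in the scheduler loop in
 * Schedule.c): code that polls sched_state looks roughly like
 *
 *   if (sched_state >= SCHED_INTERRUPTING) {
 *       // stop running Haskell threads and start tearing them down
 *   }
 *   if (sched_state == SCHED_SHUTTING_DOWN) {
 *       // final shutdown: release the Capability and leave the loop
 *   }
 *
 * The state only ever advances: RUNNING -> INTERRUPTING -> SHUTTING_DOWN.
 */
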
/*
 * Flag that tracks whether we have done any execution in this time slice.
 */
#define ACTIVITY_YES       0  /* there has been activity in the current slice */
#define ACTIVITY_MAYBE_NO  1  /* no activity in the current slice */
#define ACTIVITY_INACTIVE  2  /* a complete slice has passed with no activity */
#define ACTIVITY_DONE_GC   3  /* like 2, but we've done a GC too */

/* Recent activity flag.
 * Locks required  : the transition from MAYBE_NO to INACTIVE happens in
 * the timer signal, so it is atomic.  The transition from INACTIVE to
 * DONE_GC happens under sched_mutex.  No lock is required to set it to
 * ACTIVITY_YES.
 */
extern volatile StgWord recent_activity;

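/* Illustrative sketch only: the timer handler (Timer.c) and the scheduler
 * drive recent_activity roughly as follows (simplified; the real code also
 * counts idle ticks before declaring the system inactive):
 *
 *   // on each timer tick:
 *   if (recent_activity == ACTIVITY_YES)            // work was done recently
 *       recent_activity = ACTIVITY_MAYBE_NO;        // start watching a new slice
 *   else if (recent_activity == ACTIVITY_MAYBE_NO)  // a whole slice with no work
 *       recent_activity = ACTIVITY_INACTIVE;        // scheduler may do an idle GC
 *
 *   // whenever a thread actually runs:
 *   recent_activity = ACTIVITY_YES;
 */
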
/* Thread queues.
 * Locks required : sched_mutex
 *
 * In GranSim we have one run/blocked_queue per PE.
 */
extern StgTSO *blackhole_queue;
#if !defined(THREADED_RTS)
extern StgTSO *blocked_queue_hd, *blocked_queue_tl;
extern StgTSO *sleeping_queue;
#endif

extern rtsBool heap_overflow;

#if defined(THREADED_RTS)
extern Mutex sched_mutex;
#endif

/* Called by shutdown_handler(). */
void interruptStgRts (void);

void resurrectThreads (StgTSO *);

/* -----------------------------------------------------------------------------
 * Some convenient macros/inline functions...
 */

#if !IN_STG_CODE

/* END_TSO_QUEUE and friends now defined in includes/StgMiscClosures.h */

/* Add a thread to the end of the run queue.
 * NOTE: tso->link should be END_TSO_QUEUE before calling this macro.
 * ASSUMES: cap->running_task is the current task.
 */
EXTERN_INLINE void
appendToRunQueue (Capability *cap, StgTSO *tso);

EXTERN_INLINE void
appendToRunQueue (Capability *cap, StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_hd = tso;
        tso->block_info.prev = END_TSO_QUEUE;
    } else {
        setTSOLink(cap, cap->run_queue_tl, tso);
        setTSOPrev(cap, tso, cap->run_queue_tl);
    }
    cap->run_queue_tl = tso;
}

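/* The run queue is doubly linked: tso->_link is the forward pointer and
 * tso->block_info.prev the backward pointer, which lets removeFromRunQueue
 * (declared below) unlink a thread from the middle of the queue without
 * searching from the head.
 */
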
/* Push a thread on the beginning of the run queue.
 * ASSUMES: cap->running_task is the current task.
 */
EXTERN_INLINE void
pushOnRunQueue (Capability *cap, StgTSO *tso);

EXTERN_INLINE void
pushOnRunQueue (Capability *cap, StgTSO *tso)
{
    setTSOLink(cap, tso, cap->run_queue_hd);
    tso->block_info.prev = END_TSO_QUEUE;
    if (cap->run_queue_hd != END_TSO_QUEUE) {
        setTSOPrev(cap, cap->run_queue_hd, tso);
    }
    cap->run_queue_hd = tso;
    if (cap->run_queue_tl == END_TSO_QUEUE) {
        cap->run_queue_tl = tso;
    }
}

/* Pop the first thread off the runnable queue.
 */
INLINE_HEADER StgTSO *
popRunQueue (Capability *cap)
{
    StgTSO *t = cap->run_queue_hd;
    ASSERT(t != END_TSO_QUEUE);
    cap->run_queue_hd = t->_link;
    if (t->_link != END_TSO_QUEUE) {
        t->_link->block_info.prev = END_TSO_QUEUE;
    }
    t->_link = END_TSO_QUEUE; // no write barrier req'd
    if (cap->run_queue_hd == END_TSO_QUEUE) {
        cap->run_queue_tl = END_TSO_QUEUE;
    }
    return t;
}

extern void removeFromRunQueue (Capability *cap, StgTSO *tso);

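/* Illustrative sketch only: the main scheduler loop in Schedule.c consumes
 * the run queue roughly like
 *
 *   while (!emptyRunQueue(cap)) {
 *       StgTSO *t = popRunQueue(cap);
 *       // ... run t; if it merely yielded, put it back with
 *       // appendToRunQueue(cap, t), otherwise it stays blocked or finished
 *   }
 */
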
/* Add a thread to the end of the blocked queue.
 */
#if !defined(THREADED_RTS)
INLINE_HEADER void
appendToBlockedQueue(StgTSO *tso)
{
    ASSERT(tso->_link == END_TSO_QUEUE);
    if (blocked_queue_hd == END_TSO_QUEUE) {
        blocked_queue_hd = tso;
    } else {
        setTSOLink(&MainCapability, blocked_queue_tl, tso);
    }
    blocked_queue_tl = tso;
}
#endif

/* Check whether various thread queues are empty
 */
INLINE_HEADER rtsBool
emptyQueue (StgTSO *q)
{
    return (q == END_TSO_QUEUE);
}

INLINE_HEADER rtsBool
emptyRunQueue(Capability *cap)
{
    return emptyQueue(cap->run_queue_hd);
}

#if !defined(THREADED_RTS)
#define EMPTY_BLOCKED_QUEUE()  (emptyQueue(blocked_queue_hd))
#define EMPTY_SLEEPING_QUEUE() (emptyQueue(sleeping_queue))
#endif

INLINE_HEADER rtsBool
emptyThreadQueues(Capability *cap)
{
    return emptyRunQueue(cap)
#if !defined(THREADED_RTS)
        && EMPTY_BLOCKED_QUEUE() && EMPTY_SLEEPING_QUEUE()
#endif
    ;
}

#endif /* !IN_STG_CODE */

#include "EndPrivate.h"

#endif /* SCHEDULE_H */