SPARC: Fix ffi019 split load/store of HsInt64 into two parts to respect alignment...
[ghc.git] / rts / RtsAPI.c
/* ----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2001
 *
 * API for invoking Haskell functions via the RTS
 *
 * --------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "OSThreads.h"
#include "RtsAPI.h"
#include "SchedAPI.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "Prelude.h"
#include "Schedule.h"
#include "Capability.h"
#include "Stable.h"

#include <stdlib.h>

/* ----------------------------------------------------------------------------
   Building Haskell objects from C datatypes.

   TODO: This code currently does not tag the pointers it creates.
   That is not unsafe (the constructor code will tag them), just
   inefficient.
   ------------------------------------------------------------------------- */
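
/* For illustration, a hedged sketch of building the Haskell value
   (42 :: Int) from C.  `cap` must be a Capability obtained via rts_lock()
   (defined at the bottom of this file):

       HaskellObj forty_two = rts_mkInt(cap, 42);

   Each rts_mk* function heap-allocates a fully applied constructor
   closure, so the Capability must be held across the call. */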
HaskellObj
rts_mkChar (Capability *cap, HsChar c)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap, CONSTR_sizeW(0,1));
    SET_HDR(p, Czh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)(StgChar)c;
    return p;
}

HaskellObj
rts_mkInt (Capability *cap, HsInt i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, Izh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgInt)i;
    return p;
}

HaskellObj
rts_mkInt8 (Capability *cap, HsInt8 i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, I8zh_con_info, CCS_SYSTEM);
    /* Make sure we mask out the bits above the lowest 8 */
    p->payload[0] = (StgClosure *)(StgInt)((unsigned int)i & 0xff);
    return p;
}

HaskellObj
rts_mkInt16 (Capability *cap, HsInt16 i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, I16zh_con_info, CCS_SYSTEM);
    /* Make sure we mask out the bits above the lowest 16 */
    p->payload[0] = (StgClosure *)(StgInt)((unsigned int)i & 0xffff);
    return p;
}

HaskellObj
rts_mkInt32 (Capability *cap, HsInt32 i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, I32zh_con_info, CCS_SYSTEM);
    /* Make sure we mask out the bits above the lowest 32 */
    p->payload[0] = (StgClosure *)(StgInt)((unsigned int)i & 0xffffffffu);
    return p;
}


#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers.  SPARC v9 can't do
   misaligned loads/stores, so we have to write the 64-bit word in two
   32-bit chunks. */

HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i_)
{
    StgInt64 i = (StgInt64)i_;
    StgInt32 *tmp;

    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);

    tmp = (StgInt32*)&(p->payload[0]);

    tmp[0] = (StgInt32)(i >> 32);   /* high 32 bits */
    tmp[1] = (StgInt32)i;           /* truncate to the low 32 bits */

    return p;
}

#else

HaskellObj
rts_mkInt64 (Capability *cap, HsInt64 i)
{
    llong *tmp;
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
    SET_HDR(p, I64zh_con_info, CCS_SYSTEM);
    tmp = (llong*)&(p->payload[0]);
    *tmp = (StgInt64)i;
    return p;
}

#endif /* sparc_HOST_ARCH */


HaskellObj
rts_mkWord (Capability *cap, HsWord i)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, Wzh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)i;
    return p;
}

HaskellObj
rts_mkWord8 (Capability *cap, HsWord8 w)
{
    /* see rts_mkInt* comments */
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, W8zh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)(w & 0xff);
    return p;
}

HaskellObj
rts_mkWord16 (Capability *cap, HsWord16 w)
{
    /* see rts_mkInt* comments */
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, W16zh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)(w & 0xffff);
    return p;
}

HaskellObj
rts_mkWord32 (Capability *cap, HsWord32 w)
{
    /* see rts_mkInt* comments */
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, W32zh_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)(StgWord)(w & 0xffffffff);
    return p;
}


#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers.  SPARC v9 can't do
   misaligned loads/stores, so we have to write the 64-bit word in two
   32-bit chunks. */

HaskellObj
rts_mkWord64 (Capability *cap, HsWord64 w_)
{
    StgWord64 w = (StgWord64)w_;
    StgWord32 *tmp;

    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
    /* see rts_mkInt8 comment */
    SET_HDR(p, W64zh_con_info, CCS_SYSTEM);

    tmp = (StgWord32*)&(p->payload[0]);

    tmp[0] = (StgWord32)(w >> 32);  /* high 32 bits */
    tmp[1] = (StgWord32)w;          /* truncate to the low 32 bits */
    return p;
}

#else

HaskellObj
rts_mkWord64 (Capability *cap, HsWord64 w)
{
    ullong *tmp;

    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,2));
    /* see rts_mkInt8 comment */
    SET_HDR(p, W64zh_con_info, CCS_SYSTEM);
    tmp = (ullong*)&(p->payload[0]);
    *tmp = (StgWord64)w;
    return p;
}

#endif


HaskellObj
rts_mkFloat (Capability *cap, HsFloat f)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,1));
    SET_HDR(p, Fzh_con_info, CCS_SYSTEM);
    ASSIGN_FLT((P_)p->payload, (StgFloat)f);
    return p;
}

HaskellObj
rts_mkDouble (Capability *cap, HsDouble d)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,CONSTR_sizeW(0,sizeofW(StgDouble)));
    SET_HDR(p, Dzh_con_info, CCS_SYSTEM);
    ASSIGN_DBL((P_)p->payload, (StgDouble)d);
    return p;
}

HaskellObj
rts_mkStablePtr (Capability *cap, HsStablePtr s)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
    SET_HDR(p, StablePtr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)s;
    return p;
}

HaskellObj
rts_mkPtr (Capability *cap, HsPtr a)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
    SET_HDR(p, Ptr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)a;
    return p;
}

HaskellObj
rts_mkFunPtr (Capability *cap, HsFunPtr a)
{
    StgClosure *p = (StgClosure *)allocateLocal(cap,sizeofW(StgHeader)+1);
    SET_HDR(p, FunPtr_con_info, CCS_SYSTEM);
    p->payload[0] = (StgClosure *)a;
    return p;
}

HaskellObj
rts_mkBool (Capability *cap STG_UNUSED, HsBool b)
{
    if (b) {
        return (StgClosure *)True_closure;
    } else {
        return (StgClosure *)False_closure;
    }
}

HaskellObj
rts_mkString (Capability *cap, char *s)
{
    return rts_apply(cap, (StgClosure *)unpackCString_closure, rts_mkPtr(cap,s));
}

HaskellObj
rts_apply (Capability *cap, HaskellObj f, HaskellObj arg)
{
    StgThunk *ap;

    ap = (StgThunk *)allocateLocal(cap,sizeofW(StgThunk) + 2);
    SET_HDR(ap, (StgInfoTable *)&stg_ap_2_upd_info, CCS_SYSTEM);
    ap->payload[0] = f;
    ap->payload[1] = arg;
    return (StgClosure *)ap;
}
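
/* rts_apply builds an unevaluated application thunk, so applications of
   more than one argument are built by nesting, one argument at a time.
   A sketch, with `f_closure` standing in for some two-argument function:

       HaskellObj app = rts_apply(cap, rts_apply(cap, f_closure, x), y);

   Nothing is evaluated until the result is passed to one of the
   rts_eval* functions below. */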

/* ----------------------------------------------------------------------------
   Deconstructing Haskell objects

   We would like to assert that we have the right kind of object in
   each case, but this is problematic because in GHCi the info table
   for the D# constructor (say) might be dynamically loaded.  Hence we
   omit these assertions for now.
   ------------------------------------------------------------------------- */
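
/* A hedged sketch of the intended use: apply an rts_get* accessor only to
   a value already evaluated to WHNF by one of the rts_eval* functions, and
   only the accessor matching the value's constructor.  For example, with a
   hypothetical `f_closure` of type Int -> Int:

       HaskellObj ret;
       cap = rts_eval(cap, rts_apply(cap, f_closure, rts_mkInt(cap, 1)), &ret);
       rts_checkSchedStatus("f", cap);
       HsInt n = rts_getInt(ret);
*/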

HsChar
rts_getChar (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Czh_con_info ||
    //        p->header.info == Czh_static_info);
    return (StgChar)(StgWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt
rts_getInt (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Izh_con_info ||
    //        p->header.info == Izh_static_info);
    return (HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt8
rts_getInt8 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I8zh_con_info ||
    //        p->header.info == I8zh_static_info);
    return (HsInt8)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt16
rts_getInt16 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I16zh_con_info ||
    //        p->header.info == I16zh_static_info);
    return (HsInt16)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}

HsInt32
rts_getInt32 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == I32zh_con_info ||
    //        p->header.info == I32zh_static_info);
    return (HsInt32)(HsInt)(UNTAG_CLOSURE(p)->payload[0]);
}


#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers.  SPARC v9 can't do
   misaligned loads/stores, so we have to read the 64-bit word in two
   32-bit chunks. */

HsInt64
rts_getInt64 (HaskellObj p)
{
    HsInt32* tmp;
    // See comment above:
    // ASSERT(p->header.info == I64zh_con_info ||
    //        p->header.info == I64zh_static_info);
    tmp = (HsInt32*)&(UNTAG_CLOSURE(p)->payload[0]);

    /* NB. the low word must be treated as unsigned; otherwise it would be
       sign-extended and corrupt the high word when the two are or'd. */
    HsInt64 i = ((HsInt64)tmp[0] << 32) | (HsInt64)(HsWord32)tmp[1];
    return i;
}

#else

HsInt64
rts_getInt64 (HaskellObj p)
{
    HsInt64* tmp;
    // See comment above:
    // ASSERT(p->header.info == I64zh_con_info ||
    //        p->header.info == I64zh_static_info);
    tmp = (HsInt64*)&(UNTAG_CLOSURE(p)->payload[0]);
    return *tmp;
}

#endif /* sparc_HOST_ARCH */


HsWord
rts_getWord (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Wzh_con_info ||
    //        p->header.info == Wzh_static_info);
    return (HsWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsWord8
rts_getWord8 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W8zh_con_info ||
    //        p->header.info == W8zh_static_info);
    return (HsWord8)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsWord16
rts_getWord16 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W16zh_con_info ||
    //        p->header.info == W16zh_static_info);
    return (HsWord16)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
}

HsWord32
rts_getWord32 (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == W32zh_con_info ||
    //        p->header.info == W32zh_static_info);
    return (HsWord32)(HsWord)(UNTAG_CLOSURE(p)->payload[0]);
}


#ifdef sparc_HOST_ARCH
/* The closures returned by allocateLocal are only guaranteed to be 32 bit
   aligned, because that's the size of pointers.  SPARC v9 can't do
   misaligned loads/stores, so we have to read the 64-bit word in two
   32-bit chunks. */

HsWord64
rts_getWord64 (HaskellObj p)
{
    HsWord32* tmp;
    // See comment above:
    // ASSERT(p->header.info == W64zh_con_info ||
    //        p->header.info == W64zh_static_info);
    tmp = (HsWord32*)&(UNTAG_CLOSURE(p)->payload[0]);

    /* tmp has an unsigned type so neither half is sign-extended */
    HsWord64 w = ((HsWord64)tmp[0] << 32) | (HsWord64)tmp[1];
    return w;
}

#else

HsWord64
rts_getWord64 (HaskellObj p)
{
    HsWord64* tmp;
    // See comment above:
    // ASSERT(p->header.info == W64zh_con_info ||
    //        p->header.info == W64zh_static_info);
    tmp = (HsWord64*)&(UNTAG_CLOSURE(p)->payload[0]);
    return *tmp;
}

#endif


HsFloat
rts_getFloat (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Fzh_con_info ||
    //        p->header.info == Fzh_static_info);
    return (float)(PK_FLT((P_)UNTAG_CLOSURE(p)->payload));
}

HsDouble
rts_getDouble (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Dzh_con_info ||
    //        p->header.info == Dzh_static_info);
    return (double)(PK_DBL((P_)UNTAG_CLOSURE(p)->payload));
}

HsStablePtr
rts_getStablePtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == StablePtr_con_info ||
    //        p->header.info == StablePtr_static_info);
    return (StgStablePtr)(UNTAG_CLOSURE(p)->payload[0]);
}

HsPtr
rts_getPtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == Ptr_con_info ||
    //        p->header.info == Ptr_static_info);
    return (HsPtr)(UNTAG_CLOSURE(p)->payload[0]);
}

HsFunPtr
rts_getFunPtr (HaskellObj p)
{
    // See comment above:
    // ASSERT(p->header.info == FunPtr_con_info ||
    //        p->header.info == FunPtr_static_info);
    return (HsFunPtr)(UNTAG_CLOSURE(p)->payload[0]);
}

HsBool
rts_getBool (HaskellObj p)
{
    StgInfoTable *info;

    info = get_itbl((StgClosure *)UNTAG_CLOSURE(p));
    if (info->srt_bitmap == 0) { // srt_bitmap is the constructor tag
        return 0;
    } else {
        return 1;
    }
}

/* -----------------------------------------------------------------------------
   Creating threads
   -------------------------------------------------------------------------- */

INLINE_HEADER void pushClosure (StgTSO *tso, StgWord c) {
    tso->sp--;
    tso->sp[0] = (W_) c;
}

StgTSO *
createGenThread (Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
#if defined(GRAN)
    t = createThread (cap, stack_size, NO_PRI);
#else
    t = createThread (cap, stack_size);
#endif
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}

StgTSO *
createIOThread (Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
#if defined(GRAN)
    t = createThread (cap, stack_size, NO_PRI);
#else
    t = createThread (cap, stack_size);
#endif
    pushClosure(t, (W_)&stg_noforceIO_info);
    pushClosure(t, (W_)&stg_ap_v_info);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}
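
/* For reference, a rough sketch of the stack a thread built by
 * createIOThread starts with (the stack grows downwards, so the frames
 * are entered from the top):
 *
 *     stg_enter_info      -- enter the closure below
 *     closure             -- the (IO a) action itself
 *     stg_ap_v_info       -- apply it to a void (RealWorld) argument
 *     stg_noforceIO_info  -- return the result without forcing it
 */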

/*
 * Same as above, but also evaluate the result of the IO action
 * to WHNF while we're at it.
 */

StgTSO *
createStrictIOThread(Capability *cap, nat stack_size, StgClosure *closure)
{
    StgTSO *t;
#if defined(GRAN)
    t = createThread(cap, stack_size, NO_PRI);
#else
    t = createThread(cap, stack_size);
#endif
    pushClosure(t, (W_)&stg_forceIO_info);
    pushClosure(t, (W_)&stg_ap_v_info);
    pushClosure(t, (W_)closure);
    pushClosure(t, (W_)&stg_enter_info);
    return t;
}

/* ----------------------------------------------------------------------------
   Evaluating Haskell expressions
   ------------------------------------------------------------------------- */

Capability *
rts_eval (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
{
    StgTSO *tso;

    tso = createGenThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    return scheduleWaitThread(tso,ret,cap);
}

Capability *
rts_eval_ (Capability *cap, HaskellObj p, unsigned int stack_size,
           /*out*/HaskellObj *ret)
{
    StgTSO *tso;

    tso = createGenThread(cap, stack_size, p);
    return scheduleWaitThread(tso,ret,cap);
}

/*
 * rts_evalIO() evaluates a value of the form (IO a), forcing the action's
 * result to WHNF before returning.
 */
Capability *
rts_evalIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
{
    StgTSO* tso;

    tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    return scheduleWaitThread(tso,ret,cap);
}
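
/* An end-to-end sketch of the locking protocol around these entry points
   (`action_closure` is a hypothetical value of type IO Int):

       Capability *cap = rts_lock();
       HaskellObj r;
       cap = rts_evalIO(cap, action_closure, &r);
       rts_checkSchedStatus("action", cap);
       HsInt n = rts_getInt(r);
       rts_unlock(cap);

   Note that rts_evalIO() may hand back a different Capability than it was
   given; the returned one must be used from then on (see rts_unlock()
   below). */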

/*
 * rts_evalStableIO() is suitable for calling from Haskell.  It
 * evaluates a value of the form (StablePtr (IO a)), forcing the
 * action's result to WHNF before returning.  The result is returned
 * in a StablePtr.
 */
Capability *
rts_evalStableIO (Capability *cap, HsStablePtr s, /*out*/HsStablePtr *ret)
{
    StgTSO* tso;
    StgClosure *p, *r;
    SchedulerStatus stat;

    p = (StgClosure *)deRefStablePtr(s);
    tso = createStrictIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    // async exceptions are always blocked by default in the created
    // thread.  See #1048.
    tso->flags |= TSO_BLOCKEX | TSO_INTERRUPTIBLE;
    cap = scheduleWaitThread(tso,&r,cap);
    stat = rts_getSchedStatus(cap);

    if (stat == Success && ret != NULL) {
        ASSERT(r != NULL);
        *ret = getStablePtr((StgPtr)r);
    }

    return cap;
}
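
/* A hedged sketch of calling rts_evalStableIO() with a stable pointer made
   on the Haskell side (say via Foreign.StablePtr.newStablePtr on a value of
   type IO a; `action_sptr` here is hypothetical):

       HsStablePtr res;
       cap = rts_evalStableIO(cap, action_sptr, &res);
       if (rts_getSchedStatus(cap) == Success) {
           // res is a StablePtr to the action's result
       }
*/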

/*
 * Like rts_evalIO(), but doesn't force the action's result.
 */
Capability *
rts_evalLazyIO (Capability *cap, HaskellObj p, /*out*/HaskellObj *ret)
{
    StgTSO *tso;

    tso = createIOThread(cap, RtsFlags.GcFlags.initialStkSize, p);
    return scheduleWaitThread(tso,ret,cap);
}

Capability *
rts_evalLazyIO_ (Capability *cap, HaskellObj p, unsigned int stack_size,
                 /*out*/HaskellObj *ret)
{
    StgTSO *tso;

    tso = createIOThread(cap, stack_size, p);
    return scheduleWaitThread(tso,ret,cap);
}

/* Convenience function for decoding the returned status. */

void
rts_checkSchedStatus (char* site, Capability *cap)
{
    SchedulerStatus rc = cap->running_task->stat;
    switch (rc) {
    case Success:
        return;
    case Killed:
        errorBelch("%s: uncaught exception", site);
        stg_exit(EXIT_FAILURE);
    case Interrupted:
        errorBelch("%s: interrupted", site);
        stg_exit(EXIT_FAILURE);
    default:
        errorBelch("%s: Return code (%d) not ok", site, rc);
        stg_exit(EXIT_FAILURE);
    }
}

SchedulerStatus
rts_getSchedStatus (Capability *cap)
{
    return cap->running_task->stat;
}

Capability *
rts_lock (void)
{
    Capability *cap;
    Task *task;

    // ToDo: get rid of this lock in the common case.  We could store
    // a free Task in thread-local storage, for example.  That would
    // leave just one lock on the path into the RTS: cap->lock when
    // acquiring the Capability.
    ACQUIRE_LOCK(&sched_mutex);
    task = newBoundTask();
    RELEASE_LOCK(&sched_mutex);

    cap = NULL;
    waitForReturnCapability(&cap, task);
    return cap;
}

// Exiting the RTS: we hold a Capability that is not necessarily the
// same one that was originally returned by rts_lock(), because
// rts_evalIO() etc. may return a new one.  Now that we have
// investigated the return value, we can release the Capability,
// and free the Task (in that order).

void
rts_unlock (Capability *cap)
{
    Task *task;

    task = cap->running_task;
    ASSERT_FULL_CAPABILITY_INVARIANTS(cap,task);

    // Now release the Capability.  With the capability released, GC
    // may happen.  NB. does not try to put the current Task on the
    // worker queue.
    // NB. keep cap->lock held while we call boundTaskExiting().  This
    // is necessary during shutdown, where we want the invariant that
    // after shutdownCapability(), all the Tasks associated with the
    // Capability have completed their shutdown too.  Otherwise we
    // could have boundTaskExiting()/workerTaskStop() running at some
    // random point in the future, which causes problems for
    // freeTaskManager().
    ACQUIRE_LOCK(&cap->lock);
    releaseCapability_(cap,rtsFalse);

    // Finally, we can release the Task to the free list.
    boundTaskExiting(task);
    RELEASE_LOCK(&cap->lock);
}