pruneSparkQueue(): fix bug when top>bottom
[ghc.git] / rts / Sparks.c
/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2000-2008
 *
 * Sparking support for PARALLEL_HASKELL and THREADED_RTS versions of the RTS.
 *
 * The implementation uses double-ended queues with lock-free access
 * (hence often called "deques") as described in
 *
 *   D.Chase and Y.Lev, Dynamic Circular Work-Stealing Deque.
 *   SPAA'05, July 2005, Las Vegas, USA.
 *   ACM 1-58113-986-1/05/0007
 *
 * Author: Jost Berthold MSRC 07-09/2008
 *
 * The deque is held as a circular array with known length. Positions
 * of top (read-end) and bottom (write-end) always increase, and the
 * array is accessed with indices modulo array-size. While this bears
 * the risk of overflow, we assume that (with 64-bit indices) a
 * program must run very long to reach that point.
 *
 * The write end of the queue (position bottom) can only be used with
 * mutual exclusion, i.e. by exactly one caller at a time. At this
 * end, new items can be enqueued using pushBottom()/newSpark(), and
 * removed using popBottom()/reclaimSpark() (the latter implying a cas
 * synchronisation with potential concurrent readers for the case of
 * just one element).
 *
 * Multiple readers can steal()/findSpark() from the read end
 * (position top), and are synchronised without a lock, based on a cas
 * of the top position. One reader wins, the others return NULL for a
 * failure.
 *
 * Both popBottom and steal also return NULL when the queue is empty.
 *
 -------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "Storage.h"
#include "Schedule.h"
#include "SchedAPI.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "ParTicky.h"
#include "Trace.h"
#include "Prelude.h"

#include "SMP.h" // for cas

#include "Sparks.h"
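
/* For orientation, the SparkPool fields used throughout this file are
 * roughly the following (the struct itself is defined in Sparks.h):
 *
 *   StgClosurePtr *elements;   // circular array of spark closures
 *   StgWord        top;        // read end, modified by concurrent stealers
 *   StgWord        bottom;     // write end, owner only
 *   StgWord        topBound;   // the owner's cached copy of top
 *   StgWord        size;       // array length, always a power of 2
 *   StgWord        moduloSize; // size - 1, used as a bit mask for indices
 */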

#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)

/* internal helpers ... */

static StgWord
roundUp2(StgWord val)
{
    StgWord rounded = 1;

    /* StgWord is unsigned anyway, only catch 0 */
    if (val == 0) {
        barf("DeQue.roundUp2: invalid size 0 requested");
    }
    /* at least 1 bit set, shift up to its place */
    do {
        rounded = rounded << 1;
    } while (0 != (val = val>>1));
    return rounded;
}
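
/* Note that roundUp2 returns the smallest power of two strictly
   greater than val: roundUp2(3) == 4 and roundUp2(5) == 8, but also
   roundUp2(4) == 8, i.e. an exact power of two is doubled. */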

#define CASTOP(addr,old,new) ((old) == cas(((StgPtr)addr),(old),(new)))
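/* CASTOP evaluates to true iff the compare-and-swap succeeded, i.e. the
   word at addr still contained old and has now been replaced by new. */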

/* -----------------------------------------------------------------------------
 *
 * Initialising spark pools.
 *
 * -------------------------------------------------------------------------- */

/* constructor */
static SparkPool*
initPool(StgWord size)
{
    StgWord realsize;
    SparkPool *q;

    realsize = roundUp2(size); /* to compute modulo as a bitwise & */

    q = (SparkPool*) stgMallocBytes(sizeof(SparkPool), /* admin fields */
                                    "newSparkPool");
    q->elements = (StgClosurePtr*)
        stgMallocBytes(realsize * sizeof(StgClosurePtr), /* dataspace */
                       "newSparkPool:data space");
    q->top=0;
    q->bottom=0;
    q->topBound=0; /* read by writer, updated each time top is read */

    q->size = realsize; /* power of 2 */
    q->moduloSize = realsize - 1; /* n % size == n & moduloSize */

    ASSERT_SPARK_POOL_INVARIANTS(q);
    return q;
}
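
/* For example, initPool(100) allocates room for 128 closure pointers
   (roundUp2(100) == 128) and sets moduloSize to 127. */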

void
initSparkPools( void )
{
#ifdef THREADED_RTS
    /* walk over the capabilities, allocating a spark pool for each one */
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        capabilities[i].sparks = initPool(RtsFlags.ParFlags.maxLocalSparks);
    }
#else
    /* allocate a single spark pool */
    MainCapability->sparks = initPool(RtsFlags.ParFlags.maxLocalSparks);
#endif
}

void
freeSparkPool (SparkPool *pool)
{
    /* should not interfere with concurrent findSpark() calls! And
       nobody should use the pointer any more. We cross our fingers... */
    stgFree(pool->elements);
    stgFree(pool);
}

/* -----------------------------------------------------------------------------
 *
 * reclaimSpark: remove a spark from the write end of the queue.
 * Returns the removed spark, or NULL if the pool is empty or the
 * race for the last spark is lost.
 *
 * If only one spark is left in the pool, we synchronise with
 * concurrently stealing threads by using cas to modify the top field.
 * This routine should NEVER be called by a task which does not own
 * the capability. Can this be checked here?
 *
 * -------------------------------------------------------------------------- */

StgClosure *
reclaimSpark (SparkPool *deque)
{
    /* also a bit tricky, has to avoid concurrent steal() calls by
       accessing top with cas, when there is only one element left */
    StgWord t, b;
    StgClosurePtr* pos;
    long currSize;
    StgClosurePtr removed;

    ASSERT_SPARK_POOL_INVARIANTS(deque);

    b = deque->bottom;
    /* "decrement b as a test, see what happens" */
    deque->bottom = --b;
    pos = (deque->elements) + (b & (deque->moduloSize));
    t = deque->top; /* using topBound would give an *upper* bound, we
                       need a lower bound. We use the real top here, but
                       can update the topBound value */
    deque->topBound = t;
    currSize = (long)(b - t); /* make the signed reading of the
                                 (possibly negative) size explicit */
    if (currSize < 0) { /* was empty before decrementing b, set b
                           consistently and abort */
        deque->bottom = t;
        return NULL;
    }
    removed = *pos;
    if (currSize > 0) { /* no danger, still elements in buffer after b-- */
        return removed;
    }
    /* otherwise, has someone meanwhile stolen the same (last) element?
       Check and increment top value to know */
    if ( !(CASTOP(&(deque->top),t,t+1)) ) {
        removed = NULL; /* no success, but continue adjusting bottom */
    }
    deque->bottom = t+1; /* anyway, empty now. Adjust bottom consistently. */
    deque->topBound = t+1; /* ...and cached top value as well */

    ASSERT_SPARK_POOL_INVARIANTS(deque);

    return removed;
}
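
/* A minimal usage sketch for the owning task (runSpark is a
   hypothetical consumer, not part of the RTS):

       StgClosure *spark = reclaimSpark(cap->sparks);
       if (spark != NULL) {
           runSpark(spark);
       }
       // NULL means the pool was empty, or we lost the race
       // for the last element to a concurrent steal().
*/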

/* -----------------------------------------------------------------------------
 *
 * tryStealSpark: try to steal a spark from a Capability.
 *
 * Returns a spark, or NULL if the pool was empty. It can also
 * occasionally return NULL after losing a race with another thread
 * stealing from the same pool. In this case, try again later.
 *
 -------------------------------------------------------------------------- */

static StgClosurePtr
steal(SparkPool *deque)
{
    StgClosurePtr* pos;
    StgClosurePtr* arraybase;
    StgWord sz;
    StgClosurePtr stolen;
    StgWord b,t;

    ASSERT_SPARK_POOL_INVARIANTS(deque);

    b = deque->bottom;
    t = deque->top;
    /* b and t are unsigned, so compare their difference as a signed
       value: a concurrent reclaimSpark() may transiently leave bottom
       one below top, and that state must read as "empty" here */
    if ((long)(b - t) <= 0 ) {
        return NULL; /* already looks empty, abort */
    }

    /* now access array, see pushBottom() */
    arraybase = deque->elements;
    sz = deque->moduloSize;
    pos = arraybase + (t & sz);
    stolen = *pos;

    /* now decide whether we have won */
    if ( !(CASTOP(&(deque->top),t,t+1)) ) {
        /* lost the race, someone else has changed top in the meantime */
        return NULL;
    } /* else: OK, top has been incremented by the cas call */

    ASSERT_SPARK_POOL_INVARIANTS(deque);
    /* return stolen element */
    return stolen;
}

StgClosure *
tryStealSpark (Capability *cap)
{
    SparkPool *pool = cap->sparks;
    StgClosure *stolen;

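    /* loop until we get a spark still worth evaluating; stolen
       closures that no longer satisfy closure_SHOULD_SPARK (e.g.
       because they have already been evaluated) are dropped */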
    do {
        stolen = steal(pool);
    } while (stolen != NULL && !closure_SHOULD_SPARK(stolen));

    return stolen;
}


/* -----------------------------------------------------------------------------
 *
 * "guesses" whether a deque is empty. Can return false negatives in
 * the presence of concurrent steal() calls, and false positives in
 * the presence of a concurrent pushBottom().
 *
 * -------------------------------------------------------------------------- */

rtsBool
looksEmpty(SparkPool* deque)
{
    StgWord t = deque->top;
    StgWord b = deque->bottom;
    /* try to prefer false negatives by reading top first; compare the
       difference as a signed value (see steal() above) */
    return ((long)(b - t) <= 0);
    /* => array is *never* completely filled, always 1 place free! */
}

/* -----------------------------------------------------------------------------
 *
 * Turn a spark into a real thread
 *
 * -------------------------------------------------------------------------- */

void
createSparkThread (Capability *cap)
{
    StgTSO *tso;

    tso = createIOThread (cap, RtsFlags.GcFlags.initialStkSize,
                          &base_GHCziConc_runSparks_closure);
    appendToRunQueue(cap,tso);
}

/* -----------------------------------------------------------------------------
 *
 * Create a new spark
 *
 * -------------------------------------------------------------------------- */

#define DISCARD_NEW

/* enqueue an element. Should always succeed by resizing the array;
   resizing is not implemented yet, so for now a full pool silently
   drops the element. */
static void
pushBottom (SparkPool* deque, StgClosurePtr elem)
{
    StgWord t;
    StgClosurePtr* pos;
    StgWord sz = deque->moduloSize;
    StgWord b = deque->bottom;

    ASSERT_SPARK_POOL_INVARIANTS(deque);

    /* we try to avoid reading deque->top (accessed by all) and use
       deque->topBound (accessed only by writer) instead.
       This is why we do not just call empty(deque) here.
    */
    t = deque->topBound;
    if ( b - t >= sz ) { /* nota bene: sz == deque->size - 1, thus ">=" */
        /* could be full, check the real top value in this case */
        t = deque->top;
        deque->topBound = t;
        if (b - t >= sz) { /* really no space left :-( */
            /* reallocate the array, copying the values. Concurrent steal()s
               will in the meantime use the old one and modify only top.
               This means: we cannot safely free the old space! Can keep it
               on a free list internally here...

               Potential bug in combination with steal(): if array is
               replaced, it is unclear which one concurrent steal operations
               use. Must read the array base address in advance in steal().
            */
#if defined(DISCARD_NEW)
            ASSERT_SPARK_POOL_INVARIANTS(deque);
            return; /* for now, silently fail */
#else
            /* could make room by incrementing the top position here. In
             * this case, should use CASTOP. If this fails, someone else has
             * removed something, and new room will be available.
             */
            ASSERT_SPARK_POOL_INVARIANTS(deque);
#endif
        }
    }
    pos = (deque->elements) + (b & sz);
    *pos = elem;
    (deque->bottom)++;

    ASSERT_SPARK_POOL_INVARIANTS(deque);
    return;
}
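
/* Note the capacity invariant implied by the ">= sz" test above
   (sz == size - 1): a pool of size 8 holds at most 7 sparks; one
   array slot always stays free. */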

/* --------------------------------------------------------------------------
 * newSpark: create a new spark, as a result of calling "par"
 * Called directly from STG.
 * -------------------------------------------------------------------------- */

StgInt
newSpark (StgRegTable *reg, StgClosure *p)
{
    Capability *cap = regTableToCapability(reg);
    SparkPool *pool = cap->sparks;

    /* I am not sure whether this is the right thing to do.
     * Maybe it is better to exploit the tag information
     * instead of throwing it away?
     */
    p = UNTAG_CLOSURE(p);

    ASSERT_SPARK_POOL_INVARIANTS(pool);

    if (closure_SHOULD_SPARK(p)) {
        pushBottom(pool,p);
    }

    cap->sparks_created++;

    ASSERT_SPARK_POOL_INVARIANTS(pool);
    return 1;
}
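
/* Note that newSpark always reports success (1): a closure that is not
   worth sparking, or that does not fit into a full pool, is dropped
   silently rather than signalled to the caller. */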

/* --------------------------------------------------------------------------
 * Remove all sparks from the spark queues which should not spark any
 * more. Called after GC. We assume exclusive access to the structure
 * and replace all sparks in the queue, see explanation below. At exit,
 * the spark pool only contains sparkable closures.
 * -------------------------------------------------------------------------- */

void
pruneSparkQueue (evac_fn evac, void *user, Capability *cap)
{
    SparkPool *pool;
    StgClosurePtr spark, tmp, *elements;
    nat n, pruned_sparks; // stats only
    StgWord botInd,oldBotInd,currInd; // indices in array (always < size)
    const StgInfoTable *info;

    PAR_TICKY_MARK_SPARK_QUEUE_START();

    n = 0;
    pruned_sparks = 0;

    pool = cap->sparks;

    // it is possible that top > bottom, indicating an empty pool. We
    // fix that here; this is only necessary because the loop below
    // assumes it.
    if (pool->top > pool->bottom)
        pool->top = pool->bottom;

    // Take this opportunity to reset top/bottom modulo the size of
    // the array, to avoid overflow. This is only possible because no
    // stealing is happening during GC.
    pool->bottom -= pool->top & ~pool->moduloSize;
    pool->top &= pool->moduloSize;
    pool->topBound = pool->top;

    debugTrace(DEBUG_sched,
               "pruneSparkQueue: current spark queue len=%d; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);
    ASSERT_SPARK_POOL_INVARIANTS(pool);

    elements = pool->elements;

    /* We have exclusive access to the structure here, so we can reset
       bottom and top counters, and prune invalid sparks. Contents are
       copied in-place if they are valuable, otherwise discarded. The
       routine works on the "real" array indices, i.e. top and bottom
       modulo the array size; in the pictures below these are written
       t and b.

       Copying:

       At the beginning, the pool structure can look like this
       ( bottom % size >= top % size , no wrap-around):
                  t          b
       ___________***********_________________

       or like this ( bottom % size < top % size, wrap-around ):
                  b         t
       ***********__________******************
       As we need to remove useless sparks anyway, we make one pass
       between t and b, moving valuable content to b and subsequent
       cells (wrapping around when the size is reached).

                  b         t
       ***********OOO_______XX_X__X?**********
                     ^____move?____/

       After this movement, botInd becomes the new bottom, and old
       bottom becomes the new top index, both as indices in the array
       size range.
    */
    // starting here
    currInd = (pool->top) & (pool->moduloSize); // mod

    // copies of evacuated closures go to space from botInd on
    // we keep oldBotInd to know when to stop
    oldBotInd = botInd = (pool->bottom) & (pool->moduloSize); // mod

    // on entry to loop, we are within the bounds
    ASSERT( currInd < pool->size && botInd < pool->size );

    while (currInd != oldBotInd ) {
        /* must use != here: indices wrap around at size.
           subtle: loop not entered if queue empty
         */

        /* check element at currInd. if valuable, evacuate and move to
           botInd, otherwise move on */
        spark = elements[currInd];

        // We have to be careful here: in the parallel GC, another
        // thread might evacuate this closure while we're looking at it,
        // so grab the info pointer just once.
        info = spark->header.info;
        if (IS_FORWARDING_PTR(info)) {
            tmp = (StgClosure*)UN_FORWARDING_PTR(info);
            /* if valuable work: shift inside the pool */
            if (closure_SHOULD_SPARK(tmp)) {
                elements[botInd] = tmp; // keep entry (new address)
                botInd++;
                n++;
            } else {
                pruned_sparks++; // discard spark
                cap->sparks_pruned++;
            }
        } else {
            if (!(closure_flags[INFO_PTR_TO_STRUCT(info)->type] & _NS)) {
                elements[botInd] = spark; // keep entry (new address)
                evac (user, &elements[botInd]);
                botInd++;
                n++;
            } else {
                pruned_sparks++; // discard spark
                cap->sparks_pruned++;
            }
        }
        currInd++;

        // in the loop, we may reach the bounds, and instantly wrap around
        ASSERT( currInd <= pool->size && botInd <= pool->size );
        if ( currInd == pool->size ) { currInd = 0; }
        if ( botInd == pool->size ) { botInd = 0; }

    } // while-loop over spark pool elements

    ASSERT(currInd == oldBotInd);

    pool->top = oldBotInd; // where we started writing
    pool->topBound = pool->top;

    pool->bottom = (oldBotInd <= botInd) ? botInd : (botInd + pool->size);
    // first free place we did not use (corrected by wraparound)

    PAR_TICKY_MARK_SPARK_QUEUE_END(n);

    debugTrace(DEBUG_sched, "pruned %d sparks", pruned_sparks);

    debugTrace(DEBUG_sched,
               "new spark queue len=%d; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);

    ASSERT_SPARK_POOL_INVARIANTS(pool);
}
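
/* On exit from pruneSparkQueue, pool->top == oldBotInd and the queue
   holds exactly the n sparks that survived pruning, all of them
   evacuated, i.e. sparkPoolSize(pool) == n. */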

/* GC for the spark pool, called inside Capability.c for all
   capabilities in turn. Blindly "evac"s complete spark pool. */
void
traverseSparkQueue (evac_fn evac, void *user, Capability *cap)
{
    StgClosure **sparkp;
    SparkPool *pool;
    StgWord top,bottom, modMask;

    pool = cap->sparks;

    ASSERT_SPARK_POOL_INVARIANTS(pool);

    top = pool->top;
    bottom = pool->bottom;
    sparkp = pool->elements;
    modMask = pool->moduloSize;

    while (top < bottom) {
        /* call evac for all closures in range (wrap-around via modulo)
         * In GHC-6.10, evac takes an additional 1st argument to hold a
         * GC-specific register, see rts/sm/GC.c::mark_root()
         */
        evac( user , sparkp + (top & modMask) );
        top++;
    }

    debugTrace(DEBUG_sched,
               "traversed spark queue, len=%d; (hd=%ld; tl=%ld)",
               sparkPoolSize(pool), pool->bottom, pool->top);
}

/* ----------------------------------------------------------------------------
 * balanceSparkPoolsCaps: takes an array of capabilities (usually: all
 * capabilities) and its size. Accesses all spark pools and equally
 * distributes the sparks among them.
 *
 * Could be called after GC, before Cap. release, from scheduler.
 * -------------------------------------------------------------------------- */
void balanceSparkPoolsCaps(nat n_caps, Capability caps[]);

void balanceSparkPoolsCaps(nat n_caps STG_UNUSED,
                           Capability caps[] STG_UNUSED) {
    barf("not implemented");
}

#else

StgInt
newSpark (StgRegTable *reg STG_UNUSED, StgClosure *p STG_UNUSED)
{
    /* nothing */
    return 1;
}


#endif /* PARALLEL_HASKELL || THREADED_RTS */


/* -----------------------------------------------------------------------------
 *
 * GRAN & PARALLEL_HASKELL stuff beyond here.
 *
 * TODO "nuke" this!
 *
 * -------------------------------------------------------------------------- */

#if defined(PARALLEL_HASKELL) || defined(GRAN)

static void slide_spark_pool( StgSparkPool *pool );

rtsBool
add_to_spark_queue( StgClosure *closure, StgSparkPool *pool )
{
    if (pool->tl == pool->lim)
        slide_spark_pool(pool);

    if (closure_SHOULD_SPARK(closure) &&
        pool->tl < pool->lim) {
        *(pool->tl++) = closure;

#if defined(PARALLEL_HASKELL)
        // collect parallel global statistics (currently done together with GC stats)
        if (RtsFlags.ParFlags.ParStats.Global &&
            RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
            // debugBelch("Creating spark for %x @ %11.2f\n", closure, usertime());
            globalParStats.tot_sparks_created++;
        }
#endif
        return rtsTrue;
    } else {
#if defined(PARALLEL_HASKELL)
        // collect parallel global statistics (currently done together with GC stats)
        if (RtsFlags.ParFlags.ParStats.Global &&
            RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
            //debugBelch("Ignoring spark for %x @ %11.2f\n", closure, usertime());
            globalParStats.tot_sparks_ignored++;
        }
#endif
        return rtsFalse;
    }
}

static void
slide_spark_pool( StgSparkPool *pool )
{
    StgClosure **sparkp, **to_sparkp;

    sparkp = pool->hd;
    to_sparkp = pool->base;
    while (sparkp < pool->tl) {
        ASSERT(to_sparkp<=sparkp);
        ASSERT(*sparkp!=NULL);
        ASSERT(LOOKS_LIKE_GHC_INFO((*sparkp)->header.info));

        if (closure_SHOULD_SPARK(*sparkp)) {
            *to_sparkp++ = *sparkp++;
        } else {
            sparkp++;
        }
    }
    pool->hd = pool->base;
    pool->tl = to_sparkp;
}

void
disposeSpark(spark)
StgClosure *spark;
{
#if !defined(THREADED_RTS)
    Capability *cap;
    StgSparkPool *pool;

    cap = &MainRegTable;
    pool = &(cap->rSparks);
    ASSERT(pool->hd <= pool->tl && pool->tl <= pool->lim);
#endif
    ASSERT(spark != (StgClosure *)NULL);
    /* Do nothing */
}


#elif defined(GRAN)

/*
   Search the spark queue of the proc in event for a spark that's worth
   turning into a thread
   (was gimme_spark in the old RTS)
*/
void
findLocalSpark (rtsEvent *event, rtsBool *found_res, rtsSparkQ *spark_res)
{
    PEs proc = event->proc,       /* proc to search for work */
        creator = event->creator; /* proc that requested work */
    StgClosure* node;
    rtsBool found;
    rtsSparkQ spark_of_non_local_node = NULL,
              spark_of_non_local_node_prev = NULL,
              low_priority_spark = NULL,
              low_priority_spark_prev = NULL,
              spark = NULL, prev = NULL;

    /* Choose a spark from the local spark queue */
    prev = (rtsSpark*)NULL;
    spark = pending_sparks_hds[proc];
    found = rtsFalse;

    // ToDo: check this code & implement local sparking !! -- HWL
    while (!found && spark != (rtsSpark*)NULL)
    {
        ASSERT((prev!=(rtsSpark*)NULL || spark==pending_sparks_hds[proc]) &&
               (prev==(rtsSpark*)NULL || prev->next==spark) &&
               (spark->prev==prev));
        node = spark->node;
        if (!closure_SHOULD_SPARK(node))
        {
            IF_GRAN_DEBUG(checkSparkQ,
                          debugBelch("^^ pruning spark %p (node %p) in gimme_spark",
                                     spark, node));

            if (RtsFlags.GranFlags.GranSimStats.Sparks)
                DumpRawGranEvent(proc, (PEs)0, SP_PRUNED,(StgTSO*)NULL,
                                 spark->node, spark->name, spark_queue_len(proc));

            ASSERT(spark != (rtsSpark*)NULL);
            ASSERT(SparksAvail>0);
            --SparksAvail;

            ASSERT(prev==(rtsSpark*)NULL || prev->next==spark);
            spark = delete_from_sparkq (spark, proc, rtsTrue);
            if (spark != (rtsSpark*)NULL)
                prev = spark->prev;
            continue;
        }
        /* -- node should eventually be sparked */
        else if (RtsFlags.GranFlags.PreferSparksOfLocalNodes &&
                 !IS_LOCAL_TO(PROCS(node),CurrentProc))
        {
            barf("Local sparking not yet implemented");

            /* Remember first low priority spark */
            if (spark_of_non_local_node==(rtsSpark*)NULL) {
                spark_of_non_local_node_prev = prev;
                spark_of_non_local_node = spark;
            }

            if (spark->next == (rtsSpark*)NULL) {
                /* ASSERT(spark==SparkQueueTl); just for testing */
                prev = spark_of_non_local_node_prev;
                spark = spark_of_non_local_node;
                found = rtsTrue;
                break;
            }

# if defined(GRAN) && defined(GRAN_CHECK)
            /* Should never happen; just for testing
            if (spark==pending_sparks_tl) {
                debugBelch("ReSchedule: Last spark != SparkQueueTl\n");
                stg_exit(EXIT_FAILURE);
            } */
# endif
            prev = spark;
            spark = spark->next;
            ASSERT(SparksAvail>0);
            --SparksAvail;
            continue;
        }
        else if ( RtsFlags.GranFlags.DoPrioritySparking ||
                  (spark->gran_info >= RtsFlags.GranFlags.SparkPriority2) )
        {
            if (RtsFlags.GranFlags.DoPrioritySparking)
                barf("Priority sparking not yet implemented");

            found = rtsTrue;
        }
#if 0
        else /* only used if SparkPriority2 is defined */
        {
            /* ToDo: fix the code below and re-integrate it */
            /* Remember first low priority spark */
            if (low_priority_spark==(rtsSpark*)NULL) {
                low_priority_spark_prev = prev;
                low_priority_spark = spark;
            }

            if (spark->next == (rtsSpark*)NULL) {
                /* ASSERT(spark==spark_queue_tl); just for testing */
                prev = low_priority_spark_prev;
                spark = low_priority_spark;
                found = rtsTrue; /* take low pri spark => rc is 2 */
                break;
            }

            /* Should never happen; just for testing
            if (spark==pending_sparks_tl) {
                debugBelch("ReSchedule: Last spark != SparkQueueTl\n");
                stg_exit(EXIT_FAILURE);
                break;
            } */
            prev = spark;
            spark = spark->next;

            IF_GRAN_DEBUG(pri,
                          debugBelch("++ Ignoring spark of priority %u (SparkPriority=%u); node=%p; name=%u\n",
                                     spark->gran_info, RtsFlags.GranFlags.SparkPriority,
                                     spark->node, spark->name);)
        }
#endif
    } /* while (spark!=NULL && !found) */

    *spark_res = spark;
    *found_res = found;
}

/*
   Turn the spark into a thread.
   In GranSim this basically means scheduling a StartThread event for the
   node pointed to by the spark at some point in the future.
   (was munch_spark in the old RTS)
*/
rtsBool
activateSpark (rtsEvent *event, rtsSparkQ spark)
{
    PEs proc = event->proc,       /* proc to search for work */
        creator = event->creator; /* proc that requested work */
    StgTSO* tso;
    StgClosure* node;
    rtsTime spark_arrival_time;

    /*
       We've found a node on PE proc requested by PE creator.
       If proc==creator we can turn the spark into a thread immediately;
       otherwise we schedule a MoveSpark event on the requesting PE
    */

    /* DaH Qu' yIchen */
    if (proc!=creator) {

        /* only possible if we simulate GUM style fishing */
        ASSERT(RtsFlags.GranFlags.Fishing);

        /* Message packing costs for sending a Fish; qeq jabbI'ID */
        CurrentTime[proc] += RtsFlags.GranFlags.Costs.mpacktime;

        if (RtsFlags.GranFlags.GranSimStats.Sparks)
            DumpRawGranEvent(proc, (PEs)0, SP_EXPORTED,
                             (StgTSO*)NULL, spark->node,
                             spark->name, spark_queue_len(proc));

        /* time of the spark arrival on the remote PE */
        spark_arrival_time = CurrentTime[proc] + RtsFlags.GranFlags.Costs.latency;

        new_event(creator, proc, spark_arrival_time,
                  MoveSpark,
                  (StgTSO*)NULL, spark->node, spark);

        CurrentTime[proc] += RtsFlags.GranFlags.Costs.mtidytime;

    } else { /* proc==creator i.e. turn the spark into a thread */

        if ( RtsFlags.GranFlags.GranSimStats.Global &&
             spark->gran_info < RtsFlags.GranFlags.SparkPriority2 ) {

            globalGranStats.tot_low_pri_sparks++;
            IF_GRAN_DEBUG(pri,
                          debugBelch("++ No high priority spark available; low priority (%u) spark chosen: node=%p; name=%u\n",
                                     spark->gran_info,
                                     spark->node, spark->name));
        }

        CurrentTime[proc] += RtsFlags.GranFlags.Costs.threadcreatetime;

        node = spark->node;

# if 0
        /* ToDo: fix the GC interface and move to StartThread handling-- HWL */
        if (GARBAGE COLLECTION IS NECESSARY) {
            /* Some kind of backoff needed here in case there's too little heap */
# if defined(GRAN_CHECK) && defined(GRAN)
            if (RtsFlags.GcFlags.giveStats)
                fprintf(RtsFlags.GcFlags.statsFile,"***** vIS Qu' chen veQ boSwI'; spark=%p, node=%p; name=%u\n",
                        /* (found==2 ? "no hi pri spark" : "hi pri spark"), */
                        spark, node, spark->name);
# endif
            new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc]+1,
                      FindWork,
                      (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
            barf("//// activateSpark: out of heap ; ToDo: call GarbageCollect()");
            GarbageCollect(GetRoots, rtsFalse);
            // HWL old: ReallyPerformThreadGC(TSO_HS+TSO_CTS_SIZE,rtsFalse);
            // HWL old: SAVE_Hp -= TSO_HS+TSO_CTS_SIZE;
            spark = NULL;
            return; /* was: continue; */ /* to the next event, eventually */
        }
# endif

        if (RtsFlags.GranFlags.GranSimStats.Sparks)
            DumpRawGranEvent(CurrentProc,(PEs)0,SP_USED,(StgTSO*)NULL,
                             spark->node, spark->name,
                             spark_queue_len(CurrentProc));

        new_event(proc, proc, CurrentTime[proc],
                  StartThread,
                  END_TSO_QUEUE, node, spark); // (rtsSpark*)NULL);

        procStatus[proc] = Starting;
    }
}

/* -------------------------------------------------------------------------
   This is the main point where handling granularity information comes into
   play.
   ------------------------------------------------------------------------- */

#define MAX_RAND_PRI 100

/*
   Granularity info transformers.
   Applied to the GRAN_INFO field of a spark.
*/
STATIC_INLINE nat ID(nat x) { return(x); }
STATIC_INLINE nat INV(nat x) { return(-x); }
STATIC_INLINE nat IGNORE(nat x) { return (0); }
STATIC_INLINE nat RAND(nat x) { return ((random() % MAX_RAND_PRI) + 1); }

/* NB: size_info and par_info are currently unused (what a shame!) -- HWL */
rtsSpark *
newSpark(node,name,gran_info,size_info,par_info,local)
StgClosure *node;
nat name, gran_info, size_info, par_info, local;
{
    nat pri;
    rtsSpark *newspark;

    pri = RtsFlags.GranFlags.RandomPriorities ? RAND(gran_info) :
          RtsFlags.GranFlags.InversePriorities ? INV(gran_info) :
          RtsFlags.GranFlags.IgnorePriorities ? IGNORE(gran_info) :
          ID(gran_info);

    if ( RtsFlags.GranFlags.SparkPriority!=0 &&
         pri<RtsFlags.GranFlags.SparkPriority ) {
        IF_GRAN_DEBUG(pri,
                      debugBelch(",, NewSpark: Ignoring spark of priority %u (SparkPriority=%u); node=%#x; name=%u\n",
                                 pri, RtsFlags.GranFlags.SparkPriority, node, name));
        return ((rtsSpark*)NULL);
    }

    newspark = (rtsSpark*) stgMallocBytes(sizeof(rtsSpark), "NewSpark");
    newspark->prev = newspark->next = (rtsSpark*)NULL;
    newspark->node = node;
    newspark->name = (name==1) ? CurrentTSO->gran.sparkname : name;
    newspark->gran_info = pri;
    newspark->global = !local; /* Check that with parAt, parAtAbs !!*/

    if (RtsFlags.GranFlags.GranSimStats.Global) {
        globalGranStats.tot_sparks_created++;
        globalGranStats.sparks_created_on_PE[CurrentProc]++;
    }

    return(newspark);
}

void
disposeSpark(spark)
rtsSpark *spark;
{
    ASSERT(spark!=NULL);
    stgFree(spark);
}

void
disposeSparkQ(spark)
rtsSparkQ spark;
{
    if (spark==NULL)
        return;

    disposeSparkQ(spark->next);

# ifdef GRAN_CHECK
    if (SparksAvail < 0) {
        debugBelch("disposeSparkQ: SparksAvail<0 after disposing sparkq @ %p\n", &spark);
        print_spark(spark);
    }
# endif

    stgFree(spark);
}

/*
   With PrioritySparking add_to_spark_queue performs an insert sort to keep
   the spark queue sorted. Otherwise the spark is just added to the end of
   the queue.
*/

void
add_to_spark_queue(spark)
rtsSpark *spark;
{
    rtsSpark *prev = NULL, *next = NULL;
    nat count = 0;
    rtsBool found = rtsFalse;

    if ( spark == (rtsSpark *)NULL ) {
        return;
    }

    if (RtsFlags.GranFlags.DoPrioritySparking && (spark->gran_info != 0) ) {
        /* Priority sparking is enabled i.e. spark queues must be sorted */

        for (prev = NULL, next = pending_sparks_hd, count=0;
             (next != NULL) &&
             !(found = (spark->gran_info >= next->gran_info));
             prev = next, next = next->next, count++)
        {}

    } else { /* 'utQo' */
        /* Priority sparking is disabled */

        found = rtsFalse; /* to add it at the end */

    }

    if (found) {
        /* next points to the first spark with a gran_info smaller than that
           of spark; therefore, add spark before next into the spark queue */
        spark->next = next;
        if ( next == NULL ) {
            pending_sparks_tl = spark;
        } else {
            next->prev = spark;
        }
        spark->prev = prev;
        if ( prev == NULL ) {
            pending_sparks_hd = spark;
        } else {
            prev->next = spark;
        }
    } else { /* (RtsFlags.GranFlags.DoPrioritySparking && !found) || !DoPrioritySparking */
        /* add the spark at the end of the spark queue */
        spark->next = NULL;
        spark->prev = pending_sparks_tl;
        if (pending_sparks_hd == NULL)
            pending_sparks_hd = spark;
        else
            pending_sparks_tl->next = spark;
        pending_sparks_tl = spark;
    }
    ++SparksAvail;

    /* add costs for search in priority sparking */
    if (RtsFlags.GranFlags.DoPrioritySparking) {
        CurrentTime[CurrentProc] += count * RtsFlags.GranFlags.Costs.pri_spark_overhead;
    }

    IF_GRAN_DEBUG(checkSparkQ,
                  debugBelch("++ Spark stats after adding spark %p (node %p) to queue on PE %d",
                             spark, spark->node, CurrentProc);
                  print_sparkq_stats());

# if defined(GRAN_CHECK)
    if (RtsFlags.GranFlags.Debug.checkSparkQ) {
        for (prev = NULL, next = pending_sparks_hd;
             (next != NULL);
             prev = next, next = next->next)
        {}
        if ( (prev!=NULL) && (prev!=pending_sparks_tl) )
            debugBelch("SparkQ inconsistency after adding spark %p: (PE %u) pending_sparks_tl (%p) not end of queue (%p)\n",
                       spark,CurrentProc,
                       pending_sparks_tl, prev);
    }
# endif

# if defined(GRAN_CHECK)
    /* Check if the sparkq is still sorted. Just for testing, really! */
    if ( RtsFlags.GranFlags.Debug.checkSparkQ &&
         RtsFlags.GranFlags.Debug.pri ) {
        rtsBool sorted = rtsTrue;
        rtsSpark *prev, *next;

        if (pending_sparks_hd == NULL ||
            pending_sparks_hd->next == NULL ) {
            /* just 1 elem => ok */
        } else {
            for (prev = pending_sparks_hd,
                 next = pending_sparks_hd->next;
                 (next != NULL) ;
                 prev = next, next = next->next) {
                sorted = sorted &&
                         (prev->gran_info >= next->gran_info);
            }
        }
        if (!sorted) {
            debugBelch("ghuH: SPARKQ on PE %d is not sorted:\n",
                       CurrentProc);
            print_sparkq(CurrentProc);
        }
    }
# endif
}

nat
spark_queue_len(proc)
PEs proc;
{
    rtsSpark *prev, *spark; /* prev only for testing !! */
    nat len;

    for (len = 0, prev = NULL, spark = pending_sparks_hds[proc];
         spark != NULL;
         len++, prev = spark, spark = spark->next)
    {}

# if defined(GRAN_CHECK)
    if ( RtsFlags.GranFlags.Debug.checkSparkQ )
        if ( (prev!=NULL) && (prev!=pending_sparks_tls[proc]) )
            debugBelch("ERROR in spark_queue_len: (PE %u) pending_sparks_tl (%p) not end of queue (%p)\n",
                       proc, pending_sparks_tls[proc], prev);
# endif

    return (len);
}

/*
   Take spark out of the spark queue on PE p and nuke the spark. Adjusts
   hd and tl pointers of the spark queue. Returns a pointer to the next
   spark in the queue.
*/
rtsSpark *
delete_from_sparkq (spark, p, dispose_too) /* unlink and dispose spark */
rtsSpark *spark;
PEs p;
rtsBool dispose_too;
{
    rtsSpark *new_spark;

    if (spark==NULL)
        barf("delete_from_sparkq: trying to delete NULL spark\n");

# if defined(GRAN_CHECK)
    if ( RtsFlags.GranFlags.Debug.checkSparkQ ) {
        debugBelch("## |%p:%p| (%p)<-spark=%p->(%p) <-(%p)\n",
                   pending_sparks_hd, pending_sparks_tl,
                   spark->prev, spark, spark->next,
                   (spark->next==NULL ? 0 : spark->next->prev));
    }
# endif

    if (spark->prev==NULL) {
        /* spark is first spark of queue => adjust hd pointer */
        ASSERT(pending_sparks_hds[p]==spark);
        pending_sparks_hds[p] = spark->next;
    } else {
        spark->prev->next = spark->next;
    }
    if (spark->next==NULL) {
        ASSERT(pending_sparks_tls[p]==spark);
        /* spark is last spark of queue => adjust tl pointer */
        pending_sparks_tls[p] = spark->prev;
    } else {
        spark->next->prev = spark->prev;
    }
    new_spark = spark->next;

# if defined(GRAN_CHECK)
    if ( RtsFlags.GranFlags.Debug.checkSparkQ ) {
        debugBelch("## |%p:%p| (%p)<-spark=%p->(%p) <-(%p); spark=%p will be deleted NOW \n",
                   pending_sparks_hd, pending_sparks_tl,
                   spark->prev, spark, spark->next,
                   (spark->next==NULL ? 0 : spark->next->prev), spark);
    }
# endif

    if (dispose_too)
        disposeSpark(spark);

    return new_spark;
}

/* Mark all nodes pointed to by sparks in the spark queues (for GC) */
void
markSparkQueue(void)
{
    StgClosure *MarkRoot(StgClosure *root); // prototype
    PEs p;
    rtsSpark *sp;

    for (p=0; p<RtsFlags.GranFlags.proc; p++)
        for (sp=pending_sparks_hds[p]; sp!=NULL; sp=sp->next) {
            ASSERT(sp->node!=NULL);
            ASSERT(LOOKS_LIKE_GHC_INFO(sp->node->header.info));
            // ToDo?: statistics gathering here (also for GUM!)
            sp->node = (StgClosure *)MarkRoot(sp->node);
        }

    IF_DEBUG(gc,
             debugBelch("markSparkQueue: spark statistics at start of GC:");
             print_sparkq_stats());
}

void
print_spark(spark)
rtsSpark *spark;
{
    char str[16];

    if (spark==NULL) {
        debugBelch("Spark: NIL\n");
        return;
    } else {
        sprintf(str,
                ((spark->node==NULL) ? "______" : "%#6lx"),
                stgCast(StgPtr,spark->node));

        debugBelch("Spark: Node %8s, Name %#6x, Global %5s, Creator %5x, Prev %6p, Next %6p\n",
                   str, spark->name,
                   ((spark->global)==rtsTrue?"True":"False"), spark->creator,
                   spark->prev, spark->next);
    }
}

void
print_sparkq(proc)
PEs proc;
// rtsSpark *hd;
{
    rtsSpark *x = pending_sparks_hds[proc];

    debugBelch("Spark Queue of PE %d with root at %p:\n", proc, x);
    for (; x!=(rtsSpark*)NULL; x=x->next) {
        print_spark(x);
    }
}

/*
   Print statistics for all spark queues.
*/
void
print_sparkq_stats(void)
{
    PEs p;

    debugBelch("SparkQs: [");
    for (p=0; p<RtsFlags.GranFlags.proc; p++)
        debugBelch(", PE %d: %d", p, spark_queue_len(p));
    debugBelch("]\n");
}

#endif