/* ---------------------------------------------------------------------------
 *
 * (c) The GHC Team, 2000-2006
 *
 * Sparking support for PARALLEL_HASKELL and THREADED_RTS versions of the RTS.
 *
 * -------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"
#include "Storage.h"
#include "Schedule.h"
#include "SchedAPI.h"
#include "RtsFlags.h"
#include "RtsUtils.h"
#include "ParTicky.h"
# if defined(PARALLEL_HASKELL)
# include "ParallelRts.h"
# include "GranSimRts.h"   // for GR_...
# elif defined(GRAN)
# include "GranSimRts.h"
# endif
#include "Sparks.h"
#include "Trace.h"

#if defined(THREADED_RTS) || defined(PARALLEL_HASKELL)

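/* Each spark pool is a fixed-size circular buffer: 'base' and 'lim'
 * delimit the storage, 'hd' points at the oldest spark and 'tl' at the
 * next free slot.  bump_hd/bump_tl advance a pointer by one slot,
 * wrapping around to 'base' when it reaches 'lim'.
 */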
static INLINE_ME void bump_hd (StgSparkPool *p)
{ p->hd++; if (p->hd == p->lim) p->hd = p->base; }

static INLINE_ME void bump_tl (StgSparkPool *p)
{ p->tl++; if (p->tl == p->lim) p->tl = p->base; }

/* -----------------------------------------------------------------------------
 *
 * Initialising spark pools.
 *
 * -------------------------------------------------------------------------- */

static void
initSparkPool(StgSparkPool *pool)
{
    pool->base = stgMallocBytes(RtsFlags.ParFlags.maxLocalSparks
                                * sizeof(StgClosure *),
                                "initSparkPools");
    pool->lim = pool->base + RtsFlags.ParFlags.maxLocalSparks;
    pool->hd = pool->base;
    pool->tl = pool->base;
}

void
initSparkPools( void )
{
#ifdef THREADED_RTS
    /* walk over the capabilities, allocating a spark pool for each one */
    nat i;
    for (i = 0; i < n_capabilities; i++) {
        initSparkPool(&capabilities[i].r.rSparks);
    }
#else
    /* allocate a single spark pool */
    initSparkPool(&MainCapability.r.rSparks);
#endif
}

/* -----------------------------------------------------------------------------
 *
 * findSpark: find a spark on the current Capability that we can fork
 * into a thread.
 *
 * -------------------------------------------------------------------------- */

StgClosure *
findSpark (Capability *cap)
{
    StgSparkPool *pool;
    StgClosure *spark;

    pool = &(cap->r.rSparks);
    ASSERT_SPARK_POOL_INVARIANTS(pool);

    while (pool->hd != pool->tl) {
        spark = *pool->hd;
        bump_hd(pool);
        if (closure_SHOULD_SPARK(spark)) {
#ifdef GRAN
            if (RtsFlags.ParFlags.ParStats.Sparks)
                DumpRawGranEvent(CURRENT_PROC, CURRENT_PROC,
                                 GR_STEALING, ((StgTSO *)NULL), spark,
                                 0, 0 /* spark_queue_len(ADVISORY_POOL) */);
#endif
            return spark;
        }
    }
    // spark pool is now empty
    return NULL;
}

/* -----------------------------------------------------------------------------
 * Mark all nodes pointed to by sparks in the spark queues (for GC).  Does an
 * implicit slide, i.e. after marking, all sparks are at the beginning of the
 * spark pool and the spark pool contains only sparkable closures.
 * -------------------------------------------------------------------------- */

void
markSparkQueue (evac_fn evac)
{
    StgClosure **sparkp, **to_sparkp;
    nat i, n, pruned_sparks; // stats only
    StgSparkPool *pool;
    Capability *cap;

    PAR_TICKY_MARK_SPARK_QUEUE_START();

    n = 0;
    pruned_sparks = 0;
    for (i = 0; i < n_capabilities; i++) {
        cap = &capabilities[i];
        pool = &(cap->r.rSparks);

        ASSERT_SPARK_POOL_INVARIANTS(pool);

#if defined(PARALLEL_HASKELL)
        // stats only
        n = 0;
        pruned_sparks = 0;
#endif

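        /* Walk the pool with two pointers: sparkp scans every entry
         * between hd and tl, while to_sparkp re-writes (compacts) the
         * entries that are still worth sparking, evacuating them for GC
         * as we go.  Entries that are no longer sparkable are dropped.
         */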
        sparkp = pool->hd;
        to_sparkp = pool->hd;
        while (sparkp != pool->tl) {
            ASSERT(*sparkp!=NULL);
            ASSERT(LOOKS_LIKE_CLOSURE_PTR(((StgClosure *)*sparkp)));
            // ToDo?: statistics gathering here (also for GUM!)
            if (closure_SHOULD_SPARK(*sparkp)) {
                evac(sparkp);
                *to_sparkp++ = *sparkp;
                if (to_sparkp == pool->lim) {
                    to_sparkp = pool->base;
                }
                n++;
            } else {
                pruned_sparks++;
            }
            sparkp++;
            if (sparkp == pool->lim) {
                sparkp = pool->base;
            }
        }
        pool->tl = to_sparkp;

        PAR_TICKY_MARK_SPARK_QUEUE_END(n);

#if defined(PARALLEL_HASKELL)
        debugTrace(DEBUG_sched,
                   "marked %d sparks and pruned %d sparks on [%x]",
                   n, pruned_sparks, mytid);
#else
        debugTrace(DEBUG_sched,
                   "marked %d sparks and pruned %d sparks",
                   n, pruned_sparks);
#endif

        debugTrace(DEBUG_sched,
                   "new spark queue len=%d; (hd=%p; tl=%p)\n",
                   sparkPoolSize(pool), pool->hd, pool->tl);
    }
}

/* -----------------------------------------------------------------------------
 *
 * Turn a spark into a real thread
 *
 * -------------------------------------------------------------------------- */

void
createSparkThread (Capability *cap, StgClosure *p)
{
    StgTSO *tso;

    tso = createGenThread (cap, RtsFlags.GcFlags.initialStkSize, p);
    appendToRunQueue(cap,tso);
}

/* -----------------------------------------------------------------------------
 *
 * Create a new spark
 *
 * -------------------------------------------------------------------------- */

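/* Policy for a full pool: with DISCARD_NEW defined, the sparks already in
 * the pool are kept and the incoming spark is dropped, unless the oldest
 * spark is no longer worth sparking, in which case it is replaced by the
 * new one.  The #else branch instead always overwrites the oldest spark.
 */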
#define DISCARD_NEW

StgInt
newSpark (StgRegTable *reg, StgClosure *p)
{
    StgSparkPool *pool = &(reg->rSparks);

    ASSERT_SPARK_POOL_INVARIANTS(pool);

    if (closure_SHOULD_SPARK(p)) {
#ifdef DISCARD_NEW
        StgClosure **new_tl;
        new_tl = pool->tl + 1;
        if (new_tl == pool->lim) { new_tl = pool->base; }
        if (new_tl != pool->hd) {
            *pool->tl = p;
            pool->tl = new_tl;
        } else if (!closure_SHOULD_SPARK(*pool->hd)) {
            // if the old closure is not sparkable, discard it and
            // keep the new one.  Otherwise, keep the old one.
            *pool->tl = p;
            bump_tl(pool);
            bump_hd(pool);
        }
#else /* DISCARD OLD */
        *pool->tl = p;
        bump_tl(pool);
        if (pool->tl == pool->hd) { bump_hd(pool); }
#endif
    }

    ASSERT_SPARK_POOL_INVARIANTS(pool);
    return 1;
}

#else

StgInt
newSpark (StgRegTable *reg STG_UNUSED, StgClosure *p STG_UNUSED)
{
    /* nothing */
    return 1;
}

#endif /* PARALLEL_HASKELL || THREADED_RTS */


/* -----------------------------------------------------------------------------
 *
 * GRAN & PARALLEL_HASKELL stuff beyond here.
 *
 * -------------------------------------------------------------------------- */

#if defined(PARALLEL_HASKELL) || defined(GRAN)

static void slide_spark_pool( StgSparkPool *pool );

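/* Append a closure to the (linear) spark pool.  If the pool has run out
 * of space at the top, first slide the remaining sparks down to the base;
 * the closure is then added only if it is still worth sparking and there
 * is room left.
 */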
rtsBool
add_to_spark_queue( StgClosure *closure, StgSparkPool *pool )
{
    if (pool->tl == pool->lim)
        slide_spark_pool(pool);

    if (closure_SHOULD_SPARK(closure) &&
        pool->tl < pool->lim) {
        *(pool->tl++) = closure;

#if defined(PARALLEL_HASKELL)
        // collect parallel global statistics (currently done together with GC stats)
        if (RtsFlags.ParFlags.ParStats.Global &&
            RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
            // debugBelch("Creating spark for %x @ %11.2f\n", closure, usertime());
            globalParStats.tot_sparks_created++;
        }
#endif
        return rtsTrue;
    } else {
#if defined(PARALLEL_HASKELL)
        // collect parallel global statistics (currently done together with GC stats)
        if (RtsFlags.ParFlags.ParStats.Global &&
            RtsFlags.GcFlags.giveStats > NO_GC_STATS) {
            // debugBelch("Ignoring spark for %x @ %11.2f\n", closure, usertime());
            globalParStats.tot_sparks_ignored++;
        }
#endif
        return rtsFalse;
    }
}

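/* Compact the spark pool: copy the sparks that are still worth keeping
 * down to the base of the pool, dropping the rest, and reset hd/tl
 * accordingly.
 */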
static void
slide_spark_pool( StgSparkPool *pool )
{
    StgClosure **sparkp, **to_sparkp;

    sparkp = pool->hd;
    to_sparkp = pool->base;
    while (sparkp < pool->tl) {
        ASSERT(to_sparkp<=sparkp);
        ASSERT(*sparkp!=NULL);
        ASSERT(LOOKS_LIKE_GHC_INFO((*sparkp)->header.info));

        if (closure_SHOULD_SPARK(*sparkp)) {
            *to_sparkp++ = *sparkp++;
        } else {
            sparkp++;
        }
    }
    pool->hd = pool->base;
    pool->tl = to_sparkp;
}

void
disposeSpark(spark)
StgClosure *spark;
{
#if !defined(THREADED_RTS)
    Capability *cap;
    StgSparkPool *pool;

    cap = &MainRegTable;
    pool = &(cap->rSparks);
    ASSERT(pool->hd <= pool->tl && pool->tl <= pool->lim);
#endif
    ASSERT(spark != (StgClosure *)NULL);
    /* Do nothing */
}


#elif defined(GRAN)

/*
   Search the spark queue of the proc in event for a spark that's worth
   turning into a thread
   (was gimme_spark in the old RTS)
*/
void
findLocalSpark (rtsEvent *event, rtsBool *found_res, rtsSparkQ *spark_res)
{
    PEs proc = event->proc,       /* proc to search for work */
        creator = event->creator; /* proc that requested work */
    StgClosure* node;
    rtsBool found;
    rtsSparkQ spark_of_non_local_node = NULL,
              spark_of_non_local_node_prev = NULL,
              low_priority_spark = NULL,
              low_priority_spark_prev = NULL,
              spark = NULL, prev = NULL;

    /* Choose a spark from the local spark queue */
    prev = (rtsSpark*)NULL;
    spark = pending_sparks_hds[proc];
    found = rtsFalse;

    // ToDo: check this code & implement local sparking !! -- HWL
    while (!found && spark != (rtsSpark*)NULL)
    {
        ASSERT((prev!=(rtsSpark*)NULL || spark==pending_sparks_hds[proc]) &&
               (prev==(rtsSpark*)NULL || prev->next==spark) &&
               (spark->prev==prev));
        node = spark->node;
        if (!closure_SHOULD_SPARK(node))
        {
            IF_GRAN_DEBUG(checkSparkQ,
                          debugBelch("^^ pruning spark %p (node %p) in gimme_spark",
                                     spark, node));

            if (RtsFlags.GranFlags.GranSimStats.Sparks)
                DumpRawGranEvent(proc, (PEs)0, SP_PRUNED,(StgTSO*)NULL,
                                 spark->node, spark->name, spark_queue_len(proc));

            ASSERT(spark != (rtsSpark*)NULL);
            ASSERT(SparksAvail>0);
            --SparksAvail;

            ASSERT(prev==(rtsSpark*)NULL || prev->next==spark);
            spark = delete_from_sparkq (spark, proc, rtsTrue);
            if (spark != (rtsSpark*)NULL)
                prev = spark->prev;
            continue;
        }
        /* -- node should eventually be sparked */
        else if (RtsFlags.GranFlags.PreferSparksOfLocalNodes &&
                 !IS_LOCAL_TO(PROCS(node),CurrentProc))
        {
            barf("Local sparking not yet implemented");

            /* Remember first low priority spark */
            if (spark_of_non_local_node==(rtsSpark*)NULL) {
                spark_of_non_local_node_prev = prev;
                spark_of_non_local_node = spark;
            }

            if (spark->next == (rtsSpark*)NULL) {
                /* ASSERT(spark==SparkQueueTl); just for testing */
                prev = spark_of_non_local_node_prev;
                spark = spark_of_non_local_node;
                found = rtsTrue;
                break;
            }

# if defined(GRAN) && defined(GRAN_CHECK)
            /* Should never happen; just for testing
               if (spark==pending_sparks_tl) {
                   debugBelch("ReSchedule: Last spark != SparkQueueTl\n");
                   stg_exit(EXIT_FAILURE);
               } */
# endif
            prev = spark;
            spark = spark->next;
            ASSERT(SparksAvail>0);
            --SparksAvail;
            continue;
        }
        else if ( RtsFlags.GranFlags.DoPrioritySparking ||
                  (spark->gran_info >= RtsFlags.GranFlags.SparkPriority2) )
        {
            if (RtsFlags.GranFlags.DoPrioritySparking)
                barf("Priority sparking not yet implemented");

            found = rtsTrue;
        }
#if 0
        else /* only used if SparkPriority2 is defined */
        {
            /* ToDo: fix the code below and re-integrate it */
            /* Remember first low priority spark */
            if (low_priority_spark==(rtsSpark*)NULL) {
                low_priority_spark_prev = prev;
                low_priority_spark = spark;
            }

            if (spark->next == (rtsSpark*)NULL) {
                /* ASSERT(spark==spark_queue_tl); just for testing */
                prev = low_priority_spark_prev;
                spark = low_priority_spark;
                found = rtsTrue;  /* take low pri spark => rc is 2 */
                break;
            }

            /* Should never happen; just for testing
               if (spark==pending_sparks_tl) {
                   debugBelch("ReSchedule: Last spark != SparkQueueTl\n");
                   stg_exit(EXIT_FAILURE);
                   break;
               } */
            prev = spark;
            spark = spark->next;

            IF_GRAN_DEBUG(pri,
                          debugBelch("++ Ignoring spark of priority %u (SparkPriority=%u); node=%p; name=%u\n",
                                     spark->gran_info, RtsFlags.GranFlags.SparkPriority,
                                     spark->node, spark->name);)
        }
#endif
    } /* while (spark!=NULL && !found) */

    *spark_res = spark;
    *found_res = found;
}

/*
   Turn the spark into a thread.
   In GranSim this basically means scheduling a StartThread event for the
   node pointed to by the spark at some point in the future.
   (was munch_spark in the old RTS)
*/
rtsBool
activateSpark (rtsEvent *event, rtsSparkQ spark)
{
    PEs proc = event->proc,       /* proc to search for work */
        creator = event->creator; /* proc that requested work */
    StgTSO* tso;
    StgClosure* node;
    rtsTime spark_arrival_time;

    /*
       We've found a node on PE proc requested by PE creator.
       If proc==creator we can turn the spark into a thread immediately;
       otherwise we schedule a MoveSpark event on the requesting PE
    */

    /* DaH Qu' yIchen */
    if (proc!=creator) {

        /* only possible if we simulate GUM style fishing */
        ASSERT(RtsFlags.GranFlags.Fishing);

        /* Message packing costs for sending a Fish; qeq jabbI'ID */
        CurrentTime[proc] += RtsFlags.GranFlags.Costs.mpacktime;

        if (RtsFlags.GranFlags.GranSimStats.Sparks)
            DumpRawGranEvent(proc, (PEs)0, SP_EXPORTED,
                             (StgTSO*)NULL, spark->node,
                             spark->name, spark_queue_len(proc));

        /* time of the spark arrival on the remote PE */
        spark_arrival_time = CurrentTime[proc] + RtsFlags.GranFlags.Costs.latency;

        new_event(creator, proc, spark_arrival_time,
                  MoveSpark,
                  (StgTSO*)NULL, spark->node, spark);

        CurrentTime[proc] += RtsFlags.GranFlags.Costs.mtidytime;

    } else { /* proc==creator i.e. turn the spark into a thread */

        if ( RtsFlags.GranFlags.GranSimStats.Global &&
             spark->gran_info < RtsFlags.GranFlags.SparkPriority2 ) {

            globalGranStats.tot_low_pri_sparks++;
            IF_GRAN_DEBUG(pri,
                          debugBelch("++ No high priority spark available; low priority (%u) spark chosen: node=%p; name=%u\n",
                                     spark->gran_info,
                                     spark->node, spark->name));
        }

        CurrentTime[proc] += RtsFlags.GranFlags.Costs.threadcreatetime;

        node = spark->node;

# if 0
        /* ToDo: fix the GC interface and move to StartThread handling -- HWL */
        if (GARBAGE COLLECTION IS NECESSARY) {
            /* Some kind of backoff needed here in case there's too little heap */
# if defined(GRAN_CHECK) && defined(GRAN)
            if (RtsFlags.GcFlags.giveStats)
                fprintf(RtsFlags.GcFlags.statsFile,"***** vIS Qu' chen veQ boSwI'; spark=%p, node=%p; name=%u\n",
                        /* (found==2 ? "no hi pri spark" : "hi pri spark"), */
                        spark, node, spark->name);
# endif
            new_event(CurrentProc, CurrentProc, CurrentTime[CurrentProc]+1,
                      FindWork,
                      (StgTSO*)NULL, (StgClosure*)NULL, (rtsSpark*)NULL);
            barf("//// activateSpark: out of heap ; ToDo: call GarbageCollect()");
            GarbageCollect(GetRoots, rtsFalse);
            // HWL old: ReallyPerformThreadGC(TSO_HS+TSO_CTS_SIZE,rtsFalse);
            // HWL old: SAVE_Hp -= TSO_HS+TSO_CTS_SIZE;
            spark = NULL;
            return; /* was: continue; */ /* to the next event, eventually */
        }
# endif

        if (RtsFlags.GranFlags.GranSimStats.Sparks)
            DumpRawGranEvent(CurrentProc,(PEs)0,SP_USED,(StgTSO*)NULL,
                             spark->node, spark->name,
                             spark_queue_len(CurrentProc));

        new_event(proc, proc, CurrentTime[proc],
                  StartThread,
                  END_TSO_QUEUE, node, spark); // (rtsSpark*)NULL);

        procStatus[proc] = Starting;
    }
}

/* -------------------------------------------------------------------------
   This is the main point where handling granularity information comes into
   play.
   ------------------------------------------------------------------------- */

#define MAX_RAND_PRI 100

/*
   Granularity info transformers.
   Applied to the GRAN_INFO field of a spark.
*/
STATIC_INLINE nat ID(nat x) { return(x); };
STATIC_INLINE nat INV(nat x) { return(-x); };
STATIC_INLINE nat IGNORE(nat x) { return (0); };
STATIC_INLINE nat RAND(nat x) { return ((random() % MAX_RAND_PRI) + 1); }

/* NB: size_info and par_info are currently unused (what a shame!) -- HWL */
rtsSpark *
newSpark(node,name,gran_info,size_info,par_info,local)
StgClosure *node;
nat name, gran_info, size_info, par_info, local;
{
    nat pri;
    rtsSpark *newspark;

    pri = RtsFlags.GranFlags.RandomPriorities ? RAND(gran_info) :
          RtsFlags.GranFlags.InversePriorities ? INV(gran_info) :
          RtsFlags.GranFlags.IgnorePriorities ? IGNORE(gran_info) :
          ID(gran_info);

    if ( RtsFlags.GranFlags.SparkPriority!=0 &&
         pri<RtsFlags.GranFlags.SparkPriority ) {
        IF_GRAN_DEBUG(pri,
                      debugBelch(",, NewSpark: Ignoring spark of priority %u (SparkPriority=%u); node=%#x; name=%u\n",
                                 pri, RtsFlags.GranFlags.SparkPriority, node, name));
        return ((rtsSpark*)NULL);
    }

    newspark = (rtsSpark*) stgMallocBytes(sizeof(rtsSpark), "NewSpark");
    newspark->prev = newspark->next = (rtsSpark*)NULL;
    newspark->node = node;
    newspark->name = (name==1) ? CurrentTSO->gran.sparkname : name;
    newspark->gran_info = pri;
    newspark->global = !local; /* Check that with parAt, parAtAbs !!*/

    if (RtsFlags.GranFlags.GranSimStats.Global) {
        globalGranStats.tot_sparks_created++;
        globalGranStats.sparks_created_on_PE[CurrentProc]++;
    }

    return(newspark);
}

void
disposeSpark(spark)
rtsSpark *spark;
{
    ASSERT(spark!=NULL);
    stgFree(spark);
}

void
disposeSparkQ(spark)
rtsSparkQ spark;
{
    if (spark==NULL)
        return;

    disposeSparkQ(spark->next);

# ifdef GRAN_CHECK
    if (SparksAvail < 0) {
        debugBelch("disposeSparkQ: SparksAvail<0 after disposing sparkq @ %p\n", &spark);
        print_spark(spark);
    }
# endif

    stgFree(spark);
}

/*
   With PrioritySparking add_to_spark_queue performs an insert sort to keep
   the spark queue sorted.  Otherwise the spark is just added to the end of
   the queue.
*/

void
add_to_spark_queue(spark)
rtsSpark *spark;
{
    rtsSpark *prev = NULL, *next = NULL;
    nat count = 0;
    rtsBool found = rtsFalse;

    if ( spark == (rtsSpark *)NULL ) {
        return;
    }

    if (RtsFlags.GranFlags.DoPrioritySparking && (spark->gran_info != 0) ) {
        /* Priority sparking is enabled i.e. spark queues must be sorted */

        for (prev = NULL, next = pending_sparks_hd, count=0;
             (next != NULL) &&
             !(found = (spark->gran_info >= next->gran_info));
             prev = next, next = next->next, count++)
        {}

    } else { /* 'utQo' */
        /* Priority sparking is disabled */

        found = rtsFalse; /* to add it at the end */

    }

    if (found) {
        /* next points to the first spark with a gran_info smaller than that
           of spark; therefore, add spark before next into the spark queue */
        spark->next = next;
        if ( next == NULL ) {
            pending_sparks_tl = spark;
        } else {
            next->prev = spark;
        }
        spark->prev = prev;
        if ( prev == NULL ) {
            pending_sparks_hd = spark;
        } else {
            prev->next = spark;
        }
    } else { /* (RtsFlags.GranFlags.DoPrioritySparking && !found) || !DoPrioritySparking */
        /* add the spark at the end of the spark queue */
        spark->next = NULL;
        spark->prev = pending_sparks_tl;
        if (pending_sparks_hd == NULL)
            pending_sparks_hd = spark;
        else
            pending_sparks_tl->next = spark;
        pending_sparks_tl = spark;
    }
    ++SparksAvail;

    /* add costs for search in priority sparking */
    if (RtsFlags.GranFlags.DoPrioritySparking) {
        CurrentTime[CurrentProc] += count * RtsFlags.GranFlags.Costs.pri_spark_overhead;
    }

    IF_GRAN_DEBUG(checkSparkQ,
                  debugBelch("++ Spark stats after adding spark %p (node %p) to queue on PE %d",
                             spark, spark->node, CurrentProc);
                  print_sparkq_stats());

# if defined(GRAN_CHECK)
    if (RtsFlags.GranFlags.Debug.checkSparkQ) {
        for (prev = NULL, next = pending_sparks_hd;
             (next != NULL);
             prev = next, next = next->next)
        {}
        if ( (prev!=NULL) && (prev!=pending_sparks_tl) )
            debugBelch("SparkQ inconsistency after adding spark %p: (PE %u) pending_sparks_tl (%p) not end of queue (%p)\n",
                       spark,CurrentProc,
                       pending_sparks_tl, prev);
    }
# endif

# if defined(GRAN_CHECK)
    /* Check if the sparkq is still sorted. Just for testing, really! */
    if ( RtsFlags.GranFlags.Debug.checkSparkQ &&
         RtsFlags.GranFlags.Debug.pri ) {
        rtsBool sorted = rtsTrue;
        rtsSpark *prev, *next;

        if (pending_sparks_hd == NULL ||
            pending_sparks_hd->next == NULL ) {
            /* just 1 elem => ok */
        } else {
            for (prev = pending_sparks_hd,
                 next = pending_sparks_hd->next;
                 (next != NULL) ;
                 prev = next, next = next->next) {
                sorted = sorted &&
                         (prev->gran_info >= next->gran_info);
            }
        }
        if (!sorted) {
            debugBelch("ghuH: SPARKQ on PE %d is not sorted:\n",
                       CurrentProc);
            print_sparkq(CurrentProc);
        }
    }
# endif
}

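/* Count the sparks currently in the spark queue of PE proc (used for
 * statistics and debugging output only).
 */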
nat
spark_queue_len(proc)
PEs proc;
{
    rtsSpark *prev, *spark; /* prev only for testing !! */
    nat len;

    for (len = 0, prev = NULL, spark = pending_sparks_hds[proc];
         spark != NULL;
         len++, prev = spark, spark = spark->next)
    {}

# if defined(GRAN_CHECK)
    if ( RtsFlags.GranFlags.Debug.checkSparkQ )
        if ( (prev!=NULL) && (prev!=pending_sparks_tls[proc]) )
            debugBelch("ERROR in spark_queue_len: (PE %u) pending_sparks_tl (%p) not end of queue (%p)\n",
                       proc, pending_sparks_tls[proc], prev);
# endif

    return (len);
}

/*
   Take spark out of the spark queue on PE p and nuke the spark. Adjusts
   hd and tl pointers of the spark queue. Returns a pointer to the next
   spark in the queue.
*/
rtsSpark *
delete_from_sparkq (spark, p, dispose_too) /* unlink and dispose spark */
rtsSpark *spark;
PEs p;
rtsBool dispose_too;
{
    rtsSpark *new_spark;

    if (spark==NULL)
        barf("delete_from_sparkq: trying to delete NULL spark\n");

# if defined(GRAN_CHECK)
    if ( RtsFlags.GranFlags.Debug.checkSparkQ ) {
        debugBelch("## |%p:%p| (%p)<-spark=%p->(%p) <-(%p)\n",
                   pending_sparks_hd, pending_sparks_tl,
                   spark->prev, spark, spark->next,
                   (spark->next==NULL ? 0 : spark->next->prev));
    }
# endif

    if (spark->prev==NULL) {
        /* spark is first spark of queue => adjust hd pointer */
        ASSERT(pending_sparks_hds[p]==spark);
        pending_sparks_hds[p] = spark->next;
    } else {
        spark->prev->next = spark->next;
    }
    if (spark->next==NULL) {
        ASSERT(pending_sparks_tls[p]==spark);
        /* spark is last spark of queue => adjust tl pointer */
        pending_sparks_tls[p] = spark->prev;
    } else {
        spark->next->prev = spark->prev;
    }
    new_spark = spark->next;

# if defined(GRAN_CHECK)
    if ( RtsFlags.GranFlags.Debug.checkSparkQ ) {
        debugBelch("## |%p:%p| (%p)<-spark=%p->(%p) <-(%p); spark=%p will be deleted NOW \n",
                   pending_sparks_hd, pending_sparks_tl,
                   spark->prev, spark, spark->next,
                   (spark->next==NULL ? 0 : spark->next->prev), spark);
    }
# endif

    if (dispose_too)
        disposeSpark(spark);

    return new_spark;
}

/* Mark all nodes pointed to by sparks in the spark queues (for GC) */
void
markSparkQueue(void)
{
    StgClosure *MarkRoot(StgClosure *root); // prototype
    PEs p;
    rtsSpark *sp;

    for (p=0; p<RtsFlags.GranFlags.proc; p++)
        for (sp=pending_sparks_hds[p]; sp!=NULL; sp=sp->next) {
            ASSERT(sp->node!=NULL);
            ASSERT(LOOKS_LIKE_GHC_INFO(sp->node->header.info));
            // ToDo?: statistics gathering here (also for GUM!)
            sp->node = (StgClosure *)MarkRoot(sp->node);
        }

    IF_DEBUG(gc,
             debugBelch("markSparkQueue: spark statistics at start of GC:");
             print_sparkq_stats());
}

void
print_spark(spark)
rtsSpark *spark;
{
    char str[16];

    if (spark==NULL) {
        debugBelch("Spark: NIL\n");
        return;
    } else {
        sprintf(str,
                ((spark->node==NULL) ? "______" : "%#6lx"),
                stgCast(StgPtr,spark->node));

        debugBelch("Spark: Node %8s, Name %#6x, Global %5s, Creator %5x, Prev %6p, Next %6p\n",
                   str, spark->name,
                   ((spark->global)==rtsTrue?"True":"False"), spark->creator,
                   spark->prev, spark->next);
    }
}

void
print_sparkq(proc)
PEs proc;
// rtsSpark *hd;
{
    rtsSpark *x = pending_sparks_hds[proc];

    debugBelch("Spark Queue of PE %d with root at %p:\n", proc, x);
    for (; x!=(rtsSpark*)NULL; x=x->next) {
        print_spark(x);
    }
}

/*
   Print statistics for all spark queues.
*/
void
print_sparkq_stats(void)
{
    PEs p;

    debugBelch("SparkQs: [");
    for (p=0; p<RtsFlags.GranFlags.proc; p++)
        debugBelch(", PE %d: %d", p, spark_queue_len(p));
    debugBelch("]\n");
}

#endif