rts/sm/Storage.c
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2006
4 *
5 * Storage manager front end
6 *
7 * Documentation on the architecture of the Storage Manager can be
8 * found in the online commentary:
9 *
10 * http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage
11 *
12 * ---------------------------------------------------------------------------*/
13
14 #include "PosixSource.h"
15 #include "Rts.h"
16 #include "RtsUtils.h"
17 #include "RtsFlags.h"
18 #include "Stats.h"
19 #include "Hooks.h"
20 #include "BlockAlloc.h"
21 #include "MBlock.h"
22 #include "Weak.h"
23 #include "Sanity.h"
24 #include "Arena.h"
25 #include "OSThreads.h"
26 #include "Capability.h"
27 #include "Storage.h"
28 #include "Schedule.h"
29 #include "RetainerProfile.h" // for counting memory blocks (memInventory)
30 #include "OSMem.h"
31 #include "Trace.h"
32 #include "GC.h"
33 #include "GCUtils.h"
34
35 #include <stdlib.h>
36 #include <string.h>
37
38 /*
39  * In THREADED_RTS mode, all of these globals must be accessed with sm_mutex held.
40 */
41 StgClosure *caf_list = NULL;
42 StgClosure *revertible_caf_list = NULL;
43 rtsBool keepCAFs;
44
45 bdescr *pinned_object_block; /* allocate pinned objects into this block */
46 nat alloc_blocks; /* number of allocate()d blocks since GC */
47 nat alloc_blocks_lim; /* approximate limit on alloc_blocks */
48
49 generation *generations = NULL; /* all the generations */
50 generation *g0 = NULL; /* generation 0, for convenience */
51 generation *oldest_gen = NULL; /* oldest generation, for convenience */
52 step *g0s0 = NULL; /* generation 0, step 0, for convenience */
53
54 ullong total_allocated = 0; /* total memory allocated during run */
55
56 nat n_nurseries = 0; /* == RtsFlags.ParFlags.nNodes, convenience */
57 step *nurseries = NULL; /* array of nurseries, >1 only if THREADED_RTS */
58
59 #ifdef THREADED_RTS
60 /*
61 * Storage manager mutex: protects all the above state from
62 * simultaneous access by two STG threads.
63 */
64 Mutex sm_mutex;
65 /*
66 * This mutex is used by atomicModifyMutVar# only
67 */
68 Mutex atomic_modify_mutvar_mutex;
69 #endif
70
71
72 /*
73 * Forward references
74 */
75 static void *stgAllocForGMP (size_t size_in_bytes);
76 static void *stgReallocForGMP (void *ptr, size_t old_size, size_t new_size);
77 static void stgDeallocForGMP (void *ptr, size_t size);
78
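// Initialise a single step structure: reset its block lists and counters,
// clear its bitmap, and point it back at its owning generation.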
79 static void
80 initStep (step *stp, int g, int s)
81 {
82 stp->no = s;
83 stp->blocks = NULL;
84 stp->n_blocks = 0;
85 stp->old_blocks = NULL;
86 stp->n_old_blocks = 0;
87 stp->gen = &generations[g];
88 stp->gen_no = g;
89 stp->large_objects = NULL;
90 stp->n_large_blocks = 0;
91 stp->scavenged_large_objects = NULL;
92 stp->n_scavenged_large_blocks = 0;
93 stp->is_compacted = 0;
94 stp->bitmap = NULL;
95 #ifdef THREADED_RTS
96 initSpinLock(&stp->sync_todo);
97 initSpinLock(&stp->sync_large_objects);
98 #endif
99 }
100
101 void
102 initStorage( void )
103 {
104 nat g, s;
105 generation *gen;
106 step *step_arr;
107
108 if (generations != NULL) {
109 // multi-init protection
110 return;
111 }
112
113 initMBlocks();
114
115 /* Sanity check to make sure the LOOKS_LIKE_ macros appear to be
116 * doing something reasonable.
117 */
118 /* We use the NOT_NULL variant or gcc warns that the test is always true */
119 ASSERT(LOOKS_LIKE_INFO_PTR_NOT_NULL(&stg_BLACKHOLE_info));
120 ASSERT(LOOKS_LIKE_CLOSURE_PTR(&stg_dummy_ret_closure));
121 ASSERT(!HEAP_ALLOCED(&stg_dummy_ret_closure));
122
123 if (RtsFlags.GcFlags.maxHeapSize != 0 &&
124 RtsFlags.GcFlags.heapSizeSuggestion >
125 RtsFlags.GcFlags.maxHeapSize) {
126 RtsFlags.GcFlags.maxHeapSize = RtsFlags.GcFlags.heapSizeSuggestion;
127 }
128
129 if (RtsFlags.GcFlags.maxHeapSize != 0 &&
130 RtsFlags.GcFlags.minAllocAreaSize >
131 RtsFlags.GcFlags.maxHeapSize) {
132 errorBelch("maximum heap size (-M) is smaller than minimum alloc area size (-A)");
133 RtsFlags.GcFlags.minAllocAreaSize = RtsFlags.GcFlags.maxHeapSize;
134 }
135
136 initBlockAllocator();
137
138 #if defined(THREADED_RTS)
139 initMutex(&sm_mutex);
140 initMutex(&atomic_modify_mutvar_mutex);
141 #endif
142
143 ACQUIRE_SM_LOCK;
144
145 /* allocate generation info array */
146 generations = (generation *)stgMallocBytes(RtsFlags.GcFlags.generations
147 * sizeof(struct generation_),
148 "initStorage: gens");
149
150 /* allocate all the steps into an array. It is important that we do
151 it this way, because we need the invariant that two step pointers
152 can be directly compared to see which is the oldest.
153 Remember that the last generation has only one step. */
154 step_arr = stgMallocBytes(sizeof(struct step_)
155 * (1 + ((RtsFlags.GcFlags.generations - 1)
156 * RtsFlags.GcFlags.steps)),
157 "initStorage: steps");
158
159 /* Initialise all generations */
160 for(g = 0; g < RtsFlags.GcFlags.generations; g++) {
161 gen = &generations[g];
162 gen->no = g;
163 gen->mut_list = allocBlock();
164 gen->collections = 0;
165 gen->failed_promotions = 0;
166 gen->max_blocks = 0;
167 }
168
169 /* A couple of convenience pointers */
170 g0 = &generations[0];
171 oldest_gen = &generations[RtsFlags.GcFlags.generations-1];
172
173 /* Allocate step structures in each generation */
174 if (RtsFlags.GcFlags.generations > 1) {
175 /* Only for multiple-generations */
176
177 /* Oldest generation: one step */
178 oldest_gen->n_steps = 1;
179 oldest_gen->steps = step_arr + (RtsFlags.GcFlags.generations - 1)
180 * RtsFlags.GcFlags.steps;
181
182     /* set up all except the oldest generation with RtsFlags.GcFlags.steps steps */
183 for(g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
184 generations[g].n_steps = RtsFlags.GcFlags.steps;
185 generations[g].steps = step_arr + g * RtsFlags.GcFlags.steps;
186 }
187
188 } else {
189 /* single generation, i.e. a two-space collector */
190 g0->n_steps = 1;
191 g0->steps = step_arr;
192 }
193
194 #ifdef THREADED_RTS
195 n_nurseries = n_capabilities;
196 #else
197 n_nurseries = 1;
198 #endif
199 nurseries = stgMallocBytes (n_nurseries * sizeof(struct step_),
200 "initStorage: nurseries");
201
202 /* Initialise all steps */
203 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
204 for (s = 0; s < generations[g].n_steps; s++) {
205 initStep(&generations[g].steps[s], g, s);
206 }
207 }
208
209 for (s = 0; s < n_nurseries; s++) {
210 initStep(&nurseries[s], 0, s);
211 }
212
213 /* Set up the destination pointers in each younger gen. step */
214 for (g = 0; g < RtsFlags.GcFlags.generations-1; g++) {
215 for (s = 0; s < generations[g].n_steps-1; s++) {
216 generations[g].steps[s].to = &generations[g].steps[s+1];
217 }
218 generations[g].steps[s].to = &generations[g+1].steps[0];
219 }
220 oldest_gen->steps[0].to = &oldest_gen->steps[0];
221
222 for (s = 0; s < n_nurseries; s++) {
223 nurseries[s].to = generations[0].steps[0].to;
224 }
225
226 /* The oldest generation has one step. */
227 if (RtsFlags.GcFlags.compact) {
228 if (RtsFlags.GcFlags.generations == 1) {
229 errorBelch("WARNING: compaction is incompatible with -G1; disabled");
230 } else {
231 oldest_gen->steps[0].is_compacted = 1;
232 }
233 }
234
235 generations[0].max_blocks = 0;
236 g0s0 = &generations[0].steps[0];
237
238 /* The allocation area. Policy: keep the allocation area
239 * small to begin with, even if we have a large suggested heap
240 * size. Reason: we're going to do a major collection first, and we
241 * don't want it to be a big one. This vague idea is borne out by
242 * rigorous experimental evidence.
243 */
244 allocNurseries();
245
246 weak_ptr_list = NULL;
247 caf_list = NULL;
248 revertible_caf_list = NULL;
249
250 /* initialise the allocate() interface */
251 alloc_blocks = 0;
252 alloc_blocks_lim = RtsFlags.GcFlags.minAllocAreaSize;
253
254 /* Tell GNU multi-precision pkg about our custom alloc functions */
255 mp_set_memory_functions(stgAllocForGMP, stgReallocForGMP, stgDeallocForGMP);
256
257 #ifdef THREADED_RTS
258 initSpinLock(&gc_alloc_block_sync);
259 #endif
260
261 IF_DEBUG(gc, statDescribeGens());
262
263 RELEASE_SM_LOCK;
264 }
265
266 void
267 exitStorage (void)
268 {
269 stat_exit(calcAllocated());
270 }
271
272 void
273 freeStorage (void)
274 {
275 stgFree(g0s0); // frees all the steps
276 stgFree(generations);
277 freeAllMBlocks();
278 #if defined(THREADED_RTS)
279 closeMutex(&sm_mutex);
280 closeMutex(&atomic_modify_mutvar_mutex);
281 #endif
282 stgFree(nurseries);
283 }
284
285 /* -----------------------------------------------------------------------------
286 CAF management.
287
288 The entry code for every CAF does the following:
289
290 - builds a CAF_BLACKHOLE in the heap
291 - pushes an update frame pointing to the CAF_BLACKHOLE
292 - invokes UPD_CAF(), which:
293 - calls newCaf, below
294 - updates the CAF with a static indirection to the CAF_BLACKHOLE
295
296 Why do we build a BLACKHOLE in the heap rather than just updating
297 the thunk directly? It's so that we only need one kind of update
298 frame - otherwise we'd need a static version of the update frame too.
299
300 newCaf() does the following:
301
302     - it puts the CAF on the oldest generation's mutable list.
303 This is so that we can treat the CAF as a root when collecting
304 younger generations.
305
306    For GHCi, we have additional requirements when dealing with CAFs:
307
308 - we must *retain* all dynamically-loaded CAFs ever entered,
309 just in case we need them again.
310 - we must be able to *revert* CAFs that have been evaluated, to
311 their pre-evaluated form.
312
313 To do this, we use an additional CAF list. When newCaf() is
314 called on a dynamically-loaded CAF, we add it to the CAF list
315 instead of the old-generation mutable list, and save away its
316 old info pointer (in caf->saved_info) for later reversion.
317
318 To revert all the CAFs, we traverse the CAF list and reset the
319 info pointer to caf->saved_info, then throw away the CAF list.
320 (see GC.c:revertCAFs()).
321
322 -- SDM 29/1/01
323
324 -------------------------------------------------------------------------- */
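/* Purely as an illustration of the reversion described above (the real
 * code lives in GC.c:revertCAFs()), the traversal looks roughly like this:
 *
 *   StgIndStatic *c;
 *   for (c = (StgIndStatic *)revertible_caf_list; c != NULL;
 *        c = (StgIndStatic *)c->static_link) {
 *       SET_INFO((StgClosure *)c, c->saved_info);   // restore the original info pointer
 *       c->saved_info = NULL;
 *   }
 *   revertible_caf_list = NULL;                     // throw the list away
 */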
325
326 void
327 newCAF(StgClosure* caf)
328 {
329 ACQUIRE_SM_LOCK;
330
331 if(keepCAFs)
332 {
333 // HACK:
334 // If we are in GHCi _and_ we are using dynamic libraries,
335 // then we can't redirect newCAF calls to newDynCAF (see below),
336 // so we make newCAF behave almost like newDynCAF.
337 // The dynamic libraries might be used by both the interpreted
338 // program and GHCi itself, so they must not be reverted.
339 // This also means that in GHCi with dynamic libraries, CAFs are not
340 // garbage collected. If this turns out to be a problem, we could
341 // do another hack here and do an address range test on caf to figure
342 // out whether it is from a dynamic library.
343 ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
344 ((StgIndStatic *)caf)->static_link = caf_list;
345 caf_list = caf;
346 }
347 else
348 {
349 /* Put this CAF on the mutable list for the old generation.
350 * This is a HACK - the IND_STATIC closure doesn't really have
351 * a mut_link field, but we pretend it has - in fact we re-use
352 * the STATIC_LINK field for the time being, because when we
353 * come to do a major GC we won't need the mut_link field
354 * any more and can use it as a STATIC_LINK.
355 */
356 ((StgIndStatic *)caf)->saved_info = NULL;
357 recordMutableGen(caf, oldest_gen);
358 }
359
360 RELEASE_SM_LOCK;
361 }
362
363 // An alternate version of newCaf which is used for dynamically loaded
364 // object code in GHCi. In this case we want to retain *all* CAFs in
365 // the object code, because they might be demanded at any time from an
366 // expression evaluated on the command line.
367 // Also, GHCi might want to revert CAFs, so we add these to the
368 // revertible_caf_list.
369 //
370 // The linker hackily arranges that references to newCaf from dynamic
371 // code end up pointing to newDynCAF.
372 void
373 newDynCAF(StgClosure *caf)
374 {
375 ACQUIRE_SM_LOCK;
376
377 ((StgIndStatic *)caf)->saved_info = (StgInfoTable *)caf->header.info;
378 ((StgIndStatic *)caf)->static_link = revertible_caf_list;
379 revertible_caf_list = caf;
380
381 RELEASE_SM_LOCK;
382 }
383
384 /* -----------------------------------------------------------------------------
385 Nursery management.
386 -------------------------------------------------------------------------- */
387
388 static bdescr *
389 allocNursery (step *stp, bdescr *tail, nat blocks)
390 {
391 bdescr *bd;
392 nat i;
393
394 // Allocate a nursery: we allocate fresh blocks one at a time and
395 // cons them on to the front of the list, not forgetting to update
396 // the back pointer on the tail of the list to point to the new block.
397 for (i=0; i < blocks; i++) {
398 // @LDV profiling
399 /*
400 processNursery() in LdvProfile.c assumes that every block group in
401 the nursery contains only a single block. So, if a block group is
402 given multiple blocks, change processNursery() accordingly.
403 */
404 bd = allocBlock();
405 bd->link = tail;
406 // double-link the nursery: we might need to insert blocks
407 if (tail != NULL) {
408 tail->u.back = bd;
409 }
410 bd->step = stp;
411 bd->gen_no = 0;
412 bd->flags = 0;
413 bd->free = bd->start;
414 tail = bd;
415 }
416 tail->u.back = NULL;
417 return tail;
418 }
419
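// Hand the nurseries out to the Capabilities: each Capability's rNursery
// points at its nursery step, rCurrentNursery at that step's first block,
// and rCurrentAlloc starts out empty.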
420 static void
421 assignNurseriesToCapabilities (void)
422 {
423 #ifdef THREADED_RTS
424 nat i;
425
426 for (i = 0; i < n_nurseries; i++) {
427 capabilities[i].r.rNursery = &nurseries[i];
428 capabilities[i].r.rCurrentNursery = nurseries[i].blocks;
429 capabilities[i].r.rCurrentAlloc = NULL;
430 }
431 #else /* THREADED_RTS */
432 MainCapability.r.rNursery = &nurseries[0];
433 MainCapability.r.rCurrentNursery = nurseries[0].blocks;
434 MainCapability.r.rCurrentAlloc = NULL;
435 #endif
436 }
437
438 void
439 allocNurseries( void )
440 {
441 nat i;
442
443 for (i = 0; i < n_nurseries; i++) {
444 nurseries[i].blocks =
445 allocNursery(&nurseries[i], NULL,
446 RtsFlags.GcFlags.minAllocAreaSize);
447 nurseries[i].n_blocks = RtsFlags.GcFlags.minAllocAreaSize;
448 nurseries[i].old_blocks = NULL;
449 nurseries[i].n_old_blocks = 0;
450 }
451 assignNurseriesToCapabilities();
452 }
453
454 void
455 resetNurseries( void )
456 {
457 nat i;
458 bdescr *bd;
459 step *stp;
460
461 for (i = 0; i < n_nurseries; i++) {
462 stp = &nurseries[i];
463 for (bd = stp->blocks; bd; bd = bd->link) {
464 bd->free = bd->start;
465 ASSERT(bd->gen_no == 0);
466 ASSERT(bd->step == stp);
467 IF_DEBUG(sanity,memset(bd->start, 0xaa, BLOCK_SIZE));
468 }
469 }
470 assignNurseriesToCapabilities();
471 }
472
473 lnat
474 countNurseryBlocks (void)
475 {
476 nat i;
477 lnat blocks = 0;
478
479 for (i = 0; i < n_nurseries; i++) {
480 blocks += nurseries[i].n_blocks;
481 }
482 return blocks;
483 }
484
485 static void
486 resizeNursery ( step *stp, nat blocks )
487 {
488 bdescr *bd;
489 nat nursery_blocks;
490
491 nursery_blocks = stp->n_blocks;
492 if (nursery_blocks == blocks) return;
493
494 if (nursery_blocks < blocks) {
495 debugTrace(DEBUG_gc, "increasing size of nursery to %d blocks",
496 blocks);
497 stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
498 }
499 else {
500 bdescr *next_bd;
501
502 debugTrace(DEBUG_gc, "decreasing size of nursery to %d blocks",
503 blocks);
504
505 bd = stp->blocks;
506 while (nursery_blocks > blocks) {
507 next_bd = bd->link;
508 next_bd->u.back = NULL;
509 nursery_blocks -= bd->blocks; // might be a large block
510 freeGroup(bd);
511 bd = next_bd;
512 }
513 stp->blocks = bd;
514 // might have gone just under, by freeing a large block, so make
515 // up the difference.
516 if (nursery_blocks < blocks) {
517 stp->blocks = allocNursery(stp, stp->blocks, blocks-nursery_blocks);
518 }
519 }
520
521 stp->n_blocks = blocks;
522 ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
523 }
524
525 //
526 // Resize each of the nurseries to the specified size.
527 //
528 void
529 resizeNurseriesFixed (nat blocks)
530 {
531 nat i;
532 for (i = 0; i < n_nurseries; i++) {
533 resizeNursery(&nurseries[i], blocks);
534 }
535 }
536
537 //
538 // Resize the nurseries to the total specified size.
539 //
540 void
541 resizeNurseries (nat blocks)
542 {
543 // If there are multiple nurseries, then we just divide the number
544 // of available blocks between them.
545 resizeNurseriesFixed(blocks / n_nurseries);
546 }
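/* Worked example (illustrative only): with 4 nurseries and 1024 blocks
 * available, resizeNurseries(1024) resizes each nursery to 1024 / 4 = 256
 * blocks; any remainder of the integer division is simply not used.
 */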
547
548 /* -----------------------------------------------------------------------------
549 The allocate() interface
550
551    The allocateInGen() function allocates memory directly into a specific
552 generation. It always succeeds, and returns a chunk of memory n
553 words long. n can be larger than the size of a block if necessary,
554 in which case a contiguous block group will be allocated.
555
556 allocate(n) is equivalent to allocateInGen(g0).
557 -------------------------------------------------------------------------- */
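/* Illustrative sketch only (not part of the original interface docs): a
 * caller that wants an ARR_WORDS object with `payload_words' words of
 * payload -- `payload_words' being a hypothetical variable -- could use
 * allocate() like this (stgAllocForGMP() below does essentially the same
 * thing via allocateLocal()):
 *
 *   StgArrWords *arr;
 *   arr = (StgArrWords *)allocate(sizeofW(StgArrWords) + payload_words);
 *   SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, payload_words);
 *   // arr->payload now refers to payload_words uninitialised words
 */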
558
559 StgPtr
560 allocateInGen (generation *g, nat n)
561 {
562 step *stp;
563 bdescr *bd;
564 StgPtr ret;
565
566 ACQUIRE_SM_LOCK;
567
568 TICK_ALLOC_HEAP_NOCTR(n);
569 CCS_ALLOC(CCCS,n);
570
571 stp = &g->steps[0];
572
573 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_))
574 {
575 nat req_blocks = (lnat)BLOCK_ROUND_UP(n*sizeof(W_)) / BLOCK_SIZE;
576
577 // Attempting to allocate an object larger than maxHeapSize
578 // should definitely be disallowed. (bug #1791)
579 if (RtsFlags.GcFlags.maxHeapSize > 0 &&
580 req_blocks >= RtsFlags.GcFlags.maxHeapSize) {
581 heapOverflow();
582 }
583
584 bd = allocGroup(req_blocks);
585 dbl_link_onto(bd, &stp->large_objects);
586 stp->n_large_blocks += bd->blocks; // might be larger than req_blocks
587 bd->gen_no = g->no;
588 bd->step = stp;
589 bd->flags = BF_LARGE;
590 bd->free = bd->start + n;
591 ret = bd->start;
592 }
593 else
594 {
595       // small allocation (<LARGE_OBJECT_THRESHOLD)
596 bd = stp->blocks;
597 if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
598 bd = allocBlock();
599 bd->gen_no = g->no;
600 bd->step = stp;
601 bd->flags = 0;
602 bd->link = stp->blocks;
603 stp->blocks = bd;
604 stp->n_blocks++;
605 alloc_blocks++;
606 }
607 ret = bd->free;
608 bd->free += n;
609 }
610
611 RELEASE_SM_LOCK;
612
613 return ret;
614 }
615
616 StgPtr
617 allocate (nat n)
618 {
619 return allocateInGen(g0,n);
620 }
621
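// Approximate how much has been allocated via the allocate() interface
// since the last GC.  Note that, as computed here, the figure is in words
// (alloc_blocks * BLOCK_SIZE_W), less the unused tail of the current
// pinned-object block.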
622 lnat
623 allocatedBytes( void )
624 {
625 lnat allocated;
626
627 allocated = alloc_blocks * BLOCK_SIZE_W;
628 if (pinned_object_block != NULL) {
629 allocated -= (pinned_object_block->start + BLOCK_SIZE_W) -
630 pinned_object_block->free;
631 }
632
633 return allocated;
634 }
635
636 /* -----------------------------------------------------------------------------
637 allocateLocal()
638
639 This allocates memory in the current thread - it is intended for
640 use primarily from STG-land where we have a Capability. It is
641 better than allocate() because it doesn't require taking the
642 sm_mutex lock in the common case.
643
644 Memory is allocated directly from the nursery if possible (but not
645 from the current nursery block, so as not to interfere with
646 Hp/HpLim).
647 -------------------------------------------------------------------------- */
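/* Illustrative only: RTS code that already holds a Capability `cap' can
 * allocate a small object without touching sm_mutex, e.g.
 *
 *   StgPtr p = allocateLocal(cap, size_in_words);
 *
 * where `cap' and `size_in_words' are hypothetical names; see
 * stgAllocForGMP() below for a real caller.
 */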
648
649 StgPtr
650 allocateLocal (Capability *cap, nat n)
651 {
652 bdescr *bd;
653 StgPtr p;
654
655 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
656 return allocateInGen(g0,n);
657 }
658
659 /* small allocation (<LARGE_OBJECT_THRESHOLD) */
660
661 TICK_ALLOC_HEAP_NOCTR(n);
662 CCS_ALLOC(CCCS,n);
663
664 bd = cap->r.rCurrentAlloc;
665 if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
666
667 // The CurrentAlloc block is full, we need to find another
668 // one. First, we try taking the next block from the
669 // nursery:
670 bd = cap->r.rCurrentNursery->link;
671
672 if (bd == NULL || bd->free + n > bd->start + BLOCK_SIZE_W) {
673 // The nursery is empty, or the next block is already
674 // full: allocate a fresh block (we can't fail here).
675 ACQUIRE_SM_LOCK;
676 bd = allocBlock();
677 cap->r.rNursery->n_blocks++;
678 RELEASE_SM_LOCK;
679 bd->gen_no = 0;
680 bd->step = cap->r.rNursery;
681 bd->flags = 0;
682 // NO: alloc_blocks++;
683 // calcAllocated() uses the size of the nursery, and we've
684 	    // already bumped nursery->n_blocks above.
685 } else {
686 // we have a block in the nursery: take it and put
687 // it at the *front* of the nursery list, and use it
688 // to allocate() from.
689 cap->r.rCurrentNursery->link = bd->link;
690 if (bd->link != NULL) {
691 bd->link->u.back = cap->r.rCurrentNursery;
692 }
693 }
694 dbl_link_onto(bd, &cap->r.rNursery->blocks);
695 cap->r.rCurrentAlloc = bd;
696 IF_DEBUG(sanity, checkNurserySanity(cap->r.rNursery));
697 }
698 p = bd->free;
699 bd->free += n;
700 return p;
701 }
702
703 /* ---------------------------------------------------------------------------
704 Allocate a fixed/pinned object.
705
706 We allocate small pinned objects into a single block, allocating a
707 new block when the current one overflows. The block is chained
708 onto the large_object_list of generation 0 step 0.
709
710 NOTE: The GC can't in general handle pinned objects. This
711 interface is only safe to use for ByteArrays, which have no
712 pointers and don't require scavenging. It works because the
713 block's descriptor has the BF_LARGE flag set, so the block is
714 treated as a large object and chained onto various lists, rather
715 than the individual objects being copied. However, when it comes
716 to scavenge the block, the GC will only scavenge the first object.
717 The reason is that the GC can't linearly scan a block of pinned
718 objects at the moment (doing so would require using the
719 mostly-copying techniques). But since we're restricting ourselves
720 to pinned ByteArrays, not scavenging is ok.
721
722 This function is called by newPinnedByteArray# which immediately
723 fills the allocated memory with a MutableByteArray#.
724 ------------------------------------------------------------------------- */
725
726 StgPtr
727 allocatePinned( nat n )
728 {
729 StgPtr p;
730 bdescr *bd = pinned_object_block;
731
732 // If the request is for a large object, then allocate()
733 // will give us a pinned object anyway.
734 if (n >= LARGE_OBJECT_THRESHOLD/sizeof(W_)) {
735 return allocate(n);
736 }
737
738 ACQUIRE_SM_LOCK;
739
740 TICK_ALLOC_HEAP_NOCTR(n);
741 CCS_ALLOC(CCCS,n);
742
743 // we always return 8-byte aligned memory. bd->free must be
744 // 8-byte aligned to begin with, so we just round up n to
745 // the nearest multiple of 8 bytes.
746 if (sizeof(StgWord) == 4) {
747 n = (n+1) & ~1;
748 }
749
750 // If we don't have a block of pinned objects yet, or the current
751 // one isn't large enough to hold the new object, allocate a new one.
752 if (bd == NULL || (bd->free + n) > (bd->start + BLOCK_SIZE_W)) {
753 pinned_object_block = bd = allocBlock();
754 dbl_link_onto(bd, &g0s0->large_objects);
755 g0s0->n_large_blocks++;
756 bd->gen_no = 0;
757 bd->step = g0s0;
758 bd->flags = BF_PINNED | BF_LARGE;
759 bd->free = bd->start;
760 alloc_blocks++;
761 }
762
763 p = bd->free;
764 bd->free += n;
765 RELEASE_SM_LOCK;
766 return p;
767 }
768
769 /* -----------------------------------------------------------------------------
770 Write Barriers
771 -------------------------------------------------------------------------- */
772
773 /*
774 This is the write barrier for MUT_VARs, a.k.a. IORefs. A
775 MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
776 is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
777 and is put on the mutable list.
778 */
779 void
780 dirty_MUT_VAR(StgRegTable *reg, StgClosure *p)
781 {
782 Capability *cap = regTableToCapability(reg);
783 bdescr *bd;
784 if (p->header.info == &stg_MUT_VAR_CLEAN_info) {
785 p->header.info = &stg_MUT_VAR_DIRTY_info;
786 bd = Bdescr((StgPtr)p);
787 if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
788 }
789 }
790
791 /*
792    This is the write barrier for MVARs. An MVAR_CLEAN object is not
793    on the mutable list; an MVAR_DIRTY is. When written to, an
794    MVAR_CLEAN turns into an MVAR_DIRTY and is put on the mutable list.
795    The check for MVAR_CLEAN is inlined at the call site for speed;
796    this really does make a difference on concurrency-heavy benchmarks
797    such as Chameneos and cheap-concurrency.
798 */
799 void
800 dirty_MVAR(StgRegTable *reg, StgClosure *p)
801 {
802 Capability *cap = regTableToCapability(reg);
803 bdescr *bd;
804 bd = Bdescr((StgPtr)p);
805 if (bd->gen_no > 0) recordMutableCap(p,cap,bd->gen_no);
806 }
807
808 /* -----------------------------------------------------------------------------
809 Allocation functions for GMP.
810
811 These all use the allocate() interface - we can't have any garbage
812 collection going on during a gmp operation, so we use allocate()
813 which always succeeds. The gmp operations which might need to
814 allocate will ask the storage manager (via doYouWantToGC()) whether
815 a garbage collection is required, in case we get into a loop doing
816 only allocate() style allocation.
817 -------------------------------------------------------------------------- */
818
819 static void *
820 stgAllocForGMP (size_t size_in_bytes)
821 {
822 StgArrWords* arr;
823 nat data_size_in_words, total_size_in_words;
824
825 /* round up to a whole number of words */
826 data_size_in_words = (size_in_bytes + sizeof(W_) + 1) / sizeof(W_);
827 total_size_in_words = sizeofW(StgArrWords) + data_size_in_words;
828
829 /* allocate and fill it in. */
830 #if defined(THREADED_RTS)
831 arr = (StgArrWords *)allocateLocal(myTask()->cap, total_size_in_words);
832 #else
833 arr = (StgArrWords *)allocateLocal(&MainCapability, total_size_in_words);
834 #endif
835 SET_ARR_HDR(arr, &stg_ARR_WORDS_info, CCCS, data_size_in_words);
836
837 /* and return a ptr to the goods inside the array */
838 return arr->payload;
839 }
840
841 static void *
842 stgReallocForGMP (void *ptr, size_t old_size, size_t new_size)
843 {
844 void *new_stuff_ptr = stgAllocForGMP(new_size);
845 nat i = 0;
846 char *p = (char *) ptr;
847 char *q = (char *) new_stuff_ptr;
848
849 for (; i < old_size; i++, p++, q++) {
850 *q = *p;
851 }
852
853 return(new_stuff_ptr);
854 }
855
856 static void
857 stgDeallocForGMP (void *ptr STG_UNUSED,
858 size_t size STG_UNUSED)
859 {
860 /* easy for us: the garbage collector does the dealloc'n */
861 }
862
863 /* -----------------------------------------------------------------------------
864 * Stats and stuff
865 * -------------------------------------------------------------------------- */
866
867 /* -----------------------------------------------------------------------------
868 * calcAllocated()
869 *
870 * Approximate how much we've allocated: number of blocks in the
871  * nursery + blocks allocated via allocate() - unused nursery blocks.
872 * This leaves a little slop at the end of each block, and doesn't
873 * take into account large objects (ToDo).
874 * -------------------------------------------------------------------------- */
875
876 lnat
877 calcAllocated( void )
878 {
879 nat allocated;
880 bdescr *bd;
881
882 allocated = allocatedBytes();
883 allocated += countNurseryBlocks() * BLOCK_SIZE_W;
884
885 {
886 #ifdef THREADED_RTS
887 nat i;
888 for (i = 0; i < n_nurseries; i++) {
889 Capability *cap;
890 for ( bd = capabilities[i].r.rCurrentNursery->link;
891 bd != NULL; bd = bd->link ) {
892 allocated -= BLOCK_SIZE_W;
893 }
894 cap = &capabilities[i];
895 if (cap->r.rCurrentNursery->free <
896 cap->r.rCurrentNursery->start + BLOCK_SIZE_W) {
897 allocated -= (cap->r.rCurrentNursery->start + BLOCK_SIZE_W)
898 - cap->r.rCurrentNursery->free;
899 }
900 }
901 #else
902 bdescr *current_nursery = MainCapability.r.rCurrentNursery;
903
904 for ( bd = current_nursery->link; bd != NULL; bd = bd->link ) {
905 allocated -= BLOCK_SIZE_W;
906 }
907 if (current_nursery->free < current_nursery->start + BLOCK_SIZE_W) {
908 allocated -= (current_nursery->start + BLOCK_SIZE_W)
909 - current_nursery->free;
910 }
911 #endif
912 }
913
914 total_allocated += allocated;
915 return allocated;
916 }
917
918 /* Approximate the amount of live data in the heap. To be called just
919 * after garbage collection (see GarbageCollect()).
920 */
921 lnat
922 calcLiveBlocks(void)
923 {
924 nat g, s;
925 lnat live = 0;
926 step *stp;
927
928 if (RtsFlags.GcFlags.generations == 1) {
929 return g0s0->n_large_blocks + g0s0->n_blocks;
930 }
931
932 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
933 for (s = 0; s < generations[g].n_steps; s++) {
934 /* approximate amount of live data (doesn't take into account slop
935 * at end of each block).
936 */
937 if (g == 0 && s == 0) {
938 continue;
939 }
940 stp = &generations[g].steps[s];
941 live += stp->n_large_blocks + stp->n_blocks;
942 }
943 }
944 return live;
945 }
946
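// Count the number of words actually in use (free - start) across a chain
// of block descriptors.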
947 lnat
948 countOccupied(bdescr *bd)
949 {
950 lnat words;
951
952 words = 0;
953 for (; bd != NULL; bd = bd->link) {
954 words += bd->free - bd->start;
955 }
956 return words;
957 }
958
959 // Return an accurate count of the live data in the heap, excluding
960 // generation 0 (unless there is only one generation).
961 lnat
962 calcLiveWords(void)
963 {
964 nat g, s;
965 lnat live;
966 step *stp;
967
968 if (RtsFlags.GcFlags.generations == 1) {
969 return countOccupied(g0s0->blocks) + countOccupied(g0s0->large_objects);
970 }
971
972 live = 0;
973 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
974 for (s = 0; s < generations[g].n_steps; s++) {
975 if (g == 0 && s == 0) continue;
976 stp = &generations[g].steps[s];
977 live += countOccupied(stp->blocks) +
978 countOccupied(stp->large_objects);
979 }
980 }
981 return live;
982 }
983
984 /* Approximate the number of blocks that will be needed at the next
985 * garbage collection.
986 *
987 * Assume: all data currently live will remain live. Steps that will
988 * be collected next time will therefore need twice as many blocks
989 * since all the data will be copied.
990 */
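/* Worked example (illustrative only): a non-compacted step holding 100
 * blocks in a generation that is due to be collected contributes
 * 2 * 100 = 200 blocks to the estimate, because copying collection needs
 * to-space as well as from-space.
 */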
991 extern lnat
992 calcNeeded(void)
993 {
994 lnat needed = 0;
995 nat g, s;
996 step *stp;
997
998 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
999 for (s = 0; s < generations[g].n_steps; s++) {
1000 if (g == 0 && s == 0) { continue; }
1001 stp = &generations[g].steps[s];
1002 if (generations[g].steps[0].n_blocks +
1003 generations[g].steps[0].n_large_blocks
1004 > generations[g].max_blocks
1005 && stp->is_compacted == 0) {
1006 needed += 2 * stp->n_blocks;
1007 } else {
1008 needed += stp->n_blocks;
1009 }
1010 }
1011 }
1012 return needed;
1013 }
1014
1015 /* ----------------------------------------------------------------------------
1016 Executable memory
1017
1018 Executable memory must be managed separately from non-executable
1019 memory. Most OSs these days require you to jump through hoops to
1020 dynamically allocate executable memory, due to various security
1021 measures.
1022
1023 Here we provide a small memory allocator for executable memory.
1024 Memory is managed with a page granularity; we allocate linearly
1025 in the page, and when the page is emptied (all objects on the page
1026 are free) we free the page again, not forgetting to make it
1027 non-executable.
1028
1029 TODO: The inability to handle objects bigger than BLOCK_SIZE_W means that
1030 the linker cannot use allocateExec for loading object code files
1031 on Windows. Once allocateExec can handle larger objects, the linker
1032 should be modified to use allocateExec instead of VirtualAlloc.
1033 ------------------------------------------------------------------------- */
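/* Illustrative sketch only: a client of this allocator (for example the
 * foreign-export adjustor code) might use it roughly as follows, where
 * `template' and `code_size' are hypothetical names:
 *
 *   void *code = allocateExec(code_size);
 *   memcpy(code, template, code_size);    // copy in the machine code
 *   ...
 *   freeExec(code);                       // when the code is no longer needed
 */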
1034
1035 static bdescr *exec_block;
1036
1037 void *allocateExec (nat bytes)
1038 {
1039 void *ret;
1040 nat n;
1041
1042 ACQUIRE_SM_LOCK;
1043
1044 // round up to words.
1045 n = (bytes + sizeof(W_) + 1) / sizeof(W_);
1046
1047 if (n+1 > BLOCK_SIZE_W) {
1048 barf("allocateExec: can't handle large objects");
1049 }
1050
1051 if (exec_block == NULL ||
1052 exec_block->free + n + 1 > exec_block->start + BLOCK_SIZE_W) {
1053 bdescr *bd;
1054 lnat pagesize = getPageSize();
1055 bd = allocGroup(stg_max(1, pagesize / BLOCK_SIZE));
1056 debugTrace(DEBUG_gc, "allocate exec block %p", bd->start);
1057 bd->gen_no = 0;
1058 bd->flags = BF_EXEC;
1059 bd->link = exec_block;
1060 if (exec_block != NULL) {
1061 exec_block->u.back = bd;
1062 }
1063 bd->u.back = NULL;
1064 setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsTrue);
1065 exec_block = bd;
1066 }
1067 *(exec_block->free) = n; // store the size of this chunk
1068 exec_block->gen_no += n; // gen_no stores the number of words allocated
1069 ret = exec_block->free + 1;
1070 exec_block->free += n + 1;
1071
1072     RELEASE_SM_LOCK;
1073 return ret;
1074 }
1075
1076 void freeExec (void *addr)
1077 {
1078 StgPtr p = (StgPtr)addr - 1;
1079 bdescr *bd = Bdescr((StgPtr)p);
1080
1081 if ((bd->flags & BF_EXEC) == 0) {
1082 barf("freeExec: not executable");
1083 }
1084
1085 if (*(StgPtr)p == 0) {
1086 barf("freeExec: already free?");
1087 }
1088
1089 ACQUIRE_SM_LOCK;
1090
1091 bd->gen_no -= *(StgPtr)p;
1092 *(StgPtr)p = 0;
1093
1094 if (bd->gen_no == 0) {
1095 // Free the block if it is empty, but not if it is the block at
1096 // the head of the queue.
1097 if (bd != exec_block) {
1098 debugTrace(DEBUG_gc, "free exec block %p", bd->start);
1099 dbl_link_remove(bd, &exec_block);
1100 setExecutable(bd->start, bd->blocks * BLOCK_SIZE, rtsFalse);
1101 freeGroup(bd);
1102 } else {
1103 bd->free = bd->start;
1104 }
1105 }
1106
1107     RELEASE_SM_LOCK;
1108 }
1109
1110 /* -----------------------------------------------------------------------------
1111 Debugging
1112
1113 memInventory() checks for memory leaks by counting up all the
1114 blocks we know about and comparing that to the number of blocks
1115 allegedly floating around in the system.
1116 -------------------------------------------------------------------------- */
1117
1118 #ifdef DEBUG
1119
1120 // Useful for finding partially full blocks in gdb
1121 void findSlop(bdescr *bd);
1122 void findSlop(bdescr *bd)
1123 {
1124 lnat slop;
1125
1126 for (; bd != NULL; bd = bd->link) {
1127 slop = (bd->blocks * BLOCK_SIZE_W) - (bd->free - bd->start);
1128 if (slop > (1024/sizeof(W_))) {
1129 debugBelch("block at %p (bdescr %p) has %ldKB slop\n",
1130 bd->start, bd, slop / (1024/sizeof(W_)));
1131 }
1132 }
1133 }
1134
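// Count the total number of blocks in a chain of block descriptors.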
1135 nat
1136 countBlocks(bdescr *bd)
1137 {
1138 nat n;
1139 for (n=0; bd != NULL; bd=bd->link) {
1140 n += bd->blocks;
1141 }
1142 return n;
1143 }
1144
1145 // (*1) Just like countBlocks, except that we adjust the count for a
1146 // megablock group so that it doesn't include the extra few blocks
1147 // that would be taken up by block descriptors in the second and
1148 // subsequent megablock. This is so we can tally the count with the
1149 // number of blocks allocated in the system, for memInventory().
1150 static nat
1151 countAllocdBlocks(bdescr *bd)
1152 {
1153 nat n;
1154 for (n=0; bd != NULL; bd=bd->link) {
1155 n += bd->blocks;
1156 // hack for megablock groups: see (*1) above
1157 if (bd->blocks > BLOCKS_PER_MBLOCK) {
1158 n -= (MBLOCK_SIZE / BLOCK_SIZE - BLOCKS_PER_MBLOCK)
1159 * (bd->blocks/(MBLOCK_SIZE/BLOCK_SIZE));
1160 }
1161 }
1162 return n;
1163 }
1164
1165 static lnat
1166 stepBlocks (step *stp)
1167 {
1168 ASSERT(countBlocks(stp->blocks) == stp->n_blocks);
1169 ASSERT(countBlocks(stp->large_objects) == stp->n_large_blocks);
1170 return stp->n_blocks + stp->n_old_blocks +
1171 countAllocdBlocks(stp->large_objects);
1172 }
1173
1174 void
1175 memInventory(void)
1176 {
1177 nat g, s, i;
1178 step *stp;
1179 lnat gen_blocks[RtsFlags.GcFlags.generations];
1180 lnat nursery_blocks, retainer_blocks,
1181 arena_blocks, exec_blocks;
1182 lnat live_blocks = 0, free_blocks = 0;
1183
1184   // count the blocks we currently have
1185
1186 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1187 gen_blocks[g] = 0;
1188 for (i = 0; i < n_capabilities; i++) {
1189 gen_blocks[g] += countBlocks(capabilities[i].mut_lists[g]);
1190 }
1191 gen_blocks[g] += countAllocdBlocks(generations[g].mut_list);
1192 for (s = 0; s < generations[g].n_steps; s++) {
1193 stp = &generations[g].steps[s];
1194 gen_blocks[g] += stepBlocks(stp);
1195 }
1196 }
1197
1198 nursery_blocks = 0;
1199 for (i = 0; i < n_nurseries; i++) {
1200 nursery_blocks += stepBlocks(&nurseries[i]);
1201 }
1202
1203 retainer_blocks = 0;
1204 #ifdef PROFILING
1205 if (RtsFlags.ProfFlags.doHeapProfile == HEAP_BY_RETAINER) {
1206 retainer_blocks = retainerStackBlocks();
1207 }
1208 #endif
1209
1210 // count the blocks allocated by the arena allocator
1211 arena_blocks = arenaBlocks();
1212
1213 // count the blocks containing executable memory
1214 exec_blocks = countAllocdBlocks(exec_block);
1215
1216 /* count the blocks on the free list */
1217 free_blocks = countFreeList();
1218
1219 live_blocks = 0;
1220 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1221 live_blocks += gen_blocks[g];
1222 }
1223   live_blocks += nursery_blocks +
1224                  retainer_blocks + arena_blocks + exec_blocks;
1225
1226 if (live_blocks + free_blocks != mblocks_allocated * BLOCKS_PER_MBLOCK)
1227 {
1228 debugBelch("Memory leak detected\n");
1229 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1230 debugBelch(" gen %d blocks : %4lu\n", g, gen_blocks[g]);
1231 }
1232 debugBelch(" nursery : %4lu\n", nursery_blocks);
1233 debugBelch(" retainer : %4lu\n", retainer_blocks);
1234 debugBelch(" arena blocks : %4lu\n", arena_blocks);
1235 debugBelch(" exec : %4lu\n", exec_blocks);
1236 debugBelch(" free : %4lu\n", free_blocks);
1237 debugBelch(" total : %4lu\n\n", live_blocks + free_blocks);
1238 debugBelch(" in system : %4lu\n", mblocks_allocated * BLOCKS_PER_MBLOCK);
1239 ASSERT(0);
1240 }
1241 }
1242
1243
1244 /* Full heap sanity check. */
1245 void
1246 checkSanity( void )
1247 {
1248 nat g, s;
1249
1250 if (RtsFlags.GcFlags.generations == 1) {
1251 checkHeap(g0s0->blocks);
1252 checkChain(g0s0->large_objects);
1253 } else {
1254
1255 for (g = 0; g < RtsFlags.GcFlags.generations; g++) {
1256 for (s = 0; s < generations[g].n_steps; s++) {
1257 if (g == 0 && s == 0) { continue; }
1258 ASSERT(countBlocks(generations[g].steps[s].blocks)
1259 == generations[g].steps[s].n_blocks);
1260 ASSERT(countBlocks(generations[g].steps[s].large_objects)
1261 == generations[g].steps[s].n_large_blocks);
1262 checkHeap(generations[g].steps[s].blocks);
1263 checkChain(generations[g].steps[s].large_objects);
1264 if (g > 0) {
1265 checkMutableList(generations[g].mut_list, g);
1266 }
1267 }
1268 }
1269
1270 for (s = 0; s < n_nurseries; s++) {
1271 ASSERT(countBlocks(nurseries[s].blocks)
1272 == nurseries[s].n_blocks);
1273 ASSERT(countBlocks(nurseries[s].large_objects)
1274 == nurseries[s].n_large_blocks);
1275 }
1276
1277 checkFreeListSanity();
1278 }
1279 }
1280
1281 /* Nursery sanity check */
1282 void
1283 checkNurserySanity( step *stp )
1284 {
1285 bdescr *bd, *prev;
1286 nat blocks = 0;
1287
1288 prev = NULL;
1289 for (bd = stp->blocks; bd != NULL; bd = bd->link) {
1290 ASSERT(bd->u.back == prev);
1291 prev = bd;
1292 blocks += bd->blocks;
1293 }
1294 ASSERT(blocks == stp->n_blocks);
1295 }
1296
1297 // handy function for use in gdb, because Bdescr() is inlined.
1298 extern bdescr *_bdescr( StgPtr p );
1299
1300 bdescr *
1301 _bdescr( StgPtr p )
1302 {
1303 return Bdescr(p);
1304 }
1305
1306 #endif