Improve handling of -fdph-* flags
[ghc.git] / includes / Storage.h
1 /* -----------------------------------------------------------------------------
2 *
3 * (c) The GHC Team, 1998-2004
4 *
5 * External Storage Manger Interface
6 *
7 * ---------------------------------------------------------------------------*/
8
9 #ifndef STORAGE_H
10 #define STORAGE_H
11
12 #include <stddef.h>
13 #include "OSThreads.h"
14 #include "SMP.h"
15
16 /* -----------------------------------------------------------------------------
17 * Generational GC
18 *
19 * We support an arbitrary number of generations, with an arbitrary number
20 * of steps per generation. Notes (in no particular order):
21 *
22 * - all generations except the oldest should have two steps. This gives
23 * objects a decent chance to age before being promoted, and in
24 * particular will ensure that we don't end up with too many
25 * thunks being updated in older generations.
26 *
27 * - the oldest generation has one step. There's no point in aging
28 * objects in the oldest generation.
29 *
30 * - generation 0, step 0 (G0S0) is the allocation area. It is given
31 * a fixed set of blocks during initialisation, and these blocks
32 * are never freed.
33 *
34 * - during garbage collection, each step which is an evacuation
35 * destination (i.e. all steps except G0S0) is allocated a to-space.
36 * evacuated objects are allocated into the step's to-space until
37 * GC is finished, when the original step's contents may be freed
38 * and replaced by the to-space.
39 *
40 * - the mutable-list is per-generation (not per-step). G0 doesn't
41 * have one (since every garbage collection collects at least G0).
42 *
43 * - block descriptors contain pointers to both the step and the
44 * generation that the block belongs to, for convenience.
45 *
46 * - static objects are stored in per-generation lists. See GC.c for
47 * details of how we collect CAFs in the generational scheme.
48 *
49 * - large objects are per-step, and are promoted in the same way
50 * as small objects, except that we may allocate large objects into
51 * generation 1 initially.
52 *
53 * ------------------------------------------------------------------------- */
54
/* A "step" within a generation.  The fields before the "used during GC
   only" marker describe the live data belonging to the step; the rest
   are scratch state for the collector. */
typedef struct step_ {
    unsigned int no;                  // step number in this generation
    unsigned int abs_no;              // absolute step number

    struct generation_ * gen;         // generation this step belongs to
    unsigned int gen_no;              // generation number (cached)

    bdescr * blocks;                  // blocks in this step
    unsigned int n_blocks;            // number of blocks
    unsigned int n_words;             // number of words

    struct step_ * to;                // destination step for live objects

    bdescr * large_objects;           // large objects (doubly linked)
    unsigned int n_large_blocks;      // no. of blocks used by large objs

    StgTSO * threads;                 // threads in this step
                                      // linked via global_link

    // ------------------------------------
    // Fields below are used during GC only

    // During GC, if we are collecting this step, blocks and n_blocks
    // are copied into old_blocks and n_old_blocks below.  After GC,
    // those blocks are freed.

#if defined(THREADED_RTS)
    char pad[128];                    // make sure the following is
                                      // on a separate cache line.
    SpinLock sync_todo;               // lock for todos
    SpinLock sync_large_objects;      // lock for large_objects
                                      // and scavenged_large_objects
#endif

    int mark;                         // mark (not copy)? (old gen only)
    int compact;                      // compact (not sweep)? (old gen only)

    bdescr * old_blocks;              // bdescr of first from-space block
    unsigned int n_old_blocks;        // number of blocks in from-space
    unsigned int live_estimate;       // for sweeping: estimate of live data

    bdescr * todos;                   // blocks waiting to be scavenged
    bdescr * todos_last;              // tail of the todos list
    unsigned int n_todos;             // count of above

    bdescr * part_blocks;             // partially-full scanned blocks
    unsigned int n_part_blocks;       // count of above

    bdescr * scavenged_large_objects; // live large objs after GC (d-link)
    unsigned int n_scavenged_large_blocks; // size (not count) of above

    bdescr * bitmap;                  // bitmap for compacting collection

    StgTSO * old_threads;             // NOTE(review): presumably the pre-GC
                                      // value of 'threads' -- confirm in GC.c

} step;
111
112
/* A generation: an array of steps plus the generation-wide mutable
   list.  Generation 0 has no mutable list (see the block comment at
   the top of this file). */
typedef struct generation_ {
    unsigned int no;            // generation number
    step * steps;               // steps
    unsigned int n_steps;       // number of steps
    unsigned int max_blocks;    // max blocks in step 0
    bdescr *mut_list;           // mut objects in this gen (not G0)

    // stats information
    unsigned int collections;       // number of times this gen was collected
    unsigned int par_collections;   // ...of which were parallel collections
    unsigned int failed_promotions; // number of failed promotions (stats)

    // temporary use during GC:
    bdescr *saved_mut_list;     // mut_list is stashed here during GC
} generation;
128
/* Globals describing the generation/step structure of the heap. */
extern generation * RTS_VAR(generations);   // array of all generations

extern generation * RTS_VAR(g0);            // generation 0
extern step * RTS_VAR(g0s0);                // gen 0, step 0: the allocation area
extern generation * RTS_VAR(oldest_gen);    // the oldest generation
extern step * RTS_VAR(all_steps);           // all steps in one flat array
                                            // (presumably indexed by abs_no)
extern nat RTS_VAR(total_steps);            // number of entries in all_steps
136
137 /* -----------------------------------------------------------------------------
138 Initialisation / De-initialisation
139 -------------------------------------------------------------------------- */
140
141 extern void initStorage(void);
142 extern void exitStorage(void);
143 extern void freeStorage(void);
144
145 /* -----------------------------------------------------------------------------
146 Generic allocation
147
148 StgPtr allocateInGen(generation *g, nat n)
149 Allocates a chunk of contiguous store
150 n words long in generation g,
151 returning a pointer to the first word.
152 Always succeeds.
153
   StgPtr allocate(nat n)       Equivalent to allocateInGen(g0, n)
155
156 StgPtr allocateLocal(Capability *cap, nat n)
157 Allocates memory from the nursery in
158 the current Capability. This can be
159 done without taking a global lock,
160 unlike allocate().
161
162 StgPtr allocatePinned(nat n) Allocates a chunk of contiguous store
163 n words long, which is at a fixed
164 address (won't be moved by GC).
165 Returns a pointer to the first word.
166 Always succeeds.
167
168 NOTE: the GC can't in general handle
169 pinned objects, so allocatePinned()
170 can only be used for ByteArrays at the
171 moment.
172
173 Don't forget to TICK_ALLOC_XXX(...)
174 after calling allocate or
175 allocatePinned, for the
176 benefit of the ticky-ticky profiler.
177
178 rtsBool doYouWantToGC(void) Returns True if the storage manager is
179 ready to perform a GC, False otherwise.
180
181 lnat allocatedBytes(void) Returns the number of bytes allocated
182 via allocate() since the last GC.
183 Used in the reporting of statistics.
184
185 -------------------------------------------------------------------------- */
186
/* Allocation entry points -- see the block comment above for contracts.
   (Note: the comment above writes "nat", but the actual parameter type
   is lnat.) */
extern StgPtr allocate ( lnat n );
extern StgPtr allocateInGen ( generation *g, lnat n );
extern StgPtr allocateLocal ( Capability *cap, lnat n );
extern StgPtr allocatePinned ( lnat n );
extern lnat allocatedBytes ( void );

/* Block lists maintained by the allocators (see Storage.c). */
extern bdescr * RTS_VAR(small_alloc_list);
extern bdescr * RTS_VAR(large_alloc_list);
extern bdescr * RTS_VAR(pinned_object_block);

extern nat RTS_VAR(alloc_blocks);       // blocks allocated since the last GC
extern nat RTS_VAR(alloc_blocks_lim);   // GC trigger: see doYouWantToGC()
199
200 INLINE_HEADER rtsBool
201 doYouWantToGC( void )
202 {
203 return (alloc_blocks >= alloc_blocks_lim);
204 }
205
/* memory allocator for executable memory */
extern void *allocateExec (nat bytes);  // allocate 'bytes' bytes of executable memory
extern void freeExec (void *p);         // free memory obtained from allocateExec()

/* for splitting blocks groups in two */
extern bdescr * splitLargeBlock (bdescr *bd, nat blocks);  // see Storage.c
212
/* -----------------------------------------------------------------------------
   Performing Garbage Collection

   GarbageCollect(force_major_gc)      Performs a garbage collection.
                                       force_major_gc is rtsTrue to force
                                       a collection of all generations.

   -------------------------------------------------------------------------- */
222
223 extern void GarbageCollect(rtsBool force_major_gc);
224
225 /* -----------------------------------------------------------------------------
226 Generational garbage collection support
227
228 recordMutable(StgPtr p) Informs the garbage collector that a
229 previously immutable object has
230 become (permanently) mutable. Used
231 by thawArray and similar.
232
233 updateWithIndirection(p1,p2) Updates the object at p1 with an
234 indirection pointing to p2. This is
235 normally called for objects in an old
236 generation (>0) when they are updated.
237
238 updateWithPermIndirection(p1,p2) As above but uses a permanent indir.
239
240 -------------------------------------------------------------------------- */
241
242 /*
243 * Storage manager mutex
244 */
245 #if defined(THREADED_RTS)
246 extern Mutex sm_mutex;
247 extern Mutex atomic_modify_mutvar_mutex;
248 extern SpinLock recordMutableGen_sync;
249 #endif
250
251 #if defined(THREADED_RTS)
252 #define ACQUIRE_SM_LOCK ACQUIRE_LOCK(&sm_mutex);
253 #define RELEASE_SM_LOCK RELEASE_LOCK(&sm_mutex);
254 #define ASSERT_SM_LOCK() ASSERT_LOCK_HELD(&sm_mutex);
255 #else
256 #define ACQUIRE_SM_LOCK
257 #define RELEASE_SM_LOCK
258 #define ASSERT_SM_LOCK()
259 #endif
260
261 INLINE_HEADER void
262 recordMutableGen(StgClosure *p, generation *gen)
263 {
264 bdescr *bd;
265
266 bd = gen->mut_list;
267 if (bd->free >= bd->start + BLOCK_SIZE_W) {
268 bdescr *new_bd;
269 new_bd = allocBlock();
270 new_bd->link = bd;
271 bd = new_bd;
272 gen->mut_list = bd;
273 }
274 *bd->free++ = (StgWord)p;
275
276 }
277
// Thread-safe wrapper for recordMutableGen(): takes the storage-manager
// lock around the update (a no-op in the non-threaded RTS).
INLINE_HEADER void
recordMutableGenLock(StgClosure *p, generation *gen)
{
    ACQUIRE_SM_LOCK;
    recordMutableGen(p,gen);
    RELEASE_SM_LOCK;
}
285
286 extern bdescr *allocBlock_sync(void);
287
288 // Version of recordMutableGen() for use in parallel GC. The same as
289 // recordMutableGen(), except that we surround it with a spinlock and
290 // call the spinlock version of allocBlock().
291 INLINE_HEADER void
292 recordMutableGen_GC(StgClosure *p, generation *gen)
293 {
294 bdescr *bd;
295
296 ACQUIRE_SPIN_LOCK(&recordMutableGen_sync);
297
298 bd = gen->mut_list;
299 if (bd->free >= bd->start + BLOCK_SIZE_W) {
300 bdescr *new_bd;
301 new_bd = allocBlock_sync();
302 new_bd->link = bd;
303 bd = new_bd;
304 gen->mut_list = bd;
305 }
306 *bd->free++ = (StgWord)p;
307
308 RELEASE_SPIN_LOCK(&recordMutableGen_sync);
309 }
310
311 INLINE_HEADER void
312 recordMutable(StgClosure *p)
313 {
314 bdescr *bd;
315 ASSERT(closure_MUTABLE(p));
316 bd = Bdescr((P_)p);
317 if (bd->gen_no > 0) recordMutableGen(p, &RTS_DEREF(generations)[bd->gen_no]);
318 }
319
// Thread-safe wrapper for recordMutable(): takes the storage-manager
// lock around the update (a no-op in the non-threaded RTS).
INLINE_HEADER void
recordMutableLock(StgClosure *p)
{
    ACQUIRE_SM_LOCK;
    recordMutable(p);
    RELEASE_SM_LOCK;
}
327
328 /* -----------------------------------------------------------------------------
329 The CAF table - used to let us revert CAFs in GHCi
330 -------------------------------------------------------------------------- */
331
332 /* set to disable CAF garbage collection in GHCi. */
333 /* (needed when dynamic libraries are used). */
334 extern rtsBool keepCAFs;
335
336 /* -----------------------------------------------------------------------------
337 This is the write barrier for MUT_VARs, a.k.a. IORefs. A
338 MUT_VAR_CLEAN object is not on the mutable list; a MUT_VAR_DIRTY
339 is. When written to, a MUT_VAR_CLEAN turns into a MUT_VAR_DIRTY
340 and is put on the mutable list.
341 -------------------------------------------------------------------------- */
342
343 void dirty_MUT_VAR(StgRegTable *reg, StgClosure *p);
344
345 /* -----------------------------------------------------------------------------
346 DEBUGGING predicates for pointers
347
348 LOOKS_LIKE_INFO_PTR(p) returns False if p is definitely not an info ptr
349 LOOKS_LIKE_CLOSURE_PTR(p) returns False if p is definitely not a closure ptr
350
351 These macros are complete but not sound. That is, they might
352 return false positives. Do not rely on them to distinguish info
353 pointers from closure pointers, for example.
354
355 We don't use address-space predicates these days, for portability
356 reasons, and the fact that code/data can be scattered about the
357 address space in a dynamically-linked environment. Our best option
358 is to look at the alleged info table and see whether it seems to
359 make sense...
360 -------------------------------------------------------------------------- */
361
362 INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR (StgWord p);
363 INLINE_HEADER rtsBool LOOKS_LIKE_CLOSURE_PTR (void *p); // XXX StgClosure*
364
365 /* -----------------------------------------------------------------------------
366 Macros for calculating how big a closure will be (used during allocation)
367 -------------------------------------------------------------------------- */
368
/* Each helper returns the total closure size in words, including the
   header, for the given payload size(s). */

// PAP with n_args payload words
INLINE_HEADER StgOffset PAP_sizeW ( nat n_args )
{ return sizeofW(StgPAP) + n_args; }

// AP with n_args payload words
INLINE_HEADER StgOffset AP_sizeW ( nat n_args )
{ return sizeofW(StgAP) + n_args; }

// AP_STACK carrying 'size' words of stack
INLINE_HEADER StgOffset AP_STACK_sizeW ( nat size )
{ return sizeofW(StgAP_STACK) + size; }

// constructor with p pointer words and np non-pointer words
INLINE_HEADER StgOffset CONSTR_sizeW( nat p, nat np )
{ return sizeofW(StgHeader) + p + np; }

// thunk selectors have a fixed size
INLINE_HEADER StgOffset THUNK_SELECTOR_sizeW ( void )
{ return sizeofW(StgSelector); }

// blackholes are a header plus the minimum payload
INLINE_HEADER StgOffset BLACKHOLE_sizeW ( void )
{ return sizeofW(StgHeader)+MIN_PAYLOAD_SIZE; }
386
387 /* --------------------------------------------------------------------------
388 Sizes of closures
389 ------------------------------------------------------------------------*/
390
// Size of a closure computed from its info table's pointer/non-pointer
// payload counts.
INLINE_HEADER StgOffset sizeW_fromITBL( const StgInfoTable* itbl )
{ return sizeofW(StgClosure)
	+ sizeofW(StgPtr)  * itbl->layout.payload.ptrs
	+ sizeofW(StgWord) * itbl->layout.payload.nptrs; }

// As sizeW_fromITBL, but with a StgThunk header.
INLINE_HEADER StgOffset thunk_sizeW_fromITBL( const StgInfoTable* itbl )
{ return sizeofW(StgThunk)
	+ sizeofW(StgPtr)  * itbl->layout.payload.ptrs
	+ sizeofW(StgWord) * itbl->layout.payload.nptrs; }

// Sizes of closures whose payload length is stored in the object itself.
INLINE_HEADER StgOffset ap_stack_sizeW( StgAP_STACK* x )
{ return AP_STACK_sizeW(x->size); }

INLINE_HEADER StgOffset ap_sizeW( StgAP* x )
{ return AP_sizeW(x->n_args); }

INLINE_HEADER StgOffset pap_sizeW( StgPAP* x )
{ return PAP_sizeW(x->n_args); }

INLINE_HEADER StgOffset arr_words_sizeW( StgArrWords* x )
{ return sizeofW(StgArrWords) + x->words; }

INLINE_HEADER StgOffset mut_arr_ptrs_sizeW( StgMutArrPtrs* x )
{ return sizeofW(StgMutArrPtrs) + x->ptrs; }

// TSO size includes the thread's stack.
INLINE_HEADER StgWord tso_sizeW ( StgTSO *tso )
{ return TSO_STRUCT_SIZEW + tso->stack_size; }

// BCOs record their own total size.
INLINE_HEADER StgWord bco_sizeW ( StgBCO *bco )
{ return bco->size; }
421
/* Size in words of the closure at p, given its info table.  The small
   pre-sized closure types are special-cased; types with a payload length
   stored in the object defer to their *_sizeW helper; everything else
   falls back to the info-table layout.  Every case returns, so no breaks
   are needed. */
INLINE_HEADER nat
closure_sizeW_ (StgClosure *p, StgInfoTable *info)
{
    switch (info->type) {
    case THUNK_0_1:
    case THUNK_1_0:
	return sizeofW(StgThunk) + 1;
    case FUN_0_1:
    case CONSTR_0_1:
    case FUN_1_0:
    case CONSTR_1_0:
	return sizeofW(StgHeader) + 1;
    case THUNK_0_2:
    case THUNK_1_1:
    case THUNK_2_0:
	return sizeofW(StgThunk) + 2;
    case FUN_0_2:
    case CONSTR_0_2:
    case FUN_1_1:
    case CONSTR_1_1:
    case FUN_2_0:
    case CONSTR_2_0:
	return sizeofW(StgHeader) + 2;
    case THUNK:
	return thunk_sizeW_fromITBL(info);
    case THUNK_SELECTOR:
	return THUNK_SELECTOR_sizeW();
    case AP_STACK:
	return ap_stack_sizeW((StgAP_STACK *)p);
    case AP:
	return ap_sizeW((StgAP *)p);
    case PAP:
	return pap_sizeW((StgPAP *)p);
    case IND:
    case IND_PERM:
    case IND_OLDGEN:
    case IND_OLDGEN_PERM:
	return sizeofW(StgInd);
    case ARR_WORDS:
	return arr_words_sizeW((StgArrWords *)p);
    case MUT_ARR_PTRS_CLEAN:
    case MUT_ARR_PTRS_DIRTY:
    case MUT_ARR_PTRS_FROZEN:
    case MUT_ARR_PTRS_FROZEN0:
	return mut_arr_ptrs_sizeW((StgMutArrPtrs*)p);
    case TSO:
	return tso_sizeW((StgTSO *)p);
    case BCO:
	return bco_sizeW((StgBCO *)p);
    case TVAR_WATCH_QUEUE:
        return sizeofW(StgTVarWatchQueue);
    case TVAR:
        return sizeofW(StgTVar);
    case TREC_CHUNK:
        return sizeofW(StgTRecChunk);
    case TREC_HEADER:
        return sizeofW(StgTRecHeader);
    case ATOMIC_INVARIANT:
        return sizeofW(StgAtomicInvariant);
    case INVARIANT_CHECK_QUEUE:
        return sizeofW(StgInvariantCheckQueue);
    default:
	return sizeW_fromITBL(info);
    }
}
487
// The definitive way to find the size, in words, of a heap-allocated
// closure: fetch the info table and dispatch on the closure type.
INLINE_HEADER nat
closure_sizeW (StgClosure *p)
{
    return closure_sizeW_(p, get_itbl(p));
}
494
495 /* -----------------------------------------------------------------------------
496 Sizes of stack frames
497 -------------------------------------------------------------------------- */
498
// Size in words of the stack frame at 'frame', determined from its
// return-address info table.
INLINE_HEADER StgWord stack_frame_sizeW( StgClosure *frame )
{
    StgRetInfoTable *info;

    info = get_ret_itbl(frame);
    switch (info->i.type) {

    case RET_DYN:
    {
        // dynamic frame: header plus the register/pointer payload
        // described by the liveness word
	StgRetDyn *dyn = (StgRetDyn *)frame;
	return  sizeofW(StgRetDyn) + RET_DYN_BITMAP_SIZE +
	    RET_DYN_NONPTR_REGS_SIZE +
	    RET_DYN_PTRS(dyn->liveness) + RET_DYN_NONPTRS(dyn->liveness);
    }

    case RET_FUN:
	return sizeofW(StgRetFun) + ((StgRetFun *)frame)->size;

    case RET_BIG:
        // 1 for the return address itself, plus the large bitmap's size
	return 1 + GET_LARGE_BITMAP(&info->i)->size;

    case RET_BCO:
        // 2 words of frame (return address + BCO pointer), plus the
        // BCO's own bitmap size
	return 2 + BCO_BITMAP_SIZE((StgBCO *)((P_)frame)[1]);

    default:
        // small-bitmap frame: return address plus bitmap payload
	return 1 + BITMAP_SIZE(info->i.layout.bitmap);
    }
}
527
528 /* -----------------------------------------------------------------------------
529 Nursery manipulation
530 -------------------------------------------------------------------------- */
531
532 extern void allocNurseries ( void );
533 extern void resetNurseries ( void );
534 extern void resizeNurseries ( nat blocks );
535 extern void resizeNurseriesFixed ( nat blocks );
536 extern lnat countNurseryBlocks ( void );
537
538
539 /* -----------------------------------------------------------------------------
540 Functions from GC.c
541 -------------------------------------------------------------------------- */
542
543 typedef void (*evac_fn)(void *user, StgClosure **root);
544
545 extern void threadPaused ( Capability *cap, StgTSO * );
546 extern StgClosure * isAlive ( StgClosure *p );
547 extern void markCAFs ( evac_fn evac, void *user );
548 extern void GetRoots ( evac_fn evac, void *user );
549
550 /* -----------------------------------------------------------------------------
551 Stats 'n' DEBUG stuff
552 -------------------------------------------------------------------------- */
553
554 extern ullong RTS_VAR(total_allocated);
555
556 extern lnat calcAllocated ( void );
557 extern lnat calcLiveBlocks ( void );
558 extern lnat calcLiveWords ( void );
559 extern lnat countOccupied ( bdescr *bd );
560 extern lnat calcNeeded ( void );
561
/* Debug-only sanity checking and inspection helpers.  (Previously these
   were split across two adjacent, identical #if defined(DEBUG) blocks;
   merged into one.) */
#if defined(DEBUG)
extern void memInventory(rtsBool show);
extern void checkSanity(void);
extern nat countBlocks(bdescr *);
extern void checkNurserySanity( step *stp );
void printMutOnceList(generation *gen);
void printMutableList(generation *gen);
#endif
573
574 /* ----------------------------------------------------------------------------
575 Storage manager internal APIs and globals
576 ------------------------------------------------------------------------- */
577
578 #define END_OF_STATIC_LIST stgCast(StgClosure*,1)
579
580 extern void newDynCAF(StgClosure *);
581
582 extern void move_TSO(StgTSO *src, StgTSO *dest);
583 extern StgTSO *relocate_stack(StgTSO *dest, ptrdiff_t diff);
584
585 extern StgWeak * RTS_VAR(old_weak_ptr_list);
586 extern StgWeak * RTS_VAR(weak_ptr_list);
587 extern StgClosure * RTS_VAR(caf_list);
588 extern StgClosure * RTS_VAR(revertible_caf_list);
589 extern StgTSO * RTS_VAR(resurrected_threads);
590
/* A closure that has been evacuated has its info pointer replaced by a
   forwarding pointer: the to-space address with the low bit set.
   The macro parameter is fully parenthesised so that an expression
   argument (e.g. p + 1) is cast as a whole rather than having the cast
   bind only to its first operand. */
#define IS_FORWARDING_PTR(p) ((((StgWord)(p)) & 1) != 0)
#define MK_FORWARDING_PTR(p) (((StgWord)(p)) | 1)
#define UN_FORWARDING_PTR(p) (((StgWord)(p)) - 1)
594
// Heuristic check that p points at something resembling an info table.
// Assumes p is non-NULL and not a forwarding pointer.  May return false
// positives (see the DEBUGGING predicates comment above); never rely on
// it for a definitive answer.
INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR_NOT_NULL (StgWord p)
{
    StgInfoTable *info = INFO_PTR_TO_STRUCT(p);
    return info->type != INVALID_OBJECT && info->type < N_CLOSURE_TYPES;
}
600
// False if p is definitely not an info pointer: NULL fails; forwarding
// pointers are accepted without further inspection (during GC an info
// pointer slot may hold a forwarding pointer instead).
INLINE_HEADER rtsBool LOOKS_LIKE_INFO_PTR (StgWord p)
{
    return p && (IS_FORWARDING_PTR(p) || LOOKS_LIKE_INFO_PTR_NOT_NULL(p));
}
605
// False if p is definitely not a closure pointer: untag the pointer and
// check that its header's info pointer looks plausible.
INLINE_HEADER rtsBool LOOKS_LIKE_CLOSURE_PTR (void *p)
{
    return LOOKS_LIKE_INFO_PTR((StgWord)(UNTAG_CLOSURE((StgClosure *)(p)))->header.info);
}
610
611 #endif /* STORAGE_H */