Allow work units smaller than a block to improve load balancing
[ghc.git] / rts / sm / GC.h
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2006
 *
 * Generational garbage collector
 *
 * Documentation on the architecture of the Garbage Collector can be
 * found in the online commentary:
 *
 *   http://hackage.haskell.org/trac/ghc/wiki/Commentary/Rts/Storage/GC
 *
 * ---------------------------------------------------------------------------*/

#ifndef GC_H
#define GC_H

#include "OSThreads.h"

/* -----------------------------------------------------------------------------
   General scheme

   ToDo: move this to the wiki when the implementation is done.

   We're only going to try to parallelise the copying GC for now.  The
   plan is as follows.

   Each thread has a gc_thread structure (see below) which holds its
   thread-local data.  We'll keep a pointer to this in a thread-local
   variable, or possibly in a register.

   In the gc_thread structure is a step_workspace for each step.  The
   primary purpose of the step_workspace is to hold evacuated objects;
   when an object is evacuated, it is copied to the "todo" block in
   the thread's workspace for the appropriate step.  When the todo
   block is full, it is pushed to the global step->todos list, which
   is protected by a lock.  (In fact we interpose a one-place buffer
   here to reduce contention.)

   A thread repeatedly grabs a block of work from one of the
   step->todos lists, scavenges it, and keeps the scavenged block on
   its own ws->scavd_list (this is to avoid unnecessary contention
   when returning the completed buffers to the step: we can just
   collect them all later).

   When there is no global work to do, we start scavenging the todo
   blocks in the workspaces.  This is where the scan_bd field comes
   in: we scan the contents of the todo block, but once we have
   scavenged everything up to todo_bd->free we don't want to move
   the block to the scavd_list immediately, because it is probably
   only partially full.  So we remember that we have scanned up to
   this point by saving the block in ws->scan_bd, with the current
   scan pointer in ws->scan.  Later, when more objects have been
   copied to this block, we can come back and scan the rest.  When
   we visit this workspace again in the future, scan_bd may still be
   the same as todo_bd, or it might be different: if enough objects
   were copied into this block that it filled up, then we will have
   allocated a new todo block, but *not* pushed the old one to the
   step, because it is partially scanned.

   The reason to leave scanning the todo blocks until last is that we
   want to deal with full blocks as far as possible.
   ------------------------------------------------------------------------- */
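
/* A rough sketch of the per-thread loop described above.  This is purely
   illustrative: the helper names (grab_todo_block, scavenge_block,
   push_scavd_block, scavenge_local_todo_blocks) are placeholders, not the
   actual functions in the scavenging code.

     static void gc_thread_work_sketch (gc_thread *gct)
     {
         bdescr *bd;
         for (;;) {
             // Prefer the global step->todos lists, so that full blocks
             // are dealt with as far as possible.
             bd = grab_todo_block(gct);          // takes the step's lock
             if (bd != NULL) {
                 scavenge_block(bd);             // scavenge up to bd->free
                 push_scavd_block(bd, gct);      // keep on ws->scavd_list
                 continue;
             }
             // No global work: scan our own (partially-full) todo blocks,
             // remembering our position in ws->scan_bd / ws->scan so we
             // can resume later when more objects have been copied in.
             if (!scavenge_local_todo_blocks(gct)) {
                 break;                          // really no work left
             }
         }
     }
*/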

/* -----------------------------------------------------------------------------
   Step Workspace

   A step workspace exists for each step for each GC thread.  The GC
   thread takes a block from the step's todos list into its scan_bd
   and then scans it.  Objects referred to by those in the scan block
   are copied into the todo or scavd blocks of the relevant step.

   ------------------------------------------------------------------------- */

typedef struct step_workspace_ {
    step * step;             // the step for this workspace
    struct gc_thread_ * gct; // the gc_thread that contains this workspace

    // block that is currently being scanned
    bdescr * scan_bd;

    // where objects to be scavenged go
    bdescr * todo_bd;
    StgPtr   todo_free;      // free ptr for todo_bd
    StgPtr   todo_lim;       // lim for todo_bd

    bdescr * buffer_todo_bd; // buffer to reduce contention
                             // on the step's todos list

    // where large objects to be scavenged go
    bdescr * todo_large_objects;

    // Objects that have already been scavenged.
    bdescr * scavd_list;
    lnat     n_scavd_blocks; // count of blocks in this list

    // Partially-full, scavenged, blocks
    bdescr * part_list;
    unsigned int n_part_blocks; // count of above

} step_workspace;
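
/* A minimal sketch of how the todo_bd / todo_free / todo_lim fields are
   intended to be used when an object of 'size' words is evacuated into a
   workspace.  The helper names (push_todo_block, alloc_todo_block) are
   illustrative placeholders, not necessarily the real functions:

     StgPtr to = ws->todo_free;
     if (to + size > ws->todo_lim) {
         // todo block is full: hand it over (via the one-place buffer /
         // step->todos list) and start a fresh block
         push_todo_block(ws);
         alloc_todo_block(ws);
         to = ws->todo_free;
     }
     ws->todo_free = to + size;
     // ... copy the 'size' words of the object to 'to' ...
*/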

/* ----------------------------------------------------------------------------
   GC thread object

   Every GC thread has one of these.  It contains all the step-specific
   workspaces and other GC-thread-local information.  At some later
   point it may be useful to move this into the thread-local storage
   of the GC threads.
   ------------------------------------------------------------------------- */

typedef struct gc_thread_ {
#ifdef THREADED_RTS
    OSThreadId id;           // The OS thread that this struct belongs to
    Mutex      wake_mutex;
    Condition  wake_cond;    // So we can go to sleep between GCs
    rtsBool    wakeup;
    rtsBool    exit;
#endif
    nat thread_index;        // a zero-based index identifying the thread

    bdescr * free_blocks;    // a buffer of free blocks for this thread
                             // to allocate from during GC without
                             // taking the block allocator's spin lock.

    StgClosure* static_objects;           // live static objects
    StgClosure* scavenged_static_objects; // static objects scavenged so far

    lnat gc_count;           // number of GCs this thread has done

    // --------------------
    // evacuate flags

    step *evac_step;         // Youngest generation that objects
                             // should be evacuated to in
                             // evacuate().  (Logically an
                             // argument to evacuate, but it's
                             // static a lot of the time so we
                             // optimise it into a per-thread
                             // variable).

    rtsBool failed_to_evac;  // failure to evacuate an object typically
                             // causes it to be recorded in the mutable
                             // object list

    rtsBool eager_promotion; // forces promotion to the evac gen
                             // instead of the to-space
                             // corresponding to the object

    lnat thunk_selector_depth; // not currently used

#ifdef USE_PAPI
    int papi_events;
#endif

    // -------------------
    // stats

    lnat copied;
    lnat any_work;
    lnat no_work;
    lnat scav_find_work;

    // -------------------
    // workspaces

    // array of workspaces, indexed by stp->abs_no.  This is placed
    // directly at the end of the gc_thread structure so that we can get from
    // the gc_thread pointer to a workspace using only pointer
    // arithmetic, no memory access.  This happens in the inner loop
    // of the GC, see Evac.c:alloc_for_copy().
    step_workspace steps[];
} gc_thread;
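
/* The comment above refers to indexing the flexible 'steps[]' array member:
   looking up a workspace compiles down to base-pointer-plus-offset
   arithmetic on the gc_thread pointer.  A minimal sketch:

     step_workspace *ws = &gct->steps[stp->abs_no];
     // equivalent to: (step_workspace *)((char *)gct
     //                    + offsetof(gc_thread, steps)
     //                    + stp->abs_no * sizeof(step_workspace));
*/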

extern nat N;
extern rtsBool major_gc;
extern nat n_gc_threads;

extern gc_thread **gc_threads;
register gc_thread *gct __asm__("%rbx");
// extern gc_thread *gct; // this thread's gct TODO: make thread-local

extern bdescr *mark_stack_bdescr;
extern StgPtr *mark_stack;
extern StgPtr *mark_sp;
extern StgPtr *mark_splim;

extern rtsBool mark_stack_overflowed;
extern bdescr *oldgen_scan_bd;
extern StgPtr  oldgen_scan;

extern long copied;

#ifdef DEBUG
extern nat mutlist_MUTVARS, mutlist_MUTARRS, mutlist_MVARS, mutlist_OTHERS;
#endif

StgClosure * isAlive(StgClosure *p);

#define WORK_UNIT_WORDS 128
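
/* WORK_UNIT_WORDS bounds the size of an individual unit of scavenging work.
   The following is only a sketch of the idea behind the commit title
   ("allow work units smaller than a block"), not the actual implementation:
   rather than letting todo_lim run to the end of the block, it can be capped
   one work unit ahead of the free pointer, so that a partially-filled block
   can be handed off to another thread sooner:

     ws->todo_lim = stg_min(bd->start + BLOCK_SIZE_W,
                            ws->todo_free + WORK_UNIT_WORDS);
*/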

#endif /* GC_H */