NonMoving: Trace swept segment counts
rts/sm/NonMovingSweep.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2018
 *
 * Non-moving garbage collector and allocator: Sweep phase
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "NonMovingSweep.h"
#include "NonMoving.h"
#include "NonMovingMark.h" // for nonmovingIsAlive
#include "Capability.h"
#include "GCThread.h" // for GCUtils.h
#include "GCUtils.h"
#include "Storage.h"
#include "Trace.h"
#include "StableName.h"

// On which list should a particular segment be placed?
enum SweepResult {
    SEGMENT_FREE,     // segment is empty: place on free list
    SEGMENT_PARTIAL,  // segment is partially filled: place on active list
    SEGMENT_FILLED    // segment is full: place on filled list
};

// Determine which list a marked segment should be placed on and initialize
// next_free indices as appropriate.
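//
// For example, a segment whose bitmap after mark reads
//
//     [e, 0, e, e, 0, ...]      (e == nonmovingMarkEpoch)
//
// contains both live and dead blocks: next_free is set to 1 (the first
// dead block), the mark bits of the remaining dead blocks are cleared,
// and SEGMENT_PARTIAL is returned.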
GNUC_ATTR_HOT static enum SweepResult
nonmovingSweepSegment(struct NonmovingSegment *seg)
{
    bool found_free = false;
    bool found_live = false;

    for (nonmoving_block_idx i = 0;
         i < nonmovingSegmentBlockCount(seg);
         ++i)
    {
        if (seg->bitmap[i] == nonmovingMarkEpoch) {
            found_live = true;
        } else if (!found_free) {
            found_free = true;
            seg->next_free = i;
            nonmovingSegmentInfo(seg)->next_free_snap = i;
            Bdescr((P_)seg)->u.scan = (P_)nonmovingSegmentGetBlock(seg, i);
            seg->bitmap[i] = 0;
        } else {
            seg->bitmap[i] = 0;
        }

        if (found_free && found_live) {
            // zero the mark bits of the remaining dead objects
            for (; i < nonmovingSegmentBlockCount(seg); ++i) {
                if (seg->bitmap[i] != nonmovingMarkEpoch) {
                    seg->bitmap[i] = 0;
                }
            }
            return SEGMENT_PARTIAL;
        }
    }

    if (found_live) {
        return SEGMENT_FILLED;
    } else {
        ASSERT(seg->next_free == 0);
        ASSERT(nonmovingSegmentInfo(seg)->next_free_snap == 0);
        return SEGMENT_FREE;
    }
}

#if defined(DEBUG)

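// Debug-only: walk the snapshot of the CAF list taken at the start of
// collection, stubbing CAFs that were not marked this cycle with
// stg_GCD_CAF_info and returning the live ones to debug_caf_list.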
void nonmovingGcCafs()
{
    uint32_t i = 0;
    StgIndStatic *next;

    for (StgIndStatic *caf = debug_caf_list_snapshot;
         caf != (StgIndStatic*) END_OF_CAF_LIST;
         caf = next)
    {
        next = (StgIndStatic*)caf->saved_info;

        const StgInfoTable *info = get_itbl((StgClosure*)caf);
        ASSERT(info->type == IND_STATIC);

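        // static_link's low bits record which collection last visited the
        // CAF: a value that is neither 0 nor the current static_flag means
        // it was not reached during this mark, i.e. it is dead.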
        StgWord flag = ((StgWord) caf->static_link) & STATIC_BITS;
        if (flag != 0 && flag != static_flag) {
            debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%p", caf);
            SET_INFO((StgClosure*)caf, &stg_GCD_CAF_info); // stub it
        } else {
            // CAF is alive, move it back to the debug_caf_list
            ++i;
            debugTrace(DEBUG_gccafs, "CAF alive at 0x%p", caf);
            ACQUIRE_SM_LOCK; // debug_caf_list is global, locked by sm_mutex
            caf->saved_info = (const StgInfoTable*)debug_caf_list;
            debug_caf_list = caf;
            RELEASE_SM_LOCK;
        }
    }

    debugTrace(DEBUG_gccafs, "%d CAFs live", i);
    debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
}

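// Zero a whole free segment (or just the dead blocks of a partial one) so
// that stale data cannot masquerade as live closures. Used only under
// IF_DEBUG(sanity) in nonmovingSweep below.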
static void
clear_segment(struct NonmovingSegment* seg)
{
    size_t end = ((size_t)seg) + NONMOVING_SEGMENT_SIZE;
    memset(&seg->bitmap, 0, end - (size_t)&seg->bitmap);
}

static void
clear_segment_free_blocks(struct NonmovingSegment* seg)
{
    unsigned int block_size = nonmovingSegmentBlockSize(seg);
    for (unsigned int p_idx = 0; p_idx < nonmovingSegmentBlockCount(seg); ++p_idx) {
        // after mark, so bit not set == dead
        if (nonmovingGetMark(seg, p_idx) == 0) {
            memset(nonmovingSegmentGetBlock(seg, p_idx), 0, block_size);
        }
    }
}

#endif

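// Sweep phase driver: classify each segment on sweep_list and move it to
// the appropriate free/active/filled list, counting how many of each we
// saw so that the totals can be traced below.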
GNUC_ATTR_HOT void nonmovingSweep(void)
{
    int n_freed = 0, n_active = 0, n_filled = 0;
    while (nonmovingHeap.sweep_list) {
        struct NonmovingSegment *seg = nonmovingHeap.sweep_list;

        // Pushing the segment onto one of the free/active/filled lists
        // updates its link field, so advance sweep_list first
        nonmovingHeap.sweep_list = seg->link;

        enum SweepResult ret = nonmovingSweepSegment(seg);

        switch (ret) {
        case SEGMENT_FREE:
            IF_DEBUG(sanity, clear_segment(seg));
            nonmovingPushFreeSegment(seg);
            n_freed++;
            break;
        case SEGMENT_PARTIAL:
            IF_DEBUG(sanity, clear_segment_free_blocks(seg));
            nonmovingPushActiveSegment(seg);
            n_active++;
            break;
        case SEGMENT_FILLED:
            nonmovingPushFilledSegment(seg);
            n_filled++;
            break;
        default:
            barf("nonmovingSweep: weird sweep return: %d\n", ret);
        }
    }

    debugTrace(DEBUG_nonmoving_gc, "Pushed %d free, %d active, %d filled segments",
               n_freed, n_active, n_filled);
}

/* N.B. This happens during the pause so we own all capabilities. */
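// Each capability's oldest-generation mut_list is rebuilt from scratch,
// keeping only those entries whose closures survived the mark.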
void nonmovingSweepMutLists()
{
    for (uint32_t n = 0; n < n_capabilities; n++) {
        Capability *cap = capabilities[n];
        bdescr *old_mut_list = cap->mut_lists[oldest_gen->no];
        cap->mut_lists[oldest_gen->no] = allocBlockOnNode_sync(cap->node);
        for (bdescr *bd = old_mut_list; bd; bd = bd->link) {
            for (StgPtr p = bd->start; p < bd->free; p++) {
                StgClosure **q = (StgClosure**)p;
                if (nonmovingIsAlive(*q)) {
                    recordMutableCap(*q, cap, oldest_gen->no);
                }
            }
        }
        freeChain(old_mut_list);
    }
}

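// Sweep the large-object lists by swapping: anything still on
// nonmoving_large_objects was not marked and is freed, and the marked
// list becomes the new live list.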
void nonmovingSweepLargeObjects()
{
    freeChain_lock(nonmoving_large_objects);
    nonmoving_large_objects = nonmoving_marked_large_objects;
    n_nonmoving_large_blocks = n_nonmoving_marked_large_blocks;
    nonmoving_marked_large_objects = NULL;
    n_nonmoving_marked_large_blocks = 0;
}

// Helper for nonmovingSweepStableNameTable. Essentially nonmovingIsAlive,
// but also handles objects that died in the moving heap; see
// nonmovingSweepStableNameTable below.
static bool is_alive(StgClosure *p)
{
    if (!HEAP_ALLOCED_GC(p)) {
        return true;
    }

    if (nonmovingClosureBeingSwept(p)) {
        return nonmovingIsAlive(p);
    } else {
        // We don't want to sweep any stable names which weren't in the
        // set of segments that we swept.
        // See Note [Sweeping stable names in the concurrent collector]
        return true;
    }
}

void nonmovingSweepStableNameTable()
{
    // See comments in gcStableTables

    /* Note [Sweeping stable names in the concurrent collector]
     * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     *
     * When collecting concurrently we need to take care to avoid freeing
     * stable names that we didn't sweep this collection cycle. For instance,
     * consider the following situation:
     *
     *  1. We take a snapshot and start collection
     *  2. A mutator allocates a new object, then makes a stable name for it
     *  3. The mutator performs a minor GC and promotes the new object to the
     *     nonmoving heap
     *  4. The GC thread gets to the sweep phase and, when traversing the stable
     *     name table, finds the new object unmarked. It then assumes that the
     *     object is dead and removes the stable name from the stable name table.
     *
     * This would be wrong: the object is alive but simply wasn't in the
     * snapshot. To avoid this, is_alive above only consults the mark state
     * of objects in segments that are being swept this cycle
     * (nonmovingClosureBeingSwept); everything else is conservatively
     * treated as alive.
     */

    // FIXME: We can't use nonmovingIsAlive here without first using isAlive:
    // a stable name can die during moving heap collection and we can't use
    // nonmovingIsAlive on those objects. Inefficient.

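    // If the object a stable name refers to (sn_obj) is dead, free the
    // whole entry; otherwise, if its addr field points to a dead object,
    // just clear addr.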
    stableNameLock();
    FOR_EACH_STABLE_NAME(
        p, {
            if (p->sn_obj != NULL) {
                if (!is_alive((StgClosure*)p->sn_obj)) {
                    p->sn_obj = NULL; // Just to make an assertion happy
                    freeSnEntry(p);
                } else if (p->addr != NULL) {
                    if (!is_alive((StgClosure*)p->addr)) {
                        p->addr = NULL;
                    }
                }
            }
        });
    stableNameUnlock();
}