/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2018
 *
 * Non-moving garbage collector and allocator: Sweep phase
 *
 * ---------------------------------------------------------------------------*/

#include "Rts.h"
#include "NonMovingSweep.h"
#include "NonMoving.h"
#include "NonMovingMark.h" // for nonmovingIsAlive
#include "Capability.h"
#include "GCThread.h" // for GCUtils.h
#include "GCUtils.h"
#include "Storage.h"
#include "Trace.h"
#include "StableName.h"

// On which list should a particular segment be placed?
enum SweepResult {
    SEGMENT_FREE,     // segment is empty: place on free list
    SEGMENT_PARTIAL,  // segment is partially filled: place on active list
    SEGMENT_FILLED    // segment is full: place on filled list
};

// Determine which list a marked segment should be placed on and initialize
// next_free indices as appropriate.
GNUC_ATTR_HOT static enum SweepResult
nonmovingSweepSegment(struct NonmovingSegment *seg)
{
    bool found_free = false;
    bool found_live = false;

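    // Make a single pass over the mark bitmap: note the first unmarked
    // (free) block so that next_free can be seeded, and clear the mark
    // bits of dead blocks as we go.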
    for (nonmoving_block_idx i = 0;
         i < nonmovingSegmentBlockCount(seg);
         ++i)
    {
        if (seg->bitmap[i] == nonmovingMarkEpoch) {
            found_live = true;
        } else if (!found_free) {
            found_free = true;
            seg->next_free = i;
            nonmovingSegmentInfo(seg)->next_free_snap = i;
            Bdescr((P_)seg)->u.scan = (P_)nonmovingSegmentGetBlock(seg, i);
            seg->bitmap[i] = 0;
        } else {
            seg->bitmap[i] = 0;
        }

        if (found_free && found_live) {
            // zero the remaining dead objects' mark bits
            for (; i < nonmovingSegmentBlockCount(seg); ++i) {
                if (seg->bitmap[i] != nonmovingMarkEpoch) {
                    seg->bitmap[i] = 0;
                }
            }
            return SEGMENT_PARTIAL;
        }
    }

    if (found_live) {
        return SEGMENT_FILLED;
    } else {
        ASSERT(seg->next_free == 0);
        ASSERT(nonmovingSegmentInfo(seg)->next_free_snap == 0);
        return SEGMENT_FREE;
    }
}

#if defined(DEBUG)

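// Traverse the snapshot of the debug CAF list: stub out any CAF that was
// not marked this cycle (so sanity checking can detect dangling references
// to it) and return the live ones to debug_caf_list.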
void nonmovingGcCafs()
{
    uint32_t i = 0;
    StgIndStatic *next;

    for (StgIndStatic *caf = debug_caf_list_snapshot;
         caf != (StgIndStatic*) END_OF_CAF_LIST;
         caf = next)
    {
        next = (StgIndStatic*)caf->saved_info;

        const StgInfoTable *info = get_itbl((StgClosure*)caf);
        ASSERT(info->type == IND_STATIC);

        StgWord flag = ((StgWord) caf->static_link) & STATIC_BITS;
        if (flag != 0 && flag != static_flag) {
            debugTrace(DEBUG_gccafs, "CAF gc'd at 0x%p", caf);
            SET_INFO((StgClosure*)caf, &stg_GCD_CAF_info); // stub it
        } else {
            // CAF is alive, move it back to the debug_caf_list
            ++i;
            debugTrace(DEBUG_gccafs, "CAF alive at 0x%p", caf);
            ACQUIRE_SM_LOCK; // debug_caf_list is global, locked by sm_mutex
            caf->saved_info = (const StgInfoTable*)debug_caf_list;
            debug_caf_list = caf;
            RELEASE_SM_LOCK;
        }
    }

    debugTrace(DEBUG_gccafs, "%d CAFs live", i);
    debug_caf_list_snapshot = (StgIndStatic*)END_OF_CAF_LIST;
}

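// Zero an entire segment, bitmap and blocks alike. Applied to free segments
// under the sanity check so that stale data cannot masquerade as live heap.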
static void
clear_segment(struct NonmovingSegment* seg)
{
    size_t end = ((size_t)seg) + NONMOVING_SEGMENT_SIZE;
    memset(&seg->bitmap, 0, end - (size_t)&seg->bitmap);
}

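// Zero only the unmarked (dead) blocks of a partially-filled segment,
// leaving the live blocks and the bitmap intact.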
static void
clear_segment_free_blocks(struct NonmovingSegment* seg)
{
    unsigned int block_size = nonmovingSegmentBlockSize(seg);
    for (unsigned int p_idx = 0; p_idx < nonmovingSegmentBlockCount(seg); ++p_idx) {
        // after mark, so bit not set == dead
        if (nonmovingGetMark(seg, p_idx) == 0) {
            memset(nonmovingSegmentGetBlock(seg, p_idx), 0, block_size);
        }
    }
}

#endif

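// The sweep phase proper: walk the sweep_list built during mark, classify
// each segment with nonmovingSweepSegment, and move it to the free, active,
// or filled list accordingly.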
GNUC_ATTR_HOT void nonmovingSweep(void)
{
    while (nonmovingHeap.sweep_list) {
        struct NonmovingSegment *seg = nonmovingHeap.sweep_list;

        // Pushing the segment onto one of the free/active/filled segment
        // lists updates its link field, so advance sweep_list first.
        nonmovingHeap.sweep_list = seg->link;

        enum SweepResult ret = nonmovingSweepSegment(seg);

        switch (ret) {
        case SEGMENT_FREE:
            IF_DEBUG(sanity, clear_segment(seg));
            nonmovingPushFreeSegment(seg);
            break;
        case SEGMENT_PARTIAL:
            IF_DEBUG(sanity, clear_segment_free_blocks(seg));
            nonmovingPushActiveSegment(seg);
            break;
        case SEGMENT_FILLED:
            nonmovingPushFilledSegment(seg);
            break;
        default:
            barf("nonmovingSweep: weird sweep return: %d\n", ret);
        }
    }
}

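// Rebuild each capability's oldest-generation mut_list, retaining only
// those entries whose closures survived the mark.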
/* N.B. This happens during the pause so we own all capabilities. */
void nonmovingSweepMutLists()
{
    for (uint32_t n = 0; n < n_capabilities; n++) {
        Capability *cap = capabilities[n];
        bdescr *old_mut_list = cap->mut_lists[oldest_gen->no];
        cap->mut_lists[oldest_gen->no] = allocBlockOnNode_sync(cap->node);
        for (bdescr *bd = old_mut_list; bd; bd = bd->link) {
            for (StgPtr p = bd->start; p < bd->free; p++) {
                StgClosure **q = (StgClosure**)p;
                if (nonmovingIsAlive(*q)) {
                    recordMutableCap(*q, cap, oldest_gen->no);
                }
            }
        }
        freeChain(old_mut_list);
    }
}

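// Everything still on nonmoving_large_objects at this point was not marked,
// so free it; the marked list then becomes the new large-object list.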
void nonmovingSweepLargeObjects()
{
    freeChain_lock(nonmoving_large_objects);
    nonmoving_large_objects = nonmoving_marked_large_objects;
    n_nonmoving_large_blocks = n_nonmoving_marked_large_blocks;
    nonmoving_marked_large_objects = NULL;
    n_nonmoving_marked_large_blocks = 0;
}

// Helper for nonmovingSweepStableNameTable. Essentially nonmovingIsAlive,
// but also works for objects that died in the moving heap; see
// nonmovingSweepStableNameTable.
static bool is_alive(StgClosure *p)
{
    if (!HEAP_ALLOCED_GC(p)) {
        return true;
    }

    if (nonmovingClosureBeingSwept(p)) {
        return nonmovingIsAlive(p);
    } else {
        // We don't want to sweep any stable names which weren't in the
        // set of segments that we swept.
        // See Note [Sweeping stable names in the concurrent collector]
        return true;
    }
}

void nonmovingSweepStableNameTable()
{
    // See comments in gcStableTables

    /* Note [Sweeping stable names in the concurrent collector]
     * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
     *
     * When collecting concurrently we need to take care to avoid freeing
     * stable names that we didn't sweep this collection cycle. For instance,
     * consider the following situation:
     *
     *  1. We take a snapshot and start collection
     *  2. A mutator allocates a new object, then makes a stable name for it
     *  3. The mutator performs a minor GC and promotes the new object to the
     *     nonmoving heap
     *  4. The GC thread gets to the sweep phase and, when traversing the stable
     *     name table, finds the new object unmarked. It then assumes that the
     *     object is dead and removes the stable name from the stable name table.
     *
     * To avoid this, is_alive consults the mark state only for objects that
     * live in a segment currently being swept (nonmovingClosureBeingSwept);
     * everything else is conservatively treated as alive.
     */

    // FIXME: We can't use nonmovingIsAlive here without first using isAlive:
    // an object referenced from the stable name table can die during a
    // moving-heap collection, and we can't use nonmovingIsAlive on those
    // objects. Inefficient.

    stableNameLock();
    FOR_EACH_STABLE_NAME(
        p, {
            if (p->sn_obj != NULL) {
                if (!is_alive((StgClosure*)p->sn_obj)) {
                    p->sn_obj = NULL; // Just to make an assertion happy
                    freeSnEntry(p);
                } else if (p->addr != NULL) {
                    if (!is_alive((StgClosure*)p->addr)) {
                        p->addr = NULL;
                    }
                }
            }
        });
    stableNameUnlock();
}