Use local mut lists in UPD_IND(), also clean up Updates.h
rts/Updates.h
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team, 1998-2004
 *
 * Performing updates.
 *
 * ---------------------------------------------------------------------------*/

#ifndef UPDATES_H
#define UPDATES_H

#ifndef CMINUSMINUS
BEGIN_RTS_PRIVATE
#endif

/* -----------------------------------------------------------------------------
   Updates
   -------------------------------------------------------------------------- */

/* LDV profiling:
 * We call LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1) regardless of the
 * generation in which p1 resides.
 *
 * Note:
 * Strictly speaking, we do not need to call LDV_RECORD_CREATE() for
 * IND and IND_OLDGEN closures, because they are inherently "used".
 * But omitting the call would break the invariant that every closure
 * keeps its creation time in its profiling field, so we call
 * LDV_RECORD_CREATE() anyway.
 */
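
/* Conceptual sketch of that invariant (illustrative only; the real
 * LDV macros live elsewhere in the RTS and are more involved than
 * this): LDV profiling stamps each closure's profiling header with
 * the "era" in which it was created, so a later census can measure
 * closure lifetimes.  record_create_sketch() is a made-up name.
 */
#if 0
static void record_create_sketch (StgClosure *c, StgWord current_era)
{
    /* simplified: the real macro also encodes a state bit alongside
     * the era */
    c->header.prof.hp.ldvw = current_era;
}
#endif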

/* In the DEBUG case, we also zero out the slop of the old closure,
 * so that the sanity checker can tell where the next closure is.
 *
 * Two important invariants: we should never try to update a closure
 * to point to itself, and the closure being updated should not
 * already have been updated (the mutable list will get messed up
 * otherwise).
 *
 * NB. We do *not* do this in THREADED_RTS mode, because when we have the
 * possibility of multiple threads entering the same closure, zeroing
 * the slop in one of the threads would have a disastrous effect on
 * the other (seen in the wild!).
 */
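
/* Illustrative sketch only (not part of this header): how a heap
 * sanity walk can rely on zeroed slop.  The loop below is a
 * simplification of what the sanity checker does; check_closure()
 * and closure_size_in_words() are hypothetical stand-ins.  After
 * each closure, zeroed slop words are skipped until something that
 * could be the next closure appears.
 */
#if 0
static void walk_block (bdescr *bd)
{
    StgPtr p = bd->start;
    while (p < bd->free) {
        check_closure((StgClosure *)p);               /* hypothetical */
        p += closure_size_in_words((StgClosure *)p);  /* hypothetical */
        /* skip zeroed slop left behind by updated closures */
        while (p < bd->free && *p == 0) { p++; }
    }
}
#endif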
#ifdef CMINUSMINUS

#define FILL_SLOP(p)                                                    \
  W_ inf;                                                               \
  W_ sz;                                                                \
  W_ i;                                                                 \
  inf = %GET_STD_INFO(p);                                               \
  if (%INFO_TYPE(inf) != HALF_W_(BLACKHOLE)                             \
        && %INFO_TYPE(inf) != HALF_W_(CAF_BLACKHOLE)) {                 \
    if (%INFO_TYPE(inf) == HALF_W_(THUNK_SELECTOR)) {                   \
        sz = BYTES_TO_WDS(SIZEOF_StgSelector_NoThunkHdr);               \
    } else {                                                            \
        if (%INFO_TYPE(inf) == HALF_W_(AP_STACK)) {                     \
            sz = StgAP_STACK_size(p) + BYTES_TO_WDS(SIZEOF_StgAP_STACK_NoThunkHdr); \
        } else {                                                        \
            if (%INFO_TYPE(inf) == HALF_W_(AP)) {                       \
                sz = TO_W_(StgAP_n_args(p)) + BYTES_TO_WDS(SIZEOF_StgAP_NoThunkHdr); \
            } else {                                                    \
                sz = TO_W_(%INFO_PTRS(inf)) + TO_W_(%INFO_NPTRS(inf));  \
            }                                                           \
        }                                                               \
    }                                                                   \
    i = 0;                                                              \
    for:                                                                \
    if (i < sz) {                                                       \
        StgThunk_payload(p,i) = 0;                                      \
        i = i + 1;                                                      \
        goto for;                                                       \
    }                                                                   \
  }

#else /* !CMINUSMINUS */

INLINE_HEADER void
FILL_SLOP(StgClosure *p)
{
    StgInfoTable *inf = get_itbl(p);
    nat i, sz;

    switch (inf->type) {
    case BLACKHOLE:
    case CAF_BLACKHOLE:
        // we already filled in the slop when we overwrote the thunk
        // with BLACKHOLE, and also an evacuated BLACKHOLE is only the
        // size of an IND.
        goto no_slop;
    case THUNK_SELECTOR:
        sz = sizeofW(StgSelector) - sizeofW(StgThunkHeader);
        break;
    case AP:
        sz = ((StgAP *)p)->n_args + sizeofW(StgAP) - sizeofW(StgThunkHeader);
        break;
    case AP_STACK:
        sz = ((StgAP_STACK *)p)->size + sizeofW(StgAP_STACK) - sizeofW(StgThunkHeader);
        break;
    default:
        sz = inf->layout.payload.ptrs + inf->layout.payload.nptrs;
        break;
    }
    for (i = 0; i < sz; i++) {
        ((StgThunk *)p)->payload[i] = 0;
    }
no_slop:
    ;
}

#endif /* CMINUSMINUS */

#if !defined(DEBUG) || defined(THREADED_RTS)
#define DEBUG_FILL_SLOP(p) /* do nothing */
#else
#define DEBUG_FILL_SLOP(p) FILL_SLOP(p)
#endif

/* We have two versions of this macro (sadly), one for use in C-- code,
 * and the other for C.
 *
 * The and_then argument is a performance hack so that we can paste in
 * the continuation code directly.  It helps shave a couple of
 * instructions off the common case in the update code, which is
 * worthwhile (the update code is often part of the inner loop).
 * (except that gcc now appears to common up this code again and
 * invert the optimisation.  Grrrr --SDM).
 */
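
/* For illustration only: the same continuation-pasting trick in plain
 * C.  All names here (UPDATE_AND_THEN, in_old_generation,
 * remember_mutable) are invented for this sketch.  Because and_then
 * is a macro argument, it is spliced into each branch, so the
 * compiler sees straight-line code in both paths instead of a join
 * point after the if.
 */
#if 0
#define UPDATE_AND_THEN(p, v, and_then)         \
    do {                                        \
        (p)->indirectee = (v);                  \
        if (in_old_generation(p)) {             \
            remember_mutable(p);                \
            and_then;                           \
        } else {                                \
            and_then;                           \
        }                                       \
    } while (0)

/* e.g. UPDATE_AND_THEN(thunk, val, return); pastes 'return' into both
 * branches of the if. */
#endif
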
#ifdef CMINUSMINUS

#define updateWithIndirection(ind_info, p1, p2, and_then)       \
    W_ bd;                                                      \
                                                                \
    DEBUG_FILL_SLOP(p1);                                        \
    LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1);                      \
    StgInd_indirectee(p1) = p2;                                 \
    prim %write_barrier() [];                                   \
    bd = Bdescr(p1);                                            \
    if (bdescr_gen_no(bd) != 0 :: bits16) {                     \
      recordMutableCap(p1, TO_W_(bdescr_gen_no(bd)), R1);       \
      SET_INFO(p1, stg_IND_OLDGEN_info);                        \
      LDV_RECORD_CREATE(p1);                                    \
      TICK_UPD_OLD_IND();                                       \
      and_then;                                                 \
    } else {                                                    \
      SET_INFO(p1, ind_info);                                   \
      LDV_RECORD_CREATE(p1);                                    \
      TICK_UPD_NEW_IND();                                       \
      and_then;                                                 \
    }

#else /* !CMINUSMINUS */

INLINE_HEADER void updateWithIndirection (Capability *cap,
                                          const StgInfoTable *ind_info,
                                          StgClosure *p1,
                                          StgClosure *p2)
{
    bdescr *bd;

    ASSERT( (P_)p1 != (P_)p2 );
    /* not necessarily true: ASSERT( !closure_IND(p1) ); */
    /* occurs in RaiseAsync.c:raiseAsync() */
    DEBUG_FILL_SLOP(p1);
    LDV_RECORD_DEAD_FILL_SLOP_DYNAMIC(p1);
    ((StgInd *)p1)->indirectee = p2;
    // write the indirectee before the info pointer: a concurrent
    // reader that sees the IND info pointer must also see a valid
    // indirectee, hence the write barrier in between.
    write_barrier();
    bd = Bdescr((StgPtr)p1);
    if (bd->gen_no != 0) {
        recordMutableCap(p1, cap, bd->gen_no);
        SET_INFO(p1, &stg_IND_OLDGEN_info);
        LDV_RECORD_CREATE(p1);  // preserve the LDV creation-time
                                // invariant (see note above); the C--
                                // version does the same in this branch
        TICK_UPD_OLD_IND();
    } else {
        SET_INFO(p1, ind_info);
        LDV_RECORD_CREATE(p1);
        TICK_UPD_NEW_IND();
    }
}

#endif /* CMINUSMINUS */

#define UPD_IND(cap, updclosure, heapptr)               \
    updateWithIndirection(cap, &stg_IND_info,           \
                          updclosure,                   \
                          heapptr)
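
/* Typical use (sketch only; cap, thunk, and result are illustrative
 * names for the current Capability, a thunk that has just been
 * evaluated, and its value): the thunk is overwritten with an
 * indirection so that every existing reference now reaches the value
 * directly.
 */
#if 0
static void example_update (Capability *cap,
                            StgClosure *thunk, StgClosure *result)
{
    UPD_IND(cap, thunk, result);   /* thunk now points at result */
}
#endif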

#ifndef CMINUSMINUS
END_RTS_PRIVATE
#endif

#endif /* UPDATES_H */