/* -----------------------------------------------------------------------------
 * (c) The GHC Team 1998-2005
 *
 * STM implementation.
 *
 * Overview
 * --------
 *
 * See the PPoPP 2005 paper "Composable memory transactions". In summary, each
 * transaction has a TRec (transaction record) holding entries for each of the
 * TVars (transactional variables) that it has accessed. Each entry records (a)
 * the TVar, (b) the expected value seen in the TVar, (c) the new value that the
 * transaction wants to write to the TVar, (d) during commit, the identity of
 * the TRec that wrote the expected value.
 *
 * Separate TRecs are used for each level in a nest of transactions. This
 * allows a nested transaction to be aborted without condemning its enclosing
 * transactions. This is needed in the implementation of catchRetry. Note that
 * the "expected value" in a nested transaction's TRec is the value expected to
 * be *held in memory* if the transaction commits -- not the "new value" stored
 * in one of the enclosing transactions. This means that validation can be done
 * without searching through a nest of TRecs.
 *
 * Concurrency control
 * -------------------
 *
 * Three different concurrency control schemes can be built according to the
 * settings in STM.h:
 *
 * STM_UNIPROC assumes that the caller serialises invocations on the STM
 * interface. In the Haskell RTS this means it is suitable only for
 * non-THREADED_RTS builds.
 *
 * STM_CG_LOCK uses coarse-grained locking -- a single 'stm lock' is acquired
 * during an invocation on the STM interface. Note that this does not mean that
 * transactions are simply serialized -- the lock is only held *within* the
 * implementation of stmCommitTransaction, stmWait etc.
 *
 * STM_FG_LOCKS uses fine-grained locking -- locking is done on a per-TVar basis
 * and, when committing a transaction, no locks are acquired for TVars that have
 * been read but not updated.
 *
 * Concurrency control is implemented in the functions:
 *
 *    lock_stm
 *    unlock_stm
 *    lock_tvar / cond_lock_tvar
 *    unlock_tvar
 *
 * The choice between STM_UNIPROC / STM_CG_LOCK / STM_FG_LOCKS affects the
 * implementation of these functions.
 *
 * lock_stm & unlock_stm are straightforward : they acquire a simple spin-lock
 * using STM_CG_LOCK, and otherwise they are no-ops.
 *
 * lock_tvar / cond_lock_tvar and unlock_tvar are more complex because they have
 * other effects (present in STM_UNIPROC and STM_CG_LOCK builds) as well as the
 * actual business of manipulating a lock (present only in STM_FG_LOCKS builds).
 * This is because locking a TVar is implemented by writing the lock holder's
 * TRec into the TVar's current_value field:
 *
 *   lock_tvar - lock a specified TVar (STM_FG_LOCKS only), returning the value
 *               it contained.
 *
 *   cond_lock_tvar - lock a specified TVar (STM_FG_LOCKS only) if it
 *               contains a specified value. Return TRUE if this succeeds,
 *               FALSE otherwise.
 *
 *   unlock_tvar - release the lock on a specified TVar (STM_FG_LOCKS only),
 *               storing a specified value in place of the lock entry.
 *
 * Using these operations, the typical pattern of a commit/validate/wait
 * operation is to (a) lock the STM, (b) lock all the TVars being updated, (c)
 * check that the TVars that were only read from still contain their expected
 * values, (d) release the locks on the TVars, writing updates to them in the
 * case of a commit, (e) unlock the STM.
 *
 * Queues of waiting threads hang off the first_watch_queue_entry field of each
 * TVar. This may only be manipulated when holding that TVar's lock. In
 * particular, when a thread is putting itself to sleep, it mustn't release the
 * TVar's lock until it has added itself to the wait queue and marked its TSO as
 * BlockedOnSTM -- this makes sure that other threads will know to wake it.
 *
 * ---------------------------------------------------------------------------*/
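/* As an illustrative sketch only (not part of the build), the commit
 * protocol described above has roughly this shape in an STM_FG_LOCKS
 * build. The names are the real ones from this file, but the control
 * flow is heavily simplified (error paths, invariants and the version
 * checks are omitted):
 *
 *   lock_stm(trec);                          // (a) no-op under STM_FG_LOCKS
 *   ok = TRUE;
 *   FOR_EACH_ENTRY(trec, e, {                // (b) lock updated TVars
 *     if (entry_is_update(e)) {
 *       if (!cond_lock_tvar(trec, e -> tvar, e -> expected_value)) {
 *         ok = FALSE; BREAK_FOR_EACH;
 *       }
 *     }
 *   });
 *   if (ok) ok = check_read_only(trec);      // (c) validate read-only set
 *   FOR_EACH_ENTRY(trec, e, {                // (d) write back / unlock
 *     if (entry_is_update(e)) {
 *       unlock_tvar(cap, trec, e -> tvar,
 *                   ok ? e -> new_value : e -> expected_value, TRUE);
 *     }
 *   });
 *   unlock_stm(trec);                        // (e)
 */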

#include "PosixSource.h"
#include "Rts.h"

#include "RtsUtils.h"
#include "Schedule.h"
#include "STM.h"
#include "Trace.h"
#include "Threads.h"
#include "sm/Storage.h"
#include "SMPClosureOps.h"

#include <stdio.h>

#define TRUE 1
#define FALSE 0

// ACQ_ASSERT is used for assertions which are only required for
// THREADED_RTS builds with fine-grained locking.

#if defined(STM_FG_LOCKS)
#define ACQ_ASSERT(_X) ASSERT(_X)
#define NACQ_ASSERT(_X) /*Nothing*/
#else
#define ACQ_ASSERT(_X) /*Nothing*/
#define NACQ_ASSERT(_X) ASSERT(_X)
#endif
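// For example (illustrative uses taken from later in this file):
// ACQ_ASSERT(tvar_is_locked(s, trec)) documents a point where a
// fine-grained build must hold the TVar's lock, while
// NACQ_ASSERT(s -> current_value == e -> expected_value) documents the
// corresponding expectation when per-TVar locks are not being taken.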

/*......................................................................*/

// If SHAKE is defined then validation will sometimes spuriously fail.
// This helps test unusual code paths if genuine contention is rare.

#define TRACE(_x...) debugTrace(DEBUG_stm, "STM: " _x)

#ifdef SHAKE
static const int do_shake = TRUE;
#else
static const int do_shake = FALSE;
#endif
static int shake_ctr = 0;
static int shake_lim = 1;

static int shake(void) {
  if (do_shake) {
    if (((shake_ctr++) % shake_lim) == 0) {
      shake_ctr = 1;
      shake_lim ++;
      return TRUE;
    }
    return FALSE;
  } else {
    return FALSE;
  }
}

/*......................................................................*/

// Helper macros for iterating over entries within a transaction
// record

#define FOR_EACH_ENTRY(_t,_x,CODE) do {                                        \
  StgTRecHeader *__t = (_t);                                                   \
  StgTRecChunk *__c = __t -> current_chunk;                                    \
  StgWord __limit = __c -> next_entry_idx;                                     \
  TRACE("%p : FOR_EACH_ENTRY, current_chunk=%p limit=%ld", __t, __c, __limit); \
  while (__c != END_STM_CHUNK_LIST) {                                          \
    StgWord __i;                                                               \
    for (__i = 0; __i < __limit; __i ++) {                                     \
      TRecEntry *_x = &(__c -> entries[__i]);                                  \
      do { CODE } while (0);                                                   \
    }                                                                          \
    __c = __c -> prev_chunk;                                                   \
    __limit = TREC_CHUNK_NUM_ENTRIES;                                          \
  }                                                                            \
 exit_for_each:                                                                \
  if (FALSE) goto exit_for_each;                                               \
} while (0)

#define BREAK_FOR_EACH goto exit_for_each
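
/* Illustrative use only (a sketch, not part of the build): count how many
 * entries in a TRec are updates. FOR_EACH_ENTRY binds each TRecEntry to the
 * name given as its second argument, and BREAK_FOR_EACH may be used inside
 * the body to leave the loop early:
 *
 *   int n_updates = 0;
 *   FOR_EACH_ENTRY(trec, e, {
 *     if (e -> expected_value != e -> new_value) {
 *       n_updates ++;
 *     }
 *   });
 */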

/*......................................................................*/

// if REUSE_MEMORY is defined then attempt to re-use descriptors, log chunks,
// and wait queue entries without GC

#define REUSE_MEMORY

/*......................................................................*/

#define IF_STM_UNIPROC(__X)  do { } while (0)
#define IF_STM_CG_LOCK(__X)  do { } while (0)
#define IF_STM_FG_LOCKS(__X) do { } while (0)

#if defined(STM_UNIPROC)
#undef IF_STM_UNIPROC
#define IF_STM_UNIPROC(__X)  do { __X } while (0)
static const StgBool config_use_read_phase = FALSE;

static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
}

static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  result = s -> current_value;
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update) {
  TRACE("%p : unlock_tvar(%p)", trec, s);
  if (force_update) {
    s -> current_value = c;
    dirty_TVAR(cap,s);
  }
}

static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
                              StgTVar *s STG_UNUSED,
                              StgClosure *expected) {
  StgClosure *result;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  result = s -> current_value;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- uniproc
  return TRUE;
}

static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- uniproc
}
#endif

#if defined(STM_CG_LOCK) /*........................................*/

#undef IF_STM_CG_LOCK
#define IF_STM_CG_LOCK(__X)  do { __X } while (0)
static const StgBool config_use_read_phase = FALSE;
static volatile StgTRecHeader *smp_locked = NULL;

static void lock_stm(StgTRecHeader *trec) {
  while (cas(&smp_locked, NULL, trec) != NULL) { }
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
  ASSERT(smp_locked == trec);
  smp_locked = 0;
}

static StgClosure *lock_tvar(StgTRecHeader *trec STG_UNUSED,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  ASSERT(smp_locked == trec);
  result = s -> current_value;
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update) {
  TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
  ASSERT(smp_locked == trec);
  if (force_update) {
    s -> current_value = c;
    dirty_TVAR(cap,s);
  }
}

static StgBool cond_lock_tvar(StgTRecHeader *trec STG_UNUSED,
                              StgTVar *s STG_UNUSED,
                              StgClosure *expected) {
  StgClosure *result;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  ASSERT(smp_locked == trec);
  result = s -> current_value;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- protected by STM lock
  return TRUE;
}

static void unlock_inv(StgAtomicInvariant *inv STG_UNUSED) {
  // Nothing -- protected by STM lock
}
#endif

#if defined(STM_FG_LOCKS) /*...................................*/

#undef IF_STM_FG_LOCKS
#define IF_STM_FG_LOCKS(__X) do { __X } while (0)
static const StgBool config_use_read_phase = TRUE;

static void lock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : lock_stm()", trec);
}

static void unlock_stm(StgTRecHeader *trec STG_UNUSED) {
  TRACE("%p : unlock_stm()", trec);
}

static StgClosure *lock_tvar(StgTRecHeader *trec,
                             StgTVar *s STG_UNUSED) {
  StgClosure *result;
  TRACE("%p : lock_tvar(%p)", trec, s);
  do {
    do {
      result = s -> current_value;
    } while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info);
  } while (cas((void *)&(s -> current_value),
               (StgWord)result, (StgWord)trec) != (StgWord)result);
  return result;
}

static void unlock_tvar(Capability *cap,
                        StgTRecHeader *trec STG_UNUSED,
                        StgTVar *s,
                        StgClosure *c,
                        StgBool force_update STG_UNUSED) {
  TRACE("%p : unlock_tvar(%p, %p)", trec, s, c);
  ASSERT(s -> current_value == (StgClosure *)trec);
  s -> current_value = c;
  dirty_TVAR(cap,s);
}

static StgBool cond_lock_tvar(StgTRecHeader *trec,
                              StgTVar *s,
                              StgClosure *expected) {
  StgClosure *result;
  StgWord w;
  TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
  w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
  result = (StgClosure *)w;
  TRACE("%p : %s", trec, (result == expected) ? "success" : "failure");
  return (result == expected);
}

static StgBool lock_inv(StgAtomicInvariant *inv) {
  return (cas(&(inv -> lock), 0, 1) == 0);
}

static void unlock_inv(StgAtomicInvariant *inv) {
  ASSERT(inv -> lock == 1);
  inv -> lock = 0;
}
#endif

/*......................................................................*/

static StgBool watcher_is_tso(StgTVarWatchQueue *q) {
  StgClosure *c = q -> closure;
  const StgInfoTable *info = get_itbl(c);
  return (info -> type) == TSO;
}

static StgBool watcher_is_invariant(StgTVarWatchQueue *q) {
  StgClosure *c = q -> closure;
  return (c->header.info == &stg_ATOMIC_INVARIANT_info);
}

/*......................................................................*/

// Helper functions for thread blocking and unblocking

static void park_tso(StgTSO *tso) {
  ASSERT(tso -> why_blocked == NotBlocked);
  tso -> why_blocked = BlockedOnSTM;
  tso -> block_info.closure = (StgClosure *) END_TSO_QUEUE;
  TRACE("park_tso on tso=%p", tso);
}

static void unpark_tso(Capability *cap, StgTSO *tso) {
  // We will continue unparking threads while they remain on one of the wait
  // queues: it's up to the thread itself to remove it from the wait queues
  // if it decides to do so when it is scheduled.

  // Unblocking a TSO from BlockedOnSTM is done under the TSO lock,
  // to avoid multiple CPUs unblocking the same TSO, and also to
  // synchronise with throwTo(). The first time the TSO is unblocked
  // we mark this fact by setting block_info.closure == STM_AWOKEN.
  // This way we can avoid sending further wakeup messages in the
  // future.
  lockTSO(tso);
  if (tso->why_blocked == BlockedOnSTM &&
      tso->block_info.closure == &stg_STM_AWOKEN_closure) {
    TRACE("unpark_tso already woken up tso=%p", tso);
  } else if (tso -> why_blocked == BlockedOnSTM) {
    TRACE("unpark_tso on tso=%p", tso);
    tso->block_info.closure = &stg_STM_AWOKEN_closure;
    tryWakeupThread(cap,tso);
  } else {
    TRACE("spurious unpark_tso on tso=%p", tso);
  }
  unlockTSO(tso);
}

static void unpark_waiters_on(Capability *cap, StgTVar *s) {
  StgTVarWatchQueue *q;
  StgTVarWatchQueue *trail;
  TRACE("unpark_waiters_on tvar=%p", s);
  // unblock TSOs in reverse order, to be a bit fairer (#2319)
  for (q = s -> first_watch_queue_entry, trail = q;
       q != END_STM_WATCH_QUEUE;
       q = q -> next_queue_entry) {
    trail = q;
  }
  q = trail;
  for (;
       q != END_STM_WATCH_QUEUE;
       q = q -> prev_queue_entry) {
    if (watcher_is_tso(q)) {
      unpark_tso(cap, (StgTSO *)(q -> closure));
    }
  }
}

/*......................................................................*/

// Helper functions for allocation and initialization

static StgInvariantCheckQueue *new_stg_invariant_check_queue(Capability *cap,
                                                             StgAtomicInvariant *invariant) {
  StgInvariantCheckQueue *result;
  result = (StgInvariantCheckQueue *)allocate(cap, sizeofW(StgInvariantCheckQueue));
  SET_HDR (result, &stg_INVARIANT_CHECK_QUEUE_info, CCS_SYSTEM);
  result -> invariant = invariant;
  result -> my_execution = NO_TREC;
  return result;
}

static StgTVarWatchQueue *new_stg_tvar_watch_queue(Capability *cap,
                                                   StgClosure *closure) {
  StgTVarWatchQueue *result;
  result = (StgTVarWatchQueue *)allocate(cap, sizeofW(StgTVarWatchQueue));
  SET_HDR (result, &stg_TVAR_WATCH_QUEUE_info, CCS_SYSTEM);
  result -> closure = closure;
  return result;
}

static StgTRecChunk *new_stg_trec_chunk(Capability *cap) {
  StgTRecChunk *result;
  result = (StgTRecChunk *)allocate(cap, sizeofW(StgTRecChunk));
  SET_HDR (result, &stg_TREC_CHUNK_info, CCS_SYSTEM);
  result -> prev_chunk = END_STM_CHUNK_LIST;
  result -> next_entry_idx = 0;
  return result;
}

static StgTRecHeader *new_stg_trec_header(Capability *cap,
                                          StgTRecHeader *enclosing_trec) {
  StgTRecHeader *result;
  result = (StgTRecHeader *) allocate(cap, sizeofW(StgTRecHeader));
  SET_HDR (result, &stg_TREC_HEADER_info, CCS_SYSTEM);

  result -> enclosing_trec = enclosing_trec;
  result -> current_chunk = new_stg_trec_chunk(cap);
  result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;

  if (enclosing_trec == NO_TREC) {
    result -> state = TREC_ACTIVE;
  } else {
    ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
           enclosing_trec -> state == TREC_CONDEMNED);
    result -> state = enclosing_trec -> state;
  }

  return result;
}

/*......................................................................*/

// Allocation / deallocation functions that retain per-capability lists
// of closures that can be re-used

static StgInvariantCheckQueue *alloc_stg_invariant_check_queue(Capability *cap,
                                                               StgAtomicInvariant *invariant) {
  StgInvariantCheckQueue *result = NULL;
  if (cap -> free_invariant_check_queues == END_INVARIANT_CHECK_QUEUE) {
    result = new_stg_invariant_check_queue(cap, invariant);
  } else {
    result = cap -> free_invariant_check_queues;
    result -> invariant = invariant;
    result -> my_execution = NO_TREC;
    cap -> free_invariant_check_queues = result -> next_queue_entry;
  }
  return result;
}

static StgTVarWatchQueue *alloc_stg_tvar_watch_queue(Capability *cap,
                                                     StgClosure *closure) {
  StgTVarWatchQueue *result = NULL;
  if (cap -> free_tvar_watch_queues == END_STM_WATCH_QUEUE) {
    result = new_stg_tvar_watch_queue(cap, closure);
  } else {
    result = cap -> free_tvar_watch_queues;
    result -> closure = closure;
    cap -> free_tvar_watch_queues = result -> next_queue_entry;
  }
  return result;
}

static void free_stg_tvar_watch_queue(Capability *cap,
                                      StgTVarWatchQueue *wq) {
#if defined(REUSE_MEMORY)
  wq -> next_queue_entry = cap -> free_tvar_watch_queues;
  cap -> free_tvar_watch_queues = wq;
#endif
}

static StgTRecChunk *alloc_stg_trec_chunk(Capability *cap) {
  StgTRecChunk *result = NULL;
  if (cap -> free_trec_chunks == END_STM_CHUNK_LIST) {
    result = new_stg_trec_chunk(cap);
  } else {
    result = cap -> free_trec_chunks;
    cap -> free_trec_chunks = result -> prev_chunk;
    result -> prev_chunk = END_STM_CHUNK_LIST;
    result -> next_entry_idx = 0;
  }
  return result;
}

static void free_stg_trec_chunk(Capability *cap,
                                StgTRecChunk *c) {
#if defined(REUSE_MEMORY)
  c -> prev_chunk = cap -> free_trec_chunks;
  cap -> free_trec_chunks = c;
#endif
}

static StgTRecHeader *alloc_stg_trec_header(Capability *cap,
                                            StgTRecHeader *enclosing_trec) {
  StgTRecHeader *result = NULL;
  if (cap -> free_trec_headers == NO_TREC) {
    result = new_stg_trec_header(cap, enclosing_trec);
  } else {
    result = cap -> free_trec_headers;
    cap -> free_trec_headers = result -> enclosing_trec;
    result -> enclosing_trec = enclosing_trec;
    result -> current_chunk -> next_entry_idx = 0;
    result -> invariants_to_check = END_INVARIANT_CHECK_QUEUE;
    if (enclosing_trec == NO_TREC) {
      result -> state = TREC_ACTIVE;
    } else {
      ASSERT(enclosing_trec -> state == TREC_ACTIVE ||
             enclosing_trec -> state == TREC_CONDEMNED);
      result -> state = enclosing_trec -> state;
    }
  }
  return result;
}

static void free_stg_trec_header(Capability *cap,
                                 StgTRecHeader *trec) {
#if defined(REUSE_MEMORY)
  StgTRecChunk *chunk = trec -> current_chunk -> prev_chunk;
  while (chunk != END_STM_CHUNK_LIST) {
    StgTRecChunk *prev_chunk = chunk -> prev_chunk;
    free_stg_trec_chunk(cap, chunk);
    chunk = prev_chunk;
  }
  trec -> current_chunk -> prev_chunk = END_STM_CHUNK_LIST;
  trec -> enclosing_trec = cap -> free_trec_headers;
  cap -> free_trec_headers = trec;
#endif
}

/*......................................................................*/

// Helper functions for managing waiting lists

static void build_watch_queue_entries_for_trec(Capability *cap,
                                               StgTSO *tso,
                                               StgTRecHeader *trec) {
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE);

  TRACE("%p : build_watch_queue_entries_for_trec()", trec);

  FOR_EACH_ENTRY(trec, e, {
    StgTVar *s;
    StgTVarWatchQueue *q;
    StgTVarWatchQueue *fq;
    s = e -> tvar;
    TRACE("%p : adding tso=%p to watch queue for tvar=%p", trec, tso, s);
    ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
    NACQ_ASSERT(s -> current_value == e -> expected_value);
    fq = s -> first_watch_queue_entry;
    q = alloc_stg_tvar_watch_queue(cap, (StgClosure*) tso);
    q -> next_queue_entry = fq;
    q -> prev_queue_entry = END_STM_WATCH_QUEUE;
    if (fq != END_STM_WATCH_QUEUE) {
      fq -> prev_queue_entry = q;
    }
    s -> first_watch_queue_entry = q;
    e -> new_value = (StgClosure *) q;
    dirty_TVAR(cap,s); // we modified first_watch_queue_entry
  });
}

static void remove_watch_queue_entries_for_trec(Capability *cap,
                                                StgTRecHeader *trec) {
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT(trec -> state == TREC_WAITING ||
         trec -> state == TREC_CONDEMNED);

  TRACE("%p : remove_watch_queue_entries_for_trec()", trec);

  FOR_EACH_ENTRY(trec, e, {
    StgTVar *s;
    StgTVarWatchQueue *pq;
    StgTVarWatchQueue *nq;
    StgTVarWatchQueue *q;
    StgClosure *saw;
    s = e -> tvar;
    saw = lock_tvar(trec, s);
    q = (StgTVarWatchQueue *) (e -> new_value);
    TRACE("%p : removing tso=%p from watch queue for tvar=%p",
          trec,
          q -> closure,
          s);
    ACQ_ASSERT(s -> current_value == (StgClosure *)trec);
    nq = q -> next_queue_entry;
    pq = q -> prev_queue_entry;
    if (nq != END_STM_WATCH_QUEUE) {
      nq -> prev_queue_entry = pq;
    }
    if (pq != END_STM_WATCH_QUEUE) {
      pq -> next_queue_entry = nq;
    } else {
      ASSERT(s -> first_watch_queue_entry == q);
      s -> first_watch_queue_entry = nq;
      dirty_TVAR(cap,s); // we modified first_watch_queue_entry
    }
    free_stg_tvar_watch_queue(cap, q);
    unlock_tvar(cap, trec, s, saw, FALSE);
  });
}

/*......................................................................*/

static TRecEntry *get_new_entry(Capability *cap,
                                StgTRecHeader *t) {
  TRecEntry *result;
  StgTRecChunk *c;
  int i;

  c = t -> current_chunk;
  i = c -> next_entry_idx;
  ASSERT(c != END_STM_CHUNK_LIST);

  if (i < TREC_CHUNK_NUM_ENTRIES) {
    // Continue to use current chunk
    result = &(c -> entries[i]);
    c -> next_entry_idx ++;
  } else {
    // Current chunk is full: allocate a fresh one
    StgTRecChunk *nc;
    nc = alloc_stg_trec_chunk(cap);
    nc -> prev_chunk = c;
    nc -> next_entry_idx = 1;
    t -> current_chunk = nc;
    result = &(nc -> entries[0]);
  }

  return result;
}

/*......................................................................*/

static void merge_update_into(Capability *cap,
                              StgTRecHeader *t,
                              StgTVar *tvar,
                              StgClosure *expected_value,
                              StgClosure *new_value) {
  int found;

  // Look for an entry in this trec
  found = FALSE;
  FOR_EACH_ENTRY(t, e, {
    StgTVar *s;
    s = e -> tvar;
    if (s == tvar) {
      found = TRUE;
      if (e -> expected_value != expected_value) {
        // Must abort if the two entries start from different values
        TRACE("%p : update entries inconsistent at %p (%p vs %p)",
              t, tvar, e -> expected_value, expected_value);
        t -> state = TREC_CONDEMNED;
      }
      e -> new_value = new_value;
      BREAK_FOR_EACH;
    }
  });

  if (!found) {
    // No entry so far in this trec
    TRecEntry *ne;
    ne = get_new_entry(cap, t);
    ne -> tvar = tvar;
    ne -> expected_value = expected_value;
    ne -> new_value = new_value;
  }
}

/*......................................................................*/

static void merge_read_into(Capability *cap,
                            StgTRecHeader *trec,
                            StgTVar *tvar,
                            StgClosure *expected_value)
{
  int found;
  StgTRecHeader *t;

  found = FALSE;

  //
  // See #7493
  //
  // We need to look for an existing entry *anywhere* in the stack of
  // nested transactions. Otherwise, in stmCommitNestedTransaction()
  // we can't tell the difference between
  //
  //   (1) a read-only entry
  //   (2) an entry that writes back the original value
  //
  // Since in both cases e->new_value == e->expected_value. But in (1)
  // we want to do nothing, and in (2) we want to update e->new_value
  // in the outer transaction.
  //
  // Here we deal with the first possibility: we never create a
  // read-only entry in an inner transaction if there is an existing
  // outer entry; so we never have an inner read and an outer update.
  // So then in stmCommitNestedTransaction() we know we can always
  // write e->new_value over the outer entry, because the inner entry
  // is the most up to date.
  //
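  // Illustrative example (a sketch only): suppose an enclosing trec
  // already holds an update entry for tvar recording 0 -> 1. If we
  // blindly added a read-only entry for tvar to this trec, then at
  // stmCommitNestedTransaction() that entry would look just like case
  // (2) above and could clobber the enclosing entry's new_value. The
  // search below instead finds the existing entry, validates the read
  // against it (condemning the transaction on a mismatch), and creates
  // no new entry.
  //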
  for (t = trec; !found && t != NO_TREC; t = t -> enclosing_trec)
  {
    FOR_EACH_ENTRY(t, e, {
      if (e -> tvar == tvar) {
        found = TRUE;
        if (e -> expected_value != expected_value) {
          // Must abort if the two entries start from different values
          TRACE("%p : read entries inconsistent at %p (%p vs %p)",
                t, tvar, e -> expected_value, expected_value);
          t -> state = TREC_CONDEMNED;
        }
        BREAK_FOR_EACH;
      }
    });
  }

  if (!found) {
    // No entry found
    TRecEntry *ne;
    ne = get_new_entry(cap, trec);
    ne -> tvar = tvar;
    ne -> expected_value = expected_value;
    ne -> new_value = expected_value;
  }
}

/*......................................................................*/

static StgBool entry_is_update(TRecEntry *e) {
  StgBool result;
  result = (e -> expected_value != e -> new_value);
  return result;
}

#if defined(STM_FG_LOCKS)
static StgBool entry_is_read_only(TRecEntry *e) {
  StgBool result;
  result = (e -> expected_value == e -> new_value);
  return result;
}

static StgBool tvar_is_locked(StgTVar *s, StgTRecHeader *h) {
  StgClosure *c;
  StgBool result;
  c = s -> current_value;
  result = (c == (StgClosure *) h);
  return result;
}
#endif

// revert_ownership : release a lock on a TVar, storing back
// the value that it held when the lock was acquired. "revert_all"
// is set in stmWait and stmReWait when we acquired locks on all of
// the TVars involved. "revert_all" is not set in commit operations
// where we don't lock TVars that have been read from but not updated.

static void revert_ownership(Capability *cap STG_UNUSED,
                             StgTRecHeader *trec STG_UNUSED,
                             StgBool revert_all STG_UNUSED) {
#if defined(STM_FG_LOCKS)
  FOR_EACH_ENTRY(trec, e, {
    if (revert_all || entry_is_update(e)) {
      StgTVar *s;
      s = e -> tvar;
      if (tvar_is_locked(s, trec)) {
        unlock_tvar(cap, trec, s, e -> expected_value, TRUE);
      }
    }
  });
#endif
}

/*......................................................................*/

// validate_and_acquire_ownership : this performs the twin functions
// of checking that the TVars referred to by entries in trec hold the
// expected values and:
//
//   - locking the TVar (on updated TVars during commit, or all TVars
//     during wait)
//
//   - recording the identity of the TRec that wrote the value seen in the
//     TVar (on non-updated TVars during commit). These values are
//     stashed in the TRec entries and are then checked in check_read_only
//     to ensure that an atomic snapshot of all of these locations has been
//     seen.

static StgBool validate_and_acquire_ownership (Capability *cap,
                                               StgTRecHeader *trec,
                                               int acquire_all,
                                               int retain_ownership) {
  StgBool result;

  if (shake()) {
    TRACE("%p : shake, pretending trec is invalid when it may not be", trec);
    return FALSE;
  }

  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));
  result = !((trec -> state) == TREC_CONDEMNED);
  if (result) {
    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s;
      s = e -> tvar;
      if (acquire_all || entry_is_update(e)) {
        TRACE("%p : trying to acquire %p", trec, s);
        if (!cond_lock_tvar(trec, s, e -> expected_value)) {
          TRACE("%p : failed to acquire %p", trec, s);
          result = FALSE;
          BREAK_FOR_EACH;
        }
      } else {
        ASSERT(config_use_read_phase);
        IF_STM_FG_LOCKS({
          TRACE("%p : will need to check %p", trec, s);
          if (s -> current_value != e -> expected_value) {
            TRACE("%p : doesn't match", trec);
            result = FALSE;
            BREAK_FOR_EACH;
          }
          e -> num_updates = s -> num_updates;
          if (s -> current_value != e -> expected_value) {
            TRACE("%p : doesn't match (race)", trec);
            result = FALSE;
            BREAK_FOR_EACH;
          } else {
            TRACE("%p : need to check version %ld", trec, e -> num_updates);
          }
        });
      }
    });
  }

  if ((!result) || (!retain_ownership)) {
    revert_ownership(cap, trec, acquire_all);
  }

  return result;
}

// check_read_only : check that we've seen an atomic snapshot of the
// non-updated TVars accessed by a trec. This checks that the last TRec to
// commit an update to the TVar is unchanged since the value was stashed in
// validate_and_acquire_ownership. If no update is seen to any TVar then
// all of them contained their expected values at the start of the call to
// check_read_only.
//
// The paper "Concurrent programming without locks" (under submission), and
// Keir Fraser's PhD dissertation "Practical lock-free programming", discuss
// this kind of algorithm.

static StgBool check_read_only(StgTRecHeader *trec STG_UNUSED) {
  StgBool result = TRUE;

  ASSERT(config_use_read_phase);
  IF_STM_FG_LOCKS({
    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s;
      s = e -> tvar;
      if (entry_is_read_only(e)) {
        TRACE("%p : check_read_only for TVar %p, saw %ld", trec, s, e -> num_updates);

        // Note we need both checks and in this order as the TVar could be
        // locked by another transaction that is committing but has not yet
        // incremented `num_updates` (See #7815).
        if (s -> current_value != e -> expected_value ||
            s -> num_updates != e -> num_updates) {
          TRACE("%p : mismatch", trec);
          result = FALSE;
          BREAK_FOR_EACH;
        }
      }
    });
  });

  return result;
}


/************************************************************************/

void stmPreGCHook (Capability *cap) {
  lock_stm(NO_TREC);
  TRACE("stmPreGCHook");
  cap->free_tvar_watch_queues = END_STM_WATCH_QUEUE;
  cap->free_trec_chunks = END_STM_CHUNK_LIST;
  cap->free_trec_headers = NO_TREC;
  unlock_stm(NO_TREC);
}

/************************************************************************/

// check_read_only relies on version numbers held in TVars' "num_updates"
// fields not wrapping around while a transaction is being committed. The
// version number is incremented each time an update is committed to the TVar.
// This is unlikely to wrap around when 32-bit integers are used for the
// counts, but to ensure correctness we maintain a shared count of the maximum
// number of commit operations that may occur and check that this has
// not increased by more than 2^32 during a commit.
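//
// As a worked example (the capability count is illustrative): with
// TOKEN_BATCH_SIZE = 1024 and n_capabilities = 8, a committer can bound the
// number of concurrent commits by (max_commits_at_end - max_commits_at_start)
// plus up to 8 * 1024 tokens already handed out in batches but not yet spent.
// Only if that total reaches 2^32 could a 32-bit num_updates field have
// wrapped all the way around and fooled the version check, so
// stmCommitTransaction() conservatively fails the commit in that case.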

#define TOKEN_BATCH_SIZE 1024

static volatile StgInt64 max_commits = 0;

#if defined(THREADED_RTS)
static volatile StgWord token_locked = FALSE;

static void getTokenBatch(Capability *cap) {
  while (cas((void *)&token_locked, FALSE, TRUE) == TRUE) { /* nothing */ }
  max_commits += TOKEN_BATCH_SIZE;
  TRACE("%p : cap got token batch, max_commits=%" FMT_Int64, cap, max_commits);
  cap -> transaction_tokens = TOKEN_BATCH_SIZE;
  token_locked = FALSE;
}

static void getToken(Capability *cap) {
  if (cap -> transaction_tokens == 0) {
    getTokenBatch(cap);
  }
  cap -> transaction_tokens --;
}
#else
static void getToken(Capability *cap STG_UNUSED) {
  // Nothing
}
#endif

/*......................................................................*/

StgTRecHeader *stmStartTransaction(Capability *cap,
                                   StgTRecHeader *outer) {
  StgTRecHeader *t;
  TRACE("%p : stmStartTransaction with %d tokens",
        outer,
        cap -> transaction_tokens);

  getToken(cap);

  t = alloc_stg_trec_header(cap, outer);
  TRACE("%p : stmStartTransaction()=%p", outer, t);
  return t;
}

/*......................................................................*/

void stmAbortTransaction(Capability *cap,
                         StgTRecHeader *trec) {
  StgTRecHeader *et;
  TRACE("%p : stmAbortTransaction", trec);
  ASSERT(trec != NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  et = trec -> enclosing_trec;
  if (et == NO_TREC) {
    // We're a top-level transaction: remove any watch queue entries that
    // we may have.
    TRACE("%p : aborting top-level transaction", trec);

    if (trec -> state == TREC_WAITING) {
      ASSERT(trec -> enclosing_trec == NO_TREC);
      TRACE("%p : stmAbortTransaction aborting waiting transaction", trec);
      remove_watch_queue_entries_for_trec(cap, trec);
    }

  } else {
    // We're a nested transaction: merge our read set into our parent's
    TRACE("%p : retaining read-set into parent %p", trec, et);

    FOR_EACH_ENTRY(trec, e, {
      StgTVar *s = e -> tvar;
      merge_read_into(cap, et, s, e -> expected_value);
    });
  }

  trec -> state = TREC_ABORTED;
  unlock_stm(trec);

  TRACE("%p : stmAbortTransaction done", trec);
}

/*......................................................................*/

void stmFreeAbortedTRec(Capability *cap,
                        StgTRecHeader *trec) {
  TRACE("%p : stmFreeAbortedTRec", trec);
  ASSERT(trec != NO_TREC);
  ASSERT((trec -> state == TREC_CONDEMNED) ||
         (trec -> state == TREC_ABORTED));

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmFreeAbortedTRec done", trec);
}

/*......................................................................*/

void stmCondemnTransaction(Capability *cap,
                           StgTRecHeader *trec) {
  TRACE("%p : stmCondemnTransaction", trec);
  ASSERT(trec != NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  if (trec -> state == TREC_WAITING) {
    ASSERT(trec -> enclosing_trec == NO_TREC);
    TRACE("%p : stmCondemnTransaction condemning waiting transaction", trec);
    remove_watch_queue_entries_for_trec(cap, trec);
  }
  trec -> state = TREC_CONDEMNED;
  unlock_stm(trec);

  TRACE("%p : stmCondemnTransaction done", trec);
}

/*......................................................................*/

StgBool stmValidateNestOfTransactions(Capability *cap, StgTRecHeader *trec) {
  StgTRecHeader *t;
  StgBool result;

  TRACE("%p : stmValidateNestOfTransactions", trec);
  ASSERT(trec != NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  t = trec;
  result = TRUE;
  while (t != NO_TREC) {
    result &= validate_and_acquire_ownership(cap, t, TRUE, FALSE);
    t = t -> enclosing_trec;
  }

  if (!result && trec -> state != TREC_WAITING) {
    trec -> state = TREC_CONDEMNED;
  }

  unlock_stm(trec);

  TRACE("%p : stmValidateNestOfTransactions()=%d", trec, result);
  return result;
}

/*......................................................................*/

static TRecEntry *get_entry_for(StgTRecHeader *trec, StgTVar *tvar, StgTRecHeader **in) {
  TRecEntry *result = NULL;

  TRACE("%p : get_entry_for TVar %p", trec, tvar);
  ASSERT(trec != NO_TREC);

  do {
    FOR_EACH_ENTRY(trec, e, {
      if (e -> tvar == tvar) {
        result = e;
        if (in != NULL) {
          *in = trec;
        }
        BREAK_FOR_EACH;
      }
    });
    trec = trec -> enclosing_trec;
  } while (result == NULL && trec != NO_TREC);

  return result;
}

/*......................................................................*/

/*
 * Add/remove links between an invariant and the TVars on which it depends.
 * The caller must have locked the TVars involved and the invariant.
 */

static void disconnect_invariant(Capability *cap,
                                 StgAtomicInvariant *inv) {
  StgTRecHeader *last_execution = inv -> last_execution;

  TRACE("unhooking last execution inv=%p trec=%p", inv, last_execution);

  FOR_EACH_ENTRY(last_execution, e, {
    StgTVar *s = e -> tvar;
    StgTVarWatchQueue *q = s -> first_watch_queue_entry;
    DEBUG_ONLY( StgBool found = FALSE );
    TRACE("  looking for trec on tvar=%p", s);
    for (q = s -> first_watch_queue_entry;
         q != END_STM_WATCH_QUEUE;
         q = q -> next_queue_entry) {
      if (q -> closure == (StgClosure*)inv) {
        StgTVarWatchQueue *pq;
        StgTVarWatchQueue *nq;
        nq = q -> next_queue_entry;
        pq = q -> prev_queue_entry;
        if (nq != END_STM_WATCH_QUEUE) {
          nq -> prev_queue_entry = pq;
        }
        if (pq != END_STM_WATCH_QUEUE) {
          pq -> next_queue_entry = nq;
        } else {
          ASSERT(s -> first_watch_queue_entry == q);
          s -> first_watch_queue_entry = nq;
          dirty_TVAR(cap,s); // we modified first_watch_queue_entry
        }
        TRACE("  found it in watch queue entry %p", q);
        free_stg_tvar_watch_queue(cap, q);
        DEBUG_ONLY( found = TRUE );
        break;
      }
    }
    ASSERT(found);
  });
  inv -> last_execution = NO_TREC;
}

static void connect_invariant_to_trec(Capability *cap,
                                      StgAtomicInvariant *inv,
                                      StgTRecHeader *my_execution) {
  TRACE("connecting execution inv=%p trec=%p", inv, my_execution);

  ASSERT(inv -> last_execution == NO_TREC);

  FOR_EACH_ENTRY(my_execution, e, {
    StgTVar *s = e -> tvar;
    StgTVarWatchQueue *q = alloc_stg_tvar_watch_queue(cap, (StgClosure*)inv);
    StgTVarWatchQueue *fq = s -> first_watch_queue_entry;

    // We leave "last_execution" holding the values that will be
    // in the heap after the transaction we're in the process
    // of committing has finished.
    TRecEntry *entry = get_entry_for(my_execution -> enclosing_trec, s, NULL);
    if (entry != NULL) {
      e -> expected_value = entry -> new_value;
      e -> new_value = entry -> new_value;
    }

    TRACE("  linking trec on tvar=%p value=%p q=%p", s, e -> expected_value, q);
    q -> next_queue_entry = fq;
    q -> prev_queue_entry = END_STM_WATCH_QUEUE;
    if (fq != END_STM_WATCH_QUEUE) {
      fq -> prev_queue_entry = q;
    }
    s -> first_watch_queue_entry = q;
    dirty_TVAR(cap,s); // we modified first_watch_queue_entry
  });

  inv -> last_execution = my_execution;
}

/*
 * Add a new invariant to the trec's list of invariants to check on commit
 */
void stmAddInvariantToCheck(Capability *cap,
                            StgTRecHeader *trec,
                            StgClosure *code) {
  StgAtomicInvariant *invariant;
  StgInvariantCheckQueue *q;
  TRACE("%p : stmAddInvariantToCheck closure=%p", trec, code);
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE ||
         trec -> state == TREC_CONDEMNED);


  // 1. Allocate an StgAtomicInvariant, set last_execution to NO_TREC
  //    to signal that this is a new invariant in the current atomic block

  invariant = (StgAtomicInvariant *) allocate(cap, sizeofW(StgAtomicInvariant));
  TRACE("%p : stmAddInvariantToCheck allocated invariant=%p", trec, invariant);
  SET_HDR (invariant, &stg_ATOMIC_INVARIANT_info, CCS_SYSTEM);
  invariant -> code = code;
  invariant -> last_execution = NO_TREC;
  invariant -> lock = 0;

  // 2. Allocate an StgInvariantCheckQueue entry, link it to the current trec

  q = alloc_stg_invariant_check_queue(cap, invariant);
  TRACE("%p : stmAddInvariantToCheck allocated q=%p", trec, q);
  q -> invariant = invariant;
  q -> my_execution = NO_TREC;
  q -> next_queue_entry = trec -> invariants_to_check;
  trec -> invariants_to_check = q;

  TRACE("%p : stmAddInvariantToCheck done", trec);
}

/*
 * Fill in the trec's list of invariants that might be violated by the
 * current transaction.
 */

StgInvariantCheckQueue *stmGetInvariantsToCheck(Capability *cap, StgTRecHeader *trec) {
  StgTRecChunk *c;
  TRACE("%p : stmGetInvariantsToCheck, head was %p",
        trec,
        trec -> invariants_to_check);

  ASSERT(trec != NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));
  ASSERT(trec -> enclosing_trec == NO_TREC);

  lock_stm(trec);
  c = trec -> current_chunk;
  while (c != END_STM_CHUNK_LIST) {
    unsigned int i;
    for (i = 0; i < c -> next_entry_idx; i ++) {
      TRecEntry *e = &(c -> entries[i]);
      if (entry_is_update(e)) {
        StgTVar *s = e -> tvar;
        StgClosure *old = lock_tvar(trec, s);

        // Pick up any invariants on the TVar being updated
        // by entry "e"

        StgTVarWatchQueue *q;
        TRACE("%p : checking for invariants on %p", trec, s);
        for (q = s -> first_watch_queue_entry;
             q != END_STM_WATCH_QUEUE;
             q = q -> next_queue_entry) {
          if (watcher_is_invariant(q)) {
            StgBool found = FALSE;
            StgInvariantCheckQueue *q2;
            TRACE("%p : Touching invariant %p", trec, q -> closure);
            for (q2 = trec -> invariants_to_check;
                 q2 != END_INVARIANT_CHECK_QUEUE;
                 q2 = q2 -> next_queue_entry) {
              if (q2 -> invariant == (StgAtomicInvariant*)(q -> closure)) {
                TRACE("%p : Already found %p", trec, q -> closure);
                found = TRUE;
                break;
              }
            }

            if (!found) {
              StgInvariantCheckQueue *q3;
              TRACE("%p : Not already found %p", trec, q -> closure);
              q3 = alloc_stg_invariant_check_queue(cap,
                                                   (StgAtomicInvariant*) q -> closure);
              q3 -> next_queue_entry = trec -> invariants_to_check;
              trec -> invariants_to_check = q3;
            }
          }
        }

        unlock_tvar(cap, trec, s, old, FALSE);
      }
    }
    c = c -> prev_chunk;
  }

  unlock_stm(trec);

  TRACE("%p : stmGetInvariantsToCheck, head now %p",
        trec,
        trec -> invariants_to_check);

  return (trec -> invariants_to_check);
}

/*......................................................................*/

StgBool stmCommitTransaction(Capability *cap, StgTRecHeader *trec) {
  int result;
  StgInt64 max_commits_at_start = max_commits;
  StgBool touched_invariants;
  StgBool use_read_phase;

  TRACE("%p : stmCommitTransaction()", trec);
  ASSERT(trec != NO_TREC);

  lock_stm(trec);

  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_CONDEMNED));

  // touched_invariants is true if we've written to a TVar with invariants
  // attached to it, or if we're trying to add a new invariant to the system.

  touched_invariants = (trec -> invariants_to_check != END_INVARIANT_CHECK_QUEUE);

  // If we have touched invariants then (i) lock the invariant, and (ii) add
  // the invariant's read set to our own. Step (i) is needed to serialize
  // concurrent transactions that attempt to make conflicting updates
  // to the invariant's trec (suppose it read from t1 and t2, and that one
  // concurrent transaction writes only to t1, and a second writes only to
  // t2). Step (ii) is needed so that both transactions will lock t1 and t2
  // to gain access to their wait lists (and hence be able to unhook the
  // invariant from both tvars).

  if (touched_invariants) {
    StgInvariantCheckQueue *q = trec -> invariants_to_check;
    TRACE("%p : locking invariants", trec);
    while (q != END_INVARIANT_CHECK_QUEUE) {
      StgTRecHeader *inv_old_trec;
      StgAtomicInvariant *inv;
      TRACE("%p : locking invariant %p", trec, q -> invariant);
      inv = q -> invariant;
      if (!lock_inv(inv)) {
        TRACE("%p : failed to lock %p", trec, inv);
        trec -> state = TREC_CONDEMNED;
        break;
      }

      inv_old_trec = inv -> last_execution;
      if (inv_old_trec != NO_TREC) {
        StgTRecChunk *c = inv_old_trec -> current_chunk;
        while (c != END_STM_CHUNK_LIST) {
          unsigned int i;
          for (i = 0; i < c -> next_entry_idx; i ++) {
            TRecEntry *e = &(c -> entries[i]);
            TRACE("%p : ensuring we lock TVars for %p", trec, e -> tvar);
            merge_read_into (cap, trec, e -> tvar, e -> expected_value);
          }
          c = c -> prev_chunk;
        }
      }
      q = q -> next_queue_entry;
    }
    TRACE("%p : finished locking invariants", trec);
  }

  // Use a read-phase (i.e. don't lock TVars we've read but not updated) if
  // (i) the configuration lets us use a read phase, and (ii) we've not
  // touched or introduced any invariants.
  //
  // In principle we could extend the implementation to support a read-phase
  // and invariants, but it complicates the logic: the links between
  // invariants and TVars are managed by the TVar watch queues which are
  // protected by the TVar's locks.

  use_read_phase = ((config_use_read_phase) && (!touched_invariants));

  result = validate_and_acquire_ownership(cap, trec, (!use_read_phase), TRUE);
  if (result) {
    // We now know that all the updated locations hold their expected values.
    ASSERT(trec -> state == TREC_ACTIVE);

    if (use_read_phase) {
      StgInt64 max_commits_at_end;
      StgInt64 max_concurrent_commits;
      TRACE("%p : doing read check", trec);
      result = check_read_only(trec);
      TRACE("%p : read-check %s", trec, result ? "succeeded" : "failed");

      max_commits_at_end = max_commits;
      max_concurrent_commits = ((max_commits_at_end - max_commits_at_start) +
                                (n_capabilities * TOKEN_BATCH_SIZE));
      if (((max_concurrent_commits >> 32) > 0) || shake()) {
        result = FALSE;
      }
    }

    if (result) {
      // We now know that all of the read-only locations held their expected
      // values at the end of the call to validate_and_acquire_ownership.
      // This forms the linearization point of the commit.

      // 1. If we have touched or introduced any invariants then unhook them
      //    from the TVars they depended on last time they were executed
      //    and hook them on the TVars that they now depend on.
      if (touched_invariants) {
        StgInvariantCheckQueue *q = trec -> invariants_to_check;
        while (q != END_INVARIANT_CHECK_QUEUE) {
          StgAtomicInvariant *inv = q -> invariant;
          if (inv -> last_execution != NO_TREC) {
            disconnect_invariant(cap, inv);
          }

          TRACE("%p : hooking up new execution trec=%p", trec, q -> my_execution);
          connect_invariant_to_trec(cap, inv, q -> my_execution);

          TRACE("%p : unlocking invariant %p", trec, inv);
          unlock_inv(inv);

          q = q -> next_queue_entry;
        }
      }

      // 2. Make the updates required by the transaction
      FOR_EACH_ENTRY(trec, e, {
        StgTVar *s;
        s = e -> tvar;
        if ((!use_read_phase) || (e -> new_value != e -> expected_value)) {
          // Either the entry is an update or we're not using a read phase:
          // write the value back to the TVar, unlocking it if necessary.

          ACQ_ASSERT(tvar_is_locked(s, trec));
          TRACE("%p : writing %p to %p, waking waiters", trec, e -> new_value, s);
          unpark_waiters_on(cap,s);
          IF_STM_FG_LOCKS({
            s -> num_updates ++;
          });
          unlock_tvar(cap, trec, s, e -> new_value, TRUE);
        }
        ACQ_ASSERT(!tvar_is_locked(s, trec));
      });
    } else {
      revert_ownership(cap, trec, FALSE);
    }
  }

  unlock_stm(trec);

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmCommitTransaction()=%d", trec, result);

  return result;
}

/*......................................................................*/

StgBool stmCommitNestedTransaction(Capability *cap, StgTRecHeader *trec) {
  StgTRecHeader *et;
  int result;
  ASSERT(trec != NO_TREC && trec -> enclosing_trec != NO_TREC);
  TRACE("%p : stmCommitNestedTransaction() into %p", trec, trec -> enclosing_trec);
  ASSERT((trec -> state == TREC_ACTIVE) || (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);

  et = trec -> enclosing_trec;
  result = validate_and_acquire_ownership(cap, trec, (!config_use_read_phase), TRUE);
  if (result) {
    // We now know that all the updated locations hold their expected values.

    if (config_use_read_phase) {
      TRACE("%p : doing read check", trec);
      result = check_read_only(trec);
    }
    if (result) {
      // We now know that all of the read-only locations held their expected
      // values at the end of the call to validate_and_acquire_ownership.
      // This forms the linearization point of the commit.

      TRACE("%p : read-check succeeded", trec);
      FOR_EACH_ENTRY(trec, e, {
        // Merge each entry into the enclosing transaction record, release all
        // locks.

        StgTVar *s;
        s = e -> tvar;
        if (entry_is_update(e)) {
          unlock_tvar(cap, trec, s, e -> expected_value, FALSE);
        }
        merge_update_into(cap, et, s, e -> expected_value, e -> new_value);
        ACQ_ASSERT(s -> current_value != (StgClosure *)trec);
      });
    } else {
      revert_ownership(cap, trec, FALSE);
    }
  }

  unlock_stm(trec);

  free_stg_trec_header(cap, trec);

  TRACE("%p : stmCommitNestedTransaction()=%d", trec, result);

  return result;
}

/*......................................................................*/

StgBool stmWait(Capability *cap, StgTSO *tso, StgTRecHeader *trec) {
  int result;
  TRACE("%p : stmWait(%p)", trec, tso);
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT((trec -> state == TREC_ACTIVE) ||
         (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
  if (result) {
    // The transaction is valid so far so we can actually start waiting.
    // (Otherwise the transaction was not valid and the thread will have to
    // retry it).

    // Put ourselves to sleep. We retain locks on all the TVars involved
    // until we are sound asleep : (a) on the wait queues, (b) BlockedOnSTM
    // in the TSO, (c) TREC_WAITING in the Trec.
    build_watch_queue_entries_for_trec(cap, tso, trec);
    park_tso(tso);
    trec -> state = TREC_WAITING;

    // We haven't released ownership of the transaction yet. The TSO
    // has been put on the wait queue for the TVars it is waiting for,
    // but we haven't yet tidied up the TSO's stack and made it safe
    // to wake up the TSO. Therefore, we must wait until the TSO is
    // safe to wake up before we release ownership - when all is well,
    // the runtime will call stmWaitUnlock() below, with the same
    // TRec.

  } else {
    unlock_stm(trec);
    free_stg_trec_header(cap, trec);
  }

  TRACE("%p : stmWait(%p)=%d", trec, tso, result);
  return result;
}


void
stmWaitUnlock(Capability *cap, StgTRecHeader *trec) {
  revert_ownership(cap, trec, TRUE);
  unlock_stm(trec);
}
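
/* Illustrative call sequence only (a simplified sketch; the actual call
 * sites live elsewhere in the RTS): stmWait and stmWaitUnlock are used as
 * a pair, as the comment in stmWait above describes.
 *
 *   if (stmWait(cap, tso, trec)) {
 *     // TSO is parked and on the TVars' watch queues; the TVars are
 *     // still locked. Tidy up the TSO's stack so it is safe to wake...
 *     stmWaitUnlock(cap, trec);   // ...then release ownership.
 *   } else {
 *     // Validation failed: trec has been freed and the transaction
 *     // must be re-run.
 *   }
 */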

/*......................................................................*/

StgBool stmReWait(Capability *cap, StgTSO *tso) {
  int result;
  StgTRecHeader *trec = tso->trec;

  TRACE("%p : stmReWait", trec);
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> enclosing_trec == NO_TREC);
  ASSERT((trec -> state == TREC_WAITING) ||
         (trec -> state == TREC_CONDEMNED));

  lock_stm(trec);
  result = validate_and_acquire_ownership(cap, trec, TRUE, TRUE);
  TRACE("%p : validation %s", trec, result ? "succeeded" : "failed");
  if (result) {
    // The transaction remains valid -- do nothing because it is already on
    // the wait queues
    ASSERT(trec -> state == TREC_WAITING);
    park_tso(tso);
    revert_ownership(cap, trec, TRUE);
  } else {
    // The transaction has become invalid. We can now remove it from the wait
    // queues.
    if (trec -> state != TREC_CONDEMNED) {
      remove_watch_queue_entries_for_trec (cap, trec);
    }
    free_stg_trec_header(cap, trec);
  }
  unlock_stm(trec);

  TRACE("%p : stmReWait()=%d", trec, result);
  return result;
}

/*......................................................................*/

static StgClosure *read_current_value(StgTRecHeader *trec STG_UNUSED, StgTVar *tvar) {
  StgClosure *result;
  result = tvar -> current_value;

#if defined(STM_FG_LOCKS)
  while (GET_INFO(UNTAG_CLOSURE(result)) == &stg_TREC_HEADER_info) {
    TRACE("%p : read_current_value(%p) saw %p", trec, tvar, result);
    result = tvar -> current_value;
  }
#endif

  TRACE("%p : read_current_value(%p)=%p", trec, tvar, result);
  return result;
}

/*......................................................................*/

StgClosure *stmReadTVar(Capability *cap,
                        StgTRecHeader *trec,
                        StgTVar *tvar) {
  StgTRecHeader *entry_in = NULL;
  StgClosure *result = NULL;
  TRecEntry *entry = NULL;
  TRACE("%p : stmReadTVar(%p)", trec, tvar);
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE ||
         trec -> state == TREC_CONDEMNED);

  entry = get_entry_for(trec, tvar, &entry_in);

  if (entry != NULL) {
    if (entry_in == trec) {
      // Entry found in our trec
      result = entry -> new_value;
    } else {
      // Entry found in another trec
      TRecEntry *new_entry = get_new_entry(cap, trec);
      new_entry -> tvar = tvar;
      new_entry -> expected_value = entry -> expected_value;
      new_entry -> new_value = entry -> new_value;
      result = new_entry -> new_value;
    }
  } else {
    // No entry found
    StgClosure *current_value = read_current_value(trec, tvar);
    TRecEntry *new_entry = get_new_entry(cap, trec);
    new_entry -> tvar = tvar;
    new_entry -> expected_value = current_value;
    new_entry -> new_value = current_value;
    result = current_value;
  }

  TRACE("%p : stmReadTVar(%p)=%p", trec, tvar, result);
  return result;
}

/*......................................................................*/

void stmWriteTVar(Capability *cap,
                  StgTRecHeader *trec,
                  StgTVar *tvar,
                  StgClosure *new_value) {

  StgTRecHeader *entry_in = NULL;
  TRecEntry *entry = NULL;
  TRACE("%p : stmWriteTVar(%p, %p)", trec, tvar, new_value);
  ASSERT(trec != NO_TREC);
  ASSERT(trec -> state == TREC_ACTIVE ||
         trec -> state == TREC_CONDEMNED);

  entry = get_entry_for(trec, tvar, &entry_in);

  if (entry != NULL) {
    if (entry_in == trec) {
      // Entry found in our trec
      entry -> new_value = new_value;
    } else {
      // Entry found in another trec
      TRecEntry *new_entry = get_new_entry(cap, trec);
      new_entry -> tvar = tvar;
      new_entry -> expected_value = entry -> expected_value;
      new_entry -> new_value = new_value;
    }
  } else {
    // No entry found
    StgClosure *current_value = read_current_value(trec, tvar);
    TRecEntry *new_entry = get_new_entry(cap, trec);
    new_entry -> tvar = tvar;
    new_entry -> expected_value = current_value;
    new_entry -> new_value = new_value;
  }

  TRACE("%p : stmWriteTVar done", trec);
}

/*......................................................................*/