Store a destination step in the block descriptor
rts/sm/BlockAlloc.c
/* -----------------------------------------------------------------------------
 *
 * (c) The GHC Team 1998-2008
 *
 * The block allocator and free list manager.
 *
 * This is the architecture independent part of the block allocator.
 * It requires only the following support from the operating system:
 *
 *    void *getMBlock(nat n);
 *
 * returns the address of an n*MBLOCK_SIZE region of memory, aligned on
 * an MBLOCK_SIZE boundary.  There are no other restrictions on the
 * addresses of memory returned by getMBlock().
 *
 * ---------------------------------------------------------------------------*/

#include "PosixSource.h"
#include "Rts.h"

#include "Storage.h"
#include "RtsUtils.h"
#include "BlockAlloc.h"

#include <string.h>

static void initMBlock(void *mblock);

// The free_list is kept in size-class buckets, smallest sizes first
// (see the implementation notes below).
// In THREADED_RTS mode, the free list is protected by sm_mutex.

/* -----------------------------------------------------------------------------

   Implementation notes
   ~~~~~~~~~~~~~~~~~~~~

   Terminology:
     - bdescr = block descriptor
     - bgroup = block group (1 or more adjacent blocks)
     - mblock = mega block
     - mgroup = mega group (1 or more adjacent mblocks)

   Invariants on block descriptors
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   bd->start always points to the start of the block.

   bd->free is either:
     - zero for a non-group-head; bd->link points to the head
     - (-1) for the head of a free block group
     - or it points within the block

   bd->blocks is either:
     - zero for a non-group-head; bd->link points to the head
     - number of blocks in this group otherwise

   bd->link either points to a block descriptor or is NULL

   The following fields are not used by the allocator:
     bd->flags
     bd->gen_no
     bd->step
     bd->dest

   Exceptions: we don't maintain invariants for all the blocks within a
   group on the free list, because it is expensive to modify every
   bdescr in a group when coalescing.  Just the head and last bdescrs
   will be correct for a group on the free list.


   Free lists
   ~~~~~~~~~~

   Preliminaries:
     - most allocations are for small blocks
     - sometimes the OS gives us new memory backwards in the address
       space, sometimes forwards, so we should not be biased towards
       any particular layout in the address space
     - We want to avoid fragmentation
     - We want allocation and freeing to be O(1) or close.

   Coalescing trick: when a bgroup is freed (freeGroup()), we can check
   whether it can be coalesced with other free bgroups by checking the
   bdescrs for the blocks on either side of it.  This means that we can
   coalesce in O(1) time.  Every free bgroup must have its head and tail
   bdescrs initialised, the rest don't matter.
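
   For example (a rough sketch of what freeGroup() below does when it
   looks for neighbours to coalesce with):

     next = p + p->blocks;                     // bdescr of the following group
     prev = p - 1;                             // tail bdescr of the preceding group
     if (prev->blocks == 0) prev = prev->link; // hop from a tail to its head
     // a neighbour whose free field is (-1) is itself free and can be merged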

   We keep the free list in buckets, using a heap-sort strategy.
   Bucket N contains blocks with sizes 2^N to 2^(N+1)-1.  The list of
   blocks in each bucket is doubly-linked, so that if a block is
   coalesced we can easily remove it from its current free list.

   To allocate a new block of size S, grab a block from bucket
   log2ceiling(S) (i.e. log2() rounded up), in which all blocks are at
   least as big as S, and split it if necessary.  If there are no
   blocks in that bucket, look at bigger buckets until a block is found.
   Allocation is therefore O(logN) time.

   To free a block:
     - coalesce it with neighbours.
     - remove coalesced neighbour(s) from free list(s)
     - add the new (coalesced) block to the front of the appropriate
       bucket, given by log2(S) where S is the size of the block.

   Free is O(1).
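
   A small worked example (illustrative only): a request for S = 5
   blocks searches bucket log2ceiling(5) = 3, whose groups are 8-15
   blocks, and splits whatever it finds there; the 5-block piece is
   returned and the remainder is re-inserted into the bucket given by
   log2 of its new size.  Conversely, freeing (after coalescing) a
   5-block group pushes it onto bucket log2(5) = 2, which holds groups
   of 4-7 blocks.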

   We cannot play this coalescing trick with mblocks, because there is
   no requirement that the bdescrs in the second and subsequent mblocks
   of an mgroup are initialised (the mgroup might be filled with a
   large array, overwriting the bdescrs for example).

   So there is a separate free list for megablocks, sorted in *address*
   order, so that we can coalesce.  Allocation in this list is best-fit
   by traversing the whole list: we don't expect this list to be long,
   and allocation/freeing of large blocks is rare; avoiding
   fragmentation is more important than performance here.

   freeGroup() might end up moving a block from free_list to
   free_mblock_list, if after coalescing we end up with a full mblock.

   checkFreeListSanity() checks all the invariants on the free lists.

   --------------------------------------------------------------------------- */

#define MAX_FREE_LIST 9

static bdescr *free_list[MAX_FREE_LIST];
static bdescr *free_mblock_list;

// free_list[i] contains blocks that are at least size 2^i, and at
// most size 2^(i+1) - 1.
//
// To find the free list in which to place a block, use log_2(size).
// To find a free block of the right size, use log_2_ceil(size).

lnat n_alloc_blocks;   // currently allocated blocks
lnat hw_alloc_blocks;  // high-water allocated blocks

/* -----------------------------------------------------------------------------
   Initialisation
   -------------------------------------------------------------------------- */

void initBlockAllocator(void)
{
    nat i;
    for (i=0; i < MAX_FREE_LIST; i++) {
        free_list[i] = NULL;
    }
    free_mblock_list = NULL;
    n_alloc_blocks = 0;
    hw_alloc_blocks = 0;
}

/* -----------------------------------------------------------------------------
   Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE void
initGroup(bdescr *head)
{
    bdescr *bd;
    nat i, n;

    n = head->blocks;
    head->free = head->start;
    head->link = NULL;
    for (i=1, bd = head+1; i < n; i++, bd++) {
        bd->free = 0;
        bd->blocks = 0;
        bd->link = head;
    }
}

// There are quicker non-loopy ways to do log_2, but we expect n to be
// usually small, and MAX_FREE_LIST is also small, so the loop version
// might well be the best choice here.
STATIC_INLINE nat
log_2_ceil(nat n)
{
    nat i, x;
    x = 1;
    for (i=0; i < MAX_FREE_LIST; i++) {
        if (x >= n) return i;
        x = x << 1;
    }
    return MAX_FREE_LIST;
}

STATIC_INLINE nat
log_2(nat n)
{
    nat i, x;
    x = n;
    for (i=0; i < MAX_FREE_LIST; i++) {
        x = x >> 1;
        if (x == 0) return i;
    }
    return MAX_FREE_LIST;
}

STATIC_INLINE void
free_list_insert (bdescr *bd)
{
    nat ln;

    ASSERT(bd->blocks < BLOCKS_PER_MBLOCK);
    ln = log_2(bd->blocks);

    dbl_link_onto(bd, &free_list[ln]);
}


STATIC_INLINE bdescr *
tail_of (bdescr *bd)
{
    return bd + bd->blocks - 1;
}

// After splitting a group, the last block of each group must have a
// tail that points to the head block, to keep our invariants for
// coalescing.
STATIC_INLINE void
setup_tail (bdescr *bd)
{
    bdescr *tail;
    tail = tail_of(bd);
    if (tail != bd) {
        tail->blocks = 0;
        tail->free = 0;
        tail->link = bd;
    }
}


// Take a free block group bd, and split off a group of size n from
// it.  Adjust the free list as necessary, and return the new group.
static bdescr *
split_free_block (bdescr *bd, nat n, nat ln)
{
    bdescr *fg; // free group

    ASSERT(bd->blocks > n);
    dbl_link_remove(bd, &free_list[ln]);
    fg = bd + bd->blocks - n; // take n blocks off the end
    fg->blocks = n;
    bd->blocks -= n;
    setup_tail(bd);
    ln = log_2(bd->blocks);
    dbl_link_onto(bd, &free_list[ln]);
    return fg;
}

static bdescr *
alloc_mega_group (nat mblocks)
{
    bdescr *best, *bd, *prev;
    nat n;

    n = MBLOCK_GROUP_BLOCKS(mblocks);

    best = NULL;
    prev = NULL;
    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
    {
        if (bd->blocks == n)
        {
            if (prev) {
                prev->link = bd->link;
            } else {
                free_mblock_list = bd->link;
            }
            initGroup(bd);
            return bd;
        }
        else if (bd->blocks > n)
        {
            if (!best || bd->blocks < best->blocks)
            {
                best = bd;
            }
        }
    }

    if (best)
    {
        // we take our chunk off the end here.
        nat best_mblocks = BLOCKS_TO_MBLOCKS(best->blocks);
        bd = FIRST_BDESCR((StgWord8*)MBLOCK_ROUND_DOWN(best) +
                          (best_mblocks-mblocks)*MBLOCK_SIZE);

        best->blocks = MBLOCK_GROUP_BLOCKS(best_mblocks - mblocks);
        initMBlock(MBLOCK_ROUND_DOWN(bd));
    }
    else
    {
        void *mblock = getMBlocks(mblocks);
        initMBlock(mblock); // only need to init the 1st one
        bd = FIRST_BDESCR(mblock);
    }
    bd->blocks = MBLOCK_GROUP_BLOCKS(mblocks);
    return bd;
}

bdescr *
allocGroup (nat n)
{
    bdescr *bd, *rem;
    nat ln;

    if (n == 0) barf("allocGroup: requested zero blocks");

    if (n >= BLOCKS_PER_MBLOCK)
    {
        nat mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(n);

        // n_alloc_blocks doesn't count the extra blocks we get in a
        // megablock group.
        n_alloc_blocks += mblocks * BLOCKS_PER_MBLOCK;
        if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

        bd = alloc_mega_group(mblocks);
        // only the bdescrs of the first MB are required to be initialised
        initGroup(bd);

        IF_DEBUG(sanity, checkFreeListSanity());
        return bd;
    }

    n_alloc_blocks += n;
    if (n_alloc_blocks > hw_alloc_blocks) hw_alloc_blocks = n_alloc_blocks;

    ln = log_2_ceil(n);

    while (ln < MAX_FREE_LIST && free_list[ln] == NULL) {
        ln++;
    }

    if (ln == MAX_FREE_LIST) {
#if 0
        if ((mblocks_allocated * MBLOCK_SIZE_W - n_alloc_blocks * BLOCK_SIZE_W) > (1024*1024)/sizeof(W_)) {
            debugBelch("Fragmentation, wanted %d blocks:", n);
            RtsFlags.DebugFlags.block_alloc = 1;
            checkFreeListSanity();
        }
#endif

        bd = alloc_mega_group(1);
        bd->blocks = n;
        initGroup(bd);                   // we know the group will fit
        rem = bd + n;
        rem->blocks = BLOCKS_PER_MBLOCK-n;
        initGroup(rem);                  // init the slop
        n_alloc_blocks += rem->blocks;
        freeGroup(rem);                  // add the slop on to the free list
        IF_DEBUG(sanity, checkFreeListSanity());
        return bd;
    }

    bd = free_list[ln];

    if (bd->blocks == n)                 // exactly the right size!
    {
        dbl_link_remove(bd, &free_list[ln]);
    }
    else if (bd->blocks > n)             // block too big...
    {
        bd = split_free_block(bd, n, ln);
    }
    else
    {
        barf("allocGroup: free list corrupted");
    }
    initGroup(bd);                       // initialise it
    IF_DEBUG(sanity, checkFreeListSanity());
    ASSERT(bd->blocks == n);
    return bd;
}

bdescr *
allocGroup_lock(nat n)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocGroup(n);
    RELEASE_SM_LOCK;
    return bd;
}

bdescr *
allocBlock(void)
{
    return allocGroup(1);
}

bdescr *
allocBlock_lock(void)
{
    bdescr *bd;
    ACQUIRE_SM_LOCK;
    bd = allocBlock();
    RELEASE_SM_LOCK;
    return bd;
}
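
// Illustrative usage sketch (no new API; these are just the functions
// above): code running outside the storage-manager lock would typically
// do something like
//
//     bdescr *bd = allocGroup_lock(4);   // a group of 4 blocks
//     // ... use bd->start .. bd->start + 4*BLOCK_SIZE_W words ...
//     freeGroup_lock(bd);
//
// while GC/storage-manager code that already holds sm_mutex calls the
// unlocked allocGroup()/freeGroup() variants directly.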

/* -----------------------------------------------------------------------------
   De-Allocation
   -------------------------------------------------------------------------- */

STATIC_INLINE bdescr *
coalesce_mblocks (bdescr *p)
{
    bdescr *q;

    q = p->link;
    if (q != NULL &&
        MBLOCK_ROUND_DOWN(q) ==
        (StgWord8*)MBLOCK_ROUND_DOWN(p) +
        BLOCKS_TO_MBLOCKS(p->blocks) * MBLOCK_SIZE) {
        // can coalesce
        p->blocks = MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(p->blocks) +
                                        BLOCKS_TO_MBLOCKS(q->blocks));
        p->link = q->link;
        return p;
    }
    return q;
}

static void
free_mega_group (bdescr *mg)
{
    bdescr *bd, *prev;

    // Find the right place in the free list.  free_mblock_list is
    // sorted by *address*, not by size as the free_list is.
    prev = NULL;
    bd = free_mblock_list;
    while (bd && bd->start < mg->start) {
        prev = bd;
        bd = bd->link;
    }

    // coalesce backwards
    if (prev)
    {
        mg->link = prev->link;
        prev->link = mg;
        mg = coalesce_mblocks(prev);
    }
    else
    {
        mg->link = free_mblock_list;
        free_mblock_list = mg;
    }
    // coalesce forwards
    coalesce_mblocks(mg);

    IF_DEBUG(sanity, checkFreeListSanity());
}


void
freeGroup(bdescr *p)
{
    nat ln;

    // Todo: not true in multithreaded GC
    // ASSERT_SM_LOCK();

    ASSERT(p->free != (P_)-1);

    p->free = (void *)-1;  /* indicates that this block is free */
    p->step = NULL;
    p->gen_no = 0;
    /* fill the block group with garbage if sanity checking is on */
    IF_DEBUG(sanity,memset(p->start, 0xaa, p->blocks * BLOCK_SIZE));

    if (p->blocks == 0) barf("freeGroup: block size is zero");

    if (p->blocks >= BLOCKS_PER_MBLOCK)
    {
        nat mblocks;

        mblocks = BLOCKS_TO_MBLOCKS(p->blocks);
        // If this is an mgroup, make sure it has the right number of blocks
        ASSERT(p->blocks == MBLOCK_GROUP_BLOCKS(mblocks));

        n_alloc_blocks -= mblocks * BLOCKS_PER_MBLOCK;

        free_mega_group(p);
        return;
    }

    ASSERT(n_alloc_blocks >= p->blocks);
    n_alloc_blocks -= p->blocks;

    // coalesce forwards
    {
        bdescr *next;
        next = p + p->blocks;
        if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(p)) && next->free == (P_)-1)
        {
            p->blocks += next->blocks;
            ln = log_2(next->blocks);
            dbl_link_remove(next, &free_list[ln]);
            if (p->blocks == BLOCKS_PER_MBLOCK)
            {
                free_mega_group(p);
                return;
            }
            setup_tail(p);
        }
    }

    // coalesce backwards
    if (p != FIRST_BDESCR(MBLOCK_ROUND_DOWN(p)))
    {
        bdescr *prev;
        prev = p - 1;
        if (prev->blocks == 0) prev = prev->link; // find the head

        if (prev->free == (P_)-1)
        {
            ln = log_2(prev->blocks);
            dbl_link_remove(prev, &free_list[ln]);
            prev->blocks += p->blocks;
            if (prev->blocks >= BLOCKS_PER_MBLOCK)
            {
                free_mega_group(prev);
                return;
            }
            p = prev;
        }
    }

    setup_tail(p);
    free_list_insert(p);

    IF_DEBUG(sanity, checkFreeListSanity());
}

void
freeGroup_lock(bdescr *p)
{
    ACQUIRE_SM_LOCK;
    freeGroup(p);
    RELEASE_SM_LOCK;
}

void
freeChain(bdescr *bd)
{
    bdescr *next_bd;
    while (bd != NULL) {
        next_bd = bd->link;
        freeGroup(bd);
        bd = next_bd;
    }
}

void
freeChain_lock(bdescr *bd)
{
    ACQUIRE_SM_LOCK;
    freeChain(bd);
    RELEASE_SM_LOCK;
}

// splitBlockGroup(bd,B) splits bd in two.  Afterward, bd will have B
// blocks, and a new block descriptor pointing to the remainder is
// returned.
bdescr *
splitBlockGroup (bdescr *bd, nat blocks)
{
    bdescr *new_bd;

    if (bd->blocks <= blocks) {
        barf("splitBlockGroup: too small");
    }

    if (bd->blocks > BLOCKS_PER_MBLOCK) {
        nat low_mblocks, high_mblocks;
        void *new_mblock;
        if ((blocks - BLOCKS_PER_MBLOCK) % (MBLOCK_SIZE / BLOCK_SIZE) != 0) {
            barf("splitBlockGroup: not a multiple of a megablock");
        }
        low_mblocks = 1 + (blocks - BLOCKS_PER_MBLOCK) / (MBLOCK_SIZE / BLOCK_SIZE);
        high_mblocks = (bd->blocks - blocks) / (MBLOCK_SIZE / BLOCK_SIZE);

        new_mblock = (void *) ((P_)MBLOCK_ROUND_DOWN(bd) + low_mblocks * MBLOCK_SIZE_W);
        initMBlock(new_mblock);
        new_bd = FIRST_BDESCR(new_mblock);
        new_bd->blocks = MBLOCK_GROUP_BLOCKS(high_mblocks);

        ASSERT(blocks + new_bd->blocks ==
               bd->blocks + BLOCKS_PER_MBLOCK - MBLOCK_SIZE/BLOCK_SIZE);
    }
    else
    {
        // NB. we're not updating all the bdescrs in the split groups to
        // point to the new heads, so this can only be used for large
        // objects which do not start in the non-head block.
        new_bd = bd + blocks;
        new_bd->blocks = bd->blocks - blocks;
    }
    bd->blocks = blocks;

    return new_bd;
}
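
// Illustrative example: for a group bd of 10 blocks (within a single
// megablock), splitBlockGroup(bd, 4) leaves bd describing the first 4
// blocks and returns a new bdescr for the remaining 6 blocks that
// immediately follow it.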

static void
initMBlock(void *mblock)
{
    bdescr *bd;
    StgWord8 *block;

    /* the first few bdescrs in an mblock are unused, so we don't want to
     * put them all on the free list.
     */
    block = FIRST_BLOCK(mblock);
    bd    = FIRST_BDESCR(mblock);

    /* Initialise the start field of each block descriptor
     */
    for (; block <= (StgWord8*)LAST_BLOCK(mblock); bd += 1,
             block += BLOCK_SIZE) {
        bd->start = (void*)block;
    }
}

/* -----------------------------------------------------------------------------
   Debugging
   -------------------------------------------------------------------------- */

#ifdef DEBUG
static void
check_tail (bdescr *bd)
{
    bdescr *tail = tail_of(bd);

    if (tail != bd)
    {
        ASSERT(tail->blocks == 0);
        ASSERT(tail->free == 0);
        ASSERT(tail->link == bd);
    }
}

void
checkFreeListSanity(void)
{
    bdescr *bd, *prev;
    nat ln, min;

    min = 1;
    for (ln = 0; ln < MAX_FREE_LIST; ln++) {
        IF_DEBUG(block_alloc, debugBelch("free block list [%d]:\n", ln));

        prev = NULL;
        for (bd = free_list[ln]; bd != NULL; prev = bd, bd = bd->link)
        {
            IF_DEBUG(block_alloc,
                     debugBelch("group at %p, length %ld blocks\n",
                                bd->start, (long)bd->blocks));
            ASSERT(bd->free == (P_)-1);
            ASSERT(bd->blocks > 0 && bd->blocks < BLOCKS_PER_MBLOCK);
            ASSERT(bd->blocks >= min && bd->blocks <= (min*2 - 1));
            ASSERT(bd->link != bd); // catch easy loops

            check_tail(bd);

            if (prev)
                ASSERT(bd->u.back == prev);
            else
                ASSERT(bd->u.back == NULL);

            {
                bdescr *next;
                next = bd + bd->blocks;
                if (next <= LAST_BDESCR(MBLOCK_ROUND_DOWN(bd)))
                {
                    ASSERT(next->free != (P_)-1);
                }
            }
        }
        min = min << 1;
    }

    prev = NULL;
    for (bd = free_mblock_list; bd != NULL; prev = bd, bd = bd->link)
    {
        IF_DEBUG(block_alloc,
                 debugBelch("mega group at %p, length %ld blocks\n",
                            bd->start, (long)bd->blocks));

        ASSERT(bd->link != bd); // catch easy loops

        if (bd->link != NULL)
        {
            // make sure the list is sorted
            ASSERT(bd->start < bd->link->start);
        }

        ASSERT(bd->blocks >= BLOCKS_PER_MBLOCK);
        ASSERT(MBLOCK_GROUP_BLOCKS(BLOCKS_TO_MBLOCKS(bd->blocks))
               == bd->blocks);

        // make sure we're fully coalesced
        if (bd->link != NULL)
        {
            ASSERT (MBLOCK_ROUND_DOWN(bd->link) !=
                    (StgWord8*)MBLOCK_ROUND_DOWN(bd) +
                    BLOCKS_TO_MBLOCKS(bd->blocks) * MBLOCK_SIZE);
        }
    }
}

nat /* BLOCKS */
countFreeList(void)
{
    bdescr *bd;
    lnat total_blocks = 0;
    nat ln;

    for (ln=0; ln < MAX_FREE_LIST; ln++) {
        for (bd = free_list[ln]; bd != NULL; bd = bd->link) {
            total_blocks += bd->blocks;
        }
    }
    for (bd = free_mblock_list; bd != NULL; bd = bd->link) {
        total_blocks += BLOCKS_PER_MBLOCK * BLOCKS_TO_MBLOCKS(bd->blocks);
        // The caller of this function, memInventory(), expects to match
        // the total number of blocks in the system against mblocks *
        // BLOCKS_PER_MBLOCK, so we must subtract the space for the
        // block descriptors from *every* mblock.
    }
    return total_blocks;
}

void
markBlocks (bdescr *bd)
{
    for (; bd != NULL; bd = bd->link) {
        bd->flags |= BF_KNOWN;
    }
}

void
reportUnmarkedBlocks (void)
{
    void *mblock;
    bdescr *bd;

    debugBelch("Unreachable blocks:\n");
    for (mblock = getFirstMBlock(); mblock != NULL;
         mblock = getNextMBlock(mblock)) {
        for (bd = FIRST_BDESCR(mblock); bd <= LAST_BDESCR(mblock); ) {
            if (!(bd->flags & BF_KNOWN) && bd->free != (P_)-1) {
                debugBelch("  %p\n",bd);
            }
            if (bd->blocks >= BLOCKS_PER_MBLOCK) {
                mblock = (StgWord8*)mblock +
                    (BLOCKS_TO_MBLOCKS(bd->blocks) - 1) * MBLOCK_SIZE;
                break;
            } else {
                bd += bd->blocks;
            }
        }
    }
}

#endif