/*
 * GENerational Conservative Garbage Collector for SBCL x86
 */

/*
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 */

/*
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 *   <http://www.cs.utexas.edu/users/oops/papers.html>
 * as
 *   <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 */
#include "interrupt.h"
#include "gc-internal.h"
#include "genesis/vector.h"
#include "genesis/weak-pointer.h"
#include "genesis/simple-fun.h"
#include "genesis/static-symbols.h"
#include "genesis/symbol.h"

/* assembly language stub that executes trap_PendingInterrupt */
void do_pending_interrupt(void);
/* the number of actual generations. (The number of 'struct
 * generation' objects is one more than this, because one object
 * serves as scratch when GC'ing.) */
#define NUM_GENERATIONS 6

/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;
/* Should we unmap a page and re-mmap it to have it zero filled? */
#if defined(__FreeBSD__) || defined(__OpenBSD__)
/* comment from cmucl-2.4.8: This can waste a lot of swap on FreeBSD
 * so don't unmap there.
 *
 * The CMU CL comment didn't specify a version, but was probably an
 * old version of FreeBSD (pre-4.0), so this might no longer be true.
 * OTOH, if it is true, this behavior might exist on OpenBSD too, so
 * for now we don't unmap there either. -- WHN 2001-04-07 */
boolean gencgc_unmap_zero = 0;
#else
boolean gencgc_unmap_zero = 1;
#endif
/* the minimum size (in bytes) for a large object */
unsigned large_object_size = 4 * 4096;

/* the verbosity level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1. */
unsigned gencgc_verbose = (QSHOW ? 1 : 0);

/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */
/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to NUM_GENERATIONS to disable this kind of check. */
int verify_gens = NUM_GENERATIONS;

/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
static unsigned long auto_gc_trigger = 0;

/* the source and destination generations. These are set before a GC starts
 * scavenging. */
static int from_space;
static int new_space;
/* FIXME: It would be nice to use this symbolic constant instead of
 * bare 4096 almost everywhere. We could also use an assertion that
 * it's equal to getpagesize(). */
#define PAGE_BYTES 4096
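
/* A sketch of the assertion suggested in the FIXME above (not wired in
 * anywhere; the function name is made up for illustration). It assumes
 * <unistd.h>'s getpagesize(). */
#if 0
static void
assert_page_size_sanity(void)
{
    /* The page table and write-protection logic assume 4096-byte
     * pages, so catch any mismatch with the OS at startup. */
    gc_assert(PAGE_BYTES == getpagesize());
}
#endif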
/* An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 * NUM_PAGES is set from the size of the dynamic space. */
struct page page_table[NUM_PAGES];

/* To map addresses to page structures the address of the first page
 * is needed. */
static void *heap_base = NULL;

/* Calculate the start address for the given page number. */
inline void *
page_address(int page_num)
{
    return (heap_base + (page_num * 4096));
}

/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline int
find_page_index(void *addr)
{
    int index = addr-heap_base;

    if (index >= 0) {
        index = ((unsigned int)index)/4096;
        if (index < NUM_PAGES)
            return (index);
    }

    return (-1);
}
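
/* Illustration (not part of the collector): page_address() and
 * find_page_index() are inverses, and any address within a page maps
 * back to that page's index, which is what makes the conservative
 * pointer checks cheap. */
#if 0
static void
page_mapping_example(void)
{
    void *start = page_address(5);
    gc_assert(find_page_index(start) == 5);
    gc_assert(find_page_index((char *)start + 100) == 5);
    /* An address outside the dynamic space yields -1. */
}
#endif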
/* a structure to hold the state of a generation */
struct generation {

    /* the first page that gc_alloc() checks on its next call */
    int alloc_start_page;

    /* the first page that gc_alloc_unboxed() checks on its next call */
    int alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    int alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    int alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    int bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    int gc_trigger;

    /* to calculate a new level for gc_trigger */
    int bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the average age after which a GC will raise objects to the
     * next generation */
    int trigger_age;

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    int cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double min_av_mem_age;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
struct generation generations[NUM_GENERATIONS+1];

/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
 *
 * The default of (NUM_GENERATIONS-1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;

/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
static int last_free_page;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static int
count_write_protect_generation_pages(int generation)
{
    int i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != FREE_PAGE)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}

/* Count the number of pages within the given generation. */
static int
count_generation_pages(int generation)
{
    int i, count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != 0)
            && (page_table[i].gen == generation))
            count++;
    return count;
}

/* Count the number of dont_move pages. */
static int
count_dont_move_pages(void)
{
    int i, count = 0;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].dont_move != 0)) {
            count++;
        }
    }
    return count;
}

/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static int
count_generation_bytes_allocated (int gen)
{
    int i, result = 0;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}
/* Return the average age of the memory in a generation. */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
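
/* Sketch (simplified; the collector's actual policy lives elsewhere in
 * this file) of how gc_trigger and min_av_mem_age combine: a generation
 * whose allocation has passed its trigger is a candidate for
 * collection, but its objects are only raised when the memory is, on
 * average, old enough. The helper name is invented for illustration. */
#if 0
static boolean
generation_raise_p(int gen)
{
    return (generations[gen].bytes_allocated
            >= generations[gen].gc_trigger)
        && (gen_av_mem_age(gen) > generations[gen].min_av_mem_age);
}
#endif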
/* The verbose argument controls how much to print: 0 for normal
 * level of detail; 1 for debugging. */
static void
print_generation_stats(int verbose) /* FIXME: should take FILE argument */
{
    int i, gens;
    int fpu_state[27];

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);

    /* number of generations to print */
    if (verbose)
        gens = NUM_GENERATIONS+1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr,
            " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_table[j].allocated & BOXED_PAGE) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }

                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_table[j].allocated & UNBOXED_PAGE) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == count_generation_bytes_allocated(i));
        fprintf(stderr,
                " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4f\n",
                i,
                boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                (count_generation_pages(i)*4096
                 - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr," Total bytes allocated=%ld\n", bytes_allocated);

    fpu_restore(fpu_state);
}
/*
 * allocation routines
 */

/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties
 * e.g. boxed/unboxed, generation, ages; there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to clean up the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g. to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer. The actual space used by the region is not reflected
 * in the page tables until it is closed. It can't be scavenged until
 * closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further, it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
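
/* A minimal sketch of the fast path that such a region enables: an
 * allocation is just a pointer bump and a bounds check, with no page
 * table work until the region is closed. (Compare
 * gc_alloc_with_region() below; this helper is illustrative only.) */
#if 0
static void *
region_bump_alloc(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer;
        region->free_pointer = new_free_pointer;
        return new_obj;           /* hit: no page table traffic */
    }
    return NULL;                  /* miss: open a new region */
}
#endif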
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* The generation currently being allocated to. */
static int gc_alloc_generation;

/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables(), and will thus be in an empty state.
 *
 * To assist the scavenging functions write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region are set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int bytes_found;
    int i;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));

    if (unboxed)
        first_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    else
        first_page =
            generations[gc_alloc_generation].alloc_start_page;
    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,alloc_region);
    bytes_found=(4096 - page_table[first_page].bytes_used)
        + 4096*(last_page-first_page);

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *)alloc_region->start_addr;
             p < (int *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                /* KLUDGE: It would be nice to use %lx and explicit casts
                 * (long) in code like this, so that it is less likely to
                 * break randomly when running on a machine with different
                 * word sizes. -- WHN 19991129 */
                lose("The new region at %x is not zero.", p);
            }
        }
    }

    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE;
        else
            page_table[first_page].allocated = BOXED_PAGE;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].first_object_offset = 0;
    }

    if (unboxed)
        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
    else
        gc_assert(page_table[first_page].allocated == BOXED_PAGE);
    page_table[first_page].allocated |= OPEN_REGION_PAGE;

    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == 0);

    for (i = first_page+1; i <= last_page; i++) {
        if (unboxed)
            page_table[i].allocated = UNBOXED_PAGE;
        else
            page_table[i].allocated = BOXED_PAGE;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
        page_table[i].allocated |= OPEN_REGION_PAGE;
    }

    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*4096));
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing full scavenge pass.
 *
 * The new_area structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
struct new_area {
    int page;
    int offset;
    int size;
};
static struct new_area (*new_areas)[];
static int new_areas_index;
int max_new_areas;
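
/* Sketch of the recording policy described above, pulled out as a
 * predicate for illustration (the real logic is the switch in
 * add_new_area() below): */
#if 0
static int
record_this_area_p(int first_page)
{
    switch (record_new_objects) {
    case 0:                     /* record nothing */
        return 0;
    case 1:                     /* record only early pages */
        return (first_page <= new_areas_ignore_page);
    case 2:                     /* record everything */
        return 1;
    default:
        return 0;
    }
}
#endif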
/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start,c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = 4096*first_page + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        unsigned area_end =
            4096*((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page,
                   offset,
                   size));*/
            (*new_areas)[i].size += size;
            return;
        }
    }

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

    FSHOW((stderr,
           "/gc_alloc_update_page_tables() to gen %d:\n",
           gc_alloc_generation));

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));

        /* All the pages used need to be updated */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);

        if (unboxed)
            gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
        else
            gc_assert(page_table[first_page].allocated == BOXED_PAGE);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
            bytes_used = 4096;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used. */
        while (more) {
            page_table[next_page].allocated &= ~(OPEN_REGION_PAGE);
            if (unboxed)
                gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
            else
                gc_assert(page_table[next_page].allocated == BOXED_PAGE);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = (alloc_region->free_pointer
                               - page_address(next_page)))>4096) {
                bytes_used = 4096;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt- orig_first_page_bytes_used) == region_size);

        /* Set the generations alloc restart page to the last page of
         * the region. */
        if (unboxed)
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page-1;
        else
            generations[gc_alloc_generation].alloc_start_page = next_page-1;

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* There are no bytes allocated. Unallocate the first_page if
         * there are 0 bytes_used. */
        page_table[first_page].allocated &= ~(OPEN_REGION_PAGE);
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE;
        next_page++;
    }

    /* Reset the alloc_region. */
    gc_set_region_empty(alloc_region);
}
static inline void *gc_quick_alloc(int nbytes);

/* Allocate a possibly large object. */
void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);

    FSHOW((stderr, "/alloc_large %d\n", nbytes));

    FSHOW((stderr,
           "/gc_alloc_large() for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* If the object is small, and there is room in the current region
       then allocate it in the current region. */
    if (!large
        && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
        return gc_quick_alloc(nbytes);

    /* To allow the allocation of small objects without the danger of
       using a page in the current boxed region, the search starts after
       the current boxed free region. XX could probably keep a page
       index ahead of the current region and bumped up here to save a
       lot of re-scanning. */
    if (unboxed)
        first_page =
            generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        first_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (first_page <= alloc_region->last_page) {
        first_page = alloc_region->last_page+1;
    }

    last_page=gc_find_freeish_pages(&first_page,nbytes,unboxed,0);

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * first_object_offset. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE;
        else
            page_table[first_page].allocated = BOXED_PAGE;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].first_object_offset = 0;
        page_table[first_page].large_object = large;
    }

    if (unboxed)
        gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
    else
        gc_assert(page_table[first_page].allocated == BOXED_PAGE);
    gc_assert(page_table[first_page].gen == gc_alloc_generation);
    gc_assert(page_table[first_page].large_object == large);

    byte_cnt = 0;

    /* Calc. the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
        bytes_used = 4096;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * first_object_offset pointer to the start of the region, and
     * set the bytes_used. */
    while (more) {
        gc_assert(page_table[next_page].allocated == FREE_PAGE);
        gc_assert(page_table[next_page].bytes_used == 0);
        if (unboxed)
            page_table[next_page].allocated = UNBOXED_PAGE;
        else
            page_table[next_page].allocated = BOXED_PAGE;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = large;

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - 4096*(next_page-first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
            bytes_used = 4096;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*4096));
    }

    return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
int
gc_find_freeish_pages(int *restart_page_ptr, int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    /* if alloc_region is 0, we assume this is for a potentially large
       object */
    int first_page;
    int last_page;
    int region_size;
    int restart_page=*restart_page_ptr;
    int bytes_found;
    int num_pages;
    int large = !alloc_region && (nbytes >= large_object_size);

    /* Search for a contiguous free space of at least nbytes. If it's a
       large object then align it on a page boundary by searching for a
       free page. */

    /* To allow the allocation of small objects without the danger of
       using a page in the current boxed region, the search starts after
       the current boxed free region. XX could probably keep a page
       index ahead of the current region and bumped up here to save a
       lot of re-scanning. */
    do {
        first_page = restart_page;
        if (large)
            while ((first_page < NUM_PAGES)
                   && (page_table[first_page].allocated != FREE_PAGE))
                first_page++;
        else
            while (first_page < NUM_PAGES) {
                if(page_table[first_page].allocated == FREE_PAGE)
                    break;
                /* I don't know why we need the gen=0 test, but it
                 * breaks randomly if that's omitted -dan 2003.02.26
                 */
                if((page_table[first_page].allocated ==
                    (unboxed ? UNBOXED_PAGE : BOXED_PAGE)) &&
                   (page_table[first_page].large_object == 0) &&
                   (gc_alloc_generation == 0) &&
                   (page_table[first_page].gen == gc_alloc_generation) &&
                   (page_table[first_page].bytes_used < (4096-32)) &&
                   (page_table[first_page].write_protected == 0) &&
                   (page_table[first_page].dont_move == 0))
                    break;
                first_page++;
            }

        if (first_page >= NUM_PAGES) {
            fprintf(stderr,
                    "Argh! gc_find_free_space failed (first_page), nbytes=%d.\n",
                    nbytes);
            print_generation_stats(1);
            lose(NULL);
        }

        gc_assert(page_table[first_page].write_protected == 0);

        last_page = first_page;
        bytes_found = 4096 - page_table[first_page].bytes_used;
        num_pages = 1;
        while (((bytes_found < nbytes)
                || (alloc_region && (num_pages < 2)))
               && (last_page < (NUM_PAGES-1))
               && (page_table[last_page+1].allocated == FREE_PAGE)) {
            last_page++;
            num_pages++;
            bytes_found += 4096;
            gc_assert(page_table[last_page].write_protected == 0);
        }

        region_size = (4096 - page_table[first_page].bytes_used)
            + 4096*(last_page-first_page);

        gc_assert(bytes_found == region_size);
        restart_page = last_page + 1;
    } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));

    /* Check for a failure */
    if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
        fprintf(stderr,
                "Argh! gc_find_freeish_pages failed (restart_page), nbytes=%d.\n",
                nbytes);
        print_generation_stats(1);
        lose(NULL);
    }
    *restart_page_ptr=first_page;
    return last_page;
}
/* Allocate bytes. All the rest of the special-purpose allocation
 * functions will eventually call this (instead of just duplicating
 * parts of its code) */
void *
gc_alloc_with_region(int nbytes,int unboxed_p, struct alloc_region *my_region,
                     int quick_p)
{
    void *new_free_pointer;

    /* FSHOW((stderr, "/gc_alloc %d\n", nbytes)); */

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;

        /* Unless a `quick' alloc was requested, check whether the
           alloc region is almost empty. */
        if (!quick_p &&
            (my_region->end_addr - my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(unboxed_p, my_region);
            /* Set up a new region. */
            gc_alloc_new_region(32 /*bytes*/, unboxed_p, my_region);
        }

        return((void *)new_obj);
    }

    /* Else not enough free space in the current region. */

    /* If there is some room left in the current region, enough to be worth
     * saving, then allocate a large object. */
    /* FIXME: "32" should be a named parameter. */
    if ((my_region->end_addr-my_region->free_pointer) > 32)
        return gc_alloc_large(nbytes, unboxed_p, my_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(unboxed_p, my_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, unboxed_p, my_region);

    /* Should now be enough room. */

    /* Check whether there is room in the current region. */
    new_free_pointer = my_region->free_pointer + nbytes;

    if (new_free_pointer <= my_region->end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = my_region->free_pointer;
        my_region->free_pointer = new_free_pointer;
        /* Check whether the current region is almost empty. */
        if ((my_region->end_addr - my_region->free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(unboxed_p, my_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, unboxed_p, my_region);
        }

        return((void *)new_obj);
    }

    /* shouldn't happen */
    gc_assert(0);
    return((void *) NIL); /* dummy value: return something ... */
}
static void *
gc_general_alloc(int nbytes,int unboxed_p,int quick_p)
{
    struct alloc_region *my_region =
        unboxed_p ? &unboxed_region : &boxed_region;
    return gc_alloc_with_region(nbytes,unboxed_p, my_region,quick_p);
}

void *
gc_alloc(int nbytes,int unboxed_p)
{
    /* this is the only function that the external interface to
     * allocation presently knows how to call: Lisp code will never
     * allocate large objects, or to unboxed space, or `quick'ly.
     * Any of that stuff will only ever happen inside of GC */
    return gc_general_alloc(nbytes,unboxed_p,0);
}

/* Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the object is returned. */
static inline void *
gc_quick_alloc(int nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}

/* Allocate space for the possibly large boxed object. If it is a
 * large object then do a large alloc else use gc_quick_alloc. Note
 * that gc_quick_alloc will eventually fall through to
 * gc_general_alloc which may allocate the object in a large way
 * anyway, but based on decisions about the free space in the current
 * region, not the object size itself */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, ALLOC_BOXED, &boxed_region);
    else
        return gc_general_alloc(nbytes,ALLOC_BOXED,ALLOC_QUICK);
}

static inline void *
gc_alloc_unboxed(int nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,0);
}

static inline void *
gc_quick_alloc_unboxed(int nbytes)
{
    return gc_general_alloc(nbytes,ALLOC_UNBOXED,ALLOC_QUICK);
}

/* Allocate space for the object. If it is a large object then do a
 * large alloc else allocate from the current region. If there is not
 * enough free space then call general gc_alloc_unboxed() to do the job.
 *
 * A pointer to the start of the object is returned. */
static inline void *
gc_quick_alloc_large_unboxed(int nbytes)
{
    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes,ALLOC_UNBOXED,&unboxed_region);
    else
        return gc_quick_alloc_unboxed(nbytes);
}
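
/* Illustration of how the entry points above divide the work
 * (illustrative calls only; sizes are byte counts, and on x86 a cons
 * cell is two 4-byte words): */
#if 0
static void
allocation_entry_points_example(void)
{
    /* What the external interface uses: boxed, non-quick. */
    void *cons = gc_alloc(2*4, 0);
    /* What the GC's transporters use internally: quick variants that
     * skip the region-nearly-empty bookkeeping. */
    void *vec = gc_quick_alloc(16*4);
    void *bignum = gc_quick_alloc_unboxed(4*4);
    /* Objects of at least large_object_size bypass the regions. */
    void *big = gc_quick_alloc_large(2*large_object_size);
}
#endif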
/*
 * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
 */

extern int (*scavtab[256])(lispobj *where, lispobj object);
extern lispobj (*transother[256])(lispobj object);
extern int (*sizetab[256])(lispobj *where);

/* Copy a large boxed object. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected. */
lispobj
copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {

        /* Promote the object. */

        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;

        /* Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*4;
        while (remaining_bytes > 4096) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].allocated == BOXED_PAGE);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset==
                      -4096*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == 4096);

            page_table[next_page].gen = new_space;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[next_page].write_protected) {
                os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
                page_table[next_page].write_protected = 0;
            }
            remaining_bytes -= 4096;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk
         * so there may be more unused pages which will be freed. */

        /* The object may have shrunk but shouldn't have grown. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        gc_assert(page_table[next_page].allocated == BOXED_PAGE);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == 4096) &&
               (page_table[next_page].gen == from_space) &&
               (page_table[next_page].allocated == BOXED_PAGE) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*4096)) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected as they
             * should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4*nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page,0,nwords*4);

        return(object);
    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords*4);

        dest = new;
        source = (lispobj *) native_pointer(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/* to copy unboxed objects */
lispobj
copy_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = lowtag_of(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords*4);

    dest = new;
    source = (lispobj *) native_pointer(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return Lisp pointer of new object. */
    return ((lispobj) new) | tag;
}
/* to copy large unboxed objects
 *
 * If the object is in a large object region then it is simply
 * promoted, else it is copied. If it's large enough then it's copied
 * to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * KLUDGE: There's a lot of cut-and-paste duplication between this
 * function and copy_large_object(..). -- WHN 20000619 */
lispobj
copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(is_lisp_pointer(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose)
        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*4));

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*4;
        while (remaining_bytes > 4096) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
                      || (page_table[next_page].allocated == BOXED_PAGE));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset==
                      -4096*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == 4096);

            page_table[next_page].gen = new_space;
            page_table[next_page].allocated = UNBOXED_PAGE;
            remaining_bytes -= 4096;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        page_table[next_page].allocated = UNBOXED_PAGE;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == 4096) &&
               (page_table[next_page].gen == from_space) &&
               ((page_table[next_page].allocated == UNBOXED_PAGE)
                || (page_table[next_page].allocated == BOXED_PAGE)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*4096)) {
            /* Checks out OK, free the page. Don't need to bother zeroing
             * pages as this should have been done before shrinking the
             * object. These pages shouldn't be write-protected, even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose)
            FSHOW((stderr,
                   "/copy_large_unboxed bytes_freed=%d\n",
                   bytes_freed));

        generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4*nwords;
        bytes_allocated -= bytes_freed;

        return(object);
    } else {
        /* Get tag of object. */
        tag = lowtag_of(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords*4);

        dest = new;
        source = (lispobj *) native_pointer(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/*
 * code and code-related objects
 */

static lispobj trans_fun_header(lispobj object);
static lispobj trans_boxed(lispobj object);

/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area are checked. */
static void
sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    void *p;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = (void *)code + 5*4;
    constants_end_addr = (void *)code + nheader_words*4;
    code_start_addr = (void *)code + nheader_words*4;
    code_end_addr = (void *)code + nwords*4;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
#if QSHOW
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
#endif

        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the code area of the code object. */
        if ((data >= (code_start_addr-displacement))
            && (data < (code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
                /* Skip the function header */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }

        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
           reference to within the constant vector. Constant references
           will be aligned. */
        if ((data >= (constants_start_addr-displacement))
            && (data < (constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* the case of MOV eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                /* XX Check this */
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* the case of CMP m32,reg */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }

    /* If anything was found, print some information on the code
     * object. */
    if (fixup_found) {
        FSHOW((stderr,
               "/compiled code object at %x: header words = %d, code words = %d\n",
               code, nheader_words, ncode_words));
        FSHOW((stderr,
               "/const start = %x, end = %x\n",
               constants_start_addr, constants_end_addr));
        FSHOW((stderr,
               "/code start = %x, end = %x\n",
               code_start_addr, code_end_addr));
    }
}
void
gencgc_apply_code_fixups(struct code *old_code, struct code *new_code)
{
    int nheader_words, ncode_words, nwords;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    lispobj fixups = NIL;
    unsigned displacement = (unsigned)new_code - (unsigned)old_code;
    struct vector *fixups_vector;

    ncode_words = fixnum_value(new_code->code_size);
    nheader_words = HeaderValue(*(lispobj *)new_code);
    nwords = ncode_words + nheader_words;
    /* FSHOW((stderr,
             "/compiled code object at %x: header words = %d, code words = %d\n",
             new_code, nheader_words, ncode_words)); */
    constants_start_addr = (void *)new_code + 5*4;
    constants_end_addr = (void *)new_code + nheader_words*4;
    code_start_addr = (void *)new_code + nheader_words*4;
    code_end_addr = (void *)new_code + nwords*4;
    /*
    FSHOW((stderr,
           "/const start = %x, end = %x\n",
           constants_start_addr,constants_end_addr));
    FSHOW((stderr,
           "/code start = %x; end = %x\n",
           code_start_addr,code_end_addr));
    */

    /* The first constant should be a pointer to the fixups for this
       code object. Check. */
    fixups = new_code->constants[0];

    /* It will be 0 or the unbound-marker if there are no fixups, and
     * will be an other-pointer if it is valid. */
    if ((fixups == 0) || (fixups == UNBOUND_MARKER_WIDETAG) ||
        !is_lisp_pointer(fixups)) {
        /* Check for possible errors. */
        if (check_code_fixups)
            sniff_code_object(new_code, displacement);

        /*fprintf(stderr,"Fixups for code object not found!?\n");
          fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
          new_code, nheader_words, ncode_words);
          fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
          constants_start_addr,constants_end_addr,
          code_start_addr,code_end_addr);*/
        return;
    }

    fixups_vector = (struct vector *)native_pointer(fixups);

    /* Could be pointing to a forwarding pointer. */
    if (is_lisp_pointer(fixups) &&
        (find_page_index((void*)fixups_vector) != -1) &&
        (fixups_vector->header == 0x01)) {
        /* If so, then follow it. */
        /*SHOW("following pointer to a forwarding pointer");*/
        fixups_vector = (struct vector *)native_pointer((lispobj)fixups_vector->length);
    }

    /*SHOW("got fixups");*/

    if (widetag_of(fixups_vector->header) ==
        SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG) {
        /* Got the fixups for the code block. Now work through the vector,
           and apply a fixup at each address. */
        int length = fixnum_value(fixups_vector->length);
        int i;
        for (i = 0; i < length; i++) {
            unsigned offset = fixups_vector->data[i];
            /* Now check the current value of offset. */
            unsigned old_value =
                *(unsigned *)((unsigned)code_start_addr + offset);

            /* If it's within the old_code object then it must be an
             * absolute fixup (relative ones are not saved) */
            if ((old_value >= (unsigned)old_code)
                && (old_value < ((unsigned)old_code + nwords*4)))
                /* So add the displacement. */
                *(unsigned *)((unsigned)code_start_addr + offset) =
                    old_value + displacement;
            else
                /* It is outside the old code object so it must be a
                 * relative fixup (absolute fixups are not saved). So
                 * subtract the displacement. */
                *(unsigned *)((unsigned)code_start_addr + offset) =
                    old_value - displacement;
        }
    }

    /* Check for possible errors. */
    if (check_code_fixups) {
        sniff_code_object(new_code,displacement);
    }
}
static lispobj
trans_boxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_object(object, length);
}

static lispobj
trans_unboxed_large(lispobj object)
{
    lispobj header;
    unsigned long length;

    gc_assert(is_lisp_pointer(object));

    header = *((lispobj *) native_pointer(object));
    length = HeaderValue(header) + 1;
    length = CEILING(length, 2);

    return copy_large_unboxed_object(object, length);
}
/*
 * vector-like objects
 */

/* FIXME: What does this mean? */
int gencgc_hash = 1;
1810 scav_vector(lispobj *where, lispobj object)
1812 unsigned int kv_length;
1814 unsigned int length = 0; /* (0 = dummy to stop GCC warning) */
1815 lispobj *hash_table;
1816 lispobj empty_symbol;
1817 unsigned int *index_vector = NULL; /* (NULL = dummy to stop GCC warning) */
1818 unsigned int *next_vector = NULL; /* (NULL = dummy to stop GCC warning) */
1819 unsigned int *hash_vector = NULL; /* (NULL = dummy to stop GCC warning) */
1821 unsigned next_vector_length = 0;
1823 /* FIXME: A comment explaining this would be nice. It looks as
1824 * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
1825 * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
1826 if (HeaderValue(object) != subtype_VectorValidHashing)
1830 /* This is set for backward compatibility. FIXME: Do we need
1833 (subtype_VectorMustRehash<<N_WIDETAG_BITS) | SIMPLE_VECTOR_WIDETAG;
1837 kv_length = fixnum_value(where[1]);
1838 kv_vector = where + 2; /* Skip the header and length. */
1839 /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
1841 /* Scavenge element 0, which may be a hash-table structure. */
1842 scavenge(where+2, 1);
1843 if (!is_lisp_pointer(where[2])) {
1844 lose("no pointer at %x in hash table", where[2]);
1846 hash_table = (lispobj *)native_pointer(where[2]);
1847 /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
1848 if (widetag_of(hash_table[0]) != INSTANCE_HEADER_WIDETAG) {
1849 lose("hash table not instance (%x at %x)", hash_table[0], hash_table);
1852 /* Scavenge element 1, which should be some internal symbol that
1853 * the hash table code reserves for marking empty slots. */
1854 scavenge(where+3, 1);
1855 if (!is_lisp_pointer(where[3])) {
1856 lose("not empty-hash-table-slot symbol pointer: %x", where[3]);
1858 empty_symbol = where[3];
1859 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
1860 if (widetag_of(*(lispobj *)native_pointer(empty_symbol)) !=
1861 SYMBOL_HEADER_WIDETAG) {
1862 lose("not a symbol where empty-hash-table-slot symbol expected: %x",
1863 *(lispobj *)native_pointer(empty_symbol));
1866 /* Scavenge hash table, which will fix the positions of the other
1867 * needed objects. */
1868 scavenge(hash_table, 16);
1870 /* Cross-check the kv_vector. */
1871 if (where != (lispobj *)native_pointer(hash_table[9])) {
1872 lose("hash_table table!=this table %x", hash_table[9]);
1876 weak_p_obj = hash_table[10];
1880 lispobj index_vector_obj = hash_table[13];
1882 if (is_lisp_pointer(index_vector_obj) &&
1883 (widetag_of(*(lispobj *)native_pointer(index_vector_obj)) ==
1884 SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
1885 index_vector = ((unsigned int *)native_pointer(index_vector_obj)) + 2;
1886 /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
1887 length = fixnum_value(((unsigned int *)native_pointer(index_vector_obj))[1]);
1888 /*FSHOW((stderr, "/length = %d\n", length));*/
1890 lose("invalid index_vector %x", index_vector_obj);
1896 lispobj next_vector_obj = hash_table[14];
1898 if (is_lisp_pointer(next_vector_obj) &&
1899 (widetag_of(*(lispobj *)native_pointer(next_vector_obj)) ==
1900 SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
1901 next_vector = ((unsigned int *)native_pointer(next_vector_obj)) + 2;
1902 /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
1903 next_vector_length = fixnum_value(((unsigned int *)native_pointer(next_vector_obj))[1]);
1904 /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
1906 lose("invalid next_vector %x", next_vector_obj);
1910 /* maybe hash vector */
1912 /* FIXME: This bare "15" offset should become a symbolic
1913 * expression of some sort. And all the other bare offsets
1914 * too. And the bare "16" in scavenge(hash_table, 16). And
1915 * probably other stuff too. Ugh.. */
1916 lispobj hash_vector_obj = hash_table[15];
1918 if (is_lisp_pointer(hash_vector_obj) &&
1919 (widetag_of(*(lispobj *)native_pointer(hash_vector_obj))
1920 == SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG)) {
1921 hash_vector = ((unsigned int *)native_pointer(hash_vector_obj)) + 2;
1922 /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
1923 gc_assert(fixnum_value(((unsigned int *)native_pointer(hash_vector_obj))[1])
1924 == next_vector_length);
1927 /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
1931 /* These lengths could be different as the index_vector can be a
1932 * different length from the others, a larger index_vector could help
1933 * reduce collisions. */
1934 gc_assert(next_vector_length*2 == kv_length);
1936 /* now all set up.. */
1938 /* Work through the KV vector. */
1941 for (i = 1; i < next_vector_length; i++) {
1942 lispobj old_key = kv_vector[2*i];
1943 unsigned int old_index = (old_key & 0x1fffffff)%length;
1945 /* Scavenge the key and value. */
1946 scavenge(&kv_vector[2*i],2);
1948 /* Check whether the key has moved and is EQ based. */
1950 lispobj new_key = kv_vector[2*i];
1951 unsigned int new_index = (new_key & 0x1fffffff)%length;
1953 if ((old_index != new_index) &&
1954 ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
1955 ((new_key != empty_symbol) ||
1956 (kv_vector[2*i] != empty_symbol))) {
1959 "* EQ key %d moved from %x to %x; index %d to %d\n",
1960 i, old_key, new_key, old_index, new_index));*/
1962 if (index_vector[old_index] != 0) {
1963 /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
1965 /* Unlink the key from the old_index chain. */
1966 if (index_vector[old_index] == i) {
1967 /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
1968 index_vector[old_index] = next_vector[i];
1969 /* Link it into the needing rehash chain. */
1970 next_vector[i] = fixnum_value(hash_table[11]);
1971 hash_table[11] = make_fixnum(i);
1974 unsigned prior = index_vector[old_index];
1975 unsigned next = next_vector[prior];
1977 /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
1980 /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
1983 next_vector[prior] = next_vector[next];
1984 /* Link it into the needing rehash
1985 * chain. */
1986 next_vector[next] =
1987 fixnum_value(hash_table[11]);
1988 hash_table[11] = make_fixnum(next);
1993 next = next_vector[next];
2001 return (CEILING(kv_length + 2, 2));
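/* Illustrative sketch, not part of the collector: how the loop above
 * derives an EQ-based key's bucket. The 0x1fffffff mask and the
 * modulus mirror the old_index/new_index computations; since the hash
 * is address-derived, a key that GC moves lands in a different bucket
 * and must be queued for rehash. */
#if 0
static unsigned
eq_bucket_sketch(lispobj key, unsigned length)
{
    return (key & 0x1fffffff) % length;
}
#endif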
2010 /* XX This is a hack adapted from cgc.c. These don't work too
2011 * efficiently with the gencgc as a list of the weak pointers is
2012 * maintained within the objects, which causes writes to the pages. A
2013 * limited attempt is made to avoid unnecessary writes, but this needs
2014 * a re-think. */
2015 #define WEAK_POINTER_NWORDS \
2016 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
2019 scav_weak_pointer(lispobj *where, lispobj object)
2021 struct weak_pointer *wp = weak_pointers;
2022 /* Push the weak pointer onto the list of weak pointers.
2023 * Do I have to watch for duplicates? Originally this was
2024 * part of trans_weak_pointer but that didn't work in the
2025 * case where the WP was in a promoted region.
2026 */
2028 /* Check whether it's already in the list. */
2029 while (wp != NULL) {
2030 if (wp == (struct weak_pointer*)where) {
2036 /* Add it to the start of the list. */
2037 wp = (struct weak_pointer*)where;
2038 if (wp->next != weak_pointers) {
2039 wp->next = weak_pointers;
2041 /*SHOW("avoided write to weak pointer");*/
2046 /* Do not let GC scavenge the value slot of the weak pointer.
2047 * (That is why it is a weak pointer.) */
2049 return WEAK_POINTER_NWORDS;
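/* Illustrative sketch of the deferred pass (scan_weak_pointers(),
 * called from garbage_collect_generation() below) that consumes the
 * list built here. Simplified: the real routine also checks that the
 * referent lies in from_space before deciding anything. */
#if 0
static void
scan_weak_pointers_sketch(void)
{
    struct weak_pointer *wp;
    for (wp = weak_pointers; wp != NULL; wp = wp->next) {
        lispobj value = wp->value;
        lispobj *first_pointer;
        if (!is_lisp_pointer(value))
            continue;
        first_pointer = (lispobj *)native_pointer(value);
        if (*first_pointer == 0x01) {
            /* The referent survived and was transported: follow
             * its forwarding pointer. */
            wp->value = first_pointer[1];
        } else {
            /* The referent died: break the weak pointer. */
            wp->value = NIL;
            wp->broken = T;
        }
    }
}
#endif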
2053 /* Scan an area looking for an object which encloses the given pointer.
2054 * Return the object start on success or NULL on failure. */
2056 search_space(lispobj *start, size_t words, lispobj *pointer)
2060 lispobj thing = *start;
2062 /* If thing is an immediate then this is a cons. */
2063 if (is_lisp_pointer(thing)
2064 || ((thing & 3) == 0) /* fixnum */
2065 || (widetag_of(thing) == BASE_CHAR_WIDETAG)
2066 || (widetag_of(thing) == UNBOUND_MARKER_WIDETAG))
2069 count = (sizetab[widetag_of(thing)])(start);
2071 /* Check whether the pointer is within this object. */
2072 if ((pointer >= start) && (pointer < (start+count))) {
2074 /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/
2078 /* Round up the count. */
2079 count = CEILING(count,2);
2088 search_read_only_space(lispobj *pointer)
2090 lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
2091 lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
2092 if ((pointer < start) || (pointer >= end))
2093 return NULL;
2094 return (search_space(start, (pointer+2)-start, pointer));
2098 search_static_space(lispobj *pointer)
2100 lispobj* start = (lispobj*)STATIC_SPACE_START;
2101 lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
2102 if ((pointer < start) || (pointer >= end))
2103 return NULL;
2104 return (search_space(start, (pointer+2)-start, pointer));
2107 /* a faster version for searching the dynamic space. This will work even
2108 * if the object is in a current allocation region. */
2110 search_dynamic_space(lispobj *pointer)
2112 int page_index = find_page_index(pointer);
2115 /* The address may be invalid, so do some checks. */
2116 if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
2117 return NULL;
2118 start = (lispobj *)((void *)page_address(page_index)
2119 + page_table[page_index].first_object_offset);
2120 return (search_space(start, (pointer+2)-start, pointer));
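/* Usage sketch (hypothetical helper, not in this file): mapping a
 * possibly-interior pointer back to its enclosing object by trying
 * each space in turn. Only the dynamic-space search copes with
 * pointers into a current allocation region. */
#if 0
static lispobj *
search_all_spaces_sketch(lispobj *pointer)
{
    lispobj *start;
    if ((start = search_read_only_space(pointer)) != NULL)
        return start;
    if ((start = search_static_space(pointer)) != NULL)
        return start;
    return search_dynamic_space(pointer);
}
#endif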
2123 /* Is there any possibility that pointer is a valid Lisp object
2124 * reference, and/or something else (e.g. subroutine call return
2125 * address) which should prevent us from moving the referred-to thing? */
2127 possibly_valid_dynamic_space_pointer(lispobj *pointer)
2129 lispobj *start_addr;
2131 /* Find the object start address. */
2132 if ((start_addr = search_dynamic_space(pointer)) == NULL) {
2133 return 0;
2134 }
2136 /* We need to allow raw pointers into Code objects for return
2137 * addresses. This will also pick up pointers to functions in code
2139 if (widetag_of(*start_addr) == CODE_HEADER_WIDETAG) {
2140 /* XXX could do some further checks here */
2141 return 1;
2142 }
2144 /* If it's not a return address then it needs to be a valid Lisp
2146 if (!is_lisp_pointer((lispobj)pointer)) {
2147 return 0;
2148 }
2150 /* Check that the object pointed to is consistent with the pointer
2151 * low tag.
2152 *
2153 * FIXME: It's not safe to rely on the result from this check
2154 * before an object is initialized. Thus, if we were interrupted
2155 * just as an object had been allocated but not initialized, the
2156 * GC relying on this result could bogusly reclaim the memory.
2157 * However, we can't really afford to do without this check. So
2158 * we should make it safe somehow.
2159 * (1) Perhaps just review the code to make sure
2160 * that WITHOUT-GCING or WITHOUT-INTERRUPTS or some such
2161 * thing is wrapped around critical sections where allocated
2162 * memory type bits haven't been set.
2163 * (2) Perhaps find some other hack to protect against this, e.g.
2164 * recording the result of the last call to allocate-lisp-memory,
2165 * and returning true from this function when *pointer is
2166 * a reference to that result. */
2167 switch (lowtag_of((lispobj)pointer)) {
2168 case FUN_POINTER_LOWTAG:
2169 /* Start_addr should be the enclosing code object, or a closure
2170 * header. */
2171 switch (widetag_of(*start_addr)) {
2172 case CODE_HEADER_WIDETAG:
2173 /* This case is probably caught above. */
2175 case CLOSURE_HEADER_WIDETAG:
2176 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
2177 if ((unsigned)pointer !=
2178 ((unsigned)start_addr+FUN_POINTER_LOWTAG)) {
2182 pointer, start_addr, *start_addr));
2190 pointer, start_addr, *start_addr));
2194 case LIST_POINTER_LOWTAG:
2195 if ((unsigned)pointer !=
2196 ((unsigned)start_addr+LIST_POINTER_LOWTAG)) {
2200 pointer, start_addr, *start_addr));
2203 /* Is it a plausible cons? */
2204 if ((is_lisp_pointer(start_addr[0])
2205 || ((start_addr[0] & 3) == 0) /* fixnum */
2206 || (widetag_of(start_addr[0]) == BASE_CHAR_WIDETAG)
2207 || (widetag_of(start_addr[0]) == UNBOUND_MARKER_WIDETAG))
2208 && (is_lisp_pointer(start_addr[1])
2209 || ((start_addr[1] & 3) == 0) /* fixnum */
2210 || (widetag_of(start_addr[1]) == BASE_CHAR_WIDETAG)
2211 || (widetag_of(start_addr[1]) == UNBOUND_MARKER_WIDETAG)))
2217 pointer, start_addr, *start_addr));
2220 case INSTANCE_POINTER_LOWTAG:
2221 if ((unsigned)pointer !=
2222 ((unsigned)start_addr+INSTANCE_POINTER_LOWTAG)) {
2226 pointer, start_addr, *start_addr));
2229 if (widetag_of(start_addr[0]) != INSTANCE_HEADER_WIDETAG) {
2233 pointer, start_addr, *start_addr));
2237 case OTHER_POINTER_LOWTAG:
2238 if ((unsigned)pointer !=
2239 ((int)start_addr+OTHER_POINTER_LOWTAG)) {
2243 pointer, start_addr, *start_addr));
2246 /* Is it plausible? Not a cons. XXX should check the headers. */
2247 if (is_lisp_pointer(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
2251 pointer, start_addr, *start_addr));
2254 switch (widetag_of(start_addr[0])) {
2255 case UNBOUND_MARKER_WIDETAG:
2256 case BASE_CHAR_WIDETAG:
2260 pointer, start_addr, *start_addr));
2263 /* only pointed to by function pointers? */
2264 case CLOSURE_HEADER_WIDETAG:
2265 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
2269 pointer, start_addr, *start_addr));
2272 case INSTANCE_HEADER_WIDETAG:
2276 pointer, start_addr, *start_addr));
2279 /* the valid other immediate pointer objects */
2280 case SIMPLE_VECTOR_WIDETAG:
2282 case COMPLEX_WIDETAG:
2283 #ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
2284 case COMPLEX_SINGLE_FLOAT_WIDETAG:
2286 #ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
2287 case COMPLEX_DOUBLE_FLOAT_WIDETAG:
2289 #ifdef COMPLEX_LONG_FLOAT_WIDETAG
2290 case COMPLEX_LONG_FLOAT_WIDETAG:
2292 case SIMPLE_ARRAY_WIDETAG:
2293 case COMPLEX_STRING_WIDETAG:
2294 case COMPLEX_BIT_VECTOR_WIDETAG:
2295 case COMPLEX_VECTOR_WIDETAG:
2296 case COMPLEX_ARRAY_WIDETAG:
2297 case VALUE_CELL_HEADER_WIDETAG:
2298 case SYMBOL_HEADER_WIDETAG:
2300 case CODE_HEADER_WIDETAG:
2301 case BIGNUM_WIDETAG:
2302 case SINGLE_FLOAT_WIDETAG:
2303 case DOUBLE_FLOAT_WIDETAG:
2304 #ifdef LONG_FLOAT_WIDETAG
2305 case LONG_FLOAT_WIDETAG:
2307 case SIMPLE_STRING_WIDETAG:
2308 case SIMPLE_BIT_VECTOR_WIDETAG:
2309 case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
2310 case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
2311 case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
2312 case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
2313 case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
2314 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
2315 case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
2317 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
2318 case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
2320 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
2321 case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
2323 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
2324 case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
2326 case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
2327 case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
2328 #ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
2329 case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
2331 #ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
2332 case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
2334 #ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
2335 case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
2337 #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
2338 case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
2341 case WEAK_POINTER_WIDETAG:
2348 pointer, start_addr, *start_addr));
2356 pointer, start_addr, *start_addr));
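/* The pattern common to the checks above, condensed: a tagged pointer
 * is plausible only if removing its lowtag lands exactly on the first
 * word of the object found by the space search. Illustrative only. */
#if 0
static int
pointer_lowtag_consistent_sketch(lispobj *pointer, lispobj *start_addr)
{
    return ((unsigned)pointer
            == ((unsigned)start_addr + lowtag_of((lispobj)pointer)));
}
#endif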
2364 /* Adjust large bignum and vector objects. This will adjust the
2365 * allocated region if the size has shrunk, and move unboxed objects
2366 * into unboxed pages. The pages are not promoted here, and the
2367 * promoted region is not added to the new_regions; this is really
2368 * only designed to be called from preserve_pointer(). Shouldn't fail
2369 * if this is missed, just may delay the moving of objects to unboxed
2370 * pages, and the freeing of pages. */
2372 maybe_adjust_large_object(lispobj *where)
2377 int remaining_bytes;
2384 /* Check whether it's a vector or bignum object. */
2385 switch (widetag_of(where[0])) {
2386 case SIMPLE_VECTOR_WIDETAG:
2389 case BIGNUM_WIDETAG:
2390 case SIMPLE_STRING_WIDETAG:
2391 case SIMPLE_BIT_VECTOR_WIDETAG:
2392 case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
2393 case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
2394 case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
2395 case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
2396 case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
2397 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
2398 case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
2400 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
2401 case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
2403 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
2404 case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
2406 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
2407 case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
2409 case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
2410 case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
2411 #ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
2412 case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
2414 #ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
2415 case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
2417 #ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
2418 case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
2420 #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
2421 case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
2423 boxed = UNBOXED_PAGE;
2429 /* Find its current size. */
2430 nwords = (sizetab[widetag_of(where[0])])(where);
2432 first_page = find_page_index((void *)where);
2433 gc_assert(first_page >= 0);
2435 /* Note: Any page write-protection must be removed, else a later
2436 * scavenge_newspace may incorrectly not scavenge these pages.
2437 * This would not be necessary if they are added to the new areas,
2438 * but lets do it for them all (they'll probably be written
2441 gc_assert(page_table[first_page].first_object_offset == 0);
2443 next_page = first_page;
2444 remaining_bytes = nwords*4;
2445 while (remaining_bytes > 4096) {
2446 gc_assert(page_table[next_page].gen == from_space);
2447 gc_assert((page_table[next_page].allocated == BOXED_PAGE)
2448 || (page_table[next_page].allocated == UNBOXED_PAGE));
2449 gc_assert(page_table[next_page].large_object);
2450 gc_assert(page_table[next_page].first_object_offset ==
2451 -4096*(next_page-first_page));
2452 gc_assert(page_table[next_page].bytes_used == 4096);
2454 page_table[next_page].allocated = boxed;
2456 /* Shouldn't be write-protected at this stage. Essential that the
2458 gc_assert(!page_table[next_page].write_protected);
2459 remaining_bytes -= 4096;
2463 /* Now only one page remains, but the object may have shrunk so
2464 * there may be more unused pages which will be freed. */
2466 /* Object may have shrunk but shouldn't have grown - check. */
2467 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
2469 page_table[next_page].allocated = boxed;
2470 gc_assert(page_table[next_page].allocated ==
2471 page_table[first_page].allocated);
2473 /* Adjust the bytes_used. */
2474 old_bytes_used = page_table[next_page].bytes_used;
2475 page_table[next_page].bytes_used = remaining_bytes;
2477 bytes_freed = old_bytes_used - remaining_bytes;
2479 /* Free any remaining pages; needs care. */
2481 while ((old_bytes_used == 4096) &&
2482 (page_table[next_page].gen == from_space) &&
2483 ((page_table[next_page].allocated == UNBOXED_PAGE)
2484 || (page_table[next_page].allocated == BOXED_PAGE)) &&
2485 page_table[next_page].large_object &&
2486 (page_table[next_page].first_object_offset ==
2487 -(next_page - first_page)*4096)) {
2488 /* It checks out OK, free the page. We don't need to bother zeroing
2489 * pages as this should have been done before shrinking the
2490 * object. These pages shouldn't be write protected as they
2491 * should be zero filled. */
2492 gc_assert(page_table[next_page].write_protected == 0);
2494 old_bytes_used = page_table[next_page].bytes_used;
2495 page_table[next_page].allocated = FREE_PAGE;
2496 page_table[next_page].bytes_used = 0;
2497 bytes_freed += old_bytes_used;
2501 if ((bytes_freed > 0) && gencgc_verbose) {
2503 "/maybe_adjust_large_object() freed %d\n",
2507 generations[from_space].bytes_allocated -= bytes_freed;
2508 bytes_allocated -= bytes_freed;
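/* The page arithmetic used above, in miniature: an object of nwords
 * words covers whole 4096-byte pages plus a final partial page whose
 * remainder is kept in bytes_used. Illustrative only. */
#if 0
static int
large_object_page_count_sketch(int nwords)
{
    int nbytes = nwords * 4;       /* 4 bytes per word on x86 */
    return (nbytes + 4095) / 4096; /* pages touched, rounded up */
}
#endif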
2513 /* Take a possible pointer to a Lisp object and mark its page in the
2514 * page_table so that it will not be relocated during a GC.
2516 * This involves locating the page it points to, then backing up to
2517 * the first page that has its first object start at offset 0, and
2518 * then marking all pages dont_move from the first until a page that
2519 * is only partly full, or is followed by a free page or a page of
2520 * a different generation.
2521 * This ensures that objects spanning pages are not broken.
2523 * It is assumed that all the page static flags have been cleared at
2524 * the start of a GC.
2526 * It is also assumed that the current gc_alloc() region has been
2527 * flushed and the tables updated. */
2529 preserve_pointer(void *addr)
2531 int addr_page_index = find_page_index(addr);
2534 unsigned region_allocation;
2536 /* quick check 1: Address is quite likely to have been invalid. */
2537 if ((addr_page_index == -1)
2538 || (page_table[addr_page_index].allocated == FREE_PAGE)
2539 || (page_table[addr_page_index].bytes_used == 0)
2540 || (page_table[addr_page_index].gen != from_space)
2541 /* Skip if already marked dont_move. */
2542 || (page_table[addr_page_index].dont_move != 0))
2544 gc_assert(!(page_table[addr_page_index].allocated & OPEN_REGION_PAGE));
2545 /* (Now that we know that addr_page_index is in range, it's
2546 * safe to index into page_table[] with it.) */
2547 region_allocation = page_table[addr_page_index].allocated;
2549 /* quick check 2: Check the offset within the page.
2551 * FIXME: The mask should have a symbolic name, and ideally should
2552 * be derived from page size instead of hardwired to 0xfff.
2553 * (Also fix other uses of 0xfff, elsewhere.) */
2554 if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used)
2557 /* Filter out anything which can't be a pointer to a Lisp object
2558 * (or, as a special case which also requires dont_move, a return
2559 * address referring to something in a CodeObject). This is
2560 * expensive but important, since it vastly reduces the
2561 * probability that random garbage will be bogusly interpreted as
2562 * a pointer which prevents a page from moving. */
2563 if (!(possibly_valid_dynamic_space_pointer(addr)))
2565 first_page = addr_page_index;
2567 /* Work backwards to find a page with a first_object_offset of 0.
2568 * The pages should be contiguous with all bytes used in the same
2569 * gen. Assumes the first_object_offset is negative or zero. */
2571 /* this is probably needlessly conservative. The first object in
2572 * the page may not even be the one we were passed a pointer to:
2573 * if this is the case, we will mark all the previous object's
2574 * pages dont_move too.
2577 while (page_table[first_page].first_object_offset != 0) {
2579 /* Do some checks. */
2580 gc_assert(page_table[first_page].bytes_used == 4096);
2581 gc_assert(page_table[first_page].gen == from_space);
2582 gc_assert(page_table[first_page].allocated == region_allocation);
2585 /* Adjust any large objects before promotion as they won't be
2586 * copied after promotion. */
2587 if (page_table[first_page].large_object) {
2588 maybe_adjust_large_object(page_address(first_page));
2589 /* If a large object has shrunk then addr may now point to a
2590 * free area in which case it's ignored here. Note it gets
2591 * through the valid pointer test above because the tail looks
2592 * like conses. */
2593 if ((page_table[addr_page_index].allocated == FREE_PAGE)
2594 || (page_table[addr_page_index].bytes_used == 0)
2595 /* Check the offset within the page. */
2596 || (((unsigned)addr & 0xfff)
2597 > page_table[addr_page_index].bytes_used)) {
2599 "weird? ignore ptr 0x%x to freed area of large object\n",
2603 /* It may have moved to unboxed pages. */
2604 region_allocation = page_table[first_page].allocated;
2607 /* Now work forward until the end of this contiguous area is found,
2608 * marking all pages as dont_move. */
2609 for (i = first_page; ;i++) {
2610 gc_assert(page_table[i].allocated == region_allocation);
2612 /* Mark the page static. */
2613 page_table[i].dont_move = 1;
2615 /* Move the page to the new_space. XX I'd rather not do this
2616 * but the GC logic is not quite able to cope with the static
2617 * pages remaining in the from space. This also requires the
2618 * generation bytes_allocated counters be updated. */
2619 page_table[i].gen = new_space;
2620 generations[new_space].bytes_allocated += page_table[i].bytes_used;
2621 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
2623 /* It is essential that the pages are not write protected as
2624 * they may have pointers into the old-space which need
2625 * scavenging. They shouldn't be write protected at this
2627 gc_assert(!page_table[i].write_protected);
2629 /* Check whether this is the last page in this contiguous block.. */
2630 if ((page_table[i].bytes_used < 4096)
2631 /* ..or it is 4096 and is the last in the block */
2632 || (page_table[i+1].allocated == FREE_PAGE)
2633 || (page_table[i+1].bytes_used == 0) /* next page free */
2634 || (page_table[i+1].gen != from_space) /* diff. gen */
2635 || (page_table[i+1].first_object_offset == 0))
2639 /* Check that the page is now static. */
2640 gc_assert(page_table[addr_page_index].dont_move != 0);
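/* Usage sketch: preserve_pointer() is applied to every word that
 * might be a root. The real loop over the C stack appears in
 * garbage_collect_generation() below; this hypothetical helper just
 * isolates its shape. */
#if 0
static void
preserve_stack_sketch(void **low, void **high)
{
    void **ptr;
    /* Walk top-down, pinning the pages behind anything that looks
     * like a pointer into from_space. */
    for (ptr = high - 1; ptr > low; ptr--)
        preserve_pointer(*ptr);
}
#endif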
2643 /* If the given page is not write-protected, then scan it for pointers
2644 * to younger generations or the top temp. generation; if no
2645 * suspicious pointers are found then the page is write-protected.
2647 * Care is taken to check for pointers to the current gc_alloc()
2648 * region if it is a younger generation or the temp. generation. This
2649 * frees the caller from doing a gc_alloc_update_page_tables(). Actually
2650 * the gc_alloc_generation does not need to be checked as this is only
2651 * called from scavenge_generation() when the gc_alloc generation is
2652 * younger, so it just checks if there is a pointer to the current
2653 * region.
2655 * We return 1 if the page was write-protected, else 0. */
2657 update_page_write_prot(int page)
2659 int gen = page_table[page].gen;
2660 int j;
2661 int wp_it = 1;
2662 void **page_addr = (void **)page_address(page);
2663 int num_words = page_table[page].bytes_used / 4;
2665 /* Shouldn't be a free page. */
2666 gc_assert(page_table[page].allocated != FREE_PAGE);
2667 gc_assert(page_table[page].bytes_used != 0);
2669 /* Skip if it's already write-protected or an unboxed page. */
2670 if (page_table[page].write_protected
2671 || (page_table[page].allocated & UNBOXED_PAGE))
2674 /* Scan the page for pointers to younger generations or the
2675 * top temp. generation. */
2677 for (j = 0; j < num_words; j++) {
2678 void *ptr = *(page_addr+j);
2679 int index = find_page_index(ptr);
2681 /* Check that it's in the dynamic space */
2683 if (/* Does it point to a younger or the temp. generation? */
2684 ((page_table[index].allocated != FREE_PAGE)
2685 && (page_table[index].bytes_used != 0)
2686 && ((page_table[index].gen < gen)
2687 || (page_table[index].gen == NUM_GENERATIONS)))
2689 /* Or does it point within a current gc_alloc() region? */
2690 || ((boxed_region.start_addr <= ptr)
2691 && (ptr <= boxed_region.free_pointer))
2692 || ((unboxed_region.start_addr <= ptr)
2693 && (ptr <= unboxed_region.free_pointer))) {
2700 /* Write-protect the page. */
2701 /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
2703 os_protect((void *)page_addr,
2704 4096,
2705 OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
2707 /* Note the page as protected in the page tables. */
2708 page_table[page].write_protected = 1;
2709 }
2711 return (wp_it);
2712 }
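/* The other half of this write barrier lives with the runtime's
 * signal handling: a write to a protected page traps, and the handler
 * re-enables writes and records the event. A hedged sketch of that
 * handler's core (the real one is elsewhere in the runtime): */
#if 0
static void
handle_write_fault_sketch(void *fault_addr)
{
    int page = find_page_index(fault_addr);
    if ((page != -1) && page_table[page].write_protected) {
        os_protect((void *)page_address(page), 4096, OS_VM_PROT_ALL);
        page_table[page].write_protected = 0;
        /* Lets the SC_GEN_CK consistency checks see that a
         * protected page was written. */
        page_table[page].write_protected_cleared = 1;
    }
}
#endif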
2714 /* Scavenge a generation.
2716 * This will not resolve all pointers when generation is the new
2717 * space, as new objects may be added which are not checked here - use
2718 * scavenge_newspace_generation.
2720 * Write-protected pages should not have any pointers to the
2721 * from_space so don't need scavenging; thus write-protected pages are
2722 * not always scavenged. There is some code to check that these pages
2723 * are not written; but to check fully, the write-protected pages need
2724 * to be scavenged by disabling the code that skips them.
2726 * Under the current scheme when a generation is GCed the younger
2727 * generations will be empty. So, when a generation is being GCed it
2728 * is only necessary to scavenge the older generations for pointers,
2729 * not the younger. So a page that does not have pointers to younger
2730 * generations does not need to be scavenged.
2732 * The write-protection can be used to note pages that don't have
2733 * pointers to younger pages. But pages can be written without having
2734 * pointers to younger generations. After the pages are scavenged here
2735 * they can be scanned for pointers to younger generations and if
2736 * there are none the page can be write-protected.
2738 * One complication is when the newspace is the top temp. generation.
2740 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
2741 * that none were written, which they shouldn't be as they should have
2742 * no pointers to younger generations. This breaks down for weak
2743 * pointers as the objects contain a link to the next and are written
2744 * if a weak pointer is scavenged. Still it's a useful check. */
2746 scavenge_generation(int generation)
2753 /* Clear the write_protected_cleared flags on all pages. */
2754 for (i = 0; i < NUM_PAGES; i++)
2755 page_table[i].write_protected_cleared = 0;
2758 for (i = 0; i < last_free_page; i++) {
2759 if ((page_table[i].allocated & BOXED_PAGE)
2760 && (page_table[i].bytes_used != 0)
2761 && (page_table[i].gen == generation)) {
2764 /* This should be the start of a contiguous block. */
2765 gc_assert(page_table[i].first_object_offset == 0);
2767 /* We need to find the full extent of this contiguous
2768 * block in case objects span pages. */
2770 /* Now work forward until the end of this contiguous area
2771 * is found. A small area is preferred as there is a
2772 * better chance of its pages being write-protected. */
2773 for (last_page = i; ; last_page++)
2774 /* Check whether this is the last page in this contiguous
2775 * block. */
2776 if ((page_table[last_page].bytes_used < 4096)
2777 /* Or it is 4096 and is the last in the block */
2778 || (!(page_table[last_page+1].allocated & BOXED_PAGE))
2779 || (page_table[last_page+1].bytes_used == 0)
2780 || (page_table[last_page+1].gen != generation)
2781 || (page_table[last_page+1].first_object_offset == 0))
2784 /* Do a limited check for write_protected pages. If all pages
2785 * are write_protected then there is no need to scavenge. */
2788 for (j = i; j <= last_page; j++)
2789 if (page_table[j].write_protected == 0) {
2797 scavenge(page_address(i), (page_table[last_page].bytes_used
2798 + (last_page-i)*4096)/4);
2800 /* Now scan the pages and write protect those
2801 * that don't have pointers to younger
2803 if (enable_page_protection) {
2804 for (j = i; j <= last_page; j++) {
2805 num_wp += update_page_write_prot(j);
2814 if ((gencgc_verbose > 1) && (num_wp != 0)) {
2816 "/write protected %d pages within generation %d\n",
2817 num_wp, generation));
2821 /* Check that none of the write_protected pages in this generation
2822 * have been written to. */
2823 for (i = 0; i < NUM_PAGES; i++) {
2824 if ((page_table[i].allocated != FREE_PAGE)
2825 && (page_table[i].bytes_used != 0)
2826 && (page_table[i].gen == generation)
2827 && (page_table[i].write_protected_cleared != 0)) {
2828 FSHOW((stderr, "/scavenge_generation() %d\n", generation));
2830 "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
2831 page_table[i].bytes_used,
2832 page_table[i].first_object_offset,
2833 page_table[i].dont_move));
2834 lose("write to protected page %d in scavenge_generation()", i);
2841 /* Scavenge a newspace generation. As it is scavenged new objects may
2842 * be allocated to it; these will also need to be scavenged. This
2843 * repeats until there are no more objects unscavenged in the
2844 * newspace generation.
2846 * To help improve the efficiency, areas written are recorded by
2847 * gc_alloc() and only these scavenged. Sometimes a little more will be
2848 * scavenged, but this causes no harm. An easy check is done that the
2849 * scavenged bytes equals the number allocated in the previous
2850 * scavenge.
2852 * Write-protected pages are not scanned except if they are marked
2853 * dont_move in which case they may have been promoted and still have
2854 * pointers to the from space.
2856 * Write-protected pages could potentially be written by alloc; however,
2857 * to avoid having to handle re-scavenging of write-protected pages,
2858 * gc_alloc() does not write to write-protected pages.
2860 * New areas of objects allocated are recorded alternately in the two
2861 * new_areas arrays below. */
2862 static struct new_area new_areas_1[NUM_NEW_AREAS];
2863 static struct new_area new_areas_2[NUM_NEW_AREAS];
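/* Sketch of the record keeping gc_alloc() does to feed the rescan
 * loop below (simplified: the real code also coalesces adjacent areas
 * and honors record_new_objects and new_areas_ignore_page): */
#if 0
static void
record_new_area_sketch(int first_page, int offset, int size)
{
    int i = new_areas_index++;
    if (i < NUM_NEW_AREAS) {
        (*new_areas)[i].page = first_page;
        (*new_areas)[i].offset = offset;
        (*new_areas)[i].size = size;
    }
    /* On overflow only the index keeps counting; the caller sees
     * new_areas_index >= NUM_NEW_AREAS and falls back to a full
     * scan. */
}
#endif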
2865 /* Do one full scan of the new space generation. This is not enough to
2866 * complete the job as new objects may be added to the generation in
2867 * the process which are not scavenged. */
2869 scavenge_newspace_generation_one_scan(int generation)
2874 "/starting one full scan of newspace generation %d\n",
2876 for (i = 0; i < last_free_page; i++) {
2877 /* note that this skips over open regions when it encounters them */
2878 if ((page_table[i].allocated == BOXED_PAGE)
2879 && (page_table[i].bytes_used != 0)
2880 && (page_table[i].gen == generation)
2881 && ((page_table[i].write_protected == 0)
2882 /* (This may be redundant as write_protected is now
2883 * cleared before promotion.) */
2884 || (page_table[i].dont_move == 1))) {
2887 /* The scavenge will start at the first_object_offset of page i.
2889 * We need to find the full extent of this contiguous
2890 * block in case objects span pages.
2892 * Now work forward until the end of this contiguous area
2893 * is found. A small area is preferred as there is a
2894 * better chance of its pages being write-protected. */
2895 for (last_page = i; ;last_page++) {
2896 /* Check whether this is the last page in this
2897 * contiguous block */
2898 if ((page_table[last_page].bytes_used < 4096)
2899 /* Or it is 4096 and is the last in the block */
2900 || (!(page_table[last_page+1].allocated & BOXED_PAGE))
2901 || (page_table[last_page+1].bytes_used == 0)
2902 || (page_table[last_page+1].gen != generation)
2903 || (page_table[last_page+1].first_object_offset == 0))
2907 /* Do a limited check for write-protected pages. If all
2908 * pages are write-protected then no need to scavenge,
2909 * except if the pages are marked dont_move. */
2912 for (j = i; j <= last_page; j++)
2913 if ((page_table[j].write_protected == 0)
2914 || (page_table[j].dont_move != 0)) {
2922 /* Calculate the size. */
2923 if (last_page == i)
2924 size = (page_table[last_page].bytes_used
2925 - page_table[i].first_object_offset)/4;
2926 else
2927 size = (page_table[last_page].bytes_used
2928 + (last_page-i)*4096
2929 - page_table[i].first_object_offset)/4;
2932 new_areas_ignore_page = last_page;
2934 scavenge(page_address(i) +
2935 page_table[i].first_object_offset,
2936 size);
2946 "/done with one full scan of newspace generation %d\n",
2950 /* Do a complete scavenge of the newspace generation. */
2952 scavenge_newspace_generation(int generation)
2956 /* the new_areas array currently being written to by gc_alloc() */
2957 struct new_area (*current_new_areas)[] = &new_areas_1;
2958 int current_new_areas_index;
2960 /* the new_areas created by the previous scavenge cycle */
2961 struct new_area (*previous_new_areas)[] = NULL;
2962 int previous_new_areas_index;
2964 /* Flush the current regions updating the tables. */
2965 gc_alloc_update_all_page_tables();
2967 /* Turn on the recording of new areas by gc_alloc(). */
2968 new_areas = current_new_areas;
2969 new_areas_index = 0;
2971 /* Don't need to record new areas that get scavenged anyway during
2972 * scavenge_newspace_generation_one_scan. */
2973 record_new_objects = 1;
2975 /* Start with a full scavenge. */
2976 scavenge_newspace_generation_one_scan(generation);
2978 /* Record all new areas now. */
2979 record_new_objects = 2;
2981 /* Flush the current regions updating the tables. */
2982 gc_alloc_update_all_page_tables();
2984 /* Grab new_areas_index. */
2985 current_new_areas_index = new_areas_index;
2988 "The first scan is finished; current_new_areas_index=%d.\n",
2989 current_new_areas_index));*/
2991 while (current_new_areas_index > 0) {
2992 /* Move the current to the previous new areas */
2993 previous_new_areas = current_new_areas;
2994 previous_new_areas_index = current_new_areas_index;
2996 /* Scavenge all the areas in previous new areas. Any new areas
2997 * allocated are saved in current_new_areas. */
2999 /* Switch to the other array for current_new_areas, alternating
3000 * between new_areas_1 and 2. */
3001 if (previous_new_areas == &new_areas_1)
3002 current_new_areas = &new_areas_2;
3004 current_new_areas = &new_areas_1;
3006 /* Set up for gc_alloc(). */
3007 new_areas = current_new_areas;
3008 new_areas_index = 0;
3010 /* Check whether previous_new_areas had overflowed. */
3011 if (previous_new_areas_index >= NUM_NEW_AREAS) {
3013 /* New areas of objects allocated have been lost, so we need to do a
3014 * full scan to be sure! If this becomes a problem try
3015 * increasing NUM_NEW_AREAS. */
3017 SHOW("new_areas overflow, doing full scavenge");
3019 /* Don't need to record new areas that get scavenged anyway
3020 * during scavenge_newspace_generation_one_scan. */
3021 record_new_objects = 1;
3023 scavenge_newspace_generation_one_scan(generation);
3025 /* Record all new areas now. */
3026 record_new_objects = 2;
3028 /* Flush the current regions updating the tables. */
3029 gc_alloc_update_all_page_tables();
3033 /* Work through previous_new_areas. */
3034 for (i = 0; i < previous_new_areas_index; i++) {
3035 /* FIXME: All these bare *4 and /4 should be something
3036 * like BYTES_PER_WORD or WBYTES. */
3037 int page = (*previous_new_areas)[i].page;
3038 int offset = (*previous_new_areas)[i].offset;
3039 int size = (*previous_new_areas)[i].size / 4;
3040 gc_assert((*previous_new_areas)[i].size % 4 == 0);
3041 scavenge(page_address(page)+offset, size);
3044 /* Flush the current regions updating the tables. */
3045 gc_alloc_update_all_page_tables();
3048 current_new_areas_index = new_areas_index;
3051 "The re-scan has finished; current_new_areas_index=%d.\n",
3052 current_new_areas_index));*/
3055 /* Turn off recording of areas allocated by gc_alloc(). */
3056 record_new_objects = 0;
3059 /* Check that none of the write_protected pages in this generation
3060 * have been written to. */
3061 for (i = 0; i < NUM_PAGES; i++) {
3062 if ((page_table[i].allocated != FREE_PAGE)
3063 && (page_table[i].bytes_used != 0)
3064 && (page_table[i].gen == generation)
3065 && (page_table[i].write_protected_cleared != 0)
3066 && (page_table[i].dont_move == 0)) {
3067 lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
3068 i, generation, page_table[i].dont_move);
3074 /* Un-write-protect all the pages in from_space. This is done at the
3075 * start of a GC, else there may be many page faults while scavenging
3076 * the newspace (I've seen this drive the system time to 99%). These
3077 * pages would need to be unprotected anyway before unmapping in
3078 * free_oldspace; not sure what effect this has on paging.. */
3080 unprotect_oldspace(void)
3084 for (i = 0; i < last_free_page; i++) {
3085 if ((page_table[i].allocated != FREE_PAGE)
3086 && (page_table[i].bytes_used != 0)
3087 && (page_table[i].gen == from_space)) {
3090 page_start = (void *)page_address(i);
3092 /* Remove any write-protection. We should be able to rely
3093 * on the write-protect flag to avoid redundant calls. */
3094 if (page_table[i].write_protected) {
3095 os_protect(page_start, 4096, OS_VM_PROT_ALL);
3096 page_table[i].write_protected = 0;
3102 /* Work through all the pages and free any in from_space. This
3103 * assumes that all objects have been copied or promoted to an older
3104 * generation. Bytes_allocated and the generation bytes_allocated
3105 * counter are updated. The number of bytes freed is returned. */
3106 extern void i586_bzero(void *addr, int nbytes);
3107 static int
3108 free_oldspace(void)
3109 {
3110 int bytes_freed = 0;
3111 int first_page, last_page;
3116 /* Find a first page for the next region of pages. */
3117 while ((first_page < last_free_page)
3118 && ((page_table[first_page].allocated == FREE_PAGE)
3119 || (page_table[first_page].bytes_used == 0)
3120 || (page_table[first_page].gen != from_space)))
3123 if (first_page >= last_free_page)
3126 /* Find the last page of this region. */
3127 last_page = first_page;
3130 /* Free the page. */
3131 bytes_freed += page_table[last_page].bytes_used;
3132 generations[page_table[last_page].gen].bytes_allocated -=
3133 page_table[last_page].bytes_used;
3134 page_table[last_page].allocated = FREE_PAGE;
3135 page_table[last_page].bytes_used = 0;
3137 /* Remove any write-protection. We should be able to rely
3138 * on the write-protect flag to avoid redundant calls. */
3140 void *page_start = (void *)page_address(last_page);
3142 if (page_table[last_page].write_protected) {
3143 os_protect(page_start, 4096, OS_VM_PROT_ALL);
3144 page_table[last_page].write_protected = 0;
3149 while ((last_page < last_free_page)
3150 && (page_table[last_page].allocated != FREE_PAGE)
3151 && (page_table[last_page].bytes_used != 0)
3152 && (page_table[last_page].gen == from_space));
3154 /* Zero pages from first_page to (last_page-1).
3156 * FIXME: Why not use os_zero(..) function instead of
3157 * hand-coding this again? (Check other gencgc_unmap_zero
3158 * stuff too.) */
3159 if (gencgc_unmap_zero) {
3160 void *page_start, *addr;
3162 page_start = (void *)page_address(first_page);
3164 os_invalidate(page_start, 4096*(last_page-first_page));
3165 addr = os_validate(page_start, 4096*(last_page-first_page));
3166 if (addr == NULL || addr != page_start) {
3167 /* Is this an error condition? I couldn't really tell from
3168 * the old CMU CL code, which fprintf'ed a message with
3169 * an exclamation point at the end. But I've never seen the
3170 * message, so it must at least be unusual..
3172 * (The same condition is also tested for in gc_free_heap.)
3174 * -- WHN 19991129 */
3175 lose("i586_bzero: page moved, 0x%08x ==> 0x%08x",
3182 page_start = (int *)page_address(first_page);
3183 i586_bzero(page_start, 4096*(last_page-first_page));
3186 first_page = last_page;
3188 } while (first_page < last_free_page);
3190 bytes_allocated -= bytes_freed;
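/* The unmap/remap zeroing above, condensed into one hypothetical
 * helper. os_invalidate()/os_validate() are the same OS-interface
 * calls used in the loop, and getting a different address back is
 * treated as fatal there too. */
#if 0
static void
zero_pages_by_remap_sketch(int first_page, int last_page)
{
    void *page_start = (void *)page_address(first_page);
    void *addr;
    os_invalidate(page_start, 4096*(last_page-first_page));
    addr = os_validate(page_start, 4096*(last_page-first_page));
    if (addr == NULL || addr != page_start)
        lose("zero_pages_by_remap_sketch: page moved, 0x%08x ==> 0x%08x",
             page_start, addr);
}
#endif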
3195 /* Print some information about a pointer at the given address. */
3197 print_ptr(lispobj *addr)
3199 /* If addr is in the dynamic space then print out the page information. */
3200 int pi1 = find_page_index((void*)addr);
3203 fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
3204 (unsigned int) addr,
3206 page_table[pi1].allocated,
3207 page_table[pi1].gen,
3208 page_table[pi1].bytes_used,
3209 page_table[pi1].first_object_offset,
3210 page_table[pi1].dont_move);
3211 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
3212 *(addr-4), *(addr-3), *(addr-2), *(addr-1),
3213 *(addr-0),
3214 *(addr+1), *(addr+2), *(addr+3), *(addr+4));
3224 extern int undefined_tramp;
3227 verify_space(lispobj *start, size_t words)
3229 int is_in_dynamic_space = (find_page_index((void*)start) != -1);
3230 int is_in_readonly_space =
3231 (READ_ONLY_SPACE_START <= (unsigned)start &&
3232 (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
3236 lispobj thing = *(lispobj*)start;
3238 if (is_lisp_pointer(thing)) {
3239 int page_index = find_page_index((void*)thing);
3240 int to_readonly_space =
3241 (READ_ONLY_SPACE_START <= thing &&
3242 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
3243 int to_static_space =
3244 (STATIC_SPACE_START <= thing &&
3245 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
3247 /* Does it point to the dynamic space? */
3248 if (page_index != -1) {
3249 /* If it's within the dynamic space it should point to a used
3250 * page. XX Could check the offset too. */
3251 if ((page_table[page_index].allocated != FREE_PAGE)
3252 && (page_table[page_index].bytes_used == 0))
3253 lose ("Ptr %x @ %x sees free page.", thing, start);
3254 /* Check that it doesn't point to a forwarding pointer! */
3255 if (*((lispobj *)native_pointer(thing)) == 0x01) {
3256 lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
3258 /* Check that it's not in the RO space as it would then be a
3259 * pointer from the RO to the dynamic space. */
3260 if (is_in_readonly_space) {
3261 lose("ptr to dynamic space %x from RO space %x",
3264 /* Does it point to a plausible object? This check slows
3265 * it down a lot (so it's commented out).
3267 * "a lot" is serious: it ate 50 minutes cpu time on
3268 * my duron 950 before I came back from lunch and killed it.
3269 *
3271 * FIXME: Add a variable to enable this check. */
3274 if (!possibly_valid_dynamic_space_pointer((lispobj *)thing)) {
3275 lose("ptr %x to invalid object %x", thing, start);
3279 /* Verify that it points to another valid space. */
3280 if (!to_readonly_space && !to_static_space
3281 && (thing != (unsigned)&undefined_tramp)) {
3282 lose("Ptr %x @ %x sees junk.", thing, start);
3286 if (thing & 0x3) { /* Skip fixnums. FIXME: There should be an
3287 * is_fixnum for this. */
3289 switch(widetag_of(*start)) {
3292 case SIMPLE_VECTOR_WIDETAG:
3294 case COMPLEX_WIDETAG:
3295 case SIMPLE_ARRAY_WIDETAG:
3296 case COMPLEX_STRING_WIDETAG:
3297 case COMPLEX_BIT_VECTOR_WIDETAG:
3298 case COMPLEX_VECTOR_WIDETAG:
3299 case COMPLEX_ARRAY_WIDETAG:
3300 case CLOSURE_HEADER_WIDETAG:
3301 case FUNCALLABLE_INSTANCE_HEADER_WIDETAG:
3302 case VALUE_CELL_HEADER_WIDETAG:
3303 case SYMBOL_HEADER_WIDETAG:
3304 case BASE_CHAR_WIDETAG:
3305 case UNBOUND_MARKER_WIDETAG:
3306 case INSTANCE_HEADER_WIDETAG:
3311 case CODE_HEADER_WIDETAG:
3313 lispobj object = *start;
3315 int nheader_words, ncode_words, nwords;
3316 struct code *code;
3317 struct simple_fun *fheaderp;
3318 lispobj fheaderl;
3319 code = (struct code *) start;
3321 /* Check that it's not in the dynamic space.
3322 * FIXME: Isn't it supposed to be OK for code
3323 * objects to be in the dynamic space these days? */
3324 if (is_in_dynamic_space
3325 /* It's ok if it's byte compiled code. The trace
3326 * table offset will be a fixnum if it's x86
3327 * compiled code - check.
3329 * FIXME: #^#@@! lack of abstraction here..
3330 * This line can probably go away now that
3331 * there's no byte compiler, but I've got
3332 * too much to worry about right now to try
3333 * to make sure. -- WHN 2001-10-06 */
3334 && !(code->trace_table_offset & 0x3)
3335 /* Only when enabled */
3336 && verify_dynamic_code_check) {
3338 "/code object at %x in the dynamic space\n",
3342 ncode_words = fixnum_value(code->code_size);
3343 nheader_words = HeaderValue(object);
3344 nwords = ncode_words + nheader_words;
3345 nwords = CEILING(nwords, 2);
3346 /* Verify the boxed section of the code data block. */
3347 verify_space(start + 1, nheader_words - 1);
3349 /* Verify the boxed section of each function
3350 * object in the code data block. */
3351 fheaderl = code->entry_points;
3352 while (fheaderl != NIL) {
3354 (struct simple_fun *) native_pointer(fheaderl);
3355 gc_assert(widetag_of(fheaderp->header) == SIMPLE_FUN_HEADER_WIDETAG);
3356 verify_space(&fheaderp->name, 1);
3357 verify_space(&fheaderp->arglist, 1);
3358 verify_space(&fheaderp->type, 1);
3359 fheaderl = fheaderp->next;
3365 /* unboxed objects */
3366 case BIGNUM_WIDETAG:
3367 case SINGLE_FLOAT_WIDETAG:
3368 case DOUBLE_FLOAT_WIDETAG:
3369 #ifdef LONG_FLOAT_WIDETAG
3370 case LONG_FLOAT_WIDETAG:
3372 #ifdef COMPLEX_SINGLE_FLOAT_WIDETAG
3373 case COMPLEX_SINGLE_FLOAT_WIDETAG:
3375 #ifdef COMPLEX_DOUBLE_FLOAT_WIDETAG
3376 case COMPLEX_DOUBLE_FLOAT_WIDETAG:
3378 #ifdef COMPLEX_LONG_FLOAT_WIDETAG
3379 case COMPLEX_LONG_FLOAT_WIDETAG:
3381 case SIMPLE_STRING_WIDETAG:
3382 case SIMPLE_BIT_VECTOR_WIDETAG:
3383 case SIMPLE_ARRAY_UNSIGNED_BYTE_2_WIDETAG:
3384 case SIMPLE_ARRAY_UNSIGNED_BYTE_4_WIDETAG:
3385 case SIMPLE_ARRAY_UNSIGNED_BYTE_8_WIDETAG:
3386 case SIMPLE_ARRAY_UNSIGNED_BYTE_16_WIDETAG:
3387 case SIMPLE_ARRAY_UNSIGNED_BYTE_32_WIDETAG:
3388 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG
3389 case SIMPLE_ARRAY_SIGNED_BYTE_8_WIDETAG:
3391 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG
3392 case SIMPLE_ARRAY_SIGNED_BYTE_16_WIDETAG:
3394 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG
3395 case SIMPLE_ARRAY_SIGNED_BYTE_30_WIDETAG:
3397 #ifdef SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG
3398 case SIMPLE_ARRAY_SIGNED_BYTE_32_WIDETAG:
3400 case SIMPLE_ARRAY_SINGLE_FLOAT_WIDETAG:
3401 case SIMPLE_ARRAY_DOUBLE_FLOAT_WIDETAG:
3402 #ifdef SIMPLE_ARRAY_LONG_FLOAT_WIDETAG
3403 case SIMPLE_ARRAY_LONG_FLOAT_WIDETAG:
3405 #ifdef SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG
3406 case SIMPLE_ARRAY_COMPLEX_SINGLE_FLOAT_WIDETAG:
3408 #ifdef SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG
3409 case SIMPLE_ARRAY_COMPLEX_DOUBLE_FLOAT_WIDETAG:
3411 #ifdef SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG
3412 case SIMPLE_ARRAY_COMPLEX_LONG_FLOAT_WIDETAG:
3415 case WEAK_POINTER_WIDETAG:
3416 count = (sizetab[widetag_of(*start)])(start);
3432 /* FIXME: It would be nice to make names consistent so that
3433 * foo_size meant size *in* *bytes* instead of size in some
3434 * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
3435 * Some counts of lispobjs are called foo_count; it might be good
3436 * to grep for all foo_size and rename the appropriate ones to
3437 * foo_count. */
3438 int read_only_space_size =
3439 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
3440 - (lispobj*)READ_ONLY_SPACE_START;
3441 int static_space_size =
3442 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
3443 - (lispobj*)STATIC_SPACE_START;
3444 int binding_stack_size =
3445 (lispobj*)SymbolValue(BINDING_STACK_POINTER)
3446 - (lispobj*)BINDING_STACK_START;
3448 verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
3449 verify_space((lispobj*)STATIC_SPACE_START , static_space_size);
3450 verify_space((lispobj*)BINDING_STACK_START , binding_stack_size);
3454 verify_generation(int generation)
3458 for (i = 0; i < last_free_page; i++) {
3459 if ((page_table[i].allocated != FREE_PAGE)
3460 && (page_table[i].bytes_used != 0)
3461 && (page_table[i].gen == generation)) {
3463 int region_allocation = page_table[i].allocated;
3465 /* This should be the start of a contiguous block */
3466 gc_assert(page_table[i].first_object_offset == 0);
3468 /* Need to find the full extent of this contiguous block in case
3469 objects span pages. */
3471 /* Now work forward until the end of this contiguous area is
3473 for (last_page = i; ;last_page++)
3474 /* Check whether this is the last page in this contiguous
3475 * block. */
3476 if ((page_table[last_page].bytes_used < 4096)
3477 /* Or it is 4096 and is the last in the block */
3478 || (page_table[last_page+1].allocated != region_allocation)
3479 || (page_table[last_page+1].bytes_used == 0)
3480 || (page_table[last_page+1].gen != generation)
3481 || (page_table[last_page+1].first_object_offset == 0))
3484 verify_space(page_address(i), (page_table[last_page].bytes_used
3485 + (last_page-i)*4096)/4);
3491 /* Check that all the free space is zero filled. */
3493 verify_zero_fill(void)
3497 for (page = 0; page < last_free_page; page++) {
3498 if (page_table[page].allocated == FREE_PAGE) {
3499 /* The whole page should be zero filled. */
3500 int *start_addr = (int *)page_address(page);
3501 int size = 1024;
3502 int i;
3503 for (i = 0; i < size; i++) {
3504 if (start_addr[i] != 0) {
3505 lose("free page not zero at %x", start_addr + i);
3509 int free_bytes = 4096 - page_table[page].bytes_used;
3510 if (free_bytes > 0) {
3511 int *start_addr = (int *)((unsigned)page_address(page)
3512 + page_table[page].bytes_used);
3513 int size = free_bytes / 4;
3515 for (i = 0; i < size; i++) {
3516 if (start_addr[i] != 0) {
3517 lose("free region not zero at %x", start_addr + i);
3525 /* External entry point for verify_zero_fill */
3527 gencgc_verify_zero_fill(void)
3529 /* Flush the alloc regions updating the tables. */
3530 gc_alloc_update_all_page_tables();
3531 SHOW("verifying zero fill");
3532 verify_zero_fill();
3536 verify_dynamic_space(void)
3540 for (i = 0; i < NUM_GENERATIONS; i++)
3541 verify_generation(i);
3543 if (gencgc_enable_verify_zero_fill)
3544 verify_zero_fill();
3547 /* Write-protect all the dynamic boxed pages in the given generation. */
3549 write_protect_generation_pages(int generation)
3553 gc_assert(generation < NUM_GENERATIONS);
3555 for (i = 0; i < last_free_page; i++)
3556 if ((page_table[i].allocated == BOXED_PAGE)
3557 && (page_table[i].bytes_used != 0)
3558 && (page_table[i].gen == generation)) {
3561 page_start = (void *)page_address(i);
3563 os_protect(page_start,
3564 4096,
3565 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
3567 /* Note the page as protected in the page tables. */
3568 page_table[i].write_protected = 1;
3571 if (gencgc_verbose > 1) {
3573 "/write protected %d of %d pages in generation %d\n",
3574 count_write_protect_generation_pages(generation),
3575 count_generation_pages(generation),
3580 /* Garbage collect a generation. If raise is 0 then the remains of the
3581 * generation are not raised to the next generation. */
3583 garbage_collect_generation(int generation, int raise)
3585 unsigned long bytes_freed;
3587 unsigned long static_space_size;
3589 gc_assert(generation <= (NUM_GENERATIONS-1));
3591 /* The oldest generation can't be raised. */
3592 gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));
3594 /* Initialize the weak pointer list. */
3595 weak_pointers = NULL;
3597 /* When a generation is not being raised it is transported to a
3598 * temporary generation (NUM_GENERATIONS), and lowered when
3599 * done. Set up this new generation. There should be no pages
3600 * allocated to it yet. */
3602 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
3604 /* Set the global src and dest. generations */
3605 from_space = generation;
3607 new_space = generation+1;
3609 new_space = NUM_GENERATIONS;
3611 /* Change to a new space for allocation, resetting the alloc_start_page */
3612 gc_alloc_generation = new_space;
3613 generations[new_space].alloc_start_page = 0;
3614 generations[new_space].alloc_unboxed_start_page = 0;
3615 generations[new_space].alloc_large_start_page = 0;
3616 generations[new_space].alloc_large_unboxed_start_page = 0;
3618 /* Before any pointers are preserved, the dont_move flags on the
3619 * pages need to be cleared. */
3620 for (i = 0; i < last_free_page; i++)
3621 page_table[i].dont_move = 0;
3623 /* Un-write-protect the old-space pages. This is essential for the
3624 * promoted pages as they may contain pointers into the old-space
3625 * which need to be scavenged. It also helps avoid unnecessary page
3626 * faults as forwarding pointers are written into them. They need to
3627 * be un-protected anyway before unmapping later. */
3628 unprotect_oldspace();
3630 /* Scavenge the stack's conservative roots. */
3633 for (ptr = (void **)CONTROL_STACK_END - 1;
3634 ptr > (void **)&raise;
3636 preserve_pointer(*ptr);
3641 if (gencgc_verbose > 1) {
3642 int num_dont_move_pages = count_dont_move_pages();
3644 "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
3645 num_dont_move_pages,
3646 /* FIXME: 4096 should be symbolic constant here and
3647 * prob'ly elsewhere too. */
3648 num_dont_move_pages * 4096);
3652 /* Scavenge all the rest of the roots. */
3654 /* Scavenge the Lisp functions of the interrupt handlers, taking
3655 * care to avoid SIG_DFL and SIG_IGN. */
3656 for (i = 0; i < NSIG; i++) {
3657 union interrupt_handler handler = interrupt_handlers[i];
3658 if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
3659 !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
3660 scavenge((lispobj *)(interrupt_handlers + i), 1);
3664 /* Scavenge the binding stack. */
3665 scavenge((lispobj *) BINDING_STACK_START,
3666 (lispobj *)SymbolValue(BINDING_STACK_POINTER) -
3667 (lispobj *)BINDING_STACK_START);
3669 /* The original CMU CL code had scavenge-read-only-space code
3670 * controlled by the Lisp-level variable
3671 * *SCAVENGE-READ-ONLY-SPACE*. It was disabled by default, and it
3672 * wasn't documented under what circumstances it was useful or
3673 * safe to turn it on, so it's been turned off in SBCL. If you
3674 * want/need this functionality, and can test and document it,
3675 * please submit a patch. */
3677 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
3678 unsigned long read_only_space_size =
3679 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
3680 (lispobj*)READ_ONLY_SPACE_START;
3682 "/scavenge read only space: %d bytes\n",
3683 read_only_space_size * sizeof(lispobj)));
3684 scavenge( (lispobj *) READ_ONLY_SPACE_START, read_only_space_size);
3688 /* Scavenge static space. */
3689 static_space_size =
3690 (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) -
3691 (lispobj *)STATIC_SPACE_START;
3692 if (gencgc_verbose > 1) {
3694 "/scavenge static space: %d bytes\n",
3695 static_space_size * sizeof(lispobj)));
3697 scavenge( (lispobj *) STATIC_SPACE_START, static_space_size);
3699 /* All generations but the generation being GCed need to be
3700 * scavenged. The new_space generation needs special handling as
3701 * objects may be moved in - it is handled separately below. */
3702 for (i = 0; i < NUM_GENERATIONS; i++) {
3703 if ((i != generation) && (i != new_space)) {
3704 scavenge_generation(i);
3708 /* Finally scavenge the new_space generation. Keep going until no
3709 * more objects are moved into the new generation */
3710 scavenge_newspace_generation(new_space);
3712 /* FIXME: I tried reenabling this check when debugging unrelated
3713 * GC weirdness ca. sbcl-0.6.12.45, and it failed immediately.
3714 * Since the current GC code seems to work well, I'm guessing that
3715 * this debugging code is just stale, but I haven't tried to
3716 * figure it out. It should be figured out and then either made to
3717 * work or just deleted. */
3718 #define RESCAN_CHECK 0
3720 /* As a check re-scavenge the newspace once; no new objects should
3721 * be allocated. */
3723 int old_bytes_allocated = bytes_allocated;
3724 int bytes_allocated;
3726 /* Start with a full scavenge. */
3727 scavenge_newspace_generation_one_scan(new_space);
3729 /* Flush the current regions, updating the tables. */
3730 gc_alloc_update_all_page_tables();
3732 bytes_allocated = bytes_allocated - old_bytes_allocated;
3734 if (bytes_allocated != 0) {
3735 lose("Rescan of new_space allocated %d more bytes.",
3741 scan_weak_pointers();
3743 /* Flush the current regions, updating the tables. */
3744 gc_alloc_update_all_page_tables();
3746 /* Free the pages in oldspace, but not those marked dont_move. */
3747 bytes_freed = free_oldspace();
3749 /* If the GC is not raising the age then lower the generation back
3750 * to its normal generation number */
3752 for (i = 0; i < last_free_page; i++)
3753 if ((page_table[i].bytes_used != 0)
3754 && (page_table[i].gen == NUM_GENERATIONS))
3755 page_table[i].gen = generation;
3756 gc_assert(generations[generation].bytes_allocated == 0);
3757 generations[generation].bytes_allocated =
3758 generations[NUM_GENERATIONS].bytes_allocated;
3759 generations[NUM_GENERATIONS].bytes_allocated = 0;
3762 /* Reset the alloc_start_page for generation. */
3763 generations[generation].alloc_start_page = 0;
3764 generations[generation].alloc_unboxed_start_page = 0;
3765 generations[generation].alloc_large_start_page = 0;
3766 generations[generation].alloc_large_unboxed_start_page = 0;
3768 if (generation >= verify_gens) {
3772 verify_dynamic_space();
3775 /* Set the new gc trigger for the GCed generation. */
3776 generations[generation].gc_trigger =
3777 generations[generation].bytes_allocated
3778 + generations[generation].bytes_consed_between_gc;
3780 if (raise)
3781 generations[generation].num_gc = 0;
3782 else
3783 ++generations[generation].num_gc;
3786 /* Update last_free_page, then SymbolValue(ALLOCATION_POINTER). */
3788 update_x86_dynamic_space_free_pointer(void)
3793 for (i = 0; i < NUM_PAGES; i++)
3794 if ((page_table[i].allocated != FREE_PAGE)
3795 && (page_table[i].bytes_used != 0))
3798 last_free_page = last_page+1;
3800 SetSymbolValue(ALLOCATION_POINTER,
3801 (lispobj)(((char *)heap_base) + last_free_page*4096));
3802 return 0; /* dummy value: return something ... */
3805 /* GC all generations newer than last_gen, raising the objects in each
3806 * to the next older generation - we finish when all generations below
3807 * last_gen are empty. Then if last_gen is due for a GC, or if
3808 * last_gen==NUM_GENERATIONS (the scratch generation? eh?) we GC that
3809 * too. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
3811 * We stop collecting at gencgc_oldest_gen_to_gc, even if this is less than
3812 * last_gen (oh, and note that by default it is NUM_GENERATIONS-1) */
3815 collect_garbage(unsigned last_gen)
3822 FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
3824 if (last_gen > NUM_GENERATIONS) {
3826 "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
3831 /* Flush the alloc regions updating the tables. */
3832 gc_alloc_update_all_page_tables();
3834 /* Verify the new objects created by Lisp code. */
3835 if (pre_verify_gen_0) {
3836 FSHOW((stderr, "pre-checking generation 0\n"));
3837 verify_generation(0);
3840 if (gencgc_verbose > 1)
3841 print_generation_stats(0);
3844 /* Collect the generation. */
3846 if (gen >= gencgc_oldest_gen_to_gc) {
3847 /* Never raise the oldest generation. */
3852 || (generations[gen].num_gc >= generations[gen].trigger_age);
3855 if (gencgc_verbose > 1) {
3857 "starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
3860 generations[gen].bytes_allocated,
3861 generations[gen].gc_trigger,
3862 generations[gen].num_gc));
3865 /* If an older generation is being filled, then update its
3866 * memory age. */
3868 generations[gen+1].cum_sum_bytes_allocated +=
3869 generations[gen+1].bytes_allocated;
3872 garbage_collect_generation(gen, raise);
3874 /* Reset the memory age cum_sum. */
3875 generations[gen].cum_sum_bytes_allocated = 0;
3877 if (gencgc_verbose > 1) {
3878 FSHOW((stderr, "GC of generation %d finished:\n", gen));
3879 print_generation_stats(0);
3883 } while ((gen <= gencgc_oldest_gen_to_gc)
3884 && ((gen < last_gen)
3885 || ((gen <= gencgc_oldest_gen_to_gc)
3887 && (generations[gen].bytes_allocated
3888 > generations[gen].gc_trigger)
3889 && (gen_av_mem_age(gen)
3890 > generations[gen].min_av_mem_age))));
3892 /* Now if gen-1 was raised all generations before gen are empty.
3893 * If it wasn't raised then all generations before gen-1 are empty.
3895 * Now objects within this gen's pages cannot point to younger
3896 * generations unless they are written to. This can be exploited
3897 * by write-protecting the pages of gen; then when younger
3898 * generations are GCed only the pages which have been written
3899 * need scanning. */
3903 gen_to_wp = gen - 1;
3905 /* There's not much point in WPing pages in generation 0 as it is
3906 * never scavenged (except promoted pages). */
3907 if ((gen_to_wp > 0) && enable_page_protection) {
3908 /* Check that they are all empty. */
3909 for (i = 0; i < gen_to_wp; i++) {
3910 if (generations[i].bytes_allocated)
3911 lose("trying to write-protect gen. %d when gen. %d nonempty",
3914 write_protect_generation_pages(gen_to_wp);
3917 /* Set gc_alloc() back to generation 0. The current regions should
3918 * be flushed after the above GCs. */
3919 gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
3920 gc_alloc_generation = 0;
3922 update_x86_dynamic_space_free_pointer();
3924 SHOW("returning from collect_garbage");
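
/* For illustration only (not called from here): how collect_garbage()
 * is typically driven, per the comment above it. These literal calls
 * are hypothetical examples, not the runtime's actual call sites. */
#if 0
static void
example_collect_garbage_calls(void)
{
    /* Collect only the nursery, raising survivors to generation 1. */
    collect_garbage(0);

    /* Request a full collection; every generation up to
     * gencgc_oldest_gen_to_gc is collected. */
    collect_garbage(NUM_GENERATIONS);
}
#endif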
/* This is called by Lisp PURIFY when it is finished. All live objects
 * will have been moved to the RO and Static heaps. The dynamic space
 * will need a full re-initialization. We don't bother having Lisp
 * PURIFY flush the current gc_alloc() region, as the page_tables are
 * re-initialized, and every page is zeroed to be sure. */
void
gc_free_heap(void)
{
    int page;

    if (gencgc_verbose > 1)
        SHOW("entering gc_free_heap");

    for (page = 0; page < NUM_PAGES; page++) {
        /* Skip free pages which should already be zero filled. */
        if (page_table[page].allocated != FREE_PAGE) {
            void *page_start, *addr;

            /* Mark the page free. The other slots are assumed invalid
             * when it is a FREE_PAGE and bytes_used is 0 and it
             * should not be write-protected -- except that the
             * generation field is still used for the current region,
             * and that is set up separately. */
            page_table[page].allocated = FREE_PAGE;
            page_table[page].bytes_used = 0;

            /* Zero the page. */
            page_start = (void *)page_address(page);

            /* First, remove any write-protection. */
            os_protect(page_start, 4096, OS_VM_PROT_ALL);
            page_table[page].write_protected = 0;

            /* Unmap and remap the page so that the OS hands back a
             * fresh zero-filled page. */
            os_invalidate(page_start, 4096);
            addr = os_validate(page_start, 4096);
            if (addr == NULL || addr != page_start) {
                lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
                     page_start, addr);
            }
        } else if (gencgc_zero_check_during_free_heap) {
            /* Double-check that the page is zero filled. */
            int *page_start, i;
            gc_assert(page_table[page].allocated == FREE_PAGE);
            gc_assert(page_table[page].bytes_used == 0);
            page_start = (int *)page_address(page);
            for (i = 0; i < 1024; i++) {
                if (page_start[i] != 0) {
                    lose("free region not zero at %x", page_start + i);
                }
            }
        }
    }

    bytes_allocated = 0;

    /* Initialize the generations. */
    for (page = 0; page < NUM_GENERATIONS; page++) {
        generations[page].alloc_start_page = 0;
        generations[page].alloc_unboxed_start_page = 0;
        generations[page].alloc_large_start_page = 0;
        generations[page].alloc_large_unboxed_start_page = 0;
        generations[page].bytes_allocated = 0;
        generations[page].gc_trigger = 2000000;
        generations[page].num_gc = 0;
        generations[page].cum_sum_bytes_allocated = 0;
    }

    if (gencgc_verbose > 1)
        print_generation_stats(0);

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;

    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
    SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base));

    if (verify_after_free_heap) {
        /* Check whether purify has left any bad pointers. */
        if (gencgc_verbose)
            SHOW("checking after free_heap\n");
        verify_gc();
    }
}
void
gc_init(void)
{
    int i;

    gc_init_tables();

    scavtab[SIMPLE_VECTOR_WIDETAG] = scav_vector;
    scavtab[WEAK_POINTER_WIDETAG] = scav_weak_pointer;
    transother[SIMPLE_ARRAY_WIDETAG] = trans_boxed_large;

    heap_base = (void*)DYNAMIC_SPACE_START;

    /* Initialize each page structure. */
    for (i = 0; i < NUM_PAGES; i++) {
        /* Initialize all pages as free. */
        page_table[i].allocated = FREE_PAGE;
        page_table[i].bytes_used = 0;

        /* Pages are not write-protected at startup. */
        page_table[i].write_protected = 0;
    }

    bytes_allocated = 0;

    /* Initialize the generations.
     *
     * FIXME: very similar to code in gc_free_heap(), should be shared */
    for (i = 0; i < NUM_GENERATIONS; i++) {
        generations[i].alloc_start_page = 0;
        generations[i].alloc_unboxed_start_page = 0;
        generations[i].alloc_large_start_page = 0;
        generations[i].alloc_large_unboxed_start_page = 0;
        generations[i].bytes_allocated = 0;
        generations[i].gc_trigger = 2000000;
        generations[i].num_gc = 0;
        generations[i].cum_sum_bytes_allocated = 0;
        /* the tune-able parameters */
        generations[i].bytes_consed_between_gc = 2000000;
        generations[i].trigger_age = 1;
        generations[i].min_av_mem_age = 0.75;
    }

    /* Initialize gc_alloc(). */
    gc_alloc_generation = 0;
    gc_set_region_empty(&boxed_region);
    gc_set_region_empty(&unboxed_region);

    last_free_page = 0;
}
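
/* Note on the defaults above: a generation becomes a GC candidate
 * once it allocates roughly 2MB beyond what it held after its last GC
 * (gc_trigger = bytes_allocated + bytes_consed_between_gc); it is
 * raised to the next generation after surviving trigger_age
 * collections; and an older generation is only collected
 * spontaneously when, in addition to exceeding its trigger, its
 * average memory age exceeds min_av_mem_age (see the loop condition
 * in collect_garbage()). */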
/* Pick up the dynamic space from after a core load.
 *
 * The ALLOCATION_POINTER points to the end of the dynamic space.
 *
 * XX A scan is needed to identify the closest first objects for pages. */
static void
gencgc_pickup_dynamic(void)
{
    int page = 0;
    int addr = DYNAMIC_SPACE_START;
    int alloc_ptr = SymbolValue(ALLOCATION_POINTER);

    /* Initialize the first region. */
    do {
        page_table[page].allocated = BOXED_PAGE;
        page_table[page].gen = 0;
        page_table[page].bytes_used = 4096;
        page_table[page].large_object = 0;
        page_table[page].first_object_offset =
            (void *)DYNAMIC_SPACE_START - page_address(page);
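        /* Note: this offset is zero for page 0 and negative for the
         * following pages; it points from the start of each page back
         * to the start of the contiguous region loaded from the core. */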
        addr += 4096;
        page++;
    } while (addr < alloc_ptr);

    generations[0].bytes_allocated = 4096*page;
    bytes_allocated = 4096*page;
}
void
gc_initialize_pointers(void)
{
    gencgc_pickup_dynamic();
}
extern boolean maybe_gc_pending;

/* alloc(..) is the external interface for memory allocation. It
 * allocates to generation 0. It is not called from within the garbage
 * collector: only external callers need the heap-size check (the GC
 * trigger) and the disabling of interrupts (interrupts are always
 * disabled during a GC).
 *
 * The vops that call alloc(..) assume that the returned space is zero-filled.
 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
 *
 * The check for a GC trigger is only performed when the current
 * region is full, so in most cases it's not needed. */
char *
alloc(int nbytes)
{
    struct alloc_region *region = &boxed_region;
    void *new_obj;
    void *new_free_pointer;

    /* Check for alignment allocation problems. */
    gc_assert((((unsigned)region->free_pointer & 0x7) == 0)
              && ((nbytes & 0x7) == 0));

    /* At this point we should either be in pseudo-atomic, or early
     * enough in cold init that interrupts are not yet enabled anyway.
     * It would be nice to assert as much. */
    gc_assert(SymbolValue(PSEUDO_ATOMIC_ATOMIC));

    /* maybe we can do this quickly ... */
    new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        new_obj = (void*)(region->free_pointer);
        region->free_pointer = new_free_pointer;
        return(new_obj); /* yup */
    }

    /* We have to go the long way around, it seems. Check whether we
     * should GC in the near future. */
    if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
        auto_gc_trigger *= 2;
        /* Set things up so that GC happens when we finish the
         * pseudo-atomic section. */
        maybe_gc_pending = 1;
        SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(1));
    }
    new_obj = gc_alloc_with_region(nbytes, 0, region, 0);
    return (new_obj);
}
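
/* A minimal sketch, for illustration only, of the pointer-bump fast
 * path that compiled code is assumed to use, mirroring the quick case
 * in alloc() above. pa_begin()/pa_end() are hypothetical stand-ins
 * for the real pseudo-atomic protocol; they are not defined anywhere
 * in the runtime. */
#if 0
static char *
example_inline_alloc(int nbytes)
{
    char *result;

    pa_begin();                 /* hypothetical: enter pseudo-atomic */
    result = (char *)boxed_region.free_pointer;
    if (result + nbytes <= (char *)boxed_region.end_addr) {
        /* fast path: bump the free pointer */
        boxed_region.free_pointer = result + nbytes;
    } else {
        /* slow path: refill the region, possibly requesting a GC */
        result = alloc(nbytes);
    }
    pa_end();                   /* hypothetical: leave pseudo-atomic */
    return result;
}
#endif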
/*
 * noise to manipulate the gc trigger stuff
 */

void
set_auto_gc_trigger(os_vm_size_t dynamic_usage)
{
    auto_gc_trigger += dynamic_usage;
}

void
clear_auto_gc_trigger(void)
{
    auto_gc_trigger = 0;
}
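
/* Illustrative only, with a hypothetical caller: arming the trigger
 * so that once total allocation (bytes_allocated) exceeds the armed
 * value, alloc() above arranges for a GC at the end of the current
 * pseudo-atomic section. */
#if 0
static void
example_arm_gc_trigger(os_vm_size_t trigger_bytes)
{
    clear_auto_gc_trigger();
    set_auto_gc_trigger(trigger_bytes);
}
#endif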
/* Find the code object for the given pc, or return NULL on failure.
 *
 * FIXME: PC shouldn't be lispobj*, should it? Maybe void*? */
lispobj *
component_ptr_from_pc(lispobj *pc)
{
    lispobj *object = NULL;

    if ( (object = search_read_only_space(pc)) )
        ;
    else if ( (object = search_static_space(pc)) )
        ;
    else
        object = search_dynamic_space(pc);

    if (object) /* if we found something */
        if (widetag_of(*object) == CODE_HEADER_WIDETAG) /* if it's a code object */
            return(object);

    return (NULL);
}
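
/* Hypothetical usage sketch: mapping an interrupted program counter
 * back to its code component, e.g. while building a backtrace. The
 * caller and its diagnostic output are illustrative only. */
#if 0
static void
example_describe_pc(lispobj *pc)
{
    lispobj *code = component_ptr_from_pc(pc);
    if (code != NULL)
        fprintf(stderr, "pc %p lies within code object %p\n",
                (void *)pc, (void *)code);
    else
        fprintf(stderr, "pc %p is not inside any code object\n",
                (void *)pc);
}
#endif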
/*
 * shared support for the OS-dependent signal handlers which
 * catch GENCGC-related write-protect violations
 */

void unhandled_sigmemoryfault(void);

/* Depending on which OS we're running under, different signals might
 * be raised for a violation of write protection in the heap. This
 * function factors out the common generational GC magic which needs
 * to be invoked in this case, and should be called from whatever
 * signal handler is appropriate for the OS we're running under.
 *
 * Return true if this signal is a normal generational GC thing that
 * we were able to handle, or false if it was abnormal and control
 * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
int
gencgc_handle_wp_violation(void* fault_addr)
{
    int page_index = find_page_index(fault_addr);

#if defined QSHOW_SIGNALS
    FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
           fault_addr, page_index));
#endif

    /* Check whether the fault is within the dynamic space. */
    if (page_index == (-1)) {

        /* It can be helpful to be able to put a breakpoint on this
         * case to help diagnose low-level problems. */
        unhandled_sigmemoryfault();

        /* not within the dynamic space -- not our responsibility */
        return 0;

    } else {

        /* The only acceptable reason for a signal like this from the
         * heap is that the generational GC write-protected the page. */
        if (page_table[page_index].write_protected != 1) {
            lose("access failure in heap page not marked as write-protected");
        }

        /* Unprotect the page. */
        os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
        page_table[page_index].write_protected = 0;
        page_table[page_index].write_protected_cleared = 1;

        /* Don't worry, we can handle it. */
        return 1;
    }
}
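
/* A minimal sketch, assuming a POSIX SA_SIGINFO-style handler, of the
 * OS-specific wrapper described above. The handler name and the use
 * of info->si_addr are illustrative; each supported OS has its own
 * handler and its own fallback path for faults we don't own. */
#if 0
#include <signal.h>

static void
example_sigsegv_handler(int sig, siginfo_t *info, void *context)
{
    if (!gencgc_handle_wp_violation(info->si_addr)) {
        /* Not a write-barrier hit: hand the fault to the general
         * SIGSEGV/SIGBUS machinery (OS-specific, omitted here). */
    }
}
#endif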
/* This is to be called when we catch a SIGSEGV/SIGBUS, determine that
 * it's not just a case of the program hitting the write barrier, and
 * are about to let Lisp deal with it. It's basically just a
 * convenient place to set a gdb breakpoint. */
void
unhandled_sigmemoryfault()
{}

void
gc_alloc_update_all_page_tables(void)
{
    /* Flush the alloc regions updating the tables. */
    gc_alloc_update_page_tables(1, &unboxed_region);
    gc_alloc_update_page_tables(0, &boxed_region);
}
void
gc_set_region_empty(struct alloc_region *region)
{
    region->first_page = 0;
    region->last_page = -1;
    region->start_addr = page_address(0);
    region->free_pointer = page_address(0);
    region->end_addr = page_address(0);
}