2 * GENerational Conservative Garbage Collector for SBCL x86
6 * This software is part of the SBCL system. See the README file for
9 * This software is derived from the CMU CL system, which was
10 * written at Carnegie Mellon University and released into the
11 * public domain. The software is in the public domain and is
12 * provided with absolutely no warranty. See the COPYING and CREDITS
13 * files for more information.
21 * For a review of garbage collection techniques (e.g. generational
22 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
23 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
24 * had been accepted for _ACM Computing Surveys_ and was available
25 * as a PostScript preprint through
26 * <http://www.cs.utexas.edu/users/oops/papers.html>
28 * <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
32 * FIXME: GC :FULL T seems to be unable to recover a lot of unused
33 * space. After cold init is complete, GC :FULL T gets us down to
34 * about 44 Mb total used, but PURIFY gets us down to about 17 Mb
44 #include "interrupt.h"
50 /* a function defined externally in assembly language, called from
52 void do_pending_interrupt(void);
58 /* the number of actual generations. (The number of 'struct
59 * generation' objects is one more than this, because one serves as
60 * scratch when GC'ing.) */
61 #define NUM_GENERATIONS 6
63 /* Should we use page protection to help avoid the scavenging of pages
64 * that don't have pointers to younger generations? */
65 boolean enable_page_protection = 1;
67 /* Should we unmap a page and re-mmap it to have it zero filled? */
68 #if defined(__FreeBSD__) || defined(__OpenBSD__)
69 /* Note: this can waste a lot of swap on FreeBSD so don't unmap there.
71 * Presumably this behavior exists on OpenBSD too, so don't unmap
72 * there either. -- WHN 20000727 */
73 boolean gencgc_unmap_zero = 0;
75 boolean gencgc_unmap_zero = 1;
78 /* the minimum size (in bytes) for a large object */
79 unsigned large_object_size = 4 * 4096;
81 /* Should we filter stack/register pointers? This could reduce the
82 * number of invalid pointers accepted. KLUDGE: It will probably
83 * degrade interrupt safety during object initialization. */
84 boolean enable_pointer_filter = 1;
90 #define gc_abort() lose("GC invariant lost, file \"%s\", line %d", \
93 /* FIXME: In CMU CL, this was "#if 0" with no explanation. Find out
94 * how much it costs to make it "#if 1". If it's not too expensive,
97 #define gc_assert(ex) do { \
98 if (!(ex)) gc_abort(); \
101 #define gc_assert(ex)
104 /* the verbosity level. All non-error messages are disabled at level 0,
105 * and only a few rare messages are printed at level 1. */
106 unsigned gencgc_verbose = (QSHOW ? 1 : 0);
108 /* FIXME: At some point enable the various error-checking things below
109 * and see what they say. */
111 /* We hunt for pointers to old-space when GCing generations >= verify_gens.
112 * Set verify_gens to NUM_GENERATIONS to disable this kind of check. */
113 int verify_gens = NUM_GENERATIONS;
115 /* Should we do a pre-scan verify of generation 0 before it's GCed? */
116 boolean pre_verify_gen_0 = 0;
118 /* Should we check for bad pointers after gc_free_heap is called
119 * from Lisp PURIFY? */
120 boolean verify_after_free_heap = 0;
122 /* Should we print a note when code objects are found in the dynamic space
123 * during a heap verify? */
124 boolean verify_dynamic_code_check = 0;
126 /* Should we check code objects for fixup errors after they are transported? */
127 boolean check_code_fixups = 0;
129 /* Should we check that newly allocated regions are zero filled? */
130 boolean gencgc_zero_check = 0;
132 /* Should we check that the free space is zero filled? */
133 boolean gencgc_enable_verify_zero_fill = 0;
135 /* Should we check that free pages are zero filled during gc_free_heap
136 * called after Lisp PURIFY? */
137 boolean gencgc_zero_check_during_free_heap = 0;
140 * GC structures and variables
143 /* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
144 unsigned long bytes_allocated = 0;
145 static unsigned long auto_gc_trigger = 0;
147 /* the source and destination generations. These are set before a GC starts
149 static int from_space;
150 static int new_space;
152 /* FIXME: It would be nice to use this symbolic constant instead of
153 * bare 4096 almost everywhere. We could also use an assertion that
154 * it's equal to getpagesize(). */
155 #define PAGE_BYTES 4096
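/* (A sketch of the assertion suggested above; this is not in the
 * original source, and it assumes <unistd.h> is included for
 * getpagesize():
 *
 *     gc_assert(PAGE_BYTES == getpagesize());
 *
 * Run once at startup, this would catch a mismatch between the
 * compile-time constant and the actual OS page size.) */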
157 /* An array of page structures is statically allocated.
158 * This helps quickly map between an address and its page structure.
159 * NUM_PAGES is set from the size of the dynamic space. */
160 struct page page_table[NUM_PAGES];
162 /* To map addresses to page structures the address of the first page
164 static void *heap_base = NULL;
166 /* Calculate the start address for the given page number. */
168 *page_address(int page_num)
170 return (heap_base + (page_num * 4096));
173 /* Find the page index within the page_table for the given
174 * address. Return -1 on failure. */
176 find_page_index(void *addr)
178 int index = addr-heap_base;
181 index = ((unsigned int)index)/4096;
182 if (index < NUM_PAGES)
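/* (Illustrative sketch, not part of the original source: the intended
 * round trip between addresses and page indices, assuming 4096-byte
 * pages:
 *
 *     int i = find_page_index(addr);
 *     if (i != -1)
 *         gc_assert((addr >= page_address(i))
 *                   && (addr < page_address(i) + 4096));
 *
 * i.e. find_page_index() inverts page_address() for any address within
 * the dynamic space, and returns -1 otherwise.) */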
189 /* a structure to hold the state of a generation */
192 /* the first page that gc_alloc checks on its next call */
193 int alloc_start_page;
195 /* the first page that gc_alloc_unboxed checks on its next call */
196 int alloc_unboxed_start_page;
198 /* the first page that gc_alloc_large (boxed) considers on its next
199 * call. (Although it always allocates after the boxed_region.) */
200 int alloc_large_start_page;
202 /* the first page that gc_alloc_large (unboxed) considers on its
203 * next call. (Although it always allocates after the
204 * current_unboxed_region.) */
205 int alloc_large_unboxed_start_page;
207 /* the bytes allocated to this generation */
210 /* the number of bytes at which to trigger a GC */
213 /* to calculate a new level for gc_trigger */
214 int bytes_consed_between_gc;
216 /* the number of GCs since the last raise */
219 /* the average age after which a GC will raise objects to the
223 /* the cumulative sum of the bytes allocated to this generation. It is
224 * cleared after a GC on this generation, and updated before new
225 * objects are added from a GC of a younger generation. Dividing by
226 * the bytes_allocated will give the average age of the memory in
227 * this generation since its last GC. */
228 int cum_sum_bytes_allocated;
230 /* a minimum average memory age before a GC will occur; this helps
231 * prevent a GC when a large number of new live objects have been
232 * added, in which case a GC could be a waste of time */
233 double min_av_mem_age;
236 /* an array of generation structures. There needs to be one more
237 * generation structure than actual generations as the oldest
238 * generation is temporarily raised then lowered. */
239 static struct generation generations[NUM_GENERATIONS+1];
241 /* the oldest generation that will currently be GCed by default.
242 * Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
244 * The default of (NUM_GENERATIONS-1) enables GC on all generations.
246 * Setting this to 0 effectively disables the generational nature of
247 * the GC. In some applications generational GC may not be useful
248 * because there are no long-lived objects.
250 * An intermediate value could be handy after moving long-lived data
251 * into an older generation so an unnecessary GC of this long-lived
252 * data can be avoided. */
253 unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;
255 /* The maximum free page in the heap is maintained and used to update
256 * ALLOCATION_POINTER which is used by the room function to limit its
257 * search of the heap. XX Gencgc obviously needs to be better
258 * integrated with the Lisp code. */
259 static int last_free_page;
260 static int last_used_page = 0;
263 * miscellaneous heap functions
266 /* Count the number of pages which are write-protected within the
267 * given generation. */
269 count_write_protect_generation_pages(int generation)
274 for (i = 0; i < last_free_page; i++)
275 if ((page_table[i].allocated != FREE_PAGE)
276 && (page_table[i].gen == generation)
277 && (page_table[i].write_protected == 1))
282 /* Count the number of pages within the given generation */
284 count_generation_pages(int generation)
289 for (i = 0; i < last_free_page; i++)
290 if ((page_table[i].allocated != 0)
291 && (page_table[i].gen == generation))
296 /* Count the number of dont_move pages. */
298 count_dont_move_pages(void)
303 for (i = 0; i < last_free_page; i++)
304 if ((page_table[i].allocated != 0)
305 && (page_table[i].dont_move != 0))
310 /* Work through the pages and add up the number of bytes used for the
311 * given generation. */
313 generation_bytes_allocated (int gen)
318 for (i = 0; i < last_free_page; i++) {
319 if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
320 result += page_table[i].bytes_used;
325 /* Return the average age of the memory in a generation. */
327 gen_av_mem_age(int gen)
329 if (generations[gen].bytes_allocated == 0)
333 ((double)generations[gen].cum_sum_bytes_allocated)
334 / ((double)generations[gen].bytes_allocated);
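/* (An explanatory sketch, not from the original source, of how the
 * generation fields above are meant to combine when deciding whether a
 * generation is worth collecting and raising:
 *
 *     if ((generations[gen].bytes_allocated
 *          > generations[gen].gc_trigger)
 *         && (gen_av_mem_age(gen) > generations[gen].min_av_mem_age))
 *         ..collect generation gen..
 *
 * The actual policy lives in the collector proper; this only shows the
 * intended roles of gc_trigger and min_av_mem_age.) */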
337 /* The verbose argument controls how much to print: 0 for normal
338 * level of detail; 1 for debugging. */
340 print_generation_stats(int verbose) /* FIXME: should take FILE argument */
345 /* This code uses the FP instructions which may be set up for Lisp
346 * so they need to be saved and reset for C. */
349 /* number of generations to print */
351 gens = NUM_GENERATIONS+1;
353 gens = NUM_GENERATIONS;
355 /* Print the heap stats. */
357 " Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");
359 for (i = 0; i < gens; i++) {
363 int large_boxed_cnt = 0;
364 int large_unboxed_cnt = 0;
366 for (j = 0; j < last_free_page; j++)
367 if (page_table[j].gen == i) {
368 /* Count the number of boxed pages within the given
370 if (page_table[j].allocated == BOXED_PAGE)
371 if (page_table[j].large_object)
376 /* Count the number of unboxed pages within the given
378 if (page_table[j].allocated == UNBOXED_PAGE)
379 if (page_table[j].large_object)
385 gc_assert(generations[i].bytes_allocated
386 == generation_bytes_allocated(i));
388 " %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
390 boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
391 generations[i].bytes_allocated,
392 (count_generation_pages(i)*4096
393 - generations[i].bytes_allocated),
394 generations[i].gc_trigger,
395 count_write_protect_generation_pages(i),
396 generations[i].num_gc,
399 fprintf(stderr," Total bytes allocated=%d\n", bytes_allocated);
401 fpu_restore(fpu_state);
405 * allocation routines
409 * To support quick and inline allocation, regions of memory can be
410 * allocated and then allocated from with just a free pointer and a
411 * check against an end address.
413 * Since objects can be allocated to spaces with different properties
414 * e.g. boxed/unboxed, generation, ages; there may need to be many
415 * allocation regions.
417 * Each allocation region may start within a partly used page. Many
418 * features of memory use are noted on a per-page basis, e.g. the
419 * generation; so if a region starts within an existing allocated page
420 * it must be consistent with this page.
422 * During the scavenging of the newspace, objects will be transported
423 * into an allocation region, and pointers updated to point to this
424 * allocation region. It is possible that these pointers will be
425 * scavenged again before the allocation region is closed, e.g. due to
426 * trans_list which jumps all over the place to clean up the list. It
427 * is important to be able to determine properties of all objects
428 * pointed to when scavenging, e.g to detect pointers to the oldspace.
429 * Thus it's important that the allocation regions have the correct
430 * properties set when allocated, and not just set when closed. The
431 * region allocation routines return regions with the specified
432 * properties, and grab all the pages, setting their properties
433 * appropriately, except that the amount used is not known.
435 * These regions are used to support quicker allocation using just a
436 * free pointer. The actual space used by the region is not reflected
437 * in the page tables until it is closed. It can't be scavenged until
440 * When finished with the region it should be closed, which will
441 * update the page tables for the actual space used, returning the
442 * unused space. Further, the region may be recorded in new_areas,
443 * which is necessary when scavenging the newspace.
445 * Large objects may be allocated directly without an allocation
446 * region; the page tables are updated immediately.
448 * Unboxed objects don't contain pointers to other objects and so
449 * don't need scavenging. Further they can't contain pointers to
450 * younger generations so WP is not needed. By allocating pages to
451 * unboxed objects the whole page never needs scavenging or
452 * write-protecting. */
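/* (A sketch, not from the original source, of the inline allocation
 * fast path that these regions make possible: just a pointer bump and
 * a limit check,
 *
 *     void *new_free_pointer = region->free_pointer + nbytes;
 *     if (new_free_pointer <= region->end_addr) {
 *         void *new_obj = region->free_pointer;
 *         region->free_pointer = new_free_pointer;
 *         return new_obj;
 *     }
 *     ..otherwise close this region and open a new one..
 *
 * gc_quick_alloc() below is the real version of this pattern.) */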
454 /* We are only using two regions at present. Both are for the current
455 * newspace generation. */
456 struct alloc_region boxed_region;
457 struct alloc_region unboxed_region;
459 /* XX hack. Current Lisp code uses the following. Need copying in/out. */
460 void *current_region_free_pointer;
461 void *current_region_end_addr;
463 /* The generation currently being allocated to. */
464 static int gc_alloc_generation;
466 /* Find a new region with room for at least the given number of bytes.
468 * It starts looking at the current generation's alloc_start_page, so
469 * it may pick up from the previous region if there is enough space. This
470 * keeps the allocation contiguous when scavenging the newspace.
472 * The alloc_region should have been closed by a call to
473 * gc_alloc_update_page_tables, and will thus be in an empty state.
475 * To assist the scavenging functions, write-protected pages are not
476 * used. Free pages should not be write-protected.
478 * It is critical to the conservative GC that the start of regions be
479 * known. To help achieve this only small regions are allocated at a
482 * During scavenging, pointers may be found to within the current
483 * region and the page generation must be set so that pointers to the
484 * from space can be recognized. Therefore the generation of pages in
485 * the region is set to gc_alloc_generation. To prevent another
486 * allocation call using the same pages, all the pages in the region
487 * are allocated, although they will initially be empty.
490 gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
502 "/alloc_new_region for %d bytes from gen %d\n",
503 nbytes, gc_alloc_generation));
506 /* Check that the region is in a reset state. */
507 gc_assert((alloc_region->first_page == 0)
508 && (alloc_region->last_page == -1)
509 && (alloc_region->free_pointer == alloc_region->end_addr));
513 generations[gc_alloc_generation].alloc_unboxed_start_page;
516 generations[gc_alloc_generation].alloc_start_page;
519 /* Search for a contiguous free region of at least nbytes with the
520 * given properties: boxed/unboxed, generation. */
522 first_page = restart_page;
524 /* First search for a page with at least 32 bytes free, which is
525 * not write-protected, and which is not marked dont_move. */
526 while ((first_page < NUM_PAGES)
527 && (page_table[first_page].allocated != FREE_PAGE) /* not free page */
529 (page_table[first_page].allocated != UNBOXED_PAGE))
531 (page_table[first_page].allocated != BOXED_PAGE))
532 || (page_table[first_page].large_object != 0)
533 || (page_table[first_page].gen != gc_alloc_generation)
534 || (page_table[first_page].bytes_used >= (4096-32))
535 || (page_table[first_page].write_protected != 0)
536 || (page_table[first_page].dont_move != 0)))
538 /* Check for a failure. */
539 if (first_page >= NUM_PAGES) {
541 "Argh! gc_alloc_new_region failed on first_page, nbytes=%d.\n",
543 print_generation_stats(1);
547 gc_assert(page_table[first_page].write_protected == 0);
551 "/first_page=%d bytes_used=%d\n",
552 first_page, page_table[first_page].bytes_used));
555 /* Now search forward to calculate the available region size. It
556 * tries to keep going until nbytes are found and the number of
557 * pages is greater than some level. This helps keep down the
558 * number of pages in a region. */
559 last_page = first_page;
560 bytes_found = 4096 - page_table[first_page].bytes_used;
562 while (((bytes_found < nbytes) || (num_pages < 2))
563 && (last_page < (NUM_PAGES-1))
564 && (page_table[last_page+1].allocated == FREE_PAGE)) {
568 gc_assert(page_table[last_page].write_protected == 0);
571 region_size = (4096 - page_table[first_page].bytes_used)
572 + 4096*(last_page-first_page);
574 gc_assert(bytes_found == region_size);
578 "/last_page=%d bytes_found=%d num_pages=%d\n",
579 last_page, bytes_found, num_pages));
582 restart_page = last_page + 1;
583 } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
585 /* Check for a failure. */
586 if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
588 "Argh! gc_alloc_new_region failed on restart_page, nbytes=%d.\n",
590 print_generation_stats(1);
596 "/gc_alloc_new_region gen %d: %d bytes: pages %d to %d: addr=%x\n",
601 page_address(first_page)));
604 /* Set up the alloc_region. */
605 alloc_region->first_page = first_page;
606 alloc_region->last_page = last_page;
607 alloc_region->start_addr = page_table[first_page].bytes_used
608 + page_address(first_page);
609 alloc_region->free_pointer = alloc_region->start_addr;
610 alloc_region->end_addr = alloc_region->start_addr + bytes_found;
612 if (gencgc_zero_check) {
614 for (p = (int *)alloc_region->start_addr;
615 p < (int *)alloc_region->end_addr; p++) {
617 /* KLUDGE: It would be nice to use %lx and explicit casts
618 * (long) in code like this, so that it is less likely to
619 * break randomly when running on a machine with different
620 * word sizes. -- WHN 19991129 */
621 lose("The new region at %x is not zero.", p);
626 /* Set up the pages. */
628 /* The first page may have already been in use. */
629 if (page_table[first_page].bytes_used == 0) {
631 page_table[first_page].allocated = UNBOXED_PAGE;
633 page_table[first_page].allocated = BOXED_PAGE;
634 page_table[first_page].gen = gc_alloc_generation;
635 page_table[first_page].large_object = 0;
636 page_table[first_page].first_object_offset = 0;
640 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
642 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
643 gc_assert(page_table[first_page].gen == gc_alloc_generation);
644 gc_assert(page_table[first_page].large_object == 0);
646 for (i = first_page+1; i <= last_page; i++) {
648 page_table[i].allocated = UNBOXED_PAGE;
650 page_table[i].allocated = BOXED_PAGE;
651 page_table[i].gen = gc_alloc_generation;
652 page_table[i].large_object = 0;
653 /* This may not be necessary for unboxed regions (think it was
655 page_table[i].first_object_offset =
656 alloc_region->start_addr - page_address(i);
659 /* Bump up last_free_page. */
660 if (last_page+1 > last_free_page) {
661 last_free_page = last_page+1;
662 SetSymbolValue(ALLOCATION_POINTER,
663 (lispobj)(((char *)heap_base) + last_free_page*4096));
664 if (last_page+1 > last_used_page)
665 last_used_page = last_page+1;
669 /* If the record_new_objects flag is 2 then all new regions created
672 * If it's 1 then it is only recorded if the first page of the
673 * current region is <= new_areas_ignore_page. This helps avoid
674 * unnecessary recording when doing a full scavenge pass.
676 * The new_area structure holds the page, byte offset, and size of
677 * new regions of objects. Each new area is placed in the array of
678 * these structures pointed to by new_areas. new_areas_index holds the
679 * offset into new_areas.
681 * If new_areas overflows NUM_NEW_AREAS then it stops adding them. The
682 * later code must detect this and handle it, probably by doing a full
683 * scavenge of a generation. */
684 #define NUM_NEW_AREAS 512
685 static int record_new_objects = 0;
686 static int new_areas_ignore_page;
692 static struct new_area (*new_areas)[];
693 static int new_areas_index;
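/* (Sketch, not in the original source, of how a consumer might walk
 * the recorded areas; each entry describes a span of newly written
 * objects that needs re-scavenging. Assuming sizes are in bytes and
 * scavenge() takes a word count:
 *
 *     for (i = 0; i < new_areas_index; i++)
 *         scavenge((lispobj *)(page_address((*new_areas)[i].page)
 *                              + (*new_areas)[i].offset),
 *                  (*new_areas)[i].size / 4);
 *
 * If new_areas_index has hit NUM_NEW_AREAS the list is incomplete and
 * the caller must fall back to a full scavenge.) */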
696 /* Add a new area to new_areas. */
698 add_new_area(int first_page, int offset, int size)
700 unsigned new_area_start,c;
703 /* Ignore if full. */
704 if (new_areas_index >= NUM_NEW_AREAS)
707 switch (record_new_objects) {
711 if (first_page > new_areas_ignore_page)
720 new_area_start = 4096*first_page + offset;
722 /* Search backwards for a prior area that this follows from. If
723 found, this will save adding a new area. */
724 for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
726 4096*((*new_areas)[i].page)
727 + (*new_areas)[i].offset
728 + (*new_areas)[i].size;
730 "/add_new_area S1 %d %d %d %d\n",
731 i, c, new_area_start, area_end));*/
732 if (new_area_start == area_end) {
734 "/adding to [%d] %d %d %d with %d %d %d:\n",
736 (*new_areas)[i].page,
737 (*new_areas)[i].offset,
738 (*new_areas)[i].size,
742 (*new_areas)[i].size += size;
746 /*FSHOW((stderr, "/add_new_area S1 %d %d %d\n", i, c, new_area_start));*/
748 (*new_areas)[new_areas_index].page = first_page;
749 (*new_areas)[new_areas_index].offset = offset;
750 (*new_areas)[new_areas_index].size = size;
752 "/new_area %d page %d offset %d size %d\n",
753 new_areas_index, first_page, offset, size));*/
756 /* Note the max new_areas used. */
757 if (new_areas_index > max_new_areas)
758 max_new_areas = new_areas_index;
761 /* Update the tables for the alloc_region. The region may be added to
764 * When done the alloc_region is set up so that the next quick alloc
765 * will fail safely and thus a new region will be allocated. Further
766 * it is safe to try to re-update the page table of this reset
769 gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
775 int orig_first_page_bytes_used;
781 "/gc_alloc_update_page_tables to gen %d:\n",
782 gc_alloc_generation));
785 first_page = alloc_region->first_page;
787 /* Catch an unused alloc_region. */
788 if ((first_page == 0) && (alloc_region->last_page == -1))
791 next_page = first_page+1;
793 /* Skip if no bytes were allocated */
794 if (alloc_region->free_pointer != alloc_region->start_addr) {
795 orig_first_page_bytes_used = page_table[first_page].bytes_used;
797 gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));
799 /* All the pages used need to be updated */
801 /* Update the first page. */
803 /* If the page was free then set up the gen, and
804 first_object_offset. */
805 if (page_table[first_page].bytes_used == 0)
806 gc_assert(page_table[first_page].first_object_offset == 0);
809 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
811 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
812 gc_assert(page_table[first_page].gen == gc_alloc_generation);
813 gc_assert(page_table[first_page].large_object == 0);
817 /* Calculate the number of bytes used in this page. This is not always
818 the number of new bytes, unless the page was free. */
820 if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
824 page_table[first_page].bytes_used = bytes_used;
825 byte_cnt += bytes_used;
828 /* All the rest of the pages should be free. Need to set their
829 first_object_offset pointer to the start of the region, and set
833 gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
835 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
836 gc_assert(page_table[next_page].bytes_used == 0);
837 gc_assert(page_table[next_page].gen == gc_alloc_generation);
838 gc_assert(page_table[next_page].large_object == 0);
840 gc_assert(page_table[next_page].first_object_offset ==
841 alloc_region->start_addr - page_address(next_page));
843 /* Calculate the number of bytes used in this page. */
845 if ((bytes_used = (alloc_region->free_pointer
846 - page_address(next_page)))>4096) {
850 page_table[next_page].bytes_used = bytes_used;
851 byte_cnt += bytes_used;
856 region_size = alloc_region->free_pointer - alloc_region->start_addr;
857 bytes_allocated += region_size;
858 generations[gc_alloc_generation].bytes_allocated += region_size;
860 gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);
862 /* Set the generation's alloc restart page to the last page of
865 generations[gc_alloc_generation].alloc_unboxed_start_page =
868 generations[gc_alloc_generation].alloc_start_page = next_page-1;
870 /* Add the region to the new_areas if requested. */
872 add_new_area(first_page,orig_first_page_bytes_used, region_size);
876 "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
878 gc_alloc_generation));
882 /* No bytes allocated. Unallocate the first_page if there are 0
884 if (page_table[first_page].bytes_used == 0)
885 page_table[first_page].allocated = FREE_PAGE;
887 /* Unallocate any unused pages. */
888 while (next_page <= alloc_region->last_page) {
889 gc_assert(page_table[next_page].bytes_used == 0);
890 page_table[next_page].allocated = FREE_PAGE;
894 /* Reset the alloc_region. */
895 alloc_region->first_page = 0;
896 alloc_region->last_page = -1;
897 alloc_region->start_addr = page_address(0);
898 alloc_region->free_pointer = page_address(0);
899 alloc_region->end_addr = page_address(0);
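/* (Explanatory note, not original text: with free_pointer == end_addr
 * after the reset above, the inline test
 *
 *     free_pointer + nbytes <= end_addr
 *
 * fails for any nbytes > 0, so the next quick alloc safely falls
 * through to gc_alloc_new_region(); and the (first_page == 0,
 * last_page == -1) state makes a repeated call to
 * gc_alloc_update_page_tables() on this region a no-op.) */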
902 static inline void *gc_quick_alloc(int nbytes);
904 /* Allocate a possibly large object. */
906 *gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
914 int orig_first_page_bytes_used;
919 int large = (nbytes >= large_object_size);
923 FSHOW((stderr, "/alloc_large %d\n", nbytes));
928 "/gc_alloc_large for %d bytes from gen %d\n",
929 nbytes, gc_alloc_generation));
932 /* If the object is small, and there is room in the current region
933 then allocate it in the current region. */
935 && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
936 return gc_quick_alloc(nbytes);
938 /* Search for a contiguous free region of at least nbytes. If it's a
939 large object then align it on a page boundary by searching for a
942 /* To allow the allocation of small objects without the danger of
943 using a page in the current boxed region, the search starts after
944 the current boxed free region. XX could probably keep a page
945 index ahead of the current region and bump it up here to save a
946 lot of re-scanning. */
948 restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
950 restart_page = generations[gc_alloc_generation].alloc_large_start_page;
951 if (restart_page <= alloc_region->last_page)
952 restart_page = alloc_region->last_page+1;
955 first_page = restart_page;
958 while ((first_page < NUM_PAGES)
959 && (page_table[first_page].allocated != FREE_PAGE))
962 while ((first_page < NUM_PAGES)
963 && (page_table[first_page].allocated != FREE_PAGE)
965 (page_table[first_page].allocated != UNBOXED_PAGE))
967 (page_table[first_page].allocated != BOXED_PAGE))
968 || (page_table[first_page].large_object != 0)
969 || (page_table[first_page].gen != gc_alloc_generation)
970 || (page_table[first_page].bytes_used >= (4096-32))
971 || (page_table[first_page].write_protected != 0)
972 || (page_table[first_page].dont_move != 0)))
975 if (first_page >= NUM_PAGES) {
977 "Argh! gc_alloc_large failed (first_page), nbytes=%d.\n",
979 print_generation_stats(1);
983 gc_assert(page_table[first_page].write_protected == 0);
987 "/first_page=%d bytes_used=%d\n",
988 first_page, page_table[first_page].bytes_used));
991 last_page = first_page;
992 bytes_found = 4096 - page_table[first_page].bytes_used;
994 while ((bytes_found < nbytes)
995 && (last_page < (NUM_PAGES-1))
996 && (page_table[last_page+1].allocated == FREE_PAGE)) {
1000 gc_assert(page_table[last_page].write_protected == 0);
1003 region_size = (4096 - page_table[first_page].bytes_used)
1004 + 4096*(last_page-first_page);
1006 gc_assert(bytes_found == region_size);
1010 "/last_page=%d bytes_found=%d num_pages=%d\n",
1011 last_page, bytes_found, num_pages));
1014 restart_page = last_page + 1;
1015 } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));
1017 /* Check for a failure */
1018 if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
1020 "Argh! gc_alloc_large failed (restart_page), nbytes=%d.\n",
1022 print_generation_stats(1);
1029 "/gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
1030 gc_alloc_generation,
1035 page_address(first_page)));
1038 gc_assert(first_page > alloc_region->last_page);
1040 generations[gc_alloc_generation].alloc_large_unboxed_start_page =
1043 generations[gc_alloc_generation].alloc_large_start_page = last_page;
1045 /* Set up the pages. */
1046 orig_first_page_bytes_used = page_table[first_page].bytes_used;
1048 /* If the first page was free then set up the gen, and
1049 * first_object_offset. */
1050 if (page_table[first_page].bytes_used == 0) {
1052 page_table[first_page].allocated = UNBOXED_PAGE;
1054 page_table[first_page].allocated = BOXED_PAGE;
1055 page_table[first_page].gen = gc_alloc_generation;
1056 page_table[first_page].first_object_offset = 0;
1057 page_table[first_page].large_object = large;
1061 gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
1063 gc_assert(page_table[first_page].allocated == BOXED_PAGE);
1064 gc_assert(page_table[first_page].gen == gc_alloc_generation);
1065 gc_assert(page_table[first_page].large_object == large);
1069 /* Calculate the number of bytes used in this page. This is not
1070 * always the number of new bytes, unless it was free. */
1072 if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
1076 page_table[first_page].bytes_used = bytes_used;
1077 byte_cnt += bytes_used;
1079 next_page = first_page+1;
1081 /* All the rest of the pages should be free. We need to set their
1082 * first_object_offset pointer to the start of the region, and
1083 * set the bytes_used. */
1085 gc_assert(page_table[next_page].allocated == FREE_PAGE);
1086 gc_assert(page_table[next_page].bytes_used == 0);
1088 page_table[next_page].allocated = UNBOXED_PAGE;
1090 page_table[next_page].allocated = BOXED_PAGE;
1091 page_table[next_page].gen = gc_alloc_generation;
1092 page_table[next_page].large_object = large;
1094 page_table[next_page].first_object_offset =
1095 orig_first_page_bytes_used - 4096*(next_page-first_page);
1097 /* Calculate the number of bytes used in this page. */
1099 if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
1103 page_table[next_page].bytes_used = bytes_used;
1104 byte_cnt += bytes_used;
1109 gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);
1111 bytes_allocated += nbytes;
1112 generations[gc_alloc_generation].bytes_allocated += nbytes;
1114 /* Add the region to the new_areas if requested. */
1116 add_new_area(first_page,orig_first_page_bytes_used,nbytes);
1118 /* Bump up last_free_page */
1119 if (last_page+1 > last_free_page) {
1120 last_free_page = last_page+1;
1121 SetSymbolValue(ALLOCATION_POINTER,
1122 (lispobj)(((char *)heap_base) + last_free_page*4096));
1123 if (last_page+1 > last_used_page)
1124 last_used_page = last_page+1;
1127 return((void *)(page_address(first_page)+orig_first_page_bytes_used));
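/* (Worked example, illustrative only: with large_object_size = 4*4096
 * = 16384 bytes, an 18432-byte object takes the large-object path
 * above and gets five dedicated pages: four with bytes_used == 4096
 * and a fifth with bytes_used == 2048, each marked large_object and
 * with first_object_offset pointing back at the first page.) */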
1130 /* Allocate bytes from the boxed_region. It first checks whether there
1131 * is room; if not, it calls gc_alloc_new_region to find a new region
1132 * with enough space. A pointer to the start of the region is returned. */
1134 *gc_alloc(int nbytes)
1136 void *new_free_pointer;
1138 /* FSHOW((stderr, "/gc_alloc %d\n", nbytes)); */
1140 /* Check whether there is room in the current alloc region. */
1141 new_free_pointer = boxed_region.free_pointer + nbytes;
1143 if (new_free_pointer <= boxed_region.end_addr) {
1144 /* If so then allocate from the current alloc region. */
1145 void *new_obj = boxed_region.free_pointer;
1146 boxed_region.free_pointer = new_free_pointer;
1148 /* Check whether the alloc region is almost empty. */
1149 if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
1150 /* If so finished with the current region. */
1151 gc_alloc_update_page_tables(0, &boxed_region);
1152 /* Set up a new region. */
1153 gc_alloc_new_region(32, 0, &boxed_region);
1155 return((void *)new_obj);
1158 /* Else not enough free space in the current region. */
1160 /* If there is some room left in the current region, enough to be worth
1161 * saving, then allocate a large object. */
1162 /* FIXME: "32" should be a named parameter. */
1163 if ((boxed_region.end_addr-boxed_region.free_pointer) > 32)
1164 return gc_alloc_large(nbytes, 0, &boxed_region);
1166 /* Else find a new region. */
1168 /* Finished with the current region. */
1169 gc_alloc_update_page_tables(0, &boxed_region);
1171 /* Set up a new region. */
1172 gc_alloc_new_region(nbytes, 0, &boxed_region);
1174 /* Should now be enough room. */
1176 /* Check whether there is room in the current region. */
1177 new_free_pointer = boxed_region.free_pointer + nbytes;
1179 if (new_free_pointer <= boxed_region.end_addr) {
1180 /* If so then allocate from the current region. */
1181 void *new_obj = boxed_region.free_pointer;
1182 boxed_region.free_pointer = new_free_pointer;
1184 /* Check whether the current region is almost empty. */
1185 if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
1186 /* If so, we're finished with the current region. */
1187 gc_alloc_update_page_tables(0, &boxed_region);
1189 /* Set up a new region. */
1190 gc_alloc_new_region(32, 0, &boxed_region);
1193 return((void *)new_obj);
1196 /* shouldn't happen */
1200 /* Allocate space from the boxed_region. If there is not enough free
1201 * space then call gc_alloc to do the job. A pointer to the start of
1202 * the region is returned. */
1204 *gc_quick_alloc(int nbytes)
1206 void *new_free_pointer;
1208 /* Check whether there is room in the current region. */
1209 new_free_pointer = boxed_region.free_pointer + nbytes;
1211 if (new_free_pointer <= boxed_region.end_addr) {
1212 /* If so then allocate from the current region. */
1213 void *new_obj = boxed_region.free_pointer;
1214 boxed_region.free_pointer = new_free_pointer;
1215 return((void *)new_obj);
1218 /* Else call gc_alloc */
1219 return (gc_alloc(nbytes));
1222 /* Allocate space for the boxed object. If it is a large object then
1223 * do a large alloc, else allocate from the current region. If there is
1224 * not enough free space then call gc_alloc to do the job. A pointer
1225 * to the start of the region is returned. */
1227 *gc_quick_alloc_large(int nbytes)
1229 void *new_free_pointer;
1231 if (nbytes >= large_object_size)
1232 return gc_alloc_large(nbytes, 0, &boxed_region);
1234 /* Check whether there is room in the current region. */
1235 new_free_pointer = boxed_region.free_pointer + nbytes;
1237 if (new_free_pointer <= boxed_region.end_addr) {
1238 /* If so then allocate from the current region. */
1239 void *new_obj = boxed_region.free_pointer;
1240 boxed_region.free_pointer = new_free_pointer;
1241 return((void *)new_obj);
1244 /* Else call gc_alloc */
1245 return (gc_alloc(nbytes));
1249 *gc_alloc_unboxed(int nbytes)
1251 void *new_free_pointer;
1254 FSHOW((stderr, "/gc_alloc_unboxed %d\n", nbytes));
1257 /* Check whether there is room in the current region. */
1258 new_free_pointer = unboxed_region.free_pointer + nbytes;
1260 if (new_free_pointer <= unboxed_region.end_addr) {
1261 /* If so then allocate from the current region. */
1262 void *new_obj = unboxed_region.free_pointer;
1263 unboxed_region.free_pointer = new_free_pointer;
1265 /* Check whether the current region is almost empty. */
1266 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1267 /* If so finished with the current region. */
1268 gc_alloc_update_page_tables(1, &unboxed_region);
1270 /* Set up a new region. */
1271 gc_alloc_new_region(32, 1, &unboxed_region);
1274 return((void *)new_obj);
1277 /* Else not enough free space in the current region. */
1279 /* If there is a bit of room left in the current region then
1280 allocate a large object. */
1281 if ((unboxed_region.end_addr-unboxed_region.free_pointer) > 32)
1282 return gc_alloc_large(nbytes,1,&unboxed_region);
1284 /* Else find a new region. */
1286 /* Finished with the current region. */
1287 gc_alloc_update_page_tables(1, &unboxed_region);
1289 /* Set up a new region. */
1290 gc_alloc_new_region(nbytes, 1, &unboxed_region);
1292 /* Should now be enough room. */
1294 /* Check whether there is room in the current region. */
1295 new_free_pointer = unboxed_region.free_pointer + nbytes;
1297 if (new_free_pointer <= unboxed_region.end_addr) {
1298 /* If so then allocate from the current region. */
1299 void *new_obj = unboxed_region.free_pointer;
1300 unboxed_region.free_pointer = new_free_pointer;
1302 /* Check whether the current region is almost empty. */
1303 if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
1304 /* If so, we're finished with the current region. */
1305 gc_alloc_update_page_tables(1, &unboxed_region);
1307 /* Set up a new region. */
1308 gc_alloc_new_region(32, 1, &unboxed_region);
1311 return((void *)new_obj);
1314 /* shouldn't happen? */
1319 *gc_quick_alloc_unboxed(int nbytes)
1321 void *new_free_pointer;
1323 /* Check whether there is room in the current region. */
1324 new_free_pointer = unboxed_region.free_pointer + nbytes;
1326 if (new_free_pointer <= unboxed_region.end_addr) {
1327 /* If so then allocate from the current region. */
1328 void *new_obj = unboxed_region.free_pointer;
1329 unboxed_region.free_pointer = new_free_pointer;
1331 return((void *)new_obj);
1334 /* Else call gc_alloc */
1335 return (gc_alloc_unboxed(nbytes));
1338 /* Allocate space for the object. If it is a large object then do a
1339 * large alloc, else allocate from the current region. If there is not
1340 * enough free space then call gc_alloc to do the job.
1342 * A pointer to the start of the region is returned. */
1344 *gc_quick_alloc_large_unboxed(int nbytes)
1346 void *new_free_pointer;
1348 if (nbytes >= large_object_size)
1349 return gc_alloc_large(nbytes,1,&unboxed_region);
1351 /* Check whether there is room in the current region. */
1352 new_free_pointer = unboxed_region.free_pointer + nbytes;
1354 if (new_free_pointer <= unboxed_region.end_addr) {
1355 /* If so then allocate from the current region. */
1356 void *new_obj = unboxed_region.free_pointer;
1357 unboxed_region.free_pointer = new_free_pointer;
1359 return((void *)new_obj);
1362 /* Else call gc_alloc. */
1363 return (gc_alloc_unboxed(nbytes));
1367 * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
1370 static int (*scavtab[256])(lispobj *where, lispobj object);
1371 static lispobj (*transother[256])(lispobj object);
1372 static int (*sizetab[256])(lispobj *where);
1374 static struct weak_pointer *weak_pointers;
1376 #define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
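/* E.g. CEILING(5,4) == 8 and CEILING(8,4) == 8: the macro rounds x up
 * to a multiple of y, where y must be a power of 2. */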
1382 static inline boolean
1383 from_space_p(lispobj obj)
1385 int page_index=(void*)obj - heap_base;
1386 return ((page_index >= 0)
1387 && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
1388 && (page_table[page_index].gen == from_space));
1391 static inline boolean
1392 new_space_p(lispobj obj)
1394 int page_index = (void*)obj - heap_base;
1395 return ((page_index >= 0)
1396 && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
1397 && (page_table[page_index].gen == new_space));
1404 /* to copy a boxed object */
1405 static inline lispobj
1406 copy_object(lispobj object, int nwords)
1410 lispobj *source, *dest;
1412 gc_assert(Pointerp(object));
1413 gc_assert(from_space_p(object));
1414 gc_assert((nwords & 0x01) == 0);
1416 /* Get tag of object. */
1417 tag = LowtagOf(object);
1419 /* Allocate space. */
1420 new = gc_quick_alloc(nwords*4);
1423 source = (lispobj *) PTR(object);
1425 /* Copy the object. */
1426 while (nwords > 0) {
1427 dest[0] = source[0];
1428 dest[1] = source[1];
1434 /* Return Lisp pointer of new object. */
1435 return ((lispobj) new) | tag;
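/* (Sketch, not from the original source, of how callers are expected
 * to use copy_object(): make the copy, then install a forwarding
 * pointer over the old object so later references find the copy:
 *
 *     lispobj copy = copy_object(object, nwords);
 *     first_pointer[0] = 0x01;     ..the forwarding marker..
 *     first_pointer[1] = copy;
 *     *where = copy;
 *
 * scav_function_pointer() below follows exactly this pattern.) */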
1438 /* to copy a large boxed object. If the object is in a large object
1439 * region then it is simply promoted, else it is copied. If it's large
1440 * enough then it's copied to a large object region.
1442 * Vectors may have shrunk. If the object is not copied the space
1443 * needs to be reclaimed, and the page_tables corrected. */
1445 copy_large_object(lispobj object, int nwords)
1449 lispobj *source, *dest;
1452 gc_assert(Pointerp(object));
1453 gc_assert(from_space_p(object));
1454 gc_assert((nwords & 0x01) == 0);
1456 if ((nwords > 1024*1024) && gencgc_verbose) {
1457 FSHOW((stderr, "/copy_large_object: %d bytes\n", nwords*4));
1460 /* Check whether it's a large object. */
1461 first_page = find_page_index((void *)object);
1462 gc_assert(first_page >= 0);
1464 if (page_table[first_page].large_object) {
1466 /* Promote the object. */
1468 int remaining_bytes;
1473 /* Note: Any page write-protection must be removed, else a
1474 * later scavenge_newspace may incorrectly not scavenge these
1475 * pages. This would not be necessary if they are added to the
1476 * new areas, but let's do it for them all (they'll probably
1477 * be written anyway?). */
1479 gc_assert(page_table[first_page].first_object_offset == 0);
1481 next_page = first_page;
1482 remaining_bytes = nwords*4;
1483 while (remaining_bytes > 4096) {
1484 gc_assert(page_table[next_page].gen == from_space);
1485 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
1486 gc_assert(page_table[next_page].large_object);
1487 gc_assert(page_table[next_page].first_object_offset==
1488 -4096*(next_page-first_page));
1489 gc_assert(page_table[next_page].bytes_used == 4096);
1491 page_table[next_page].gen = new_space;
1493 /* Remove any write-protection. We should be able to rely
1494 * on the write-protect flag to avoid redundant calls. */
1495 if (page_table[next_page].write_protected) {
1496 os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
1497 page_table[next_page].write_protected = 0;
1499 remaining_bytes -= 4096;
1503 /* Now only one page remains, but the object may have shrunk
1504 * so there may be more unused pages which will be freed. */
1506 /* The object may have shrunk but shouldn't have grown. */
1507 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1509 page_table[next_page].gen = new_space;
1510 gc_assert(page_table[next_page].allocated == BOXED_PAGE);
1512 /* Adjust the bytes_used. */
1513 old_bytes_used = page_table[next_page].bytes_used;
1514 page_table[next_page].bytes_used = remaining_bytes;
1516 bytes_freed = old_bytes_used - remaining_bytes;
1518 /* Free any remaining pages; needs care. */
1520 while ((old_bytes_used == 4096) &&
1521 (page_table[next_page].gen == from_space) &&
1522 (page_table[next_page].allocated == BOXED_PAGE) &&
1523 page_table[next_page].large_object &&
1524 (page_table[next_page].first_object_offset ==
1525 -(next_page - first_page)*4096)) {
1526 /* Checks out OK, free the page. Don't need to bother zeroing
1527 * pages as this should have been done before shrinking the
1528 * object. These pages shouldn't be write-protected as they
1529 * should be zero filled. */
1530 gc_assert(page_table[next_page].write_protected == 0);
1532 old_bytes_used = page_table[next_page].bytes_used;
1533 page_table[next_page].allocated = FREE_PAGE;
1534 page_table[next_page].bytes_used = 0;
1535 bytes_freed += old_bytes_used;
1539 if ((bytes_freed > 0) && gencgc_verbose)
1540 FSHOW((stderr, "/copy_large_boxed bytes_freed=%d\n", bytes_freed));
1542 generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
1543 generations[new_space].bytes_allocated += 4*nwords;
1544 bytes_allocated -= bytes_freed;
1546 /* Add the region to the new_areas if requested. */
1547 add_new_area(first_page,0,nwords*4);
1551 /* Get tag of object. */
1552 tag = LowtagOf(object);
1554 /* Allocate space. */
1555 new = gc_quick_alloc_large(nwords*4);
1558 source = (lispobj *) PTR(object);
1560 /* Copy the object. */
1561 while (nwords > 0) {
1562 dest[0] = source[0];
1563 dest[1] = source[1];
1569 /* Return Lisp pointer of new object. */
1570 return ((lispobj) new) | tag;
1574 /* to copy unboxed objects */
1575 static inline lispobj
1576 copy_unboxed_object(lispobj object, int nwords)
1580 lispobj *source, *dest;
1582 gc_assert(Pointerp(object));
1583 gc_assert(from_space_p(object));
1584 gc_assert((nwords & 0x01) == 0);
1586 /* Get tag of object. */
1587 tag = LowtagOf(object);
1589 /* Allocate space. */
1590 new = gc_quick_alloc_unboxed(nwords*4);
1593 source = (lispobj *) PTR(object);
1595 /* Copy the object. */
1596 while (nwords > 0) {
1597 dest[0] = source[0];
1598 dest[1] = source[1];
1604 /* Return Lisp pointer of new object. */
1605 return ((lispobj) new) | tag;
1608 /* to copy large unboxed objects
1610 * If the object is in a large object region then it is simply
1611 * promoted, else it is copied. If it's large enough then it's copied
1612 * to a large object region.
1614 * Bignums and vectors may have shrunk. If the object is not copied
1615 * the space needs to be reclaimed, and the page_tables corrected.
1617 * KLUDGE: There's a lot of cut-and-paste duplication between this
1618 * function and copy_large_object(..). -- WHN 20000619 */
1620 copy_large_unboxed_object(lispobj object, int nwords)
1624 lispobj *source, *dest;
1627 gc_assert(Pointerp(object));
1628 gc_assert(from_space_p(object));
1629 gc_assert((nwords & 0x01) == 0);
1631 if ((nwords > 1024*1024) && gencgc_verbose)
1632 FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*4));
1634 /* Check whether it's a large object. */
1635 first_page = find_page_index((void *)object);
1636 gc_assert(first_page >= 0);
1638 if (page_table[first_page].large_object) {
1639 /* Promote the object. Note: Unboxed objects may have been
1640 * allocated to a BOXED region so it may be necessary to
1641 * change the region to UNBOXED. */
1642 int remaining_bytes;
1647 gc_assert(page_table[first_page].first_object_offset == 0);
1649 next_page = first_page;
1650 remaining_bytes = nwords*4;
1651 while (remaining_bytes > 4096) {
1652 gc_assert(page_table[next_page].gen == from_space);
1653 gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
1654 || (page_table[next_page].allocated == BOXED_PAGE));
1655 gc_assert(page_table[next_page].large_object);
1656 gc_assert(page_table[next_page].first_object_offset==
1657 -4096*(next_page-first_page));
1658 gc_assert(page_table[next_page].bytes_used == 4096);
1660 page_table[next_page].gen = new_space;
1661 page_table[next_page].allocated = UNBOXED_PAGE;
1662 remaining_bytes -= 4096;
1666 /* Now only one page remains, but the object may have shrunk so
1667 * there may be more unused pages which will be freed. */
1669 /* Object may have shrunk but shouldn't have grown - check. */
1670 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
1672 page_table[next_page].gen = new_space;
1673 page_table[next_page].allocated = UNBOXED_PAGE;
1675 /* Adjust the bytes_used. */
1676 old_bytes_used = page_table[next_page].bytes_used;
1677 page_table[next_page].bytes_used = remaining_bytes;
1679 bytes_freed = old_bytes_used - remaining_bytes;
1681 /* Free any remaining pages; needs care. */
1683 while ((old_bytes_used == 4096) &&
1684 (page_table[next_page].gen == from_space) &&
1685 ((page_table[next_page].allocated == UNBOXED_PAGE)
1686 || (page_table[next_page].allocated == BOXED_PAGE)) &&
1687 page_table[next_page].large_object &&
1688 (page_table[next_page].first_object_offset ==
1689 -(next_page - first_page)*4096)) {
1690 /* Checks out OK, free the page. Don't need to bother zeroing
1691 * pages as this should have been done before shrinking the
1692 * object. These pages shouldn't be write-protected, even if
1693 * boxed, since they should be zero filled. */
1694 gc_assert(page_table[next_page].write_protected == 0);
1696 old_bytes_used = page_table[next_page].bytes_used;
1697 page_table[next_page].allocated = FREE_PAGE;
1698 page_table[next_page].bytes_used = 0;
1699 bytes_freed += old_bytes_used;
1703 if ((bytes_freed > 0) && gencgc_verbose)
1705 "/copy_large_unboxed bytes_freed=%d\n",
1708 generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
1709 generations[new_space].bytes_allocated += 4*nwords;
1710 bytes_allocated -= bytes_freed;
1715 /* Get tag of object. */
1716 tag = LowtagOf(object);
1718 /* Allocate space. */
1719 new = gc_quick_alloc_large_unboxed(nwords*4);
1722 source = (lispobj *) PTR(object);
1724 /* Copy the object. */
1725 while (nwords > 0) {
1726 dest[0] = source[0];
1727 dest[1] = source[1];
1733 /* Return Lisp pointer of new object. */
1734 return ((lispobj) new) | tag;
1742 #define DIRECT_SCAV 0
1745 scavenge(lispobj *start, long nwords)
1747 while (nwords > 0) {
1749 int type, words_scavenged;
1753 /* FSHOW((stderr, "Scavenge: %p, %ld\n", start, nwords)); */
1755 gc_assert(object != 0x01); /* not a forwarding pointer */
1758 type = TypeOf(object);
1759 words_scavenged = (scavtab[type])(start, object);
1761 if (Pointerp(object)) {
1762 /* It's a pointer. */
1763 if (from_space_p(object)) {
1764 /* It currently points to old space. Check for a forwarding
1766 lispobj *ptr = (lispobj *)PTR(object);
1767 lispobj first_word = *ptr;
1769 if (first_word == 0x01) {
1770 /* Yep, there be a forwarding pointer. */
1772 words_scavenged = 1;
1775 /* Scavenge that pointer. */
1776 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1778 /* It points somewhere other than oldspace. Leave it alone. */
1779 words_scavenged = 1;
1782 if ((object & 3) == 0) {
1783 /* It's a fixnum. Real easy. */
1784 words_scavenged = 1;
1786 /* It's some sort of header object or another. */
1787 words_scavenged = (scavtab[TypeOf(object)])(start, object);
1792 start += words_scavenged;
1793 nwords -= words_scavenged;
1795 gc_assert(nwords == 0);
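/* (Sketch, illustrative only, of the contract for scavtab entries:
 * given the object word at *where, fix up the object's constituent
 * words and return how many words the object occupied. A hypothetical
 * handler for a two-word boxed object might look like:
 *
 *     static int
 *     scav_example(lispobj *where, lispobj object)
 *     {
 *         scavenge(where + 1, 1);   ..fix up the second word..
 *         return 2;                 ..two words consumed..
 *     }
 *
 * scav_example is an invented name, only for illustration.) */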
1800 * code and code-related objects
1803 #define RAW_ADDR_OFFSET (6*sizeof(lispobj) - type_FunctionPointer)
1805 static lispobj trans_function_header(lispobj object);
1806 static lispobj trans_boxed(lispobj object);
1810 scav_function_pointer(lispobj *where, lispobj object)
1812 gc_assert(Pointerp(object));
1814 if (from_space_p(object)) {
1815 lispobj first, *first_pointer;
1817 /* object is a pointer into from space. Check to see whether
1818 * it has been forwarded. */
1819 first_pointer = (lispobj *) PTR(object);
1820 first = *first_pointer;
1822 if (first == 0x01) {
1824 *where = first_pointer[1];
1831 /* must transport object -- object may point to either a
1832 * function header, a closure function header, or to a
1833 * closure header. */
1835 type = TypeOf(first);
1837 case type_FunctionHeader:
1838 case type_ClosureFunctionHeader:
1839 copy = trans_function_header(object);
1842 copy = trans_boxed(object);
1846 if (copy != object) {
1847 /* Set forwarding pointer. */
1848 first_pointer[0] = 0x01;
1849 first_pointer[1] = copy;
1855 gc_assert(Pointerp(first));
1856 gc_assert(!from_space_p(first));
1864 scav_function_pointer(lispobj *where, lispobj object)
1866 lispobj *first_pointer;
1869 gc_assert(Pointerp(object));
1871 /* Object is a pointer into from space - not a FP. */
1872 first_pointer = (lispobj *) PTR(object);
1874 /* must transport object -- object may point to either a function
1875 * header, a closure function header, or to a closure header. */
1877 switch (TypeOf(*first_pointer)) {
1878 case type_FunctionHeader:
1879 case type_ClosureFunctionHeader:
1880 copy = trans_function_header(object);
1883 copy = trans_boxed(object);
1887 if (copy != object) {
1888 /* Set forwarding pointer */
1889 first_pointer[0] = 0x01;
1890 first_pointer[1] = copy;
1893 gc_assert(Pointerp(copy));
1894 gc_assert(!from_space_p(copy));
1902 /* Scan an x86 compiled code object, looking for possible fixups that
1903 * have been missed after a move.
1905 * Two types of fixups are needed:
1906 * 1. Absolute fixups to within the code object.
1907 * 2. Relative fixups to outside the code object.
1909 * Currently only absolute fixups to the constant vector, or to the
1910 * code area are checked. */
1912 sniff_code_object(struct code *code, unsigned displacement)
1914 int nheader_words, ncode_words, nwords;
1916 struct function *fheaderp;
1918 void *constants_start_addr, *constants_end_addr;
1919 void *code_start_addr, *code_end_addr;
1920 int fixup_found = 0;
1922 if (!check_code_fixups)
1925 /* It's ok if it's byte compiled code. The trace table offset will
1926 * be a fixnum if it's x86 compiled code - check. */
1927 if (code->trace_table_offset & 0x3) {
1928 FSHOW((stderr, "/Sniffing byte compiled code object at %x.\n", code));
1932 /* Else it's x86 machine code. */
1934 ncode_words = fixnum_value(code->code_size);
1935 nheader_words = HeaderValue(*(lispobj *)code);
1936 nwords = ncode_words + nheader_words;
1938 constants_start_addr = (void *)code + 5*4;
1939 constants_end_addr = (void *)code + nheader_words*4;
1940 code_start_addr = (void *)code + nheader_words*4;
1941 code_end_addr = (void *)code + nwords*4;
1943 /* Work through the unboxed code. */
1944 for (p = code_start_addr; p < code_end_addr; p++) {
1945 void *data = *(void **)p;
1946 unsigned d1 = *((unsigned char *)p - 1);
1947 unsigned d2 = *((unsigned char *)p - 2);
1948 unsigned d3 = *((unsigned char *)p - 3);
1949 unsigned d4 = *((unsigned char *)p - 4);
1950 unsigned d5 = *((unsigned char *)p - 5);
1951 unsigned d6 = *((unsigned char *)p - 6);
1953 /* Check for code references. */
1954 /* Check for a 32 bit word that looks like an absolute
1955 reference to within the code area of the code object. */
1956 if ((data >= (code_start_addr-displacement))
1957 && (data < (code_end_addr-displacement))) {
1958 /* function header */
1960 && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
1961 /* Skip the function header */
1965 /* the case of PUSH imm32 */
1969 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1970 p, d6, d5, d4, d3, d2, d1, data));
1971 FSHOW((stderr, "/PUSH $0x%.8x\n", data));
1973 /* the case of MOV [reg-8],imm32 */
1975 && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
1976 || d2==0x45 || d2==0x46 || d2==0x47)
1980 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1981 p, d6, d5, d4, d3, d2, d1, data));
1982 FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
1984 /* the case of LEA reg,[disp32] */
1985 if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
1988 "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
1989 p, d6, d5, d4, d3, d2, d1, data));
1990 FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
1994 /* Check for constant references. */
1995 /* Check for a 32 bit word that looks like an absolute
1996 reference to within the constant vector. Constant references
1998 if ((data >= (constants_start_addr-displacement))
1999 && (data < (constants_end_addr-displacement))
2000 && (((unsigned)data & 0x3) == 0)) {
2005 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2006 p, d6, d5, d4, d3, d2, d1, data));
2007 FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
2010 /* the case of MOV m32,EAX */
2014 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2015 p, d6, d5, d4, d3, d2, d1, data));
2016 FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
2019 /* the case of CMP m32,imm32 */
2020 if ((d1 == 0x3d) && (d2 == 0x81)) {
2023 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2024 p, d6, d5, d4, d3, d2, d1, data));
2026 FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
2029 /* Check for a mod=00, r/m=101 byte. */
2030 if ((d1 & 0xc7) == 5) {
2035 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2036 p, d6, d5, d4, d3, d2, d1, data));
2037 FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
2039 /* the case of CMP reg32,m32 */
2043 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2044 p, d6, d5, d4, d3, d2, d1, data));
2045 FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
2047 /* the case of MOV m32,reg32 */
2051 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2052 p, d6, d5, d4, d3, d2, d1, data));
2053 FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
2055 /* the case of MOV reg32,m32 */
2059 "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2060 p, d6, d5, d4, d3, d2, d1, data));
2061 FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
2063 /* the case of LEA reg32,m32 */
2067 "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
2068 p, d6, d5, d4, d3, d2, d1, data));
2069 FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
2075 /* If anything was found, print some information on the code
2079 "/compiled code object at %x: header words = %d, code words = %d\n",
2080 code, nheader_words, ncode_words));
2082 "/const start = %x, end = %x\n",
2083 constants_start_addr, constants_end_addr));
2085 "/code start = %x, end = %x\n",
2086 code_start_addr, code_end_addr));
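/* (Worked example, not from the original source, of the fixup
 * arithmetic applied in apply_code_fixups() below: if a code object
 * moves from 0x10000000 to 0x10004000, displacement = 0x4000. An
 * absolute reference into the object itself, say 0x10000123, must
 * become 0x10004123: add the displacement. A relative reference to a
 * fixed external address must shrink by the same amount, because the
 * referencing instruction itself moved up by 0x4000: subtract the
 * displacement.) */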
2091 apply_code_fixups(struct code *old_code, struct code *new_code)
2093 int nheader_words, ncode_words, nwords;
2094 void *constants_start_addr, *constants_end_addr;
2095 void *code_start_addr, *code_end_addr;
2097 lispobj fixups = NIL;
2098 unsigned displacement = (unsigned)new_code - (unsigned)old_code;
2099 struct vector *fixups_vector;
2101 /* It's OK if it's byte-compiled code. The trace table offset will
2102 * be a fixnum if it's x86-compiled code, so check for that. */
2103 if (new_code->trace_table_offset & 0x3) {
2104 /* FSHOW((stderr, "/byte compiled code object at %x\n", new_code)); */
2108 /* Else it's x86 machine code. */
2109 ncode_words = fixnum_value(new_code->code_size);
2110 nheader_words = HeaderValue(*(lispobj *)new_code);
2111 nwords = ncode_words + nheader_words;
2113 "/compiled code object at %x: header words = %d, code words = %d\n",
2114 new_code, nheader_words, ncode_words)); */
2115 constants_start_addr = (void *)new_code + 5*4;
2116 constants_end_addr = (void *)new_code + nheader_words*4;
2117 code_start_addr = (void *)new_code + nheader_words*4;
2118 code_end_addr = (void *)new_code + nwords*4;
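/* So, in 4-byte words: word 0 is the header, words 1..4 are the
 * fixed boxed slots of struct code (code_size, entry_points, etc.),
 * the constants run from word 5 up to nheader_words, and the machine
 * code fills the remaining ncode_words. E.g. with nheader_words = 8
 * and ncode_words = 100, the constants are words 5..7 and the code
 * words 8..107. */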
2121 "/const start = %x, end = %x\n",
2122 constants_start_addr,constants_end_addr));
2124 "/code start = %x; end = %x\n",
2125 code_start_addr,code_end_addr));
2128 /* The first constant should be a pointer to the fixups for this
2129 code object. Check. */
2130 fixups = new_code->constants[0];
2132 /* It will be 0 or the unbound-marker if there are no fixups, and
2133 * will be an other pointer if it is valid. */
2134 if ((fixups == 0) || (fixups == type_UnboundMarker) || !Pointerp(fixups)) {
2135 /* Check for possible errors. */
2136 if (check_code_fixups)
2137 sniff_code_object(new_code, displacement);
2139 /*fprintf(stderr,"Fixups for code object not found!?\n");
2140 fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2141 new_code, nheader_words, ncode_words);
2142 fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2143 constants_start_addr,constants_end_addr,
2144 code_start_addr,code_end_addr);*/
2148 fixups_vector = (struct vector *)PTR(fixups);
2150 /* Could be pointing to a forwarding pointer. */
2151 if (Pointerp(fixups) && (find_page_index((void*)fixups_vector) != -1)
2152 && (fixups_vector->header == 0x01)) {
2153 /* If so, then follow it. */
2154 /*SHOW("following pointer to a forwarding pointer");*/
2155 fixups_vector = (struct vector *)PTR((lispobj)fixups_vector->length);
2158 /*SHOW("got fixups");*/
2160 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2161 /* Got the fixups for the code block. Now work through the vector,
2162 and apply a fixup at each address. */
2163 int length = fixnum_value(fixups_vector->length);
2165 for (i = 0; i < length; i++) {
2166 unsigned offset = fixups_vector->data[i];
2167 /* Now check the current value of offset. */
2168 unsigned old_value =
2169 *(unsigned *)((unsigned)code_start_addr + offset);
2171 /* If it's within the old_code object then it must be an
2172 * absolute fixup (relative ones are not saved) */
2173 if ((old_value >= (unsigned)old_code)
2174 && (old_value < ((unsigned)old_code + nwords*4)))
2175 /* So add the displacement. */
2176 *(unsigned *)((unsigned)code_start_addr + offset) =
2177 old_value + displacement;
2179 /* It is outside the old code object so it must be a
2180 * relative fixup (absolute fixups are not saved). So
2181 * subtract the displacement. */
2182 *(unsigned *)((unsigned)code_start_addr + offset) =
2183 old_value - displacement;
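/* For example, if old_code is at 0x1000 and new_code at 0x5000 then
 * displacement = 0x4000. An absolute reference 0x1040 (into the old
 * object) becomes 0x5040, while a rel32 operand targeting some fixed
 * external address must shrink by 0x4000 because the instruction
 * referring to it moved forward by that much. */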
2187 /* Check for possible errors. */
2188 if (check_code_fixups) {
2189 sniff_code_object(new_code,displacement);
2193 static struct code *
2194 trans_code(struct code *code)
2196 struct code *new_code;
2197 lispobj l_code, l_new_code;
2198 int nheader_words, ncode_words, nwords;
2199 unsigned long displacement;
2200 lispobj fheaderl, *prev_pointer;
2203 "\n/transporting code object located at 0x%08x\n",
2204 (unsigned long) code)); */
2206 /* If object has already been transported, just return pointer. */
2207 if (*((lispobj *)code) == 0x01)
2208 return (struct code*)(((lispobj *)code)[1]);
2210 gc_assert(TypeOf(code->header) == type_CodeHeader);
2212 /* Prepare to transport the code vector. */
2213 l_code = (lispobj) code | type_OtherPointer;
2215 ncode_words = fixnum_value(code->code_size);
2216 nheader_words = HeaderValue(code->header);
2217 nwords = ncode_words + nheader_words;
2218 nwords = CEILING(nwords, 2);
2220 l_new_code = copy_large_object(l_code, nwords);
2221 new_code = (struct code *) PTR(l_new_code);
2223 /* It may not have been moved. */
2224 if (new_code == code)
2227 displacement = l_new_code - l_code;
2231 "/old code object at 0x%08x, new code object at 0x%08x\n",
2232 (unsigned long) code,
2233 (unsigned long) new_code));
2234 FSHOW((stderr, "/Code object is %d words long.\n", nwords));
2237 /* Set forwarding pointer. */
2238 ((lispobj *)code)[0] = 0x01;
2239 ((lispobj *)code)[1] = l_new_code;
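/* (The forwarding convention used throughout this file: word 0 of
 * the old object is overwritten with 0x01 and word 1 with the new,
 * tagged address. A later scavenge that sees 0x01 in the header
 * position just picks the new address out of the second word.) */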
2241 /* Set forwarding pointers for all the function headers in the
2242 * code object. Also fix all self pointers. */
2244 fheaderl = code->entry_points;
2245 prev_pointer = &new_code->entry_points;
2247 while (fheaderl != NIL) {
2248 struct function *fheaderp, *nfheaderp;
2251 fheaderp = (struct function *) PTR(fheaderl);
2252 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2254 /* Calculate the new function pointer and the new */
2255 /* function header. */
2256 nfheaderl = fheaderl + displacement;
2257 nfheaderp = (struct function *) PTR(nfheaderl);
2259 /* Set forwarding pointer. */
2260 ((lispobj *)fheaderp)[0] = 0x01;
2261 ((lispobj *)fheaderp)[1] = nfheaderl;
2263 /* Fix self pointer. */
2264 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
2266 *prev_pointer = nfheaderl;
2268 fheaderl = fheaderp->next;
2269 prev_pointer = &nfheaderp->next;
2272 /* sniff_code_object(new_code,displacement);*/
2273 apply_code_fixups(code,new_code);
2279 scav_code_header(lispobj *where, lispobj object)
2282 int nheader_words, ncode_words, nwords;
2284 struct function *fheaderp;
2286 code = (struct code *) where;
2287 ncode_words = fixnum_value(code->code_size);
2288 nheader_words = HeaderValue(object);
2289 nwords = ncode_words + nheader_words;
2290 nwords = CEILING(nwords, 2);
2292 /* Scavenge the boxed section of the code data block. */
2293 scavenge(where + 1, nheader_words - 1);
2295 /* Scavenge the boxed section of each function object in the */
2296 /* code data block. */
2297 fheaderl = code->entry_points;
2298 while (fheaderl != NIL) {
2299 fheaderp = (struct function *) PTR(fheaderl);
2300 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2302 scavenge(&fheaderp->name, 1);
2303 scavenge(&fheaderp->arglist, 1);
2304 scavenge(&fheaderp->type, 1);
2306 fheaderl = fheaderp->next;
2313 trans_code_header(lispobj object)
2317 ncode = trans_code((struct code *) PTR(object));
2318 return (lispobj) ncode | type_OtherPointer;
2322 size_code_header(lispobj *where)
2325 int nheader_words, ncode_words, nwords;
2327 code = (struct code *) where;
2329 ncode_words = fixnum_value(code->code_size);
2330 nheader_words = HeaderValue(code->header);
2331 nwords = ncode_words + nheader_words;
2332 nwords = CEILING(nwords, 2);
2338 scav_return_pc_header(lispobj *where, lispobj object)
2340 lose("attempted to scavenge a return PC header where=0x%08x object=0x%08x",
2341 (unsigned long) where,
2342 (unsigned long) object);
2343 return 0; /* bogus return value to satisfy static type checking */
2347 trans_return_pc_header(lispobj object)
2349 struct function *return_pc;
2350 unsigned long offset;
2351 struct code *code, *ncode;
2353 SHOW("/trans_return_pc_header: Will this work?");
2355 return_pc = (struct function *) PTR(object);
2356 offset = HeaderValue(return_pc->header) * 4;
2358 /* Transport the whole code object. */
2359 code = (struct code *) ((unsigned long) return_pc - offset);
2360 ncode = trans_code(code);
2362 return ((lispobj) ncode + offset) | type_OtherPointer;
2365 /* On the 386, closures hold a pointer to the raw address instead of the
2366 * function object. */
2369 scav_closure_header(lispobj *where, lispobj object)
2371 struct closure *closure;
2374 closure = (struct closure *)where;
2375 fun = closure->function - RAW_ADDR_OFFSET;
2377 /* The function may have moved so update the raw address. But
2378 * don't write unnecessarily. */
2379 if (closure->function != fun + RAW_ADDR_OFFSET)
2380 closure->function = fun + RAW_ADDR_OFFSET;
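/* That is: closure->function holds a raw entry address, equal to the
 * tagged function pointer plus the constant RAW_ADDR_OFFSET.
 * Subtracting the offset recovers a scavengeable lispobj; once that
 * has (possibly) been transported, adding the offset back re-derives
 * the raw address of the moved function. */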
2387 scav_function_header(lispobj *where, lispobj object)
2389 lose("attempted to scavenge a function header where=0x%08x object=0x%08x",
2390 (unsigned long) where,
2391 (unsigned long) object);
2392 return 0; /* bogus return value to satisfy static type checking */
2396 trans_function_header(lispobj object)
2398 struct function *fheader;
2399 unsigned long offset;
2400 struct code *code, *ncode;
2402 fheader = (struct function *) PTR(object);
2403 offset = HeaderValue(fheader->header) * 4;
2405 /* Transport the whole code object. */
2406 code = (struct code *) ((unsigned long) fheader - offset);
2407 ncode = trans_code(code);
2409 return ((lispobj) ncode + offset) | type_FunctionPointer;
2418 scav_instance_pointer(lispobj *where, lispobj object)
2420 if (from_space_p(object)) {
2421 lispobj first, *first_pointer;
2423 /* Object is a pointer into from space. Check to see */
2424 /* whether it has been forwarded. */
2425 first_pointer = (lispobj *) PTR(object);
2426 first = *first_pointer;
2428 if (first == 0x01) {
2430 first = first_pointer[1];
2432 first = trans_boxed(object);
2433 gc_assert(first != object);
2434 /* Set forwarding pointer. */
2435 first_pointer[0] = 0x01;
2436 first_pointer[1] = first;
2444 scav_instance_pointer(lispobj *where, lispobj object)
2446 lispobj copy, *first_pointer;
2448 /* Object is a pointer into from space - not a FP. */
2449 copy = trans_boxed(object);
2451 gc_assert(copy != object);
2453 first_pointer = (lispobj *) PTR(object);
2455 /* Set forwarding pointer. */
2456 first_pointer[0] = 0x01;
2457 first_pointer[1] = copy;
2468 static lispobj trans_list(lispobj object);
2472 scav_list_pointer(lispobj *where, lispobj object)
2474 /* KLUDGE: There's lots of cut-and-paste duplication between this
2475 * and scav_instance_pointer(..), scav_other_pointer(..), and
2476 * perhaps other functions too. -- WHN 20000620 */
2478 gc_assert(Pointerp(object));
2480 if (from_space_p(object)) {
2481 lispobj first, *first_pointer;
2483 /* Object is a pointer into from space. Check to see whether it has
2484 * been forwarded. */
2485 first_pointer = (lispobj *) PTR(object);
2486 first = *first_pointer;
2488 if (first == 0x01) {
2490 first = first_pointer[1];
2492 first = trans_list(object);
2494 /* Set forwarding pointer */
2495 first_pointer[0] = 0x01;
2496 first_pointer[1] = first;
2499 gc_assert(Pointerp(first));
2500 gc_assert(!from_space_p(first));
2507 scav_list_pointer(lispobj *where, lispobj object)
2509 lispobj first, *first_pointer;
2511 gc_assert(Pointerp(object));
2513 /* Object is a pointer into from space - not FP. */
2515 first = trans_list(object);
2516 gc_assert(first != object);
2518 first_pointer = (lispobj *) PTR(object);
2520 /* Set forwarding pointer */
2521 first_pointer[0] = 0x01;
2522 first_pointer[1] = first;
2524 gc_assert(Pointerp(first));
2525 gc_assert(!from_space_p(first));
2532 trans_list(lispobj object)
2534 lispobj new_list_pointer;
2535 struct cons *cons, *new_cons;
2539 gc_assert(from_space_p(object));
2541 cons = (struct cons *) PTR(object);
2543 /* Copy 'object'. */
2544 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2545 new_cons->car = cons->car;
2546 new_cons->cdr = cons->cdr; /* updated later */
2547 new_list_pointer = (lispobj)new_cons | LowtagOf(object);
2549 /* Grab the cdr before it is clobbered. */
2552 /* Set forwarding pointer (clobbers start of list). */
2554 cons->cdr = new_list_pointer;
2556 /* Try to linearize the list in the cdr direction to help reduce
2557 * the cost of scavenging it later. */
2560 struct cons *cdr_cons, *new_cdr_cons;
2562 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2563 || (*((lispobj *)PTR(cdr)) == 0x01))
2566 cdr_cons = (struct cons *) PTR(cdr);
2569 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2570 new_cdr_cons->car = cdr_cons->car;
2571 new_cdr_cons->cdr = cdr_cons->cdr;
2572 new_cdr = (lispobj)new_cdr_cons | LowtagOf(cdr);
2574 /* Grab the cdr before it is clobbered. */
2575 cdr = cdr_cons->cdr;
2577 /* Set forwarding pointer. */
2578 cdr_cons->car = 0x01;
2579 cdr_cons->cdr = new_cdr;
2581 /* Update the cdr of the last cons copied into new space to
2582 * keep the newspace scavenge from having to do it. */
2583 new_cons->cdr = new_cdr;
2585 new_cons = new_cdr_cons;
2588 return new_list_pointer;
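/* Linearizing here means the cdr chain is copied eagerly into
 * consecutively allocated conses, so the list ends up contiguous in
 * newspace and the later newspace scavenge walks it sequentially
 * instead of chasing cells scattered across pages. */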
2593 * scavenging and transporting other pointers
2598 scav_other_pointer(lispobj *where, lispobj object)
2600 gc_assert(Pointerp(object));
2602 if (from_space_p(object)) {
2603 lispobj first, *first_pointer;
2605 /* Object is a pointer into from space. Check to see */
2606 /* whether it has been forwarded. */
2607 first_pointer = (lispobj *) PTR(object);
2608 first = *first_pointer;
2610 if (first == 0x01) {
2612 first = first_pointer[1];
2615 first = (transother[TypeOf(first)])(object);
2617 if (first != object) {
2618 /* Set forwarding pointer */
2619 first_pointer[0] = 0x01;
2620 first_pointer[1] = first;
2625 gc_assert(Pointerp(first));
2626 gc_assert(!from_space_p(first));
2632 scav_other_pointer(lispobj *where, lispobj object)
2634 lispobj first, *first_pointer;
2636 gc_assert(Pointerp(object));
2638 /* Object is a pointer into from space - not FP. */
2639 first_pointer = (lispobj *) PTR(object);
2641 first = (transother[TypeOf(*first_pointer)])(object);
2643 if (first != object) {
2644 /* Set forwarding pointer. */
2645 first_pointer[0] = 0x01;
2646 first_pointer[1] = first;
2650 gc_assert(Pointerp(first));
2651 gc_assert(!from_space_p(first));
2659 * immediate, boxed, and unboxed objects
2663 size_pointer(lispobj *where)
2669 scav_immediate(lispobj *where, lispobj object)
2675 trans_immediate(lispobj object)
2677 lose("trying to transport an immediate");
2678 return NIL; /* bogus return value to satisfy static type checking */
2682 size_immediate(lispobj *where)
2689 scav_boxed(lispobj *where, lispobj object)
2695 trans_boxed(lispobj object)
2698 unsigned long length;
2700 gc_assert(Pointerp(object));
2702 header = *((lispobj *) PTR(object));
2703 length = HeaderValue(header) + 1;
2704 length = CEILING(length, 2);
2706 return copy_object(object, length);
2710 trans_boxed_large(lispobj object)
2713 unsigned long length;
2715 gc_assert(Pointerp(object));
2717 header = *((lispobj *) PTR(object));
2718 length = HeaderValue(header) + 1;
2719 length = CEILING(length, 2);
2721 return copy_large_object(object, length);
2725 size_boxed(lispobj *where)
2728 unsigned long length;
2731 length = HeaderValue(header) + 1;
2732 length = CEILING(length, 2);
2738 scav_fdefn(lispobj *where, lispobj object)
2740 struct fdefn *fdefn;
2742 fdefn = (struct fdefn *)where;
2744 /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
2745 fdefn->function, fdefn->raw_addr)); */
2747 if ((char *)(fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2748 scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1);
2750 /* Don't write unnecessarily. */
2751 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2752 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2754 return sizeof(struct fdefn) / sizeof(lispobj);
2761 scav_unboxed(lispobj *where, lispobj object)
2763 unsigned long length;
2765 length = HeaderValue(object) + 1;
2766 length = CEILING(length, 2);
2772 trans_unboxed(lispobj object)
2775 unsigned long length;
2778 gc_assert(Pointerp(object));
2780 header = *((lispobj *) PTR(object));
2781 length = HeaderValue(header) + 1;
2782 length = CEILING(length, 2);
2784 return copy_unboxed_object(object, length);
2788 trans_unboxed_large(lispobj object)
2791 unsigned long length;
2794 gc_assert(Pointerp(object));
2796 header = *((lispobj *) PTR(object));
2797 length = HeaderValue(header) + 1;
2798 length = CEILING(length, 2);
2800 return copy_large_unboxed_object(object, length);
2804 size_unboxed(lispobj *where)
2807 unsigned long length;
2810 length = HeaderValue(header) + 1;
2811 length = CEILING(length, 2);
2817 * vector-like objects
2820 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
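/* NWORDS(x,y) is the number of words needed for x elements when y of
 * them fit per 4-byte word: CEILING rounds x up to a multiple of y,
 * then we divide. E.g. NWORDS(5,4) = CEILING(5,4)/4 = 8/4 = 2 words
 * for a 5-byte string payload. The "+ 2" in the vector sizers below
 * accounts for the header and length words. */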
2823 scav_string(lispobj *where, lispobj object)
2825 struct vector *vector;
2828 /* NOTE: Strings contain one more byte of data than the length */
2829 /* slot indicates. */
2831 vector = (struct vector *) where;
2832 length = fixnum_value(vector->length) + 1;
2833 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2839 trans_string(lispobj object)
2841 struct vector *vector;
2844 gc_assert(Pointerp(object));
2846 /* NOTE: A string contains one more byte of data (a terminating
2847 * '\0' to help when interfacing with C functions) than indicated
2848 * by the length slot. */
2850 vector = (struct vector *) PTR(object);
2851 length = fixnum_value(vector->length) + 1;
2852 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2854 return copy_large_unboxed_object(object, nwords);
2858 size_string(lispobj *where)
2860 struct vector *vector;
2863 /* NOTE: A string contains one more byte of data (a terminating
2864 * '\0' to help when interfacing with C functions) than indicated
2865 * by the length slot. */
2867 vector = (struct vector *) where;
2868 length = fixnum_value(vector->length) + 1;
2869 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2874 /* FIXME: What does this mean? */
2875 int gencgc_hash = 1;
2878 scav_vector(lispobj *where, lispobj object)
2880 unsigned int kv_length;
2882 unsigned int length;
2883 lispobj *hash_table;
2884 lispobj empty_symbol;
2885 unsigned int *index_vector, *next_vector, *hash_vector;
2887 unsigned next_vector_length;
2889 /* FIXME: A comment explaining this would be nice. It looks as
2890 * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
2891 * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
2892 if (HeaderValue(object) != subtype_VectorValidHashing)
2896 /* This is set for backward compatibility. FIXME: Do we need this any more? */
2898 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2902 kv_length = fixnum_value(where[1]);
2903 kv_vector = where + 2; /* Skip the header and length. */
2904 /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
2906 /* Scavenge element 0, which may be a hash-table structure. */
2907 scavenge(where+2, 1);
2908 if (!Pointerp(where[2])) {
2909 lose("no pointer at %x in hash table", where[2]);
2911 hash_table = (lispobj *)PTR(where[2]);
2912 /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
2913 if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2914 lose("hash table not instance (%x at %x)", hash_table[0], hash_table);
2917 /* Scavenge element 1, which should be some internal symbol that
2918 * the hash table code reserves for marking empty slots. */
2919 scavenge(where+3, 1);
2920 if (!Pointerp(where[3])) {
2921 lose("not #:%EMPTY-HT-SLOT% symbol pointer: %x", where[3]);
2923 empty_symbol = where[3];
2924 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
2925 if (TypeOf(*(lispobj *)PTR(empty_symbol)) != type_SymbolHeader) {
2926 lose("not a symbol where #:%EMPTY-HT-SLOT% expected: %x",
2927 *(lispobj *)PTR(empty_symbol));
2930 /* Scavenge hash table, which will fix the positions of the other
2931 * needed objects. */
2932 scavenge(hash_table, 16);
2934 /* Cross-check the kv_vector. */
2935 if (where != (lispobj *)PTR(hash_table[9])) {
2936 lose("hash_table table!=this table %x", hash_table[9]);
2940 weak_p_obj = hash_table[10];
2944 lispobj index_vector_obj = hash_table[13];
2946 if (Pointerp(index_vector_obj) &&
2947 (TypeOf(*(lispobj *)PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2948 index_vector = ((unsigned int *)PTR(index_vector_obj)) + 2;
2949 /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
2950 length = fixnum_value(((unsigned int *)PTR(index_vector_obj))[1]);
2951 /*FSHOW((stderr, "/length = %d\n", length));*/
2953 lose("invalid index_vector %x", index_vector_obj);
2959 lispobj next_vector_obj = hash_table[14];
2961 if (Pointerp(next_vector_obj) &&
2962 (TypeOf(*(lispobj *)PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2963 next_vector = ((unsigned int *)PTR(next_vector_obj)) + 2;
2964 /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
2965 next_vector_length = fixnum_value(((unsigned int *)PTR(next_vector_obj))[1]);
2966 /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
2968 lose("invalid next_vector %x", next_vector_obj);
2972 /* maybe hash vector */
2974 /* FIXME: This bare "15" offset should become a symbolic
2975 * expression of some sort. And all the other bare offsets
2976 * too. And the bare "16" in scavenge(hash_table, 16). And
2977 * probably other stuff too. Ugh. */
2978 lispobj hash_vector_obj = hash_table[15];
2980 if (Pointerp(hash_vector_obj) &&
2981 (TypeOf(*(lispobj *)PTR(hash_vector_obj))
2982 == type_SimpleArrayUnsignedByte32)) {
2983 hash_vector = ((unsigned int *)PTR(hash_vector_obj)) + 2;
2984 /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
2985 gc_assert(fixnum_value(((unsigned int *)PTR(hash_vector_obj))[1])
2986 == next_vector_length);
2989 /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
2993 /* These lengths could be different, as the index_vector can be a
2994 * different length from the others; a larger index_vector could help
2995 * reduce collisions. */
2996 gc_assert(next_vector_length*2 == kv_length);
2998 /* Now everything is set up. */
3000 /* Work through the KV vector. */
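/* For each entry i, kv_vector[2*i] is the key and kv_vector[2*i+1]
 * the value. After a pair is scavenged, an EQ-hashed key (hash_vector
 * absent, or hash_vector[i] == 0x80000000) whose address changed may
 * now belong in a different bucket; if so it is unlinked from the
 * index_vector[old_index] chain (threaded through next_vector) and
 * pushed onto the needing-rehash list headed at hash_table[11]. */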
3003 for (i = 1; i < next_vector_length; i++) {
3004 lispobj old_key = kv_vector[2*i];
3005 unsigned int old_index = (old_key & 0x1fffffff)%length;
3007 /* Scavenge the key and value. */
3008 scavenge(&kv_vector[2*i],2);
3010 /* Check whether the key has moved and is EQ based. */
3012 lispobj new_key = kv_vector[2*i];
3013 unsigned int new_index = (new_key & 0x1fffffff)%length;
3015 if ((old_index != new_index) &&
3016 ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
3017 ((new_key != empty_symbol) ||
3018 (kv_vector[2*i] != empty_symbol))) {
3021 "* EQ key %d moved from %x to %x; index %d to %d\n",
3022 i, old_key, new_key, old_index, new_index));*/
3024 if (index_vector[old_index] != 0) {
3025 /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
3027 /* Unlink the key from the old_index chain. */
3028 if (index_vector[old_index] == i) {
3029 /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
3030 index_vector[old_index] = next_vector[i];
3031 /* Link it into the needing rehash chain. */
3032 next_vector[i] = fixnum_value(hash_table[11]);
3033 hash_table[11] = make_fixnum(i);
3036 unsigned prior = index_vector[old_index];
3037 unsigned next = next_vector[prior];
3039 /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
3042 /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
3045 next_vector[prior] = next_vector[next];
3046 /* Link it into the needing rehash chain. */
3049 fixnum_value(hash_table[11]);
3050 hash_table[11] = make_fixnum(next);
3055 next = next_vector[next];
3063 return (CEILING(kv_length + 2, 2));
3067 trans_vector(lispobj object)
3069 struct vector *vector;
3072 gc_assert(Pointerp(object));
3074 vector = (struct vector *) PTR(object);
3076 length = fixnum_value(vector->length);
3077 nwords = CEILING(length + 2, 2);
3079 return copy_large_object(object, nwords);
3083 size_vector(lispobj *where)
3085 struct vector *vector;
3088 vector = (struct vector *) where;
3089 length = fixnum_value(vector->length);
3090 nwords = CEILING(length + 2, 2);
3097 scav_vector_bit(lispobj *where, lispobj object)
3099 struct vector *vector;
3102 vector = (struct vector *) where;
3103 length = fixnum_value(vector->length);
3104 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3110 trans_vector_bit(lispobj object)
3112 struct vector *vector;
3115 gc_assert(Pointerp(object));
3117 vector = (struct vector *) PTR(object);
3118 length = fixnum_value(vector->length);
3119 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3121 return copy_large_unboxed_object(object, nwords);
3125 size_vector_bit(lispobj *where)
3127 struct vector *vector;
3130 vector = (struct vector *) where;
3131 length = fixnum_value(vector->length);
3132 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3139 scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3141 struct vector *vector;
3144 vector = (struct vector *) where;
3145 length = fixnum_value(vector->length);
3146 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3152 trans_vector_unsigned_byte_2(lispobj object)
3154 struct vector *vector;
3157 gc_assert(Pointerp(object));
3159 vector = (struct vector *) PTR(object);
3160 length = fixnum_value(vector->length);
3161 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3163 return copy_large_unboxed_object(object, nwords);
3167 size_vector_unsigned_byte_2(lispobj *where)
3169 struct vector *vector;
3172 vector = (struct vector *) where;
3173 length = fixnum_value(vector->length);
3174 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3181 scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3183 struct vector *vector;
3186 vector = (struct vector *) where;
3187 length = fixnum_value(vector->length);
3188 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3194 trans_vector_unsigned_byte_4(lispobj object)
3196 struct vector *vector;
3199 gc_assert(Pointerp(object));
3201 vector = (struct vector *) PTR(object);
3202 length = fixnum_value(vector->length);
3203 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3205 return copy_large_unboxed_object(object, nwords);
3209 size_vector_unsigned_byte_4(lispobj *where)
3211 struct vector *vector;
3214 vector = (struct vector *) where;
3215 length = fixnum_value(vector->length);
3216 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3222 scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3224 struct vector *vector;
3227 vector = (struct vector *) where;
3228 length = fixnum_value(vector->length);
3229 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3235 trans_vector_unsigned_byte_8(lispobj object)
3237 struct vector *vector;
3240 gc_assert(Pointerp(object));
3242 vector = (struct vector *) PTR(object);
3243 length = fixnum_value(vector->length);
3244 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3246 return copy_large_unboxed_object(object, nwords);
3250 size_vector_unsigned_byte_8(lispobj *where)
3252 struct vector *vector;
3255 vector = (struct vector *) where;
3256 length = fixnum_value(vector->length);
3257 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3264 scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3266 struct vector *vector;
3269 vector = (struct vector *) where;
3270 length = fixnum_value(vector->length);
3271 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3277 trans_vector_unsigned_byte_16(lispobj object)
3279 struct vector *vector;
3282 gc_assert(Pointerp(object));
3284 vector = (struct vector *) PTR(object);
3285 length = fixnum_value(vector->length);
3286 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3288 return copy_large_unboxed_object(object, nwords);
3292 size_vector_unsigned_byte_16(lispobj *where)
3294 struct vector *vector;
3297 vector = (struct vector *) where;
3298 length = fixnum_value(vector->length);
3299 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3305 scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3307 struct vector *vector;
3310 vector = (struct vector *) where;
3311 length = fixnum_value(vector->length);
3312 nwords = CEILING(length + 2, 2);
3318 trans_vector_unsigned_byte_32(lispobj object)
3320 struct vector *vector;
3323 gc_assert(Pointerp(object));
3325 vector = (struct vector *) PTR(object);
3326 length = fixnum_value(vector->length);
3327 nwords = CEILING(length + 2, 2);
3329 return copy_large_unboxed_object(object, nwords);
3333 size_vector_unsigned_byte_32(lispobj *where)
3335 struct vector *vector;
3338 vector = (struct vector *) where;
3339 length = fixnum_value(vector->length);
3340 nwords = CEILING(length + 2, 2);
3346 scav_vector_single_float(lispobj *where, lispobj object)
3348 struct vector *vector;
3351 vector = (struct vector *) where;
3352 length = fixnum_value(vector->length);
3353 nwords = CEILING(length + 2, 2);
3359 trans_vector_single_float(lispobj object)
3361 struct vector *vector;
3364 gc_assert(Pointerp(object));
3366 vector = (struct vector *) PTR(object);
3367 length = fixnum_value(vector->length);
3368 nwords = CEILING(length + 2, 2);
3370 return copy_large_unboxed_object(object, nwords);
3374 size_vector_single_float(lispobj *where)
3376 struct vector *vector;
3379 vector = (struct vector *) where;
3380 length = fixnum_value(vector->length);
3381 nwords = CEILING(length + 2, 2);
3387 scav_vector_double_float(lispobj *where, lispobj object)
3389 struct vector *vector;
3392 vector = (struct vector *) where;
3393 length = fixnum_value(vector->length);
3394 nwords = CEILING(length * 2 + 2, 2);
3400 trans_vector_double_float(lispobj object)
3402 struct vector *vector;
3405 gc_assert(Pointerp(object));
3407 vector = (struct vector *) PTR(object);
3408 length = fixnum_value(vector->length);
3409 nwords = CEILING(length * 2 + 2, 2);
3411 return copy_large_unboxed_object(object, nwords);
3415 size_vector_double_float(lispobj *where)
3417 struct vector *vector;
3420 vector = (struct vector *) where;
3421 length = fixnum_value(vector->length);
3422 nwords = CEILING(length * 2 + 2, 2);
3427 #ifdef type_SimpleArrayLongFloat
3429 scav_vector_long_float(lispobj *where, lispobj object)
3431 struct vector *vector;
3434 vector = (struct vector *) where;
3435 length = fixnum_value(vector->length);
3436 nwords = CEILING(length * 3 + 2, 2);
3442 trans_vector_long_float(lispobj object)
3444 struct vector *vector;
3447 gc_assert(Pointerp(object));
3449 vector = (struct vector *) PTR(object);
3450 length = fixnum_value(vector->length);
3451 nwords = CEILING(length * 3 + 2, 2);
3453 return copy_large_unboxed_object(object, nwords);
3457 size_vector_long_float(lispobj *where)
3459 struct vector *vector;
3462 vector = (struct vector *) where;
3463 length = fixnum_value(vector->length);
3464 nwords = CEILING(length * 3 + 2, 2);
3471 #ifdef type_SimpleArrayComplexSingleFloat
3473 scav_vector_complex_single_float(lispobj *where, lispobj object)
3475 struct vector *vector;
3478 vector = (struct vector *) where;
3479 length = fixnum_value(vector->length);
3480 nwords = CEILING(length * 2 + 2, 2);
3486 trans_vector_complex_single_float(lispobj object)
3488 struct vector *vector;
3491 gc_assert(Pointerp(object));
3493 vector = (struct vector *) PTR(object);
3494 length = fixnum_value(vector->length);
3495 nwords = CEILING(length * 2 + 2, 2);
3497 return copy_large_unboxed_object(object, nwords);
3501 size_vector_complex_single_float(lispobj *where)
3503 struct vector *vector;
3506 vector = (struct vector *) where;
3507 length = fixnum_value(vector->length);
3508 nwords = CEILING(length * 2 + 2, 2);
3514 #ifdef type_SimpleArrayComplexDoubleFloat
3516 scav_vector_complex_double_float(lispobj *where, lispobj object)
3518 struct vector *vector;
3521 vector = (struct vector *) where;
3522 length = fixnum_value(vector->length);
3523 nwords = CEILING(length * 4 + 2, 2);
3529 trans_vector_complex_double_float(lispobj object)
3531 struct vector *vector;
3534 gc_assert(Pointerp(object));
3536 vector = (struct vector *) PTR(object);
3537 length = fixnum_value(vector->length);
3538 nwords = CEILING(length * 4 + 2, 2);
3540 return copy_large_unboxed_object(object, nwords);
3544 size_vector_complex_double_float(lispobj *where)
3546 struct vector *vector;
3549 vector = (struct vector *) where;
3550 length = fixnum_value(vector->length);
3551 nwords = CEILING(length * 4 + 2, 2);
3558 #ifdef type_SimpleArrayComplexLongFloat
3560 scav_vector_complex_long_float(lispobj *where, lispobj object)
3562 struct vector *vector;
3565 vector = (struct vector *) where;
3566 length = fixnum_value(vector->length);
3567 nwords = CEILING(length * 6 + 2, 2);
3573 trans_vector_complex_long_float(lispobj object)
3575 struct vector *vector;
3578 gc_assert(Pointerp(object));
3580 vector = (struct vector *) PTR(object);
3581 length = fixnum_value(vector->length);
3582 nwords = CEILING(length * 6 + 2, 2);
3584 return copy_large_unboxed_object(object, nwords);
3588 size_vector_complex_long_float(lispobj *where)
3590 struct vector *vector;
3593 vector = (struct vector *) where;
3594 length = fixnum_value(vector->length);
3595 nwords = CEILING(length * 6 + 2, 2);
3606 /* XX This is a hack adapted from cgc.c. Weak pointers don't work too
3607 * well with the gencgc, as a list of the weak pointers is maintained
3608 * within the objects themselves, which causes writes to the pages. A
3609 * limited attempt is made to avoid unnecessary writes, but this needs a re-think. */
3611 #define WEAK_POINTER_NWORDS \
3612 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3615 scav_weak_pointer(lispobj *where, lispobj object)
3617 struct weak_pointer *wp = weak_pointers;
3618 /* Push the weak pointer onto the list of weak pointers.
3619 * Do I have to watch for duplicates? Originally this was
3620 * part of trans_weak_pointer but that didn't work in the
3621 * case where the WP was in a promoted region.
3624 /* Check whether it's already in the list. */
3625 while (wp != NULL) {
3626 if (wp == (struct weak_pointer*)where) {
3632 /* Add it to the start of the list. */
3633 wp = (struct weak_pointer*)where;
3634 if (wp->next != weak_pointers) {
3635 wp->next = weak_pointers;
3637 /*SHOW("avoided write to weak pointer");*/
3642 /* Do not let GC scavenge the value slot of the weak pointer.
3643 * (That is why it is a weak pointer.) */
3645 return WEAK_POINTER_NWORDS;
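/* So weak pointers are handled in two passes: scavenging merely
 * chains them onto the weak_pointers list without touching the value
 * slot, and after the main GC scan_weak_pointers() either follows
 * the forwarding pointer to update the value or, if the referent was
 * not transported (i.e. it is dead), breaks the weak pointer. */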
3649 trans_weak_pointer(lispobj object)
3652 struct weak_pointer *wp;
3654 gc_assert(Pointerp(object));
3656 #if defined(DEBUG_WEAK)
3657 FSHOW((stderr, "Transporting weak pointer from 0x%08x\n", object));
3660 /* We need to remember where all the transported weak pointers are */
3661 /* so they can be fixed up in a post-GC pass. */
3663 copy = copy_object(object, WEAK_POINTER_NWORDS);
3664 /* wp = (struct weak_pointer *) PTR(copy);*/
3667 /* Push the weak pointer onto the list of weak pointers. */
3668 /* wp->next = weak_pointers;
3669 * weak_pointers = wp;*/
3675 size_weak_pointer(lispobj *where)
3677 return WEAK_POINTER_NWORDS;
3680 void scan_weak_pointers(void)
3682 struct weak_pointer *wp;
3683 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3684 lispobj value = wp->value;
3685 lispobj first, *first_pointer;
3687 first_pointer = (lispobj *)PTR(value);
3690 FSHOW((stderr, "/weak pointer at 0x%08x\n", (unsigned long) wp));
3691 FSHOW((stderr, "/value: 0x%08x\n", (unsigned long) value));
3694 if (Pointerp(value) && from_space_p(value)) {
3695 /* Now, we need to check whether the object has been forwarded. If
3696 * it has been, the weak pointer is still good and needs to be
3697 * updated. Otherwise, the weak pointer needs to be nil'ed out. */
3699 if (first_pointer[0] == 0x01) {
3700 wp->value = first_pointer[1];
3716 scav_lose(lispobj *where, lispobj object)
3718 lose("no scavenge function for object 0x%08x", (unsigned long) object);
3719 return 0; /* bogus return value to satisfy static type checking */
3723 trans_lose(lispobj object)
3725 lose("no transport function for object 0x%08x", (unsigned long) object);
3726 return NIL; /* bogus return value to satisfy static type checking */
3730 size_lose(lispobj *where)
3732 lose("no size function for object at 0x%08x", (unsigned long) where);
3733 return 1; /* bogus return value to satisfy static type checking */
3737 gc_init_tables(void)
3741 /* Set default value in all slots of scavenge table. */
3742 for (i = 0; i < 256; i++) { /* FIXME: bare constant length, ick! */
3743 scavtab[i] = scav_lose;
3746 /* For each type which can be selected by the low 3 bits of the tag
3747 * alone, set multiple entries in our 8-bit scavenge table (one for each
3748 * possible value of the high 5 bits). */
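/* E.g. type_ListPointer occupies the low 3 bits, so all 32 byte
 * values of the form (i<<3)|type_ListPointer, i = 0..31, dispatch to
 * scav_list_pointer; a header type such as type_Bignum instead gets
 * its own single entry further below. */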
3749 for (i = 0; i < 32; i++) { /* FIXME: bare constant length, ick! */
3750 scavtab[type_EvenFixnum|(i<<3)] = scav_immediate;
3751 scavtab[type_FunctionPointer|(i<<3)] = scav_function_pointer;
3752 /* OtherImmediate0 */
3753 scavtab[type_ListPointer|(i<<3)] = scav_list_pointer;
3754 scavtab[type_OddFixnum|(i<<3)] = scav_immediate;
3755 scavtab[type_InstancePointer|(i<<3)] = scav_instance_pointer;
3756 /* OtherImmediate1 */
3757 scavtab[type_OtherPointer|(i<<3)] = scav_other_pointer;
3760 /* Other-pointer types (those selected by all eight bits of the tag) get
3761 * one entry each in the scavenge table. */
3762 scavtab[type_Bignum] = scav_unboxed;
3763 scavtab[type_Ratio] = scav_boxed;
3764 scavtab[type_SingleFloat] = scav_unboxed;
3765 scavtab[type_DoubleFloat] = scav_unboxed;
3766 #ifdef type_LongFloat
3767 scavtab[type_LongFloat] = scav_unboxed;
3769 scavtab[type_Complex] = scav_boxed;
3770 #ifdef type_ComplexSingleFloat
3771 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3773 #ifdef type_ComplexDoubleFloat
3774 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3776 #ifdef type_ComplexLongFloat
3777 scavtab[type_ComplexLongFloat] = scav_unboxed;
3779 scavtab[type_SimpleArray] = scav_boxed;
3780 scavtab[type_SimpleString] = scav_string;
3781 scavtab[type_SimpleBitVector] = scav_vector_bit;
3782 scavtab[type_SimpleVector] = scav_vector;
3783 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3784 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3785 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3786 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3787 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3788 #ifdef type_SimpleArraySignedByte8
3789 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3791 #ifdef type_SimpleArraySignedByte16
3792 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3794 #ifdef type_SimpleArraySignedByte30
3795 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3797 #ifdef type_SimpleArraySignedByte32
3798 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3800 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3801 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3802 #ifdef type_SimpleArrayLongFloat
3803 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3805 #ifdef type_SimpleArrayComplexSingleFloat
3806 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3808 #ifdef type_SimpleArrayComplexDoubleFloat
3809 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3811 #ifdef type_SimpleArrayComplexLongFloat
3812 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3814 scavtab[type_ComplexString] = scav_boxed;
3815 scavtab[type_ComplexBitVector] = scav_boxed;
3816 scavtab[type_ComplexVector] = scav_boxed;
3817 scavtab[type_ComplexArray] = scav_boxed;
3818 scavtab[type_CodeHeader] = scav_code_header;
3819 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3820 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3821 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3823 scavtab[type_ClosureHeader] = scav_closure_header;
3824 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3825 scavtab[type_ByteCodeFunction] = scav_closure_header;
3826 scavtab[type_ByteCodeClosure] = scav_closure_header;
3828 scavtab[type_ClosureHeader] = scav_boxed;
3829 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3830 scavtab[type_ByteCodeFunction] = scav_boxed;
3831 scavtab[type_ByteCodeClosure] = scav_boxed;
3833 scavtab[type_ValueCellHeader] = scav_boxed;
3834 scavtab[type_SymbolHeader] = scav_boxed;
3835 scavtab[type_BaseChar] = scav_immediate;
3836 scavtab[type_Sap] = scav_unboxed;
3837 scavtab[type_UnboundMarker] = scav_immediate;
3838 scavtab[type_WeakPointer] = scav_weak_pointer;
3839 scavtab[type_InstanceHeader] = scav_boxed;
3840 scavtab[type_Fdefn] = scav_fdefn;
3842 /* transport other table, initialized same way as scavtab */
3843 for (i = 0; i < 256; i++)
3844 transother[i] = trans_lose;
3845 transother[type_Bignum] = trans_unboxed;
3846 transother[type_Ratio] = trans_boxed;
3847 transother[type_SingleFloat] = trans_unboxed;
3848 transother[type_DoubleFloat] = trans_unboxed;
3849 #ifdef type_LongFloat
3850 transother[type_LongFloat] = trans_unboxed;
3852 transother[type_Complex] = trans_boxed;
3853 #ifdef type_ComplexSingleFloat
3854 transother[type_ComplexSingleFloat] = trans_unboxed;
3856 #ifdef type_ComplexDoubleFloat
3857 transother[type_ComplexDoubleFloat] = trans_unboxed;
3859 #ifdef type_ComplexLongFloat
3860 transother[type_ComplexLongFloat] = trans_unboxed;
3862 transother[type_SimpleArray] = trans_boxed_large;
3863 transother[type_SimpleString] = trans_string;
3864 transother[type_SimpleBitVector] = trans_vector_bit;
3865 transother[type_SimpleVector] = trans_vector;
3866 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3867 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3868 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3869 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3870 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3871 #ifdef type_SimpleArraySignedByte8
3872 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3874 #ifdef type_SimpleArraySignedByte16
3875 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
3877 #ifdef type_SimpleArraySignedByte30
3878 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
3880 #ifdef type_SimpleArraySignedByte32
3881 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
3883 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
3884 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
3885 #ifdef type_SimpleArrayLongFloat
3886 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
3888 #ifdef type_SimpleArrayComplexSingleFloat
3889 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
3891 #ifdef type_SimpleArrayComplexDoubleFloat
3892 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
3894 #ifdef type_SimpleArrayComplexLongFloat
3895 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
3897 transother[type_ComplexString] = trans_boxed;
3898 transother[type_ComplexBitVector] = trans_boxed;
3899 transother[type_ComplexVector] = trans_boxed;
3900 transother[type_ComplexArray] = trans_boxed;
3901 transother[type_CodeHeader] = trans_code_header;
3902 transother[type_FunctionHeader] = trans_function_header;
3903 transother[type_ClosureFunctionHeader] = trans_function_header;
3904 transother[type_ReturnPcHeader] = trans_return_pc_header;
3905 transother[type_ClosureHeader] = trans_boxed;
3906 transother[type_FuncallableInstanceHeader] = trans_boxed;
3907 transother[type_ByteCodeFunction] = trans_boxed;
3908 transother[type_ByteCodeClosure] = trans_boxed;
3909 transother[type_ValueCellHeader] = trans_boxed;
3910 transother[type_SymbolHeader] = trans_boxed;
3911 transother[type_BaseChar] = trans_immediate;
3912 transother[type_Sap] = trans_unboxed;
3913 transother[type_UnboundMarker] = trans_immediate;
3914 transother[type_WeakPointer] = trans_weak_pointer;
3915 transother[type_InstanceHeader] = trans_boxed;
3916 transother[type_Fdefn] = trans_boxed;
3918 /* size table, initialized the same way as scavtab */
3919 for (i = 0; i < 256; i++)
3920 sizetab[i] = size_lose;
3921 for (i = 0; i < 32; i++) {
3922 sizetab[type_EvenFixnum|(i<<3)] = size_immediate;
3923 sizetab[type_FunctionPointer|(i<<3)] = size_pointer;
3924 /* OtherImmediate0 */
3925 sizetab[type_ListPointer|(i<<3)] = size_pointer;
3926 sizetab[type_OddFixnum|(i<<3)] = size_immediate;
3927 sizetab[type_InstancePointer|(i<<3)] = size_pointer;
3928 /* OtherImmediate1 */
3929 sizetab[type_OtherPointer|(i<<3)] = size_pointer;
3931 sizetab[type_Bignum] = size_unboxed;
3932 sizetab[type_Ratio] = size_boxed;
3933 sizetab[type_SingleFloat] = size_unboxed;
3934 sizetab[type_DoubleFloat] = size_unboxed;
3935 #ifdef type_LongFloat
3936 sizetab[type_LongFloat] = size_unboxed;
3938 sizetab[type_Complex] = size_boxed;
3939 #ifdef type_ComplexSingleFloat
3940 sizetab[type_ComplexSingleFloat] = size_unboxed;
3942 #ifdef type_ComplexDoubleFloat
3943 sizetab[type_ComplexDoubleFloat] = size_unboxed;
3945 #ifdef type_ComplexLongFloat
3946 sizetab[type_ComplexLongFloat] = size_unboxed;
3948 sizetab[type_SimpleArray] = size_boxed;
3949 sizetab[type_SimpleString] = size_string;
3950 sizetab[type_SimpleBitVector] = size_vector_bit;
3951 sizetab[type_SimpleVector] = size_vector;
3952 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
3953 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
3954 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
3955 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
3956 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
3957 #ifdef type_SimpleArraySignedByte8
3958 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
3960 #ifdef type_SimpleArraySignedByte16
3961 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
3963 #ifdef type_SimpleArraySignedByte30
3964 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
3966 #ifdef type_SimpleArraySignedByte32
3967 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
3969 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
3970 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
3971 #ifdef type_SimpleArrayLongFloat
3972 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
3974 #ifdef type_SimpleArrayComplexSingleFloat
3975 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
3977 #ifdef type_SimpleArrayComplexDoubleFloat
3978 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
3980 #ifdef type_SimpleArrayComplexLongFloat
3981 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
3983 sizetab[type_ComplexString] = size_boxed;
3984 sizetab[type_ComplexBitVector] = size_boxed;
3985 sizetab[type_ComplexVector] = size_boxed;
3986 sizetab[type_ComplexArray] = size_boxed;
3987 sizetab[type_CodeHeader] = size_code_header;
3989 /* We shouldn't see these, so just lose if it happens. */
3990 sizetab[type_FunctionHeader] = size_function_header;
3991 sizetab[type_ClosureFunctionHeader] = size_function_header;
3992 sizetab[type_ReturnPcHeader] = size_return_pc_header;
3994 sizetab[type_ClosureHeader] = size_boxed;
3995 sizetab[type_FuncallableInstanceHeader] = size_boxed;
3996 sizetab[type_ValueCellHeader] = size_boxed;
3997 sizetab[type_SymbolHeader] = size_boxed;
3998 sizetab[type_BaseChar] = size_immediate;
3999 sizetab[type_Sap] = size_unboxed;
4000 sizetab[type_UnboundMarker] = size_immediate;
4001 sizetab[type_WeakPointer] = size_weak_pointer;
4002 sizetab[type_InstanceHeader] = size_boxed;
4003 sizetab[type_Fdefn] = size_boxed;
4006 /* Scan an area looking for an object which encloses the given pointer.
4007 * Return the object start on success or NULL on failure. */
4009 search_space(lispobj *start, size_t words, lispobj *pointer)
4013 lispobj thing = *start;
4015 /* If thing is an immediate then this is a cons */
4017 || ((thing & 3) == 0) /* fixnum */
4018 || (TypeOf(thing) == type_BaseChar)
4019 || (TypeOf(thing) == type_UnboundMarker))
4022 count = (sizetab[TypeOf(thing)])(start);
4024 /* Check whether the pointer is within this object. */
4025 if ((pointer >= start) && (pointer < (start+count))) {
4027 /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/
4031 /* Round up the count */
4032 count = CEILING(count,2);
4041 search_read_only_space(lispobj *pointer)
4043 lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
4044 lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4045 if ((pointer < start) || (pointer >= end))
4047 return (search_space(start, (pointer+2)-start, pointer));
4051 search_static_space(lispobj *pointer)
4053 lispobj* start = (lispobj*)static_space;
4054 lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
4055 if ((pointer < start) || (pointer >= end))
4057 return (search_space(start, (pointer+2)-start, pointer));
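/* In both searches above the word count (pointer+2)-start need only
 * reach just past the pointer: any object enclosing the pointer must
 * start at or before it, so the scan never needs to look beyond that
 * limit. */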
4060 /* a faster version for searching the dynamic space. This will work even
4061 * if the object is in a current allocation region. */
4063 search_dynamic_space(lispobj *pointer)
4065 int page_index = find_page_index(pointer);
4068 /* Address may be invalid - do some checks. */
4069 if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
4071 start = (lispobj *)((void *)page_address(page_index)
4072 + page_table[page_index].first_object_offset);
4073 return (search_space(start, (pointer+2)-start, pointer));
4076 /* FIXME: There is a strong family resemblance between this function
4077 * and the function of the same name in purify.c. Would it be possible
4078 * to implement them as exactly the same function? */
4080 valid_dynamic_space_pointer(lispobj *pointer)
4082 lispobj *start_addr;
4084 /* Find the object start address */
4085 if ((start_addr = search_dynamic_space(pointer)) == NULL) {
4089 /* We need to allow raw pointers into Code objects for return
4090 * addresses. This will also pick up pointers to functions in code
4091 * objects. */
4092 if (TypeOf(*start_addr) == type_CodeHeader) {
4093 /* X Could do some further checks here. */
4097 /* If it's not a return address then it needs to be a valid Lisp
4098 * pointer. */
4099 if (!Pointerp((lispobj)pointer)) {
4103 /* Check that the object pointed to is consistent with the pointer
4104 * low tag. */
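/* E.g. a list pointer (lowtag type_ListPointer) is only plausible if
 * it equals start_addr + type_ListPointer and start_addr holds
 * something cons-like; an other-pointer must land on a word that is
 * a header, not a pointer or a fixnum. */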
4105 switch (LowtagOf((lispobj)pointer)) {
4106 case type_FunctionPointer:
4107 /* Start_addr should be the enclosing code object, or a closure
4108 * header. */
4109 switch (TypeOf(*start_addr)) {
4110 case type_CodeHeader:
4111 /* This case is probably caught above. */
4113 case type_ClosureHeader:
4114 case type_FuncallableInstanceHeader:
4115 case type_ByteCodeFunction:
4116 case type_ByteCodeClosure:
4117 if ((int)pointer != ((int)start_addr+type_FunctionPointer)) {
4121 pointer, start_addr, *start_addr));
4129 pointer, start_addr, *start_addr));
4133 case type_ListPointer:
4134 if ((int)pointer != ((int)start_addr+type_ListPointer)) {
4138 pointer, start_addr, *start_addr));
4141 /* Is it a plausible cons? */
4142 if ((Pointerp(start_addr[0])
4143 || ((start_addr[0] & 3) == 0) /* fixnum */
4144 || (TypeOf(start_addr[0]) == type_BaseChar)
4145 || (TypeOf(start_addr[0]) == type_UnboundMarker))
4146 && (Pointerp(start_addr[1])
4147 || ((start_addr[1] & 3) == 0) /* fixnum */
4148 || (TypeOf(start_addr[1]) == type_BaseChar)
4149 || (TypeOf(start_addr[1]) == type_UnboundMarker)))
4155 pointer, start_addr, *start_addr));
4158 case type_InstancePointer:
4159 if ((int)pointer != ((int)start_addr+type_InstancePointer)) {
4163 pointer, start_addr, *start_addr));
4166 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4170 pointer, start_addr, *start_addr));
4174 case type_OtherPointer:
4175 if ((int)pointer != ((int)start_addr+type_OtherPointer)) {
4179 pointer, start_addr, *start_addr));
4182 /* Is it plausible? It's not a cons. XX: should check the headers. */
4183 if (Pointerp(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
4187 pointer, start_addr, *start_addr));
4190 switch (TypeOf(start_addr[0])) {
4191 case type_UnboundMarker:
4196 pointer, start_addr, *start_addr));
4199 /* only pointed to by function pointers? */
4200 case type_ClosureHeader:
4201 case type_FuncallableInstanceHeader:
4202 case type_ByteCodeFunction:
4203 case type_ByteCodeClosure:
4207 pointer, start_addr, *start_addr));
4210 case type_InstanceHeader:
4214 pointer, start_addr, *start_addr));
4217 /* the valid other immediate pointer objects */
4218 case type_SimpleVector:
4221 #ifdef type_ComplexSingleFloat
4222 case type_ComplexSingleFloat:
4224 #ifdef type_ComplexDoubleFloat
4225 case type_ComplexDoubleFloat:
4227 #ifdef type_ComplexLongFloat
4228 case type_ComplexLongFloat:
4230 case type_SimpleArray:
4231 case type_ComplexString:
4232 case type_ComplexBitVector:
4233 case type_ComplexVector:
4234 case type_ComplexArray:
4235 case type_ValueCellHeader:
4236 case type_SymbolHeader:
4238 case type_CodeHeader:
4240 case type_SingleFloat:
4241 case type_DoubleFloat:
4242 #ifdef type_LongFloat
4243 case type_LongFloat:
4245 case type_SimpleString:
4246 case type_SimpleBitVector:
4247 case type_SimpleArrayUnsignedByte2:
4248 case type_SimpleArrayUnsignedByte4:
4249 case type_SimpleArrayUnsignedByte8:
4250 case type_SimpleArrayUnsignedByte16:
4251 case type_SimpleArrayUnsignedByte32:
4252 #ifdef type_SimpleArraySignedByte8
4253 case type_SimpleArraySignedByte8:
4255 #ifdef type_SimpleArraySignedByte16
4256 case type_SimpleArraySignedByte16:
4258 #ifdef type_SimpleArraySignedByte30
4259 case type_SimpleArraySignedByte30:
4261 #ifdef type_SimpleArraySignedByte32
4262 case type_SimpleArraySignedByte32:
4264 case type_SimpleArraySingleFloat:
4265 case type_SimpleArrayDoubleFloat:
4266 #ifdef type_SimpleArrayLongFloat
4267 case type_SimpleArrayLongFloat:
4269 #ifdef type_SimpleArrayComplexSingleFloat
4270 case type_SimpleArrayComplexSingleFloat:
4272 #ifdef type_SimpleArrayComplexDoubleFloat
4273 case type_SimpleArrayComplexDoubleFloat:
4275 #ifdef type_SimpleArrayComplexLongFloat
4276 case type_SimpleArrayComplexLongFloat:
4279 case type_WeakPointer:
4286 pointer, start_addr, *start_addr));
4294 pointer, start_addr, *start_addr));
4302 /* Adjust large bignum and vector objects. This will adjust the allocated
4303 * region if the size has shrunk, and move unboxed objects into unboxed
4304 * pages. The pages are not promoted here, and the promoted region is not
4305 * added to the new_regions; this is really only designed to be called from
4306 * preserve_pointer(). Nothing should fail if this is missed; it may just
4307 * delay the moving of objects to unboxed pages and the freeing of pages. */
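/* A sketch of the bookkeeping, assuming 4096-byte pages: if a large
 * vector that filled three pages is shrunk so that it now needs only
 * 9000 bytes, the first two pages stay full (bytes_used == 4096),
 * the third gets bytes_used = 9000 - 2*4096 = 808, and any following
 * pages of the old region are freed. */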
4309 maybe_adjust_large_object(lispobj *where)
4313 lispobj *source, *dest;
4317 int remaining_bytes;
4324 /* Check whether it's a vector or bignum object. */
4325 switch (TypeOf(where[0])) {
4326 case type_SimpleVector:
4330 case type_SimpleString:
4331 case type_SimpleBitVector:
4332 case type_SimpleArrayUnsignedByte2:
4333 case type_SimpleArrayUnsignedByte4:
4334 case type_SimpleArrayUnsignedByte8:
4335 case type_SimpleArrayUnsignedByte16:
4336 case type_SimpleArrayUnsignedByte32:
4337 #ifdef type_SimpleArraySignedByte8
4338 case type_SimpleArraySignedByte8:
4340 #ifdef type_SimpleArraySignedByte16
4341 case type_SimpleArraySignedByte16:
4343 #ifdef type_SimpleArraySignedByte30
4344 case type_SimpleArraySignedByte30:
4346 #ifdef type_SimpleArraySignedByte32
4347 case type_SimpleArraySignedByte32:
4349 case type_SimpleArraySingleFloat:
4350 case type_SimpleArrayDoubleFloat:
4351 #ifdef type_SimpleArrayLongFloat
4352 case type_SimpleArrayLongFloat:
4354 #ifdef type_SimpleArrayComplexSingleFloat
4355 case type_SimpleArrayComplexSingleFloat:
4357 #ifdef type_SimpleArrayComplexDoubleFloat
4358 case type_SimpleArrayComplexDoubleFloat:
4360 #ifdef type_SimpleArrayComplexLongFloat
4361 case type_SimpleArrayComplexLongFloat:
4363 boxed = UNBOXED_PAGE;
4369 /* Find its current size. */
4370 nwords = (sizetab[TypeOf(where[0])])(where);
4372 first_page = find_page_index((void *)where);
4373 gc_assert(first_page >= 0);
4375 /* Note: Any page write-protection must be removed, else a later
4376 * scavenge_newspace may incorrectly not scavenge these pages.
4377 * This would not be necessary if they are added to the new areas,
4378 * but let's do it for them all (they'll probably be written
4379 * anyway). */
4381 gc_assert(page_table[first_page].first_object_offset == 0);
4383 next_page = first_page;
4384 remaining_bytes = nwords*4;
4385 while (remaining_bytes > 4096) {
4386 gc_assert(page_table[next_page].gen == from_space);
4387 gc_assert((page_table[next_page].allocated == BOXED_PAGE)
4388 || (page_table[next_page].allocated == UNBOXED_PAGE));
4389 gc_assert(page_table[next_page].large_object);
4390 gc_assert(page_table[next_page].first_object_offset ==
4391 -4096*(next_page-first_page));
4392 gc_assert(page_table[next_page].bytes_used == 4096);
4394 page_table[next_page].allocated = boxed;
4396 /* Shouldn't be write-protected at this stage. Essential that the
4397 * pages aren't. */
4398 gc_assert(!page_table[next_page].write_protected);
4399 remaining_bytes -= 4096;
4403 /* Now only one page remains, but the object may have shrunk so
4404 * there may be more unused pages which will be freed. */
4406 /* Object may have shrunk but shouldn't have grown - check. */
4407 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4409 page_table[next_page].allocated = boxed;
4410 gc_assert(page_table[next_page].allocated ==
4411 page_table[first_page].allocated);
4413 /* Adjust the bytes_used. */
4414 old_bytes_used = page_table[next_page].bytes_used;
4415 page_table[next_page].bytes_used = remaining_bytes;
4417 bytes_freed = old_bytes_used - remaining_bytes;
4419 /* Free any remaining pages; needs care. */
4421 while ((old_bytes_used == 4096) &&
4422 (page_table[next_page].gen == from_space) &&
4423 ((page_table[next_page].allocated == UNBOXED_PAGE)
4424 || (page_table[next_page].allocated == BOXED_PAGE)) &&
4425 page_table[next_page].large_object &&
4426 (page_table[next_page].first_object_offset ==
4427 -(next_page - first_page)*4096)) {
4428 /* It checks out OK, free the page. We don't need to bother zeroing
4429 * pages as this should have been done before shrinking the
4430 * object. These pages shouldn't be write protected as they
4431 * should be zero filled. */
4432 gc_assert(page_table[next_page].write_protected == 0);
4434 old_bytes_used = page_table[next_page].bytes_used;
4435 page_table[next_page].allocated = FREE_PAGE;
4436 page_table[next_page].bytes_used = 0;
4437 bytes_freed += old_bytes_used;
4441 if ((bytes_freed > 0) && gencgc_verbose)
4442 FSHOW((stderr, "/adjust_large_object freed %d\n", bytes_freed));
4444 generations[from_space].bytes_allocated -= bytes_freed;
4445 bytes_allocated -= bytes_freed;
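/* An illustrative sketch (not part of the collector): the page_table
 * invariants for a large object, as asserted piecewise above. Every
 * page but the last is completely used (bytes_used == 4096) and
 * records in first_object_offset how far back the object's first
 * page lies; the last page holds the remainder. */
#if 0
static void
check_large_object_pages(int first_page, int nwords)
{
    int remaining_bytes = nwords*4;
    int next_page = first_page;
    while (remaining_bytes > 4096) {
	gc_assert(page_table[next_page].large_object);
	gc_assert(page_table[next_page].bytes_used == 4096);
	gc_assert(page_table[next_page].first_object_offset ==
		  -4096*(next_page - first_page));
	remaining_bytes -= 4096;
	next_page++;
    }
    /* The tail page holds whatever is left over. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
}
#endif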
4450 /* Take a possible pointer to a Lisp object and mark the page_table
4451 * so that it will not need changing during a GC.
4453 * This involves locating the page it points to, then backing up to
4454 * the first page that has its first object start at offset 0, and
4455 * then marking all pages dont_move from that first page up to the page that
4456 * ends the contiguous block (one that is not full, or whose successor is free or of a different gen).
4458 * This ensures that objects spanning pages are not broken.
4460 * It is assumed that all the page static flags have been cleared at
4461 * the start of a GC.
4463 * It is also assumed that the current gc_alloc region has been flushed and
4464 * the tables updated. */
4466 preserve_pointer(void *addr)
4468 int addr_page_index = find_page_index(addr);
4471 unsigned region_allocation;
4473 /* Address is quite likely to have been invalid - do some checks. */
4474 if ((addr_page_index == -1)
4475 || (page_table[addr_page_index].allocated == FREE_PAGE)
4476 || (page_table[addr_page_index].bytes_used == 0)
4477 || (page_table[addr_page_index].gen != from_space)
4478 /* Skip if already marked dont_move */
4479 || (page_table[addr_page_index].dont_move != 0))
4482 region_allocation = page_table[addr_page_index].allocated;
4484 /* Check the offset within the page */
4485 if (((int)addr & 0xfff) > page_table[addr_page_index].bytes_used)
4488 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4491 /* Work backwards to find a page with a first_object_offset of 0.
4492 * The pages should be contiguous with all bytes used in the same
4493 * gen. Assumes the first_object_offset is negative or zero. */
4494 first_page = addr_page_index;
4495 while (page_table[first_page].first_object_offset != 0) {
4497 /* Do some checks. */
4498 gc_assert(page_table[first_page].bytes_used == 4096);
4499 gc_assert(page_table[first_page].gen == from_space);
4500 gc_assert(page_table[first_page].allocated == region_allocation);
4503 /* Adjust any large objects before promotion as they won't be copied
4504 * after promotion. */
4505 if (page_table[first_page].large_object) {
4506 maybe_adjust_large_object(page_address(first_page));
4507 /* If a large object has shrunk then addr may now point to a free
4508 * area in which case it's ignored here. Note it gets through the
4509 * valid pointer test above because the tail looks like conses. */
4510 if ((page_table[addr_page_index].allocated == FREE_PAGE)
4511 || (page_table[addr_page_index].bytes_used == 0)
4512 /* Check the offset within the page. */
4513 || (((int)addr & 0xfff)
4514 > page_table[addr_page_index].bytes_used)) {
4516 "weird? ignore ptr 0x%x to freed area of large object\n",
4520 /* It may have moved to unboxed pages. */
4521 region_allocation = page_table[first_page].allocated;
4524 /* Now work forward until the end of this contiguous area is found,
4525 * marking all pages as dont_move. */
4526 for (i = first_page; ;i++) {
4527 gc_assert(page_table[i].allocated == region_allocation);
4529 /* Mark the page static. */
4530 page_table[i].dont_move = 1;
4532 /* Move the page to the new_space. XX I'd rather not do this but
4533 * the GC logic is not quite able to cope with the static pages
4534 * remaining in the from space. This also requires the generation
4535 * bytes_allocated counters be updated. */
4536 page_table[i].gen = new_space;
4537 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4538 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4540 /* It is essential that the pages are not write protected as they
4541 * may have pointers into the old-space which need scavenging. They
4542 * shouldn't be write protected at this stage. */
4543 gc_assert(!page_table[i].write_protected);
4545 /* Check whether this is the last page in this contiguous block.. */
4546 if ((page_table[i].bytes_used < 4096)
4547 /* ..or it is 4096 and is the last in the block */
4548 || (page_table[i+1].allocated == FREE_PAGE)
4549 || (page_table[i+1].bytes_used == 0) /* next page free */
4550 || (page_table[i+1].gen != from_space) /* diff. gen */
4551 || (page_table[i+1].first_object_offset == 0))
4555 /* Check that the page is now static. */
4556 gc_assert(page_table[addr_page_index].dont_move != 0);
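/* A minimal sketch (not part of the collector) of how preserve_pointer()
 * is driven: every word of a conservative root region - e.g. the C
 * control stack, scanned in garbage_collect_generation() below - is
 * offered as a possible pointer, and the cheap rejection tests at the
 * top of preserve_pointer() weed out the non-pointers. */
#if 0
static void
preserve_region(void **start, void **end)
{
    void **ptr;
    for (ptr = start; ptr < end; ptr++)
	preserve_pointer(*ptr);
}
#endif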
4561 #ifdef CONTROL_STACKS
4562 /* Scavenge the thread stack conservative roots. */
4564 scavenge_thread_stacks(void)
4566 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4567 int type = TypeOf(thread_stacks);
4569 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4570 struct vector *vector = (struct vector *) PTR(thread_stacks);
4572 if (TypeOf(vector->header) != type_SimpleVector)
4574 length = fixnum_value(vector->length);
4575 for (i = 0; i < length; i++) {
4576 lispobj stack_obj = vector->data[i];
4577 if (LowtagOf(stack_obj) == type_OtherPointer) {
4578 struct vector *stack = (struct vector *) PTR(stack_obj);
4580 if (TypeOf(stack->header) !=
4581 type_SimpleArrayUnsignedByte32) {
4584 vector_length = fixnum_value(stack->length);
4585 if ((gencgc_verbose > 1) && (vector_length <= 0))
4587 "/weird? control stack vector length %d\n",
4589 if (vector_length > 0) {
4590 lispobj *stack_pointer = (lispobj*)stack->data[0];
4591 if ((stack_pointer < control_stack) ||
4592 (stack_pointer > control_stack_end))
4593 lose("invalid stack pointer %x",
4594 (unsigned)stack_pointer);
4595 if ((stack_pointer > control_stack) &&
4596 (stack_pointer < control_stack_end)) {
4597 unsigned int length = ((int)control_stack_end -
4598 (int)stack_pointer) / 4;
4600 if (length >= vector_length) {
4601 lose("invalid stack size %d >= vector length %d",
4605 if (gencgc_verbose > 1) {
4607 "scavenging %d words of control stack %d of length %d words.\n",
4608 length, i, vector_length));
4610 for (j = 0; j < length; j++) {
4611 preserve_pointer((void *)stack->data[1+j]);
4622 /* If the given page is not write-protected, then scan it for pointers
4623 * to younger generations or the top temp. generation; if no
4624 * suspicious pointers are found then the page is write-protected.
4626 * Care is taken to check for pointers to the current gc_alloc region
4627 * if it is a younger generation or the temp. generation. This frees
4628 * the caller from doing a gc_alloc_update_page_tables. Actually the
4629 * gc_alloc_generation does not need to be checked as this is only
4630 * called from scavenge_generation when the gc_alloc generation is
4631 * younger, so it just checks if there is a pointer to the current region.
4634 * We return 1 if the page was write-protected, else 0.
4637 update_page_write_prot(int page)
4639 int gen = page_table[page].gen;
4642 void **page_addr = (void **)page_address(page);
4643 int num_words = page_table[page].bytes_used / 4;
4645 /* Shouldn't be a free page. */
4646 gc_assert(page_table[page].allocated != FREE_PAGE);
4647 gc_assert(page_table[page].bytes_used != 0);
4649 /* Skip if it's already write-protected or an unboxed page. */
4650 if (page_table[page].write_protected
4651 || (page_table[page].allocated == UNBOXED_PAGE))
4654 /* Scan the page for pointers to younger generations or the
4655 * top temp. generation. */
4657 for (j = 0; j < num_words; j++) {
4658 void *ptr = *(page_addr+j);
4659 int index = find_page_index(ptr);
4661 /* Check that it's in the dynamic space */
4663 if (/* Does it point to a younger or the temp. generation? */
4664 ((page_table[index].allocated != FREE_PAGE)
4665 && (page_table[index].bytes_used != 0)
4666 && ((page_table[index].gen < gen)
4667 || (page_table[index].gen == NUM_GENERATIONS)))
4669 /* Or does it point within a current gc_alloc region? */
4670 || ((boxed_region.start_addr <= ptr)
4671 && (ptr <= boxed_region.free_pointer))
4672 || ((unboxed_region.start_addr <= ptr)
4673 && (ptr <= unboxed_region.free_pointer))) {
4680 /* Write-protect the page. */
4681 /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
4683 os_protect((void *)page_addr,
4685 OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
4687 /* Note the page as protected in the page tables. */
4688 page_table[page].write_protected = 1;
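/* An illustrative restatement (sketch only) of the per-word test in
 * the scan above: a page must stay unprotected if any word in it
 * looks like a pointer to a younger generation, to the scratch
 * generation NUM_GENERATIONS, or into a live gc_alloc region. */
#if 0
static int
word_blocks_write_protect(void *ptr, int gen)
{
    int index = find_page_index(ptr);
    if ((index != -1)
	&& (page_table[index].allocated != FREE_PAGE)
	&& (page_table[index].bytes_used != 0)
	&& ((page_table[index].gen < gen)
	    || (page_table[index].gen == NUM_GENERATIONS)))
	return 1;
    return ((boxed_region.start_addr <= ptr)
	    && (ptr <= boxed_region.free_pointer))
	|| ((unboxed_region.start_addr <= ptr)
	    && (ptr <= unboxed_region.free_pointer));
}
#endif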
4694 /* Scavenge a generation.
4696 * This will not resolve all pointers when generation is the new
4697 * space, as new objects may be added which are not checked here - use
4698 * scavenge_newspace_generation.
4700 * Write-protected pages should not have any pointers to the
4701 * from_space so do not need scavenging; thus write-protected pages are
4702 * not always scavenged. There is some code to check that these pages
4703 * are not written; but to check fully the write-protected pages need
4704 * to be scavenged by disabling the code to skip them.
4706 * Under the current scheme when a generation is GCed the younger
4707 * generations will be empty. So, when a generation is being GCed it
4708 * is only necessary to scavenge the older generations for pointers
4709 * not the younger. So a page that does not have pointers to younger
4710 * generations does not need to be scavenged.
4712 * The write-protection can be used to note pages that don't have
4713 * pointers to younger pages. But pages can be written without having
4714 * pointers to younger generations. After the pages are scavenged here
4715 * they can be scanned for pointers to younger generations and if
4716 * there are none the page can be write-protected.
4718 * One complication is when the newspace is the top temp. generation.
4720 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
4721 * that none were written, which they shouldn't be as they should have
4722 * no pointers to younger generations. This breaks down for weak
4723 * pointers as the objects contain a link to the next and are written
4724 * if a weak pointer is scavenged. Still it's a useful check. */
4726 scavenge_generation(int generation)
4733 /* Clear the write_protected_cleared flags on all pages. */
4734 for (i = 0; i < NUM_PAGES; i++)
4735 page_table[i].write_protected_cleared = 0;
4738 for (i = 0; i < last_free_page; i++) {
4739 if ((page_table[i].allocated == BOXED_PAGE)
4740 && (page_table[i].bytes_used != 0)
4741 && (page_table[i].gen == generation)) {
4744 /* This should be the start of a contiguous block. */
4745 gc_assert(page_table[i].first_object_offset == 0);
4747 /* We need to find the full extent of this contiguous
4748 * block in case objects span pages. */
4750 /* Now work forward until the end of this contiguous area
4751 * is found. A small area is preferred as there is a
4752 * better chance of its pages being write-protected. */
4753 for (last_page = i; ;last_page++)
4754 /* Check whether this is the last page in this contiguous
4756 if ((page_table[last_page].bytes_used < 4096)
4757 /* Or it is 4096 and is the last in the block */
4758 || (page_table[last_page+1].allocated != BOXED_PAGE)
4759 || (page_table[last_page+1].bytes_used == 0)
4760 || (page_table[last_page+1].gen != generation)
4761 || (page_table[last_page+1].first_object_offset == 0))
4764 /* Do a limited check for write_protected pages. If all pages
4765 * are write_protected then there is no need to scavenge. */
4768 for (j = i; j <= last_page; j++)
4769 if (page_table[j].write_protected == 0) {
4777 scavenge(page_address(i), (page_table[last_page].bytes_used
4778 + (last_page-i)*4096)/4);
4780 /* Now scan the pages and write protect those
4781 * that don't have pointers to younger
4783 if (enable_page_protection) {
4784 for (j = i; j <= last_page; j++) {
4785 num_wp += update_page_write_prot(j);
4794 if ((gencgc_verbose > 1) && (num_wp != 0)) {
4796 "/write protected %d pages within generation %d\n",
4797 num_wp, generation));
4801 /* Check that none of the write_protected pages in this generation
4802 * have been written to. */
4803 for (i = 0; i < NUM_PAGES; i++) {
4804 if ((page_table[i].allocated != FREE_PAGE)
4805 && (page_table[i].bytes_used != 0)
4806 && (page_table[i].gen == generation)
4807 && (page_table[i].write_protected_cleared != 0)) {
4808 FSHOW((stderr, "/scavenge_generation %d\n", generation));
4810 "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
4811 page_table[i].bytes_used,
4812 page_table[i].first_object_offset,
4813 page_table[i].dont_move));
4814 lose("write-protected page %d written to in scavenge_generation",
4822 /* Scavenge a newspace generation. As it is scavenged new objects may
4823 * be allocated to it; these will also need to be scavenged. This
4824 * repeats until there are no more objects unscavenged in the
4825 * newspace generation.
4827 * To help improve efficiency, areas written are recorded by
4828 * gc_alloc and only these are scavenged. Sometimes a little more will be
4829 * scavenged, but this causes no harm. An easy check is done that the
4830 * scavenged bytes equal the number allocated in the previous scavenge cycle.
4833 * Write-protected pages are not scanned except if they are marked
4834 * dont_move in which case they may have been promoted and still have
4835 * pointers to the from space.
4837 * Write-protected pages could potentially be written by alloc; however,
4838 * to avoid having to handle re-scavenging of write-protected pages,
4839 * gc_alloc does not write to write-protected pages.
4841 * New areas of objects allocated are recorded alternately in the two
4842 * new_areas arrays below. */
4843 static struct new_area new_areas_1[NUM_NEW_AREAS];
4844 static struct new_area new_areas_2[NUM_NEW_AREAS];
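/* A simplified sketch (the real recording is done by gc_alloc earlier
 * in this file) of how a written area would be appended to the array
 * currently selected by new_areas, using the overflow convention
 * checked in scavenge_newspace_generation() below: new_areas_index
 * keeps counting past NUM_NEW_AREAS, and an index beyond the array
 * forces a full rescan instead of replaying the recorded areas. */
#if 0
static void
record_new_area_sketch(int page, int offset, int size)
{
    if (record_new_objects == 0)
	return;			/* recording is switched off */
    if (new_areas_index < NUM_NEW_AREAS) {
	(*new_areas)[new_areas_index].page = page;
	(*new_areas)[new_areas_index].offset = offset;
	(*new_areas)[new_areas_index].size = size;
    }
    new_areas_index++;		/* may legitimately pass NUM_NEW_AREAS */
}
#endif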
4846 /* Do one full scan of the new space generation. This is not enough to
4847 * complete the job as new objects may be added to the generation in
4848 * the process which are not scavenged. */
4850 scavenge_newspace_generation_one_scan(int generation)
4855 "/starting one full scan of newspace generation %d\n",
4858 for (i = 0; i < last_free_page; i++) {
4859 if ((page_table[i].allocated == BOXED_PAGE)
4860 && (page_table[i].bytes_used != 0)
4861 && (page_table[i].gen == generation)
4862 && ((page_table[i].write_protected == 0)
4863 /* (This may be redundant as write_protected is now
4864 * cleared before promotion.) */
4865 || (page_table[i].dont_move == 1))) {
4868 /* The scavenge will start at the first_object_offset of page i.
4870 * We need to find the full extent of this contiguous block in case
4871 * objects span pages.
4873 * Now work forward until the end of this contiguous area is
4874 * found. A small area is preferred as there is a better chance
4875 * of its pages being write-protected. */
4876 for (last_page = i; ;last_page++) {
4877 /* Check whether this is the last page in this contiguous
4879 if ((page_table[last_page].bytes_used < 4096)
4880 /* Or it is 4096 and is the last in the block */
4881 || (page_table[last_page+1].allocated != BOXED_PAGE)
4882 || (page_table[last_page+1].bytes_used == 0)
4883 || (page_table[last_page+1].gen != generation)
4884 || (page_table[last_page+1].first_object_offset == 0))
4888 /* Do a limited check for write_protected pages. If all pages
4889 * are write_protected then there is no need to scavenge, except if
4890 * the pages are marked dont_move. */
4893 for (j = i; j <= last_page; j++)
4894 if ((page_table[j].write_protected == 0)
4895 || (page_table[j].dont_move != 0)) {
4905 /* Calculate the size. */
4907 size = (page_table[last_page].bytes_used
4908 - page_table[i].first_object_offset)/4;
4910 size = (page_table[last_page].bytes_used
4911 + (last_page-i)*4096
4912 - page_table[i].first_object_offset)/4;
4916 int a1 = bytes_allocated;
4919 "/scavenge(%x,%d)\n",
4921 + page_table[i].first_object_offset,
4924 new_areas_ignore_page = last_page;
4926 scavenge(page_address(i)+page_table[i].first_object_offset,size);
4929 /* Flush the alloc regions updating the tables. */
4930 gc_alloc_update_page_tables(0, &boxed_region);
4931 gc_alloc_update_page_tables(1, &unboxed_region);
4933 if ((all_wp != 0) && (a1 != bytes_allocated)) {
4935 "alloc'ed over %d to %d\n",
4938 "/page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
4939 page_table[i].bytes_used,
4940 page_table[i].first_object_offset,
4941 page_table[i].dont_move,
4942 page_table[i].write_protected,
4943 page_table[i].write_protected_cleared));
4955 /* Do a complete scavenge of the newspace generation. */
4957 scavenge_newspace_generation(int generation)
4961 /* the new_areas array currently being written to by gc_alloc */
4962 struct new_area (*current_new_areas)[] = &new_areas_1;
4963 int current_new_areas_index;
4964 int current_new_areas_allocated;
4966 /* the new_areas created by the previous scavenge cycle */
4967 struct new_area (*previous_new_areas)[] = NULL;
4968 int previous_new_areas_index;
4969 int previous_new_areas_allocated;
4971 #define SC_NS_GEN_CK 0
4973 /* Clear the write_protected_cleared flags on all pages. */
4974 for (i = 0; i < NUM_PAGES; i++)
4975 page_table[i].write_protected_cleared = 0;
4978 /* Flush the current regions updating the tables. */
4979 gc_alloc_update_page_tables(0, &boxed_region);
4980 gc_alloc_update_page_tables(1, &unboxed_region);
4982 /* Turn on the recording of new areas by gc_alloc. */
4983 new_areas = current_new_areas;
4984 new_areas_index = 0;
4986 /* Don't need to record new areas that get scavenged anyway during
4987 * scavenge_newspace_generation_one_scan. */
4988 record_new_objects = 1;
4990 /* Start with a full scavenge. */
4991 scavenge_newspace_generation_one_scan(generation);
4993 /* Record all new areas now. */
4994 record_new_objects = 2;
4996 /* Flush the current regions updating the tables. */
4997 gc_alloc_update_page_tables(0, &boxed_region);
4998 gc_alloc_update_page_tables(1, &unboxed_region);
5000 /* Grab new_areas_index. */
5001 current_new_areas_index = new_areas_index;
5004 "The first scan is finished; current_new_areas_index=%d.\n",
5005 current_new_areas_index));*/
5007 while (current_new_areas_index > 0) {
5008 /* Move the current to the previous new areas */
5009 previous_new_areas = current_new_areas;
5010 previous_new_areas_index = current_new_areas_index;
5012 /* Scavenge all the areas in previous new areas. Any new areas
5013 * allocated are saved in current_new_areas. */
5015 /* Allocate an array for current_new_areas; alternating between
5016 * new_areas_1 and 2 */
5017 if (previous_new_areas == &new_areas_1)
5018 current_new_areas = &new_areas_2;
5020 current_new_areas = &new_areas_1;
5022 /* Set up for gc_alloc. */
5023 new_areas = current_new_areas;
5024 new_areas_index = 0;
5026 /* Check whether previous_new_areas had overflowed. */
5027 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5028 /* New areas of objects allocated have been lost, so we need to do a
5029 * full scan to be sure! If this becomes a problem try
5030 * increasing NUM_NEW_AREAS. */
5032 SHOW("new_areas overflow, doing full scavenge");
5034 /* Don't need to record new areas that get scavenged anyway
5035 * during scavenge_newspace_generation_one_scan. */
5036 record_new_objects = 1;
5038 scavenge_newspace_generation_one_scan(generation);
5040 /* Record all new areas now. */
5041 record_new_objects = 2;
5043 /* Flush the current regions updating the tables. */
5044 gc_alloc_update_page_tables(0, &boxed_region);
5045 gc_alloc_update_page_tables(1, &unboxed_region);
5047 /* Work through previous_new_areas. */
5048 for (i = 0; i < previous_new_areas_index; i++) {
5049 int page = (*previous_new_areas)[i].page;
5050 int offset = (*previous_new_areas)[i].offset;
5051 int size = (*previous_new_areas)[i].size / 4;
5052 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5054 /* FIXME: All these bare *4 and /4 should be something
5055 * like BYTES_PER_WORD or WBYTES. */
5058 "/S page %d offset %d size %d\n",
5059 page, offset, size*4));*/
5060 scavenge(page_address(page)+offset, size);
5063 /* Flush the current regions updating the tables. */
5064 gc_alloc_update_page_tables(0, &boxed_region);
5065 gc_alloc_update_page_tables(1, &unboxed_region);
5068 current_new_areas_index = new_areas_index;
5071 "The re-scan has finished; current_new_areas_index=%d.\n",
5072 current_new_areas_index));*/
5075 /* Turn off recording of areas allocated by gc_alloc. */
5076 record_new_objects = 0;
5079 /* Check that none of the write_protected pages in this generation
5080 * have been written to. */
5081 for (i = 0; i < NUM_PAGES; i++) {
5082 if ((page_table[i].allocated != FREE_PAGE)
5083 && (page_table[i].bytes_used != 0)
5084 && (page_table[i].gen == generation)
5085 && (page_table[i].write_protected_cleared != 0)
5086 && (page_table[i].dont_move == 0)) {
5087 lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
5088 i, generation, page_table[i].dont_move);
5094 /* Un-write-protect all the pages in from_space. This is done at the
5095 * start of a GC else there may be many page faults while scavenging
5096 * the newspace (I've seen this drive the system time to 99%). These pages
5097 * would need to be unprotected anyway before unmapping in
5098 * free_oldspace; not sure what effect this has on paging.. */
5100 unprotect_oldspace(void)
5102 int bytes_freed = 0;
5105 for (i = 0; i < last_free_page; i++) {
5106 if ((page_table[i].allocated != FREE_PAGE)
5107 && (page_table[i].bytes_used != 0)
5108 && (page_table[i].gen == from_space)) {
5109 void *page_start, *addr;
5111 page_start = (void *)page_address(i);
5113 /* Remove any write-protection. We should be able to rely
5114 * on the write-protect flag to avoid redundant calls. */
5115 if (page_table[i].write_protected) {
5116 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5117 page_table[i].write_protected = 0;
5123 /* Work through all the pages and free any in from_space. This
5124 * assumes that all objects have been copied or promoted to an older
5125 * generation. Bytes_allocated and the generation bytes_allocated
5126 * counter are updated. The number of bytes freed is returned. */
5127 extern void i586_bzero(void *addr, int nbytes);
5131 int bytes_freed = 0;
5132 int first_page, last_page;
5137 /* Find a first page for the next region of pages. */
5138 while ((first_page < last_free_page)
5139 && ((page_table[first_page].allocated == FREE_PAGE)
5140 || (page_table[first_page].bytes_used == 0)
5141 || (page_table[first_page].gen != from_space)))
5144 if (first_page >= last_free_page)
5147 /* Find the last page of this region. */
5148 last_page = first_page;
5151 /* Free the page. */
5152 bytes_freed += page_table[last_page].bytes_used;
5153 generations[page_table[last_page].gen].bytes_allocated -=
5154 page_table[last_page].bytes_used;
5155 page_table[last_page].allocated = FREE_PAGE;
5156 page_table[last_page].bytes_used = 0;
5158 /* Remove any write-protection. We should be able to rely
5159 * on the write-protect flag to avoid redundant calls. */
5161 void *page_start = (void *)page_address(last_page);
5163 if (page_table[last_page].write_protected) {
5164 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5165 page_table[last_page].write_protected = 0;
5170 while ((last_page < last_free_page)
5171 && (page_table[last_page].allocated != FREE_PAGE)
5172 && (page_table[last_page].bytes_used != 0)
5173 && (page_table[last_page].gen == from_space));
5175 /* Zero pages from first_page to (last_page-1).
5177 * FIXME: Why not use the os_zero(..) function instead of
5178 * hand-coding this again? (Check other gencgc_unmap_zero
5180 if (gencgc_unmap_zero) {
5181 void *page_start, *addr;
5183 page_start = (void *)page_address(first_page);
5185 os_invalidate(page_start, 4096*(last_page-first_page));
5186 addr = os_validate(page_start, 4096*(last_page-first_page));
5187 if (addr == NULL || addr != page_start) {
5188 /* Is this an error condition? I couldn't really tell from
5189 * the old CMU CL code, which fprintf'ed a message with
5190 * an exclamation point at the end. But I've never seen the
5191 * message, so it must at least be unusual..
5193 * (The same condition is also tested for in gc_free_heap.)
5195 * -- WHN 19991129 */
5196 lose("i586_bzero: page moved, 0x%08x ==> 0x%08x",
5203 page_start = (int *)page_address(first_page);
5204 i586_bzero(page_start, 4096*(last_page-first_page));
5207 first_page = last_page;
5209 } while (first_page < last_free_page);
5211 bytes_allocated -= bytes_freed;
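/* Re the FIXME above: a sketch of what the os_zero(..) route through
 * the zeroing step might look like, assuming os_zero takes an address
 * and a length the way os_invalidate/os_validate above do. Left
 * disabled; the unmap/re-map and i586_bzero paths remain in use. */
#if 0
	os_zero((os_vm_address_t)page_address(first_page),
		(os_vm_size_t)(4096*(last_page-first_page)));
#endif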
5215 /* Print some information about a pointer at the given address. */
5217 print_ptr(lispobj *addr)
5219 /* If addr is in the dynamic space then print out the page information. */
5220 int pi1 = find_page_index((void*)addr);
5223 fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
5226 page_table[pi1].allocated,
5227 page_table[pi1].gen,
5228 page_table[pi1].bytes_used,
5229 page_table[pi1].first_object_offset,
5230 page_table[pi1].dont_move);
5231 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
5243 extern int undefined_tramp;
5246 verify_space(lispobj*start, size_t words)
5248 int dynamic_space = (find_page_index((void*)start) != -1);
5249 int readonly_space =
5250 (READ_ONLY_SPACE_START <= (int)start &&
5251 (int)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5255 lispobj thing = *(lispobj*)start;
5257 if (Pointerp(thing)) {
5258 int page_index = find_page_index((void*)thing);
5259 int to_readonly_space =
5260 (READ_ONLY_SPACE_START <= thing &&
5261 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5262 int to_static_space =
5263 ((int)static_space <= thing &&
5264 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5266 /* Does it point to the dynamic space? */
5267 if (page_index != -1) {
5268 /* If it's within the dynamic space it should point to a used
5269 * page. XX Could check the offset too. */
5270 if ((page_table[page_index].allocated != FREE_PAGE)
5271 && (page_table[page_index].bytes_used == 0))
5272 lose ("Ptr %x @ %x sees free page.", thing, start);
5273 /* Check that it doesn't point to a forwarding pointer! */
5274 if (*((lispobj *)PTR(thing)) == 0x01) {
5275 lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
5277 /* Check that it's not in the RO space as it would then be a
5278 * pointer from the RO to the dynamic space. */
5279 if (readonly_space) {
5280 lose("ptr to dynamic space %x from RO space %x",
5283 /* Does it point to a plausible object? This check slows
5284 * it down a lot (so it's commented out).
5286 * FIXME: Add a variable to enable this dynamically. */
5287 /* if (!valid_dynamic_space_pointer((lispobj *)thing)) {
5288 * lose("ptr %x to invalid object %x", thing, start); */
5290 /* Verify that it points to another valid space. */
5291 if (!to_readonly_space && !to_static_space
5292 && (thing != (int)&undefined_tramp)) {
5293 lose("Ptr %x @ %x sees junk.", thing, start);
5297 if (thing & 0x3) { /* Skip fixnums. FIXME: There should be an
5298 * is_fixnum for this. */
5300 switch(TypeOf(*start)) {
5303 case type_SimpleVector:
5306 case type_SimpleArray:
5307 case type_ComplexString:
5308 case type_ComplexBitVector:
5309 case type_ComplexVector:
5310 case type_ComplexArray:
5311 case type_ClosureHeader:
5312 case type_FuncallableInstanceHeader:
5313 case type_ByteCodeFunction:
5314 case type_ByteCodeClosure:
5315 case type_ValueCellHeader:
5316 case type_SymbolHeader:
5318 case type_UnboundMarker:
5319 case type_InstanceHeader:
5324 case type_CodeHeader:
5326 lispobj object = *start;
5328 int nheader_words, ncode_words, nwords;
5330 struct function *fheaderp;
5332 code = (struct code *) start;
5334 /* Check that it's not in the dynamic space.
5335 * FIXME: Isn't it supposed to be OK for code
5336 * objects to be in the dynamic space these days? */
5338 /* It's ok if it's byte compiled code. The trace
5339 * table offset will be a fixnum if it's x86
5340 * compiled code - check. */
5341 && !(code->trace_table_offset & 0x3)
5342 /* Only when enabled */
5343 && verify_dynamic_code_check) {
5345 "/code object at %x in the dynamic space\n",
5349 ncode_words = fixnum_value(code->code_size);
5350 nheader_words = HeaderValue(object);
5351 nwords = ncode_words + nheader_words;
5352 nwords = CEILING(nwords, 2);
5353 /* Verify the boxed section of the code data block. */
5354 verify_space(start + 1, nheader_words - 1);
5356 /* Verify the boxed section of each function object in
5357 * the code data block. */
5358 fheaderl = code->entry_points;
5359 while (fheaderl != NIL) {
5360 fheaderp = (struct function *) PTR(fheaderl);
5361 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5362 verify_space(&fheaderp->name, 1);
5363 verify_space(&fheaderp->arglist, 1);
5364 verify_space(&fheaderp->type, 1);
5365 fheaderl = fheaderp->next;
5371 /* unboxed objects */
5373 case type_SingleFloat:
5374 case type_DoubleFloat:
5375 #ifdef type_LongFloat
5376 case type_LongFloat:
5378 #ifdef type_ComplexSingleFloat
5379 case type_ComplexSingleFloat:
5381 #ifdef type_ComplexDoubleFloat
5382 case type_ComplexDoubleFloat:
5384 #ifdef type_ComplexLongFloat
5385 case type_ComplexLongFloat:
5387 case type_SimpleString:
5388 case type_SimpleBitVector:
5389 case type_SimpleArrayUnsignedByte2:
5390 case type_SimpleArrayUnsignedByte4:
5391 case type_SimpleArrayUnsignedByte8:
5392 case type_SimpleArrayUnsignedByte16:
5393 case type_SimpleArrayUnsignedByte32:
5394 #ifdef type_SimpleArraySignedByte8
5395 case type_SimpleArraySignedByte8:
5397 #ifdef type_SimpleArraySignedByte16
5398 case type_SimpleArraySignedByte16:
5400 #ifdef type_SimpleArraySignedByte30
5401 case type_SimpleArraySignedByte30:
5403 #ifdef type_SimpleArraySignedByte32
5404 case type_SimpleArraySignedByte32:
5406 case type_SimpleArraySingleFloat:
5407 case type_SimpleArrayDoubleFloat:
5408 #ifdef type_SimpleArrayLongFloat
5409 case type_SimpleArrayLongFloat:
5411 #ifdef type_SimpleArrayComplexSingleFloat
5412 case type_SimpleArrayComplexSingleFloat:
5414 #ifdef type_SimpleArrayComplexDoubleFloat
5415 case type_SimpleArrayComplexDoubleFloat:
5417 #ifdef type_SimpleArrayComplexLongFloat
5418 case type_SimpleArrayComplexLongFloat:
5421 case type_WeakPointer:
5422 count = (sizetab[TypeOf(*start)])(start);
5438 int read_only_space_size =
5439 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5440 - (lispobj*)READ_ONLY_SPACE_START;
5441 int static_space_size =
5442 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
5443 - (lispobj*)static_space;
5444 int binding_stack_size =
5445 (lispobj*)SymbolValue(BINDING_STACK_POINTER)
5446 - (lispobj*)BINDING_STACK_START;
5448 verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
5449 verify_space((lispobj*)static_space, static_space_size);
5450 verify_space((lispobj*)BINDING_STACK_START, binding_stack_size);
5454 verify_generation(int generation)
5458 for (i = 0; i < last_free_page; i++) {
5459 if ((page_table[i].allocated != FREE_PAGE)
5460 && (page_table[i].bytes_used != 0)
5461 && (page_table[i].gen == generation)) {
5463 int region_allocation = page_table[i].allocated;
5465 /* This should be the start of a contiguous block */
5466 gc_assert(page_table[i].first_object_offset == 0);
5468 /* Need to find the full extent of this contiguous block in case
5469 objects span pages. */
5471 /* Now work forward until the end of this contiguous area is
5473 for (last_page = i; ;last_page++)
5474 /* Check whether this is the last page in this contiguous
5476 if ((page_table[last_page].bytes_used < 4096)
5477 /* Or it is 4096 and is the last in the block */
5478 || (page_table[last_page+1].allocated != region_allocation)
5479 || (page_table[last_page+1].bytes_used == 0)
5480 || (page_table[last_page+1].gen != generation)
5481 || (page_table[last_page+1].first_object_offset == 0))
5484 verify_space(page_address(i), (page_table[last_page].bytes_used
5485 + (last_page-i)*4096)/4);
5491 /* Check that all the free space is zero filled. */
5493 verify_zero_fill(void)
5497 for (page = 0; page < last_free_page; page++) {
5498 if (page_table[page].allocated == FREE_PAGE) {
5499 /* The whole page should be zero filled. */
5500 int *start_addr = (int *)page_address(page);
5503 for (i = 0; i < size; i++) {
5504 if (start_addr[i] != 0) {
5505 lose("free page not zero at %x", start_addr + i);
5509 int free_bytes = 4096 - page_table[page].bytes_used;
5510 if (free_bytes > 0) {
5511 int *start_addr = (int *)((int)page_address(page)
5512 + page_table[page].bytes_used);
5513 int size = free_bytes / 4;
5515 for (i = 0; i < size; i++) {
5516 if (start_addr[i] != 0) {
5517 lose("free region not zero at %x", start_addr + i);
5525 /* External entry point for verify_zero_fill */
5527 gencgc_verify_zero_fill(void)
5529 /* Flush the alloc regions updating the tables. */
5530 boxed_region.free_pointer = current_region_free_pointer;
5531 gc_alloc_update_page_tables(0, &boxed_region);
5532 gc_alloc_update_page_tables(1, &unboxed_region);
5533 SHOW("verifying zero fill");
5535 current_region_free_pointer = boxed_region.free_pointer;
5536 current_region_end_addr = boxed_region.end_addr;
5540 verify_dynamic_space(void)
5544 for (i = 0; i < NUM_GENERATIONS; i++)
5545 verify_generation(i);
5547 if (gencgc_enable_verify_zero_fill)
5551 /* Write-protect all the dynamic boxed pages in the given generation. */
5553 write_protect_generation_pages(int generation)
5557 gc_assert(generation < NUM_GENERATIONS);
5559 for (i = 0; i < last_free_page; i++)
5560 if ((page_table[i].allocated == BOXED_PAGE)
5561 && (page_table[i].bytes_used != 0)
5562 && (page_table[i].gen == generation)) {
5565 page_start = (void *)page_address(i);
5567 os_protect(page_start,
5569 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5571 /* Note the page as protected in the page tables. */
5572 page_table[i].write_protected = 1;
5575 if (gencgc_verbose > 1) {
5577 "/write protected %d of %d pages in generation %d\n",
5578 count_write_protect_generation_pages(generation),
5579 count_generation_pages(generation),
5584 /* Garbage collect a generation. If raise is 0 the remains of the
5585 * generation are not raised to the next generation. */
5587 garbage_collect_generation(int generation, int raise)
5589 unsigned long allocated = bytes_allocated;
5590 unsigned long bytes_freed;
5592 unsigned long read_only_space_size, static_space_size;
5594 gc_assert(generation <= (NUM_GENERATIONS-1));
5596 /* The oldest generation can't be raised. */
5597 gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));
5599 /* Initialize the weak pointer list. */
5600 weak_pointers = NULL;
5602 /* When a generation is not being raised it is transported to a
5603 * temporary generation (NUM_GENERATIONS), and lowered when
5604 * done. Set up this new generation. There should be no pages
5605 * allocated to it yet. */
5607 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5609 /* Set the global src and dest. generations */
5610 from_space = generation;
5612 new_space = generation+1;
5614 new_space = NUM_GENERATIONS;
5616 /* Change to a new space for allocation, resetting the alloc_start_page */
5617 gc_alloc_generation = new_space;
5618 generations[new_space].alloc_start_page = 0;
5619 generations[new_space].alloc_unboxed_start_page = 0;
5620 generations[new_space].alloc_large_start_page = 0;
5621 generations[new_space].alloc_large_unboxed_start_page = 0;
5623 /* Before any pointers are preserved, the dont_move flags on the
5624 * pages need to be cleared. */
5625 for (i = 0; i < last_free_page; i++)
5626 page_table[i].dont_move = 0;
5628 /* Un-write-protect the old-space pages. This is essential for the
5629 * promoted pages as they may contain pointers into the old-space
5630 * which need to be scavenged. It also helps avoid unnecessary page
5631 * faults as forwarding pointers are written into them. They need to
5632 * be un-protected anyway before unmapping later. */
5633 unprotect_oldspace();
5635 /* Scavenge the stack's conservative roots. */
5638 for (ptr = (lispobj **)CONTROL_STACK_END-1;
5639 ptr > (lispobj **)&raise; ptr--)
5640 preserve_pointer(*ptr);
5642 #ifdef CONTROL_STACKS
5643 scavenge_thread_stacks();
5646 if (gencgc_verbose > 1) {
5647 int num_dont_move_pages = count_dont_move_pages();
5649 "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
5650 num_dont_move_pages,
5651 /* FIXME: 4096 should be symbolic constant here and
5652 * prob'ly elsewhere too. */
5653 num_dont_move_pages * 4096));
5656 /* Scavenge all the rest of the roots. */
5658 /* Scavenge the Lisp functions of the interrupt handlers, taking
5659 * care to avoid SIG_DFL, SIG_IGN. */
5660 for (i = 0; i < NSIG; i++) {
5661 union interrupt_handler handler = interrupt_handlers[i];
5662 if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
5663 !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
5664 scavenge((lispobj *)(interrupt_handlers + i), 1);
5668 /* Scavenge the binding stack. */
5669 scavenge(binding_stack,
5670 (lispobj *)SymbolValue(BINDING_STACK_POINTER) - binding_stack);
5672 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
5673 read_only_space_size =
5674 (lispobj *)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5677 "/scavenge read only space: %d bytes\n",
5678 read_only_space_size * sizeof(lispobj)));
5679 scavenge(read_only_space, read_only_space_size);
5682 static_space_size = (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER)
5684 if (gencgc_verbose > 1)
5686 "/scavenge static space: %d bytes\n",
5687 static_space_size * sizeof(lispobj)));
5688 scavenge(static_space, static_space_size);
5690 /* All generations but the generation being GCed need to be
5691 * scavenged. The new_space generation needs special handling as
5692 * objects may be moved in - it is handled separately below. */
5693 for (i = 0; i < NUM_GENERATIONS; i++)
5694 if ((i != generation) && (i != new_space))
5695 scavenge_generation(i);
5697 /* Finally scavenge the new_space generation. Keep going until no
5698 * more objects are moved into the new generation */
5699 scavenge_newspace_generation(new_space);
5701 #define RESCAN_CHECK 0
5703 /* As a check re-scavenge the newspace once; no new objects should be allocated. */
5706 int old_bytes_allocated = bytes_allocated;
5707 int bytes_allocated;
5709 /* Start with a full scavenge. */
5710 scavenge_newspace_generation_one_scan(new_space);
5712 /* Flush the current regions, updating the tables. */
5713 gc_alloc_update_page_tables(0, &boxed_region);
5714 gc_alloc_update_page_tables(1, &unboxed_region);
5716 bytes_allocated = bytes_allocated - old_bytes_allocated;
5718 if (bytes_allocated != 0) {
5719 lose("Rescan of new_space allocated %d more bytes.",
5725 scan_weak_pointers();
5727 /* Flush the current regions, updating the tables. */
5728 gc_alloc_update_page_tables(0, &boxed_region);
5729 gc_alloc_update_page_tables(1, &unboxed_region);
5731 /* Free the pages in oldspace, but not those marked dont_move. */
5732 bytes_freed = free_oldspace();
5734 /* If the GC is not raising the age then lower the generation back
5735 * to its normal generation number */
5737 for (i = 0; i < last_free_page; i++)
5738 if ((page_table[i].bytes_used != 0)
5739 && (page_table[i].gen == NUM_GENERATIONS))
5740 page_table[i].gen = generation;
5741 gc_assert(generations[generation].bytes_allocated == 0);
5742 generations[generation].bytes_allocated =
5743 generations[NUM_GENERATIONS].bytes_allocated;
5744 generations[NUM_GENERATIONS].bytes_allocated = 0;
5747 /* Reset the alloc_start_page for generation. */
5748 generations[generation].alloc_start_page = 0;
5749 generations[generation].alloc_unboxed_start_page = 0;
5750 generations[generation].alloc_large_start_page = 0;
5751 generations[generation].alloc_large_unboxed_start_page = 0;
5753 if (generation >= verify_gens) {
5757 verify_dynamic_space();
5760 /* Set the new gc trigger for the GCed generation. */
5761 generations[generation].gc_trigger =
5762 generations[generation].bytes_allocated
5763 + generations[generation].bytes_consed_between_gc;
5766 generations[generation].num_gc = 0;
5768 ++generations[generation].num_gc;
5771 /* Update last_free_page then ALLOCATION_POINTER */
5773 update_x86_dynamic_space_free_pointer(void)
5778 for (i = 0; i < NUM_PAGES; i++)
5779 if ((page_table[i].allocated != FREE_PAGE)
5780 && (page_table[i].bytes_used != 0))
5783 last_free_page = last_page+1;
5785 SetSymbolValue(ALLOCATION_POINTER,
5786 (lispobj)(((char *)heap_base) + last_free_page*4096));
5789 /* GC all generations below last_gen, raising their objects to the
5790 * next generation until all generations below last_gen are empty.
5791 * Then if last_gen is due for a GC then GC it. In the special case
5792 * that last_gen==NUM_GENERATIONS, the last generation is always
5793 * GC'ed. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
5795 * The oldest generation to be GCed will always be
5796 * gencgc_oldest_gen_to_gc, partly ignoring last_gen if necessary. */
5798 collect_garbage(unsigned last_gen)
5805 boxed_region.free_pointer = current_region_free_pointer;
5807 FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
5809 if (last_gen > NUM_GENERATIONS) {
5811 "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
5816 /* Flush the alloc regions updating the tables. */
5817 gc_alloc_update_page_tables(0, &boxed_region);
5818 gc_alloc_update_page_tables(1, &unboxed_region);
5820 /* Verify the new objects created by Lisp code. */
5821 if (pre_verify_gen_0) {
5822 FSHOW((stderr, "pre-checking generation 0\n"));
5823 verify_generation(0);
5826 if (gencgc_verbose > 1)
5827 print_generation_stats(0);
5830 /* Collect the generation. */
5832 if (gen >= gencgc_oldest_gen_to_gc) {
5833 /* Never raise the oldest generation. */
5838 || (generations[gen].num_gc >= generations[gen].trigger_age);
5841 if (gencgc_verbose > 1) {
5843 "Starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
5846 generations[gen].bytes_allocated,
5847 generations[gen].gc_trigger,
5848 generations[gen].num_gc));
5851 /* If an older generation is being filled then update its memory age. */
5854 generations[gen+1].cum_sum_bytes_allocated +=
5855 generations[gen+1].bytes_allocated;
5858 garbage_collect_generation(gen, raise);
5860 /* Reset the memory age cum_sum. */
5861 generations[gen].cum_sum_bytes_allocated = 0;
5863 if (gencgc_verbose > 1) {
5864 FSHOW((stderr, "GC of generation %d finished:\n", gen));
5865 print_generation_stats(0);
5869 } while ((gen <= gencgc_oldest_gen_to_gc)
5870 && ((gen < last_gen)
5871 || ((gen <= gencgc_oldest_gen_to_gc)
5873 && (generations[gen].bytes_allocated
5874 > generations[gen].gc_trigger)
5875 && (gen_av_mem_age(gen)
5876 > generations[gen].min_av_mem_age))));
5878 /* Now if gen-1 was raised all generations before gen are empty.
5879 * If it wasn't raised then all generations before gen-1 are empty.
5881 * Now objects within this gen's pages cannot point to younger
5882 * generations unless they are written to. This can be exploited
5883 * by write-protecting the pages of gen; then when younger
5884 * generations are GCed only the pages which have been written need scanning. */
5889 gen_to_wp = gen - 1;
5891 /* There's not much point in WPing pages in generation 0 as it is
5892 * never scavenged (except promoted pages). */
5893 if ((gen_to_wp > 0) && enable_page_protection) {
5894 /* Check that they are all empty. */
5895 for (i = 0; i < gen_to_wp; i++) {
5896 if (generations[i].bytes_allocated)
5897 lose("trying to write-protect gen. %d when gen. %d nonempty",
5900 write_protect_generation_pages(gen_to_wp);
5903 /* Set gc_alloc back to generation 0. The current regions should
5904 * be flushed after the above GCs */
5905 gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
5906 gc_alloc_generation = 0;
5908 update_x86_dynamic_space_free_pointer();
5910 /* This is now done by Lisp SCRUB-CONTROL-STACK in Lisp SUB-GC, so we
5911 * needn't do it here: */
5914 current_region_free_pointer = boxed_region.free_pointer;
5915 current_region_end_addr = boxed_region.end_addr;
5917 SHOW("returning from collect_garbage");
5920 /* This is called by Lisp PURIFY when it is finished. All live objects
5921 * will have been moved to the RO and Static heaps. The dynamic space
5922 * will need a full re-initialization. We don't bother having Lisp
5923 * PURIFY flush the current gc_alloc region, as the page_tables are
5924 * re-initialized, and every page is zeroed to be sure. */
5930 if (gencgc_verbose > 1)
5931 SHOW("entering gc_free_heap");
5933 for (page = 0; page < NUM_PAGES; page++) {
5934 /* Skip free pages which should already be zero filled. */
5935 if (page_table[page].allocated != FREE_PAGE) {
5936 void *page_start, *addr;
5938 /* Mark the page free. The other slots are assumed invalid
5939 * when it is a FREE_PAGE and bytes_used is 0 and it
5940 * should not be write-protected -- except that the
5941 * generation is used for the current region but it sets
5943 page_table[page].allocated = FREE_PAGE;
5944 page_table[page].bytes_used = 0;
5946 /* Zero the page. */
5947 page_start = (void *)page_address(page);
5949 /* First, remove any write-protection. */
5950 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5951 page_table[page].write_protected = 0;
5953 os_invalidate(page_start,4096);
5954 addr = os_validate(page_start,4096);
5955 if (addr == NULL || addr != page_start) {
5956 lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
5960 } else if (gencgc_zero_check_during_free_heap) {
5963 /* Double-check that the page is zero filled. */
5964 gc_assert(page_table[page].allocated == FREE_PAGE);
5965 gc_assert(page_table[page].bytes_used == 0);
5967 page_start = (int *)page_address(page);
5969 for (i=0; i<1024; i++) {
5970 if (page_start[i] != 0) {
5971 lose("free region not zero at %x", page_start + i);
5977 bytes_allocated = 0;
5979 /* Initialize the generations. */
5980 for (page = 0; page < NUM_GENERATIONS; page++) {
5981 generations[page].alloc_start_page = 0;
5982 generations[page].alloc_unboxed_start_page = 0;
5983 generations[page].alloc_large_start_page = 0;
5984 generations[page].alloc_large_unboxed_start_page = 0;
5985 generations[page].bytes_allocated = 0;
5986 generations[page].gc_trigger = 2000000;
5987 generations[page].num_gc = 0;
5988 generations[page].cum_sum_bytes_allocated = 0;
5991 if (gencgc_verbose > 1)
5992 print_generation_stats(0);
5994 /* Initialize gc_alloc */
5995 gc_alloc_generation = 0;
5996 boxed_region.first_page = 0;
5997 boxed_region.last_page = -1;
5998 boxed_region.start_addr = page_address(0);
5999 boxed_region.free_pointer = page_address(0);
6000 boxed_region.end_addr = page_address(0);
6002 unboxed_region.first_page = 0;
6003 unboxed_region.last_page = -1;
6004 unboxed_region.start_addr = page_address(0);
6005 unboxed_region.free_pointer = page_address(0);
6006 unboxed_region.end_addr = page_address(0);
6008 #if 0 /* Lisp PURIFY is currently running on the C stack so don't do this. */
6013 SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base));
6015 current_region_free_pointer = boxed_region.free_pointer;
6016 current_region_end_addr = boxed_region.end_addr;
6018 if (verify_after_free_heap) {
6019 /* Check whether purify has left any bad pointers. */
6021 SHOW("checking after free_heap\n");
6033 heap_base = (void*)DYNAMIC_0_SPACE_START;
6035 /* Initialize each page structure. */
6036 for (i = 0; i < NUM_PAGES; i++) {
6037 /* Initialize all pages as free. */
6038 page_table[i].allocated = FREE_PAGE;
6039 page_table[i].bytes_used = 0;
6041 /* Pages are not write-protected at startup. */
6042 page_table[i].write_protected = 0;
6045 bytes_allocated = 0;
6047 /* Initialize the generations. */
6048 for (i = 0; i < NUM_GENERATIONS; i++) {
6049 generations[i].alloc_start_page = 0;
6050 generations[i].alloc_unboxed_start_page = 0;
6051 generations[i].alloc_large_start_page = 0;
6052 generations[i].alloc_large_unboxed_start_page = 0;
6053 generations[i].bytes_allocated = 0;
6054 generations[i].gc_trigger = 2000000;
6055 generations[i].num_gc = 0;
6056 generations[i].cum_sum_bytes_allocated = 0;
6057 /* the tune-able parameters */
6058 generations[i].bytes_consed_between_gc = 2000000;
6059 generations[i].trigger_age = 1;
6060 generations[i].min_av_mem_age = 0.75;
6063 /* Initialize gc_alloc. */
6064 gc_alloc_generation = 0;
6065 boxed_region.first_page = 0;
6066 boxed_region.last_page = -1;
6067 boxed_region.start_addr = page_address(0);
6068 boxed_region.free_pointer = page_address(0);
6069 boxed_region.end_addr = page_address(0);
6071 unboxed_region.first_page = 0;
6072 unboxed_region.last_page = -1;
6073 unboxed_region.start_addr = page_address(0);
6074 unboxed_region.free_pointer = page_address(0);
6075 unboxed_region.end_addr = page_address(0);
6079 current_region_free_pointer = boxed_region.free_pointer;
6080 current_region_end_addr = boxed_region.end_addr;
6083 /* Pick up the dynamic space from after a core load.
6085 * The ALLOCATION_POINTER points to the end of the dynamic space.
6087 * XX A scan is needed to identify the closest first objects for pages. */
6089 gencgc_pickup_dynamic(void)
6092 int addr = DYNAMIC_0_SPACE_START;
6093 int alloc_ptr = SymbolValue(ALLOCATION_POINTER);
6095 /* Initialize the first region. */
6097 page_table[page].allocated = BOXED_PAGE;
6098 page_table[page].gen = 0;
6099 page_table[page].bytes_used = 4096;
6100 page_table[page].large_object = 0;
6101 page_table[page].first_object_offset =
6102 (void *)DYNAMIC_0_SPACE_START - page_address(page);
6105 } while (addr < alloc_ptr);
6107 generations[0].bytes_allocated = 4096*page;
6108 bytes_allocated = 4096*page;
6110 current_region_free_pointer = boxed_region.free_pointer;
6111 current_region_end_addr = boxed_region.end_addr;
6114 /* a counter for how deep we are in alloc(..) calls */
6115 int alloc_entered = 0;
6117 /* alloc(..) is the external interface for memory allocation. It
6118 * allocates to generation 0. It is not called from within the garbage
6119 * collector as it is only external uses that need the check for heap
6120 * size (GC trigger) and to disable the interrupts (interrupts are
6121 * always disabled during a GC).
6123 * The vops that call alloc(..) assume that the returned space is zero-filled.
6124 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
6126 * The check for a GC trigger is only performed when the current
6127 * region is full, so in most cases it's not needed. Further MAYBE-GC
6128 * is only called once because Lisp will remember "need to collect
6129 * garbage" and get around to it when it can. */
6133 /* Check for alignment allocation problems. */
6134 gc_assert((((unsigned)current_region_free_pointer & 0x7) == 0)
6135 && ((nbytes & 0x7) == 0));
6137 if (SymbolValue(PSEUDO_ATOMIC_ATOMIC)) {/* if already in a pseudo atomic */
6139 void *new_free_pointer;
6142 if (alloc_entered) {
6143 SHOW("alloc re-entered in already-pseudo-atomic case");
6147 /* Check whether there is room in the current region. */
6148 new_free_pointer = current_region_free_pointer + nbytes;
6150 /* FIXME: Shouldn't we be doing some sort of lock here, to
6151 * keep from getting screwed if an interrupt service routine
6152 * allocates memory between the time we calculate new_free_pointer
6153 * and the time we write it back to current_region_free_pointer?
6154 * Perhaps I just don't understand pseudo-atomics..
6156 * Perhaps I don't. It looks as though what happens is if we
6157 * were interrupted any time during the pseudo-atomic
6158 * interval (which includes now) we discard the allocated
6159 * memory and try again. So, at least we don't return
6160 * a memory area that was allocated out from underneath us
6161 * by code in an ISR.
6162 * Still, that doesn't seem to prevent
6163 * current_region_free_pointer from getting corrupted:
6164 * We read current_region_free_pointer.
6165 * They read current_region_free_pointer.
6166 * They write current_region_free_pointer.
6167 * We write current_region_free_pointer, scribbling over
6168 * whatever they wrote. */
6170 if (new_free_pointer <= boxed_region.end_addr) {
6171 /* If so then allocate from the current region. */
6172 void *new_obj = current_region_free_pointer;
6173 current_region_free_pointer = new_free_pointer;
6175 return((void *)new_obj);
6178 if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
6179 /* Double the trigger. */
6180 auto_gc_trigger *= 2;
6182 /* Exit the pseudo-atomic. */
6183 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
6184 if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
6185 /* Handle any interrupts that occurred during
6187 do_pending_interrupt();
6189 funcall0(SymbolFunction(MAYBE_GC));
6190 /* Re-enter the pseudo-atomic. */
6191 SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0));
6192 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
6195 /* Call gc_alloc. */
6196 boxed_region.free_pointer = current_region_free_pointer;
6198 void *new_obj = gc_alloc(nbytes);
6199 current_region_free_pointer = boxed_region.free_pointer;
6200 current_region_end_addr = boxed_region.end_addr;
6206 void *new_free_pointer;
6209 /* At least wrap this allocation in a pseudo atomic to prevent
6210 * gc_alloc from being re-entered. */
6211 SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0));
6212 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
6215 SHOW("alloc re-entered in not-already-pseudo-atomic case");
6218 /* Check whether there is room in the current region. */
6219 new_free_pointer = current_region_free_pointer + nbytes;
6221 if (new_free_pointer <= boxed_region.end_addr) {
6222 /* If so then allocate from the current region. */
6223 void *new_obj = current_region_free_pointer;
6224 current_region_free_pointer = new_free_pointer;
6226 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
6227 if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)) {
6228 /* Handle any interrupts that occurred during
6230 do_pending_interrupt();
6234 return((void *)new_obj);
6237 /* KLUDGE: There's lots of code around here shared with
6238 * the other branch. Is there some way to factor out the
6239 * duplicate code? -- WHN 19991129 */
6240 if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
6241 /* Double the trigger. */
6242 auto_gc_trigger *= 2;
6244 /* Exit the pseudo atomic. */
6245 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
6246 if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
6247 /* Handle any interrupts that occurred during
6249 do_pending_interrupt();
6251 funcall0(SymbolFunction(MAYBE_GC));
6255 /* Else call gc_alloc. */
6256 boxed_region.free_pointer = current_region_free_pointer;
6257 result = gc_alloc(nbytes);
6258 current_region_free_pointer = boxed_region.free_pointer;
6259 current_region_end_addr = boxed_region.end_addr;
6262 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
6263 if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
6264 /* Handle any interrupts that occurred during
6266 do_pending_interrupt();
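/* A sketch (not used by the code above) of the pseudo-atomic protocol
 * that alloc() open-codes repeatedly: between begin and end, a signal
 * handler that wants to run Lisp code instead sets
 * PSEUDO_ATOMIC_INTERRUPTED and defers, and the deferred work is then
 * picked up by do_pending_interrupt() on the way out. */
#if 0
static void
pseudo_atomic_begin(void)
{
    SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0));
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
}

static void
pseudo_atomic_end(void)
{
    SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
    if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0)
	do_pending_interrupt();	/* run whatever was deferred */
}
#endif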
6275 * noise to manipulate the gc trigger stuff
6279 set_auto_gc_trigger(os_vm_size_t dynamic_usage)
6281 auto_gc_trigger += dynamic_usage;
6285 clear_auto_gc_trigger(void)
6287 auto_gc_trigger = 0;
6290 /* Find the code object for the given pc, or return NULL on failure. */
6292 component_ptr_from_pc(lispobj *pc)
6294 lispobj *object = NULL;
6296 if (object = search_read_only_space(pc))
6298 else if (object = search_static_space(pc))
6301 object = search_dynamic_space(pc);
6303 if (object) /* if we found something */
6304 if (TypeOf(*object) == type_CodeHeader) /* if it's a code object */
6311 * shared support for the OS-dependent signal handlers which
6312 * catch GENCGC-related write-protect violations
6315 /* Depending on which OS we're running under, different signals might
6316 * be raised for a violation of write protection in the heap. This
6317 * function factors out the common generational GC magic which needs
6318 * to be invoked in this case, and should be called from whatever signal
6319 * handler is appropriate for the OS we're running under.
6321 * Return true if this signal is a normal generational GC thing that
6322 * we were able to handle, or false if it was abnormal and control
6323 * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
6325 gencgc_handle_wp_violation(void* fault_addr)
6327 int page_index = find_page_index(fault_addr);
6329 #if defined QSHOW_SIGNALS
6330 FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
6331 fault_addr, page_index));
6334 /* Check whether the fault is within the dynamic space. */
6335 if (page_index == (-1)) {
6337 /* not within the dynamic space -- not our responsibility */
6342 /* The only acceptable reason for a signal like this from the
6343 * heap is that the generational GC write-protected the page. */
6344 if (page_table[page_index].write_protected != 1) {
6345 lose("access failure in heap page not marked as write-protected");
6348 /* Unprotect the page. */
6349 os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
6350 page_table[page_index].write_protected = 0;
6351 page_table[page_index].write_protected_cleared = 1;
6353 /* Don't worry, we can handle it. */
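/* A hypothetical usage sketch (the real handlers live in the
 * OS-specific signal code, not in this file): a SIGSEGV/SIGBUS
 * handler first offers the faulting address to
 * gencgc_handle_wp_violation() and only falls through to the general
 * error machinery when the fault wasn't ours. fault_address_of() is a
 * stand-in for however a given OS exposes the faulting address. */
#if 0
static void
example_sigsegv_handler(int signal, siginfo_t *info, void *context)
{
    void *fault_addr = fault_address_of(context);	/* hypothetical */
    if (gencgc_handle_wp_violation(fault_addr))
	return;		/* a write-protected GC page; just retry the write */
    /* ... otherwise hand off to the general SIGSEGV logic ... */
}
#endif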