 * GENerational Conservative Garbage Collector for SBCL x86
 *
 * This software is part of the SBCL system. See the README file for
 * more information.
 *
 * This software is derived from the CMU CL system, which was
 * written at Carnegie Mellon University and released into the
 * public domain. The software is in the public domain and is
 * provided with absolutely no warranty. See the COPYING and CREDITS
 * files for more information.
 *
 * For a review of garbage collection techniques (e.g. generational
 * GC) and terminology (e.g. "scavenging") see Paul R. Wilson,
 * "Uniprocessor Garbage Collection Techniques". As of 20000618, this
 * had been accepted for _ACM Computing Surveys_ and was available
 * as a PostScript preprint through
 * <http://www.cs.utexas.edu/users/oops/papers.html>
 * and
 * <ftp://ftp.cs.utexas.edu/pub/garbage/bigsurv.ps>.
 *
 * FIXME: GC :FULL T seems to be unable to recover a lot of unused
 * space. After cold init is complete, GC :FULL T gets us down to
 * about 44 Mb total used, but PURIFY gets us down to about 17 Mb
#include "interrupt.h"

/* a function defined externally in assembly language, called from
 * this file */
void do_pending_interrupt(void);

/* the number of actual generations. (The number of 'struct
 * generation' objects is one more than this, because one serves as
 * scratch when GC'ing.) */
#define NUM_GENERATIONS 6
/* Should we use page protection to help avoid the scavenging of pages
 * that don't have pointers to younger generations? */
boolean enable_page_protection = 1;

/* Should we unmap a page and re-mmap it to have it zero filled? */
#if defined(__FreeBSD__) || defined(__OpenBSD__)
/* Note: this can waste a lot of swap on FreeBSD so don't unmap there.
 * Presumably this behavior exists on OpenBSD too, so don't unmap
 * there either. -- WHN 20000727 */
boolean gencgc_unmap_zero = 0;
#else
boolean gencgc_unmap_zero = 1;
#endif

/* the minimum size (in bytes) for a large object */
unsigned large_object_size = 4 * 4096;
/* Should we filter stack/register pointers? This could reduce the
 * number of invalid pointers accepted. KLUDGE: It will probably
 * degrade interrupt safety during object initialization. */
boolean enable_pointer_filter = 1;

#define gc_abort() lose("GC invariant lost, file \"%s\", line %d", \
                        __FILE__, __LINE__)

/* FIXME: In CMU CL, this was "#if 0" with no explanation. Find out
 * how much it costs to make it "#if 1". If it's not too expensive,
#if 1
#define gc_assert(ex) do { \
        if (!(ex)) gc_abort(); \
} while (0)
#else
#define gc_assert(ex)
#endif
/* the verbosity level. All non-error messages are disabled at level 0,
 * and only a few rare messages are printed at level 1. */
unsigned gencgc_verbose = (QSHOW ? 1 : 0);

/* FIXME: At some point enable the various error-checking things below
 * and see what they say. */

/* We hunt for pointers to old-space, when GCing generations >= verify_gens.
 * Set verify_gens to NUM_GENERATIONS to disable this kind of check. */
int verify_gens = NUM_GENERATIONS;

/* Should we do a pre-scan verify of generation 0 before it's GCed? */
boolean pre_verify_gen_0 = 0;

/* Should we check for bad pointers after gc_free_heap is called
 * from Lisp PURIFY? */
boolean verify_after_free_heap = 0;

/* Should we print a note when code objects are found in the dynamic space
 * during a heap verify? */
boolean verify_dynamic_code_check = 0;

/* Should we check code objects for fixup errors after they are transported? */
boolean check_code_fixups = 0;

/* Should we check that newly allocated regions are zero filled? */
boolean gencgc_zero_check = 0;

/* Should we check that the free space is zero filled? */
boolean gencgc_enable_verify_zero_fill = 0;

/* Should we check that free pages are zero filled during gc_free_heap
 * called after Lisp PURIFY? */
boolean gencgc_zero_check_during_free_heap = 0;
/*
 * GC structures and variables
 */

/* the total bytes allocated. These are seen by Lisp DYNAMIC-USAGE. */
unsigned long bytes_allocated = 0;
static unsigned long auto_gc_trigger = 0;

/* the source and destination generations. These are set before a GC starts. */
static int from_space;
static int new_space;

/* FIXME: It would be nice to use this symbolic constant instead of
 * bare 4096 almost everywhere. We could also use an assertion that
 * it's equal to getpagesize(). */
#define PAGE_BYTES 4096
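
/* A sketch of the assertion suggested in the FIXME above (a
 * hypothetical helper, not wired in anywhere, hence #if 0): */
#if 0
#include <unistd.h>
static void
check_page_bytes(void)
{
    if (getpagesize() != PAGE_BYTES)
        lose("PAGE_BYTES=%d does not match getpagesize()=%d",
             PAGE_BYTES, getpagesize());
}
#endif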
/* An array of page structures is statically allocated.
 * This helps quickly map between an address and its page structure.
 * NUM_PAGES is set from the size of the dynamic space. */
struct page page_table[NUM_PAGES];

/* To map addresses to page structures the address of the first page
 * is needed. */
static void *heap_base = NULL;

/* Calculate the start address for the given page number. */
inline void *
page_address(int page_num)
{
    return (heap_base + (page_num * 4096));
}

/* Find the page index within the page_table for the given
 * address. Return -1 on failure. */
inline int
find_page_index(void *addr)
{
    int index = addr - heap_base;

    if (index >= 0) {
        index = ((unsigned int)index)/4096;
        if (index < NUM_PAGES)
            return (index);
    }

    return (-1);
}
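
/* An illustrative consistency check relating the two mappings above
 * (a sketch only, assuming addr lies within the dynamic space): */
#if 0
static void
check_page_mapping(void *addr)
{
    int index = find_page_index(addr);
    gc_assert(index != -1);
    gc_assert(page_address(index) <= addr);
    gc_assert(addr < page_address(index) + 4096);
}
#endif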
/* a structure to hold the state of a generation */
struct generation {

    /* the first page that gc_alloc checks on its next call */
    int alloc_start_page;

    /* the first page that gc_alloc_unboxed checks on its next call */
    int alloc_unboxed_start_page;

    /* the first page that gc_alloc_large (boxed) considers on its next
     * call. (Although it always allocates after the boxed_region.) */
    int alloc_large_start_page;

    /* the first page that gc_alloc_large (unboxed) considers on its
     * next call. (Although it always allocates after the
     * current_unboxed_region.) */
    int alloc_large_unboxed_start_page;

    /* the bytes allocated to this generation */
    int bytes_allocated;

    /* the number of bytes at which to trigger a GC */
    int gc_trigger;

    /* to calculate a new level for gc_trigger */
    int bytes_consed_between_gc;

    /* the number of GCs since the last raise */
    int num_gc;

    /* the average age after which a GC will raise objects to the
     * next generation */

    /* the cumulative sum of the bytes allocated to this generation. It is
     * cleared after a GC on this generation, and updated before new
     * objects are added from a GC of a younger generation. Dividing by
     * the bytes_allocated will give the average age of the memory in
     * this generation since its last GC. */
    int cum_sum_bytes_allocated;

    /* a minimum average memory age before a GC will occur helps
     * prevent a GC when a large number of new live objects have been
     * added, in which case a GC could be a waste of time */
    double min_av_mem_age;
};
/* an array of generation structures. There needs to be one more
 * generation structure than actual generations as the oldest
 * generation is temporarily raised then lowered. */
static struct generation generations[NUM_GENERATIONS+1];

/* the oldest generation that will currently be GCed by default.
 * Valid values are: 0, 1, ... (NUM_GENERATIONS-1)
 *
 * The default of (NUM_GENERATIONS-1) enables GC on all generations.
 *
 * Setting this to 0 effectively disables the generational nature of
 * the GC. In some applications generational GC may not be useful
 * because there are no long-lived objects.
 *
 * An intermediate value could be handy after moving long-lived data
 * into an older generation so an unnecessary GC of this long-lived
 * data can be avoided. */
unsigned int gencgc_oldest_gen_to_gc = NUM_GENERATIONS-1;

/* The maximum free page in the heap is maintained and used to update
 * ALLOCATION_POINTER which is used by the room function to limit its
 * search of the heap. XX Gencgc obviously needs to be better
 * integrated with the Lisp code. */
static int last_free_page;
static int last_used_page = 0;
/*
 * miscellaneous heap functions
 */

/* Count the number of pages which are write-protected within the
 * given generation. */
static int
count_write_protect_generation_pages(int generation)
{
    int i;
    int count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != FREE_PAGE)
            && (page_table[i].gen == generation)
            && (page_table[i].write_protected == 1))
            count++;
    return count;
}

/* Count the number of pages within the given generation. */
static int
count_generation_pages(int generation)
{
    int i;
    int count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != 0)
            && (page_table[i].gen == generation))
            count++;
    return count;
}

/* Count the number of dont_move pages. */
static int
count_dont_move_pages(void)
{
    int i;
    int count = 0;

    for (i = 0; i < last_free_page; i++)
        if ((page_table[i].allocated != 0)
            && (page_table[i].dont_move != 0))
            count++;
    return count;
}

/* Work through the pages and add up the number of bytes used for the
 * given generation. */
static int
generation_bytes_allocated (int gen)
{
    int i;
    int result = 0;

    for (i = 0; i < last_free_page; i++) {
        if ((page_table[i].allocated != 0) && (page_table[i].gen == gen))
            result += page_table[i].bytes_used;
    }
    return result;
}

/* Return the average age of the memory in a generation. */
static double
gen_av_mem_age(int gen)
{
    if (generations[gen].bytes_allocated == 0)
        return 0.0;

    return
        ((double)generations[gen].cum_sum_bytes_allocated)
        / ((double)generations[gen].bytes_allocated);
}
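
/* For example, if 2 Mb in total has been allocated to a generation
 * since its last GC (cum_sum_bytes_allocated) and it currently holds
 * 1 Mb (bytes_allocated), the average memory age is 2.0; per the
 * min_av_mem_age note above, a GC of the generation is only
 * worthwhile once this ratio is large enough. */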
/* The verbose argument controls how much to print: 0 for normal
 * level of detail; 1 for debugging. */
static void
print_generation_stats(int verbose) /* FIXME: should take FILE argument */
{
    int i, gens;
    int fpu_state[27];

    /* This code uses the FP instructions which may be set up for Lisp
     * so they need to be saved and reset for C. */
    fpu_save(fpu_state);

    /* number of generations to print */
    if (verbose)
        gens = NUM_GENERATIONS+1;
    else
        gens = NUM_GENERATIONS;

    /* Print the heap stats. */
    fprintf(stderr,
            "   Generation Boxed Unboxed LB LUB Alloc Waste Trig WP GCs Mem-age\n");

    for (i = 0; i < gens; i++) {
        int j;
        int boxed_cnt = 0;
        int unboxed_cnt = 0;
        int large_boxed_cnt = 0;
        int large_unboxed_cnt = 0;

        for (j = 0; j < last_free_page; j++)
            if (page_table[j].gen == i) {

                /* Count the number of boxed pages within the given
                 * generation. */
                if (page_table[j].allocated == BOXED_PAGE) {
                    if (page_table[j].large_object)
                        large_boxed_cnt++;
                    else
                        boxed_cnt++;
                }

                /* Count the number of unboxed pages within the given
                 * generation. */
                if (page_table[j].allocated == UNBOXED_PAGE) {
                    if (page_table[j].large_object)
                        large_unboxed_cnt++;
                    else
                        unboxed_cnt++;
                }
            }

        gc_assert(generations[i].bytes_allocated
                  == generation_bytes_allocated(i));
        fprintf(stderr,
                "   %8d: %5d %5d %5d %5d %8d %5d %8d %4d %3d %7.4lf\n",
                i,
                boxed_cnt, unboxed_cnt, large_boxed_cnt, large_unboxed_cnt,
                generations[i].bytes_allocated,
                (count_generation_pages(i)*4096
                 - generations[i].bytes_allocated),
                generations[i].gc_trigger,
                count_write_protect_generation_pages(i),
                generations[i].num_gc,
                gen_av_mem_age(i));
    }
    fprintf(stderr,"   Total bytes allocated=%d\n", bytes_allocated);

    fpu_restore(fpu_state);
}
/*
 * allocation routines
 */

/*
 * To support quick and inline allocation, regions of memory can be
 * allocated and then allocated from with just a free pointer and a
 * check against an end address.
 *
 * Since objects can be allocated to spaces with different properties,
 * e.g. boxed/unboxed, generation, ages, there may need to be many
 * allocation regions.
 *
 * Each allocation region may start within a partly used page. Many
 * features of memory use are noted on a page-wise basis, e.g. the
 * generation; so if a region starts within an existing allocated page
 * it must be consistent with this page.
 *
 * During the scavenging of the newspace, objects will be transported
 * into an allocation region, and pointers updated to point to this
 * allocation region. It is possible that these pointers will be
 * scavenged again before the allocation region is closed, e.g. due to
 * trans_list which jumps all over the place to cleanup the list. It
 * is important to be able to determine properties of all objects
 * pointed to when scavenging, e.g to detect pointers to the oldspace.
 * Thus it's important that the allocation regions have the correct
 * properties set when allocated, and not just set when closed. The
 * region allocation routines return regions with the specified
 * properties, and grab all the pages, setting their properties
 * appropriately, except that the amount used is not known.
 *
 * These regions are used to support quicker allocation using just a
 * free pointer (see the sketch after this comment). The actual space
 * used by the region is not reflected in the page tables until it is
 * closed. It can't be scavenged until it is closed.
 *
 * When finished with the region it should be closed, which will
 * update the page tables for the actual space used, returning unused
 * space. Further it may be noted in the new areas, which is
 * necessary when scavenging the newspace.
 *
 * Large objects may be allocated directly without an allocation
 * region; the page tables are updated immediately.
 *
 * Unboxed objects don't contain pointers to other objects and so
 * don't need scavenging. Further they can't contain pointers to
 * younger generations so WP is not needed. By allocating pages to
 * unboxed objects the whole page never needs scavenging or
 * write-protecting. */
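
/* In sketch form, the quick free-pointer allocation that these
 * regions support (the real versions are gc_quick_alloc() and
 * friends, below; this hypothetical helper is only illustrative): */
#if 0
static void *
region_quick_alloc(struct alloc_region *region, int nbytes)
{
    void *new_free_pointer = region->free_pointer + nbytes;
    if (new_free_pointer <= region->end_addr) {
        void *new_obj = region->free_pointer;  /* bump the free pointer */
        region->free_pointer = new_free_pointer;
        return new_obj;
    }
    return NULL; /* caller must close this region and open a new one */
}
#endif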
/* We are only using two regions at present. Both are for the current
 * newspace generation. */
struct alloc_region boxed_region;
struct alloc_region unboxed_region;

/* XX hack. Current Lisp code uses the following. Need copying in/out. */
void *current_region_free_pointer;
void *current_region_end_addr;

/* the generation currently being allocated to */
static int gc_alloc_generation;
/* Find a new region with room for at least the given number of bytes.
 *
 * It starts looking at the current generation's alloc_start_page. So
 * may pick up from the previous region if there is enough space. This
 * keeps the allocation contiguous when scavenging the newspace.
 *
 * The alloc_region should have been closed by a call to
 * gc_alloc_update_page_tables, and will thus be in an empty state.
 *
 * To assist the scavenging functions, write-protected pages are not
 * used. Free pages should not be write-protected.
 *
 * It is critical to the conservative GC that the start of regions be
 * known. To help achieve this only small regions are allocated at a
 * time.
 *
 * During scavenging, pointers may be found to within the current
 * region and the page generation must be set so that pointers to the
 * from space can be recognized. Therefore the generation of pages in
 * the region is set to gc_alloc_generation. To prevent another
 * allocation call using the same pages, all the pages in the region
 * are allocated, although they will initially be empty.
 */
static void
gc_alloc_new_region(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int i;

    FSHOW((stderr,
           "/alloc_new_region for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* Check that the region is in a reset state. */
    gc_assert((alloc_region->first_page == 0)
              && (alloc_region->last_page == -1)
              && (alloc_region->free_pointer == alloc_region->end_addr));

    if (unboxed) {
        restart_page =
            generations[gc_alloc_generation].alloc_unboxed_start_page;
    } else {
        restart_page =
            generations[gc_alloc_generation].alloc_start_page;
    }

    /* Search for a contiguous free region of at least nbytes with the
     * given properties: boxed/unboxed, generation. */
    do {
        first_page = restart_page;

        /* First search for a page with at least 32 bytes free, which is
         * not write-protected, and which is not marked dont_move. */
        while ((first_page < NUM_PAGES)
               && (page_table[first_page].allocated != FREE_PAGE) /* not a free page */
               && ((unboxed &&
                    (page_table[first_page].allocated != UNBOXED_PAGE))
                   || (!unboxed &&
                       (page_table[first_page].allocated != BOXED_PAGE))
                   || (page_table[first_page].large_object != 0)
                   || (page_table[first_page].gen != gc_alloc_generation)
                   || (page_table[first_page].bytes_used >= (4096-32))
                   || (page_table[first_page].write_protected != 0)
                   || (page_table[first_page].dont_move != 0)))
            first_page++;

        /* Check for a failure. */
        if (first_page >= NUM_PAGES) {
            fprintf(stderr,
                    "Argh! gc_alloc_new_region failed on first_page, nbytes=%d.\n",
                    nbytes);
            print_generation_stats(1);
            lose(NULL);
        }

        gc_assert(page_table[first_page].write_protected == 0);

        FSHOW((stderr,
               "/first_page=%d bytes_used=%d\n",
               first_page, page_table[first_page].bytes_used));

        /* Now search forward to calculate the available region size. It
         * tries to keep going until nbytes are found and the number of
         * pages is greater than some level. This helps keep down the
         * number of pages in a region. */
        last_page = first_page;
        bytes_found = 4096 - page_table[first_page].bytes_used;
        num_pages = 1;
        while (((bytes_found < nbytes) || (num_pages < 2))
               && (last_page < (NUM_PAGES-1))
               && (page_table[last_page+1].allocated == FREE_PAGE)) {
            last_page++;
            num_pages++;
            bytes_found += 4096;
            gc_assert(page_table[last_page].write_protected == 0);
        }

        region_size = (4096 - page_table[first_page].bytes_used)
            + 4096*(last_page-first_page);

        gc_assert(bytes_found == region_size);

        FSHOW((stderr,
               "/last_page=%d bytes_found=%d num_pages=%d\n",
               last_page, bytes_found, num_pages));

        restart_page = last_page + 1;
    } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));

    /* Check for a failure. */
    if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
        fprintf(stderr,
                "Argh! gc_alloc_new_region failed on restart_page, nbytes=%d.\n",
                nbytes);
        print_generation_stats(1);
        lose(NULL);
    }

    FSHOW((stderr,
           "/gc_alloc_new_region gen %d: %d bytes: pages %d to %d: addr=%x\n",
           gc_alloc_generation,
           bytes_found,
           first_page,
           last_page,
           page_address(first_page)));

    /* Set up the alloc_region. */
    alloc_region->first_page = first_page;
    alloc_region->last_page = last_page;
    alloc_region->start_addr = page_table[first_page].bytes_used
        + page_address(first_page);
    alloc_region->free_pointer = alloc_region->start_addr;
    alloc_region->end_addr = alloc_region->start_addr + bytes_found;

    if (gencgc_zero_check) {
        int *p;
        for (p = (int *)alloc_region->start_addr;
             p < (int *)alloc_region->end_addr; p++) {
            if (*p != 0) {
                /* KLUDGE: It would be nice to use %lx and explicit casts
                 * (long) in code like this, so that it is less likely to
                 * break randomly when running on a machine with different
                 * word sizes. -- WHN 19991129 */
                lose("The new region at %x is not zero.", p);
            }
        }
    }
    /* Set up the pages. */

    /* The first page may have already been in use. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE;
        else
            page_table[first_page].allocated = BOXED_PAGE;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].large_object = 0;
        page_table[first_page].first_object_offset = 0;
    } else {
        if (unboxed)
            gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
        else
            gc_assert(page_table[first_page].allocated == BOXED_PAGE);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);
    }

    for (i = first_page+1; i <= last_page; i++) {
        if (unboxed)
            page_table[i].allocated = UNBOXED_PAGE;
        else
            page_table[i].allocated = BOXED_PAGE;
        page_table[i].gen = gc_alloc_generation;
        page_table[i].large_object = 0;
        /* This may not be necessary for unboxed regions (think it was
         * broken before!) */
        page_table[i].first_object_offset =
            alloc_region->start_addr - page_address(i);
    }

    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*4096));
        if (last_page+1 > last_used_page)
            last_used_page = last_page+1;
    }
}
/* If the record_new_objects flag is 2 then all new regions created
 * are recorded.
 *
 * If it's 1 then it is only recorded if the first page of the
 * current region is <= new_areas_ignore_page. This helps avoid
 * unnecessary recording when doing a full scavenge pass.
 *
 * The new_object structure holds the page, byte offset, and size of
 * new regions of objects. Each new area is placed in the array of
 * these structures pointed to by new_areas. new_areas_index holds the
 * offset into new_areas.
 *
 * If new_area overflows NUM_NEW_AREAS then it stops adding them. The
 * later code must detect this and handle it, probably by doing a full
 * scavenge of a generation. */
#define NUM_NEW_AREAS 512
static int record_new_objects = 0;
static int new_areas_ignore_page;
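
/* the record for one new area; its fields are exactly those used by
 * add_new_area() below */
struct new_area {
    int page;
    int offset;
    int size;
};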
static struct new_area (*new_areas)[];
static int new_areas_index;
static int max_new_areas;
/* Add a new area to new_areas. */
static void
add_new_area(int first_page, int offset, int size)
{
    unsigned new_area_start, c;
    int i;

    /* Ignore if full. */
    if (new_areas_index >= NUM_NEW_AREAS)
        return;

    switch (record_new_objects) {
    case 0:
        return;
    case 1:
        if (first_page > new_areas_ignore_page)
            return;
        break;
    case 2:
        break;
    default:
        gc_abort();
    }

    new_area_start = 4096*first_page + offset;

    /* Search backwards for a prior area that this follows from. If
       found this will save adding a new area. */
    for (i = new_areas_index-1, c = 0; (i >= 0) && (c < 8); i--, c++) {
        unsigned area_end =
            4096*((*new_areas)[i].page)
            + (*new_areas)[i].offset
            + (*new_areas)[i].size;
        /*FSHOW((stderr,
               "/add_new_area S1 %d %d %d %d\n",
               i, c, new_area_start, area_end));*/
        if (new_area_start == area_end) {
            /*FSHOW((stderr,
                   "/adding to [%d] %d %d %d with %d %d %d:\n",
                   i,
                   (*new_areas)[i].page,
                   (*new_areas)[i].offset,
                   (*new_areas)[i].size,
                   first_page, offset, size));*/
            (*new_areas)[i].size += size;
            return;
        }
    }
    /*FSHOW((stderr, "/add_new_area S1 %d %d %d\n", i, c, new_area_start));*/

    (*new_areas)[new_areas_index].page = first_page;
    (*new_areas)[new_areas_index].offset = offset;
    (*new_areas)[new_areas_index].size = size;
    /*FSHOW((stderr,
           "/new_area %d page %d offset %d size %d\n",
           new_areas_index, first_page, offset, size));*/
    new_areas_index++;

    /* Note the max new_areas used. */
    if (new_areas_index > max_new_areas)
        max_new_areas = new_areas_index;
}
/* Update the tables for the alloc_region. The region may be added to
 * the new_areas.
 *
 * When done the alloc_region is set up so that the next quick alloc
 * will fail safely and thus a new region will be allocated. Further
 * it is safe to try to re-update the page table of this reset
 * alloc_region. */
void
gc_alloc_update_page_tables(int unboxed, struct alloc_region *alloc_region)
{
    int more;
    int first_page;
    int next_page;
    int bytes_used;
    int orig_first_page_bytes_used;
    int region_size;
    int byte_cnt;

    FSHOW((stderr,
           "/gc_alloc_update_page_tables to gen %d:\n",
           gc_alloc_generation));

    first_page = alloc_region->first_page;

    /* Catch an unused alloc_region. */
    if ((first_page == 0) && (alloc_region->last_page == -1))
        return;

    next_page = first_page+1;

    /* Skip if no bytes were allocated. */
    if (alloc_region->free_pointer != alloc_region->start_addr) {
        orig_first_page_bytes_used = page_table[first_page].bytes_used;

        gc_assert(alloc_region->start_addr == (page_address(first_page) + page_table[first_page].bytes_used));

        /* All the pages used need to be updated. */

        /* Update the first page. */

        /* If the page was free then set up the gen, and
         * first_object_offset. */
        if (page_table[first_page].bytes_used == 0)
            gc_assert(page_table[first_page].first_object_offset == 0);

        if (unboxed)
            gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
        else
            gc_assert(page_table[first_page].allocated == BOXED_PAGE);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == 0);

        byte_cnt = 0;

        /* Calculate the number of bytes used in this page. This is not
         * always the number of new bytes, unless it was free. */
        more = 0;
        if ((bytes_used = (alloc_region->free_pointer - page_address(first_page)))>4096) {
            bytes_used = 4096;
            more = 1;
        }
        page_table[first_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        /* All the rest of the pages should be free. We need to set their
         * first_object_offset pointer to the start of the region, and set
         * the bytes_used. */
        while (more) {
            if (unboxed)
                gc_assert(page_table[next_page].allocated == UNBOXED_PAGE);
            else
                gc_assert(page_table[next_page].allocated == BOXED_PAGE);
            gc_assert(page_table[next_page].bytes_used == 0);
            gc_assert(page_table[next_page].gen == gc_alloc_generation);
            gc_assert(page_table[next_page].large_object == 0);

            gc_assert(page_table[next_page].first_object_offset ==
                      alloc_region->start_addr - page_address(next_page));

            /* Calculate the number of bytes used in this page. */
            more = 0;
            if ((bytes_used = (alloc_region->free_pointer
                               - page_address(next_page)))>4096) {
                bytes_used = 4096;
                more = 1;
            }
            page_table[next_page].bytes_used = bytes_used;
            byte_cnt += bytes_used;

            next_page++;
        }

        region_size = alloc_region->free_pointer - alloc_region->start_addr;
        bytes_allocated += region_size;
        generations[gc_alloc_generation].bytes_allocated += region_size;

        gc_assert((byte_cnt - orig_first_page_bytes_used) == region_size);

        /* Set the generation's alloc restart page to the last page of
         * the region. */
        if (unboxed) {
            generations[gc_alloc_generation].alloc_unboxed_start_page =
                next_page-1;
        } else {
            generations[gc_alloc_generation].alloc_start_page = next_page-1;
        }

        /* Add the region to the new_areas if requested. */
        if (!unboxed)
            add_new_area(first_page,orig_first_page_bytes_used, region_size);

        FSHOW((stderr,
               "/gc_alloc_update_page_tables update %d bytes to gen %d\n",
               region_size,
               gc_alloc_generation));
    } else {
        /* No bytes were allocated. Unallocate the first_page if there
         * are 0 bytes_used. */
        if (page_table[first_page].bytes_used == 0)
            page_table[first_page].allocated = FREE_PAGE;
    }

    /* Unallocate any unused pages. */
    while (next_page <= alloc_region->last_page) {
        gc_assert(page_table[next_page].bytes_used == 0);
        page_table[next_page].allocated = FREE_PAGE;
        next_page++;
    }

    /* Reset the alloc_region. */
    alloc_region->first_page = 0;
    alloc_region->last_page = -1;
    alloc_region->start_addr = page_address(0);
    alloc_region->free_pointer = page_address(0);
    alloc_region->end_addr = page_address(0);
}
static inline void *gc_quick_alloc(int nbytes);

/* Allocate a possibly large object. */
static void *
gc_alloc_large(int nbytes, int unboxed, struct alloc_region *alloc_region)
{
    int first_page;
    int last_page;
    int region_size;
    int restart_page;
    int bytes_found;
    int num_pages;
    int orig_first_page_bytes_used;
    int byte_cnt;
    int more;
    int bytes_used;
    int next_page;
    int large = (nbytes >= large_object_size);

    FSHOW((stderr, "/alloc_large %d\n", nbytes));

    FSHOW((stderr,
           "/gc_alloc_large for %d bytes from gen %d\n",
           nbytes, gc_alloc_generation));

    /* If the object is small, and there is room in the current region
     * then allocate it in the current region. */
    if (!large
        && ((alloc_region->end_addr-alloc_region->free_pointer) >= nbytes))
        return gc_quick_alloc(nbytes);

    /* Search for a contiguous free region of at least nbytes. If it's
     * a large object then align it on a page boundary by searching
     * for a free page. */

    /* To allow the allocation of small objects without the danger of
     * using a page in the current boxed region, the search starts after
     * the current boxed free region. XX could probably keep a page
     * index ahead of the current region and bumped up here to save a
     * lot of re-scanning. */
    if (unboxed)
        restart_page = generations[gc_alloc_generation].alloc_large_unboxed_start_page;
    else
        restart_page = generations[gc_alloc_generation].alloc_large_start_page;
    if (restart_page <= alloc_region->last_page)
        restart_page = alloc_region->last_page+1;

    do {
        first_page = restart_page;

        if (large)
            while ((first_page < NUM_PAGES)
                   && (page_table[first_page].allocated != FREE_PAGE))
                first_page++;
        else
            while ((first_page < NUM_PAGES)
                   && (page_table[first_page].allocated != FREE_PAGE)
                   && ((unboxed &&
                        (page_table[first_page].allocated != UNBOXED_PAGE))
                       || (!unboxed &&
                           (page_table[first_page].allocated != BOXED_PAGE))
                       || (page_table[first_page].large_object != 0)
                       || (page_table[first_page].gen != gc_alloc_generation)
                       || (page_table[first_page].bytes_used >= (4096-32))
                       || (page_table[first_page].write_protected != 0)
                       || (page_table[first_page].dont_move != 0)))
                first_page++;

        /* Check for a failure. */
        if (first_page >= NUM_PAGES) {
            fprintf(stderr,
                    "Argh! gc_alloc_large failed (first_page), nbytes=%d.\n",
                    nbytes);
            print_generation_stats(1);
            lose(NULL);
        }

        gc_assert(page_table[first_page].write_protected == 0);

        FSHOW((stderr,
               "/first_page=%d bytes_used=%d\n",
               first_page, page_table[first_page].bytes_used));

        last_page = first_page;
        bytes_found = 4096 - page_table[first_page].bytes_used;
        num_pages = 1;
        while ((bytes_found < nbytes)
               && (last_page < (NUM_PAGES-1))
               && (page_table[last_page+1].allocated == FREE_PAGE)) {
            last_page++;
            num_pages++;
            bytes_found += 4096;
            gc_assert(page_table[last_page].write_protected == 0);
        }

        region_size = (4096 - page_table[first_page].bytes_used)
            + 4096*(last_page-first_page);

        gc_assert(bytes_found == region_size);

        FSHOW((stderr,
               "/last_page=%d bytes_found=%d num_pages=%d\n",
               last_page, bytes_found, num_pages));

        restart_page = last_page + 1;
    } while ((restart_page < NUM_PAGES) && (bytes_found < nbytes));

    /* Check for a failure. */
    if ((restart_page >= NUM_PAGES) && (bytes_found < nbytes)) {
        fprintf(stderr,
                "Argh! gc_alloc_large failed (restart_page), nbytes=%d.\n",
                nbytes);
        print_generation_stats(1);
        lose(NULL);
    }

    FSHOW((stderr,
           "/gc_alloc_large gen %d: %d of %d bytes: from pages %d to %d: addr=%x\n",
           gc_alloc_generation,
           nbytes,
           bytes_found,
           first_page,
           last_page,
           page_address(first_page)));

    gc_assert(first_page > alloc_region->last_page);
    if (unboxed)
        generations[gc_alloc_generation].alloc_large_unboxed_start_page =
            last_page;
    else
        generations[gc_alloc_generation].alloc_large_start_page = last_page;

    /* Set up the pages. */
    orig_first_page_bytes_used = page_table[first_page].bytes_used;

    /* If the first page was free then set up the gen, and
     * first_object_offset. */
    if (page_table[first_page].bytes_used == 0) {
        if (unboxed)
            page_table[first_page].allocated = UNBOXED_PAGE;
        else
            page_table[first_page].allocated = BOXED_PAGE;
        page_table[first_page].gen = gc_alloc_generation;
        page_table[first_page].first_object_offset = 0;
        page_table[first_page].large_object = large;
    } else {
        if (unboxed)
            gc_assert(page_table[first_page].allocated == UNBOXED_PAGE);
        else
            gc_assert(page_table[first_page].allocated == BOXED_PAGE);
        gc_assert(page_table[first_page].gen == gc_alloc_generation);
        gc_assert(page_table[first_page].large_object == large);
    }

    byte_cnt = 0;

    /* Calculate the number of bytes used in this page. This is not
     * always the number of new bytes, unless it was free. */
    more = 0;
    if ((bytes_used = nbytes+orig_first_page_bytes_used) > 4096) {
        bytes_used = 4096;
        more = 1;
    }
    page_table[first_page].bytes_used = bytes_used;
    byte_cnt += bytes_used;

    next_page = first_page+1;

    /* All the rest of the pages should be free. We need to set their
     * first_object_offset pointer to the start of the region, and
     * set the bytes_used. */
    while (more) {
        gc_assert(page_table[next_page].allocated == FREE_PAGE);
        gc_assert(page_table[next_page].bytes_used == 0);
        if (unboxed)
            page_table[next_page].allocated = UNBOXED_PAGE;
        else
            page_table[next_page].allocated = BOXED_PAGE;
        page_table[next_page].gen = gc_alloc_generation;
        page_table[next_page].large_object = large;

        page_table[next_page].first_object_offset =
            orig_first_page_bytes_used - 4096*(next_page-first_page);

        /* Calculate the number of bytes used in this page. */
        more = 0;
        if ((bytes_used=(nbytes+orig_first_page_bytes_used)-byte_cnt) > 4096) {
            bytes_used = 4096;
            more = 1;
        }
        page_table[next_page].bytes_used = bytes_used;
        byte_cnt += bytes_used;

        next_page++;
    }

    gc_assert((byte_cnt-orig_first_page_bytes_used) == nbytes);

    bytes_allocated += nbytes;
    generations[gc_alloc_generation].bytes_allocated += nbytes;

    /* Add the region to the new_areas if requested. */
    if (!unboxed)
        add_new_area(first_page,orig_first_page_bytes_used,nbytes);

    /* Bump up last_free_page. */
    if (last_page+1 > last_free_page) {
        last_free_page = last_page+1;
        SetSymbolValue(ALLOCATION_POINTER,
                       (lispobj)(((char *)heap_base) + last_free_page*4096));
        if (last_page+1 > last_used_page)
            last_used_page = last_page+1;
    }

    return((void *)(page_address(first_page)+orig_first_page_bytes_used));
}
/* Allocate bytes from the boxed_region. It first checks whether there
 * is room; if not, it calls gc_alloc_new_region to find a new region
 * with enough space. A pointer to the start of the allocation is
 * returned. */
static void *
gc_alloc(int nbytes)
{
    void *new_free_pointer;

    /* FSHOW((stderr, "/gc_alloc %d\n", nbytes)); */

    /* Check whether there is room in the current alloc region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current alloc region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check whether the alloc region is almost empty. */
        if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);
            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return((void *)new_obj);
    }

    /* Else not enough free space in the current region. */

    /* If there is some room left in the current region, enough to be worth
     * saving, then allocate a large object. */
    /* FIXME: "32" should be a named parameter. */
    if ((boxed_region.end_addr-boxed_region.free_pointer) > 32)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(0, &boxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 0, &boxed_region);

    /* There should now be enough room. */

    /* Check whether there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;

        /* Check whether the current region is almost empty. */
        if ((boxed_region.end_addr - boxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(0, &boxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 0, &boxed_region);
        }
        return((void *)new_obj);
    }

    /* shouldn't happen */
    gc_assert(0);
}
/* Allocate space from the boxed_region. If there is not enough free
 * space then call gc_alloc to do the job. A pointer to the start of
 * the allocation is returned. */
static inline void *
gc_quick_alloc(int nbytes)
{
    void *new_free_pointer;

    /* Check whether there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return((void *)new_obj);
    }

    /* Else call gc_alloc. */
    return (gc_alloc(nbytes));
}
/* Allocate space for the boxed object. If it is a large object then
 * do a large alloc, else allocate from the current region. If there is
 * not enough free space then call gc_alloc to do the job. A pointer
 * to the start of the allocation is returned. */
static inline void *
gc_quick_alloc_large(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes, 0, &boxed_region);

    /* Check whether there is room in the current region. */
    new_free_pointer = boxed_region.free_pointer + nbytes;

    if (new_free_pointer <= boxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = boxed_region.free_pointer;
        boxed_region.free_pointer = new_free_pointer;
        return((void *)new_obj);
    }

    /* Else call gc_alloc. */
    return (gc_alloc(nbytes));
}
static void *
gc_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    FSHOW((stderr, "/gc_alloc_unboxed %d\n", nbytes));

    /* Check whether there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check whether the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }
        return((void *)new_obj);
    }

    /* Else not enough free space in the current region. */

    /* If there is a bit of room left in the current region then
     * allocate a large object. */
    if ((unboxed_region.end_addr-unboxed_region.free_pointer) > 32)
        return gc_alloc_large(nbytes,1,&unboxed_region);

    /* Else find a new region. */

    /* Finished with the current region. */
    gc_alloc_update_page_tables(1, &unboxed_region);

    /* Set up a new region. */
    gc_alloc_new_region(nbytes, 1, &unboxed_region);

    /* There should now be enough room. */

    /* Check whether there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        /* Check whether the current region is almost empty. */
        if ((unboxed_region.end_addr - unboxed_region.free_pointer) <= 32) {
            /* If so, finished with the current region. */
            gc_alloc_update_page_tables(1, &unboxed_region);

            /* Set up a new region. */
            gc_alloc_new_region(32, 1, &unboxed_region);
        }
        return((void *)new_obj);
    }

    /* shouldn't happen? */
    gc_assert(0);
}
static inline void *
gc_quick_alloc_unboxed(int nbytes)
{
    void *new_free_pointer;

    /* Check whether there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return((void *)new_obj);
    }

    /* Else call gc_alloc_unboxed. */
    return (gc_alloc_unboxed(nbytes));
}
/* Allocate space for the object. If it is a large object then do a
 * large alloc, else allocate from the current region. If there is not
 * enough free space then call gc_alloc_unboxed to do the job.
 *
 * A pointer to the start of the allocation is returned. */
static inline void *
gc_quick_alloc_large_unboxed(int nbytes)
{
    void *new_free_pointer;

    if (nbytes >= large_object_size)
        return gc_alloc_large(nbytes,1,&unboxed_region);

    /* Check whether there is room in the current region. */
    new_free_pointer = unboxed_region.free_pointer + nbytes;

    if (new_free_pointer <= unboxed_region.end_addr) {
        /* If so then allocate from the current region. */
        void *new_obj = unboxed_region.free_pointer;
        unboxed_region.free_pointer = new_free_pointer;

        return((void *)new_obj);
    }

    /* Else call gc_alloc_unboxed. */
    return (gc_alloc_unboxed(nbytes));
}
/*
 * scavenging/transporting routines derived from gc.c in CMU CL ca. 18b
 */

static int (*scavtab[256])(lispobj *where, lispobj object);
static lispobj (*transother[256])(lispobj object);
static int (*sizetab[256])(lispobj *where);

static struct weak_pointer *weak_pointers;

#define CEILING(x,y) (((x) + ((y) - 1)) & (~((y) - 1)))
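
/* CEILING rounds x up to a multiple of y, where y is a power of two:
 * e.g. CEILING(4,4) == 4 and CEILING(5,4) == 8; with y == 4096 it
 * rounds a byte count up to a whole number of pages. */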
static inline boolean
from_space_p(lispobj obj)
{
    int page_index = (void*)obj - heap_base;
    return ((page_index >= 0)
            && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
            && (page_table[page_index].gen == from_space));
}

static inline boolean
new_space_p(lispobj obj)
{
    int page_index = (void*)obj - heap_base;
    return ((page_index >= 0)
            && ((page_index = ((unsigned int)page_index)/4096) < NUM_PAGES)
            && (page_table[page_index].gen == new_space));
}
/* to copy a boxed object */
static inline lispobj
copy_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc(nwords*4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return Lisp pointer of new object. */
    return ((lispobj) new) | tag;
}
/* to copy a large boxed object. If the object is in a large object
 * region then it is simply promoted, else it is copied. If it's large
 * enough then it's copied to a large object region.
 *
 * Vectors may have shrunk. If the object is not copied the space
 * needs to be reclaimed, and the page_tables corrected. */
static lispobj
copy_large_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose) {
        FSHOW((stderr, "/copy_large_object: %d bytes\n", nwords*4));
    }

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {

        /* Promote the object. */

        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;

        /* Note: Any page write-protection must be removed, else a
         * later scavenge_newspace may incorrectly not scavenge these
         * pages. This would not be necessary if they are added to the
         * new areas, but let's do it for them all (they'll probably
         * be written anyway?). */

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*4;
        while (remaining_bytes > 4096) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert(page_table[next_page].allocated == BOXED_PAGE);
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset==
                      -4096*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == 4096);

            page_table[next_page].gen = new_space;

            /* Remove any write-protection. We should be able to rely
             * on the write-protect flag to avoid redundant calls. */
            if (page_table[next_page].write_protected) {
                os_protect(page_address(next_page), 4096, OS_VM_PROT_ALL);
                page_table[next_page].write_protected = 0;
            }
            remaining_bytes -= 4096;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk
         * so there may be more unused pages which will be freed. */

        /* The object may have shrunk but shouldn't have grown. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        gc_assert(page_table[next_page].allocated == BOXED_PAGE);

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == 4096) &&
               (page_table[next_page].gen == from_space) &&
               (page_table[next_page].allocated == BOXED_PAGE) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*4096)) {
            /* It checks out OK, free the page. We don't need to bother
             * zeroing pages as this should have been done before shrinking
             * the object. These pages shouldn't be write-protected as they
             * should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose)
            FSHOW((stderr, "/copy_large_boxed bytes_freed=%d\n", bytes_freed));

        generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4*nwords;
        bytes_allocated -= bytes_freed;

        /* Add the region to the new_areas if requested. */
        add_new_area(first_page,0,nwords*4);

        return(object);
    } else {
        /* Get tag of object. */
        tag = LowtagOf(object);

        /* Allocate space. */
        new = gc_quick_alloc_large(nwords*4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
/* to copy unboxed objects */
static inline lispobj
copy_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    /* Get tag of object. */
    tag = LowtagOf(object);

    /* Allocate space. */
    new = gc_quick_alloc_unboxed(nwords*4);

    dest = new;
    source = (lispobj *) PTR(object);

    /* Copy the object. */
    while (nwords > 0) {
        dest[0] = source[0];
        dest[1] = source[1];
        dest += 2;
        source += 2;
        nwords -= 2;
    }

    /* Return Lisp pointer of new object. */
    return ((lispobj) new) | tag;
}
/* to copy large unboxed objects
 *
 * If the object is in a large object region then it is simply
 * promoted, else it is copied. If it's large enough then it's copied
 * to a large object region.
 *
 * Bignums and vectors may have shrunk. If the object is not copied
 * the space needs to be reclaimed, and the page_tables corrected.
 *
 * KLUDGE: There's a lot of cut-and-paste duplication between this
 * function and copy_large_object(..). -- WHN 20000619 */
static lispobj
copy_large_unboxed_object(lispobj object, int nwords)
{
    int tag;
    lispobj *new;
    lispobj *source, *dest;
    int first_page;

    gc_assert(Pointerp(object));
    gc_assert(from_space_p(object));
    gc_assert((nwords & 0x01) == 0);

    if ((nwords > 1024*1024) && gencgc_verbose)
        FSHOW((stderr, "/copy_large_unboxed_object: %d bytes\n", nwords*4));

    /* Check whether it's a large object. */
    first_page = find_page_index((void *)object);
    gc_assert(first_page >= 0);

    if (page_table[first_page].large_object) {
        /* Promote the object. Note: Unboxed objects may have been
         * allocated to a BOXED region so it may be necessary to
         * change the region to UNBOXED. */
        int remaining_bytes;
        int next_page;
        int bytes_freed;
        int old_bytes_used;

        gc_assert(page_table[first_page].first_object_offset == 0);

        next_page = first_page;
        remaining_bytes = nwords*4;
        while (remaining_bytes > 4096) {
            gc_assert(page_table[next_page].gen == from_space);
            gc_assert((page_table[next_page].allocated == UNBOXED_PAGE)
                      || (page_table[next_page].allocated == BOXED_PAGE));
            gc_assert(page_table[next_page].large_object);
            gc_assert(page_table[next_page].first_object_offset==
                      -4096*(next_page-first_page));
            gc_assert(page_table[next_page].bytes_used == 4096);

            page_table[next_page].gen = new_space;
            page_table[next_page].allocated = UNBOXED_PAGE;
            remaining_bytes -= 4096;
            next_page++;
        }

        /* Now only one page remains, but the object may have shrunk so
         * there may be more unused pages which will be freed. */

        /* Object may have shrunk but shouldn't have grown - check. */
        gc_assert(page_table[next_page].bytes_used >= remaining_bytes);

        page_table[next_page].gen = new_space;
        page_table[next_page].allocated = UNBOXED_PAGE;

        /* Adjust the bytes_used. */
        old_bytes_used = page_table[next_page].bytes_used;
        page_table[next_page].bytes_used = remaining_bytes;

        bytes_freed = old_bytes_used - remaining_bytes;

        /* Free any remaining pages; needs care. */
        next_page++;
        while ((old_bytes_used == 4096) &&
               (page_table[next_page].gen == from_space) &&
               ((page_table[next_page].allocated == UNBOXED_PAGE)
                || (page_table[next_page].allocated == BOXED_PAGE)) &&
               page_table[next_page].large_object &&
               (page_table[next_page].first_object_offset ==
                -(next_page - first_page)*4096)) {
            /* It checks out OK, free the page. We don't need to bother
             * zeroing pages as this should have been done before shrinking
             * the object. These pages shouldn't be write-protected; even if
             * boxed they should be zero filled. */
            gc_assert(page_table[next_page].write_protected == 0);

            old_bytes_used = page_table[next_page].bytes_used;
            page_table[next_page].allocated = FREE_PAGE;
            page_table[next_page].bytes_used = 0;
            bytes_freed += old_bytes_used;
            next_page++;
        }

        if ((bytes_freed > 0) && gencgc_verbose)
            FSHOW((stderr,
                   "/copy_large_unboxed bytes_freed=%d\n",
                   bytes_freed));

        generations[from_space].bytes_allocated -= 4*nwords + bytes_freed;
        generations[new_space].bytes_allocated += 4*nwords;
        bytes_allocated -= bytes_freed;

        return(object);
    } else {
        /* Get tag of object. */
        tag = LowtagOf(object);

        /* Allocate space. */
        new = gc_quick_alloc_large_unboxed(nwords*4);

        dest = new;
        source = (lispobj *) PTR(object);

        /* Copy the object. */
        while (nwords > 0) {
            dest[0] = source[0];
            dest[1] = source[1];
            dest += 2;
            source += 2;
            nwords -= 2;
        }

        /* Return Lisp pointer of new object. */
        return ((lispobj) new) | tag;
    }
}
#define DIRECT_SCAV 0

/* FIXME: Most calls end up going to a little trouble to compute an
 * 'nwords' value. The system might be a little simpler if this
 * function used an 'end' parameter instead (see the sketch after
 * scavenge() below). */
static void
scavenge(lispobj *start, long nwords)
{
    while (nwords > 0) {
        lispobj object;
        int type, words_scavenged;

        object = *start;

        /* FSHOW((stderr, "Scavenge: %p, %ld\n", start, nwords)); */

        gc_assert(object != 0x01); /* not a forwarding pointer */

#if DIRECT_SCAV
        type = TypeOf(object);
        words_scavenged = (scavtab[type])(start, object);
#else
        if (Pointerp(object)) {
            /* It's a pointer. */
            if (from_space_p(object)) {
                /* It currently points to old space. Check for a
                 * forwarding pointer. */
                lispobj *ptr = (lispobj *)PTR(object);
                lispobj first_word = *ptr;

                if (first_word == 0x01) {
                    /* Yes, there's a forwarding pointer. */
                    *start = ptr[1];
                    words_scavenged = 1;
                } else {
                    /* Scavenge that pointer. */
                    words_scavenged = (scavtab[TypeOf(object)])(start, object);
                }
            } else {
                /* It points somewhere other than oldspace. Leave it alone. */
                words_scavenged = 1;
            }
        } else {
            if ((object & 3) == 0) {
                /* It's a fixnum: really easy. */
                words_scavenged = 1;
            } else {
                /* It's some sort of header object or another. */
                words_scavenged = (scavtab[TypeOf(object)])(start, object);
            }
        }
#endif

        start += words_scavenged;
        nwords -= words_scavenged;
    }
    gc_assert(nwords == 0);
}
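
/* A sketch of the 'end'-parameter variant suggested in the FIXME
 * above scavenge() (a hypothetical wrapper, not used in this file): */
#if 0
static void
scavenge_to(lispobj *start, lispobj *end)
{
    scavenge(start, end - start);
}
#endif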
/*
 * code and code-related objects
 */

#define RAW_ADDR_OFFSET (6*sizeof(lispobj) - type_FunctionPointer)
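/* (Reading the macro: the raw code entry point sits six words into a
 * function object, and subtracting the function-pointer lowtag gives
 * the displacement from a tagged function pointer to that raw
 * address.) */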
static lispobj trans_function_header(lispobj object);
static lispobj trans_boxed(lispobj object);
#if DIRECT_SCAV
static int
scav_function_pointer(lispobj *where, lispobj object)
{
    gc_assert(Pointerp(object));

    if (from_space_p(object)) {
        lispobj first, *first_pointer;

        /* object is a pointer into from space. Check to see whether
         * it has been forwarded. */
        first_pointer = (lispobj *) PTR(object);
        first = *first_pointer;

        if (first == 0x01) {
            /* forwarded */
            *where = first = first_pointer[1];
        } else {
            int type;
            lispobj copy;

            /* must transport object -- object may point to either a
             * function header, a closure function header, or to a
             * closure header. */
            type = TypeOf(first);
            switch (type) {
            case type_FunctionHeader:
            case type_ClosureFunctionHeader:
                copy = trans_function_header(object);
                break;
            default:
                copy = trans_boxed(object);
                break;
            }

            if (copy != object) {
                /* Set forwarding pointer. */
                first_pointer[0] = 0x01;
                first_pointer[1] = copy;
            }

            *where = first = copy;
        }

        gc_assert(Pointerp(first));
        gc_assert(!from_space_p(first));
    }
    return 1;
}
#else
static int
scav_function_pointer(lispobj *where, lispobj object)
{
    lispobj *first_pointer;
    lispobj copy;

    gc_assert(Pointerp(object));

    /* Object is a pointer into from space - not a forwarding pointer. */
    first_pointer = (lispobj *) PTR(object);

    /* must transport object -- object may point to either a function
     * header, a closure function header, or to a closure header. */
    switch (TypeOf(*first_pointer)) {
    case type_FunctionHeader:
    case type_ClosureFunctionHeader:
        copy = trans_function_header(object);
        break;
    default:
        copy = trans_boxed(object);
        break;
    }

    if (copy != object) {
        /* Set forwarding pointer. */
        first_pointer[0] = 0x01;
        first_pointer[1] = copy;
    }

    *where = copy;

    gc_assert(Pointerp(copy));
    gc_assert(!from_space_p(copy));

    return 1;
}
#endif
/* Scan an x86 compiled code object, looking for possible fixups that
 * have been missed after a move.
 *
 * Two types of fixups are needed:
 * 1. Absolute fixups to within the code object.
 * 2. Relative fixups to outside the code object.
 *
 * Currently only absolute fixups to the constant vector, or to the
 * code area, are checked. */
static void
sniff_code_object(struct code *code, unsigned displacement)
{
    int nheader_words, ncode_words, nwords;
    void *p;
    struct function *fheaderp;
    void *constants_start_addr, *constants_end_addr;
    void *code_start_addr, *code_end_addr;
    int fixup_found = 0;

    if (!check_code_fixups)
        return;

    /* It's OK if it's byte compiled code. The trace table offset will
     * be a fixnum if it's x86 compiled code - check. */
    if (code->trace_table_offset & 0x3) {
        FSHOW((stderr, "/Sniffing byte compiled code object at %x.\n", code));
        return;
    }

    /* Else it's x86 machine code. */

    ncode_words = fixnum_value(code->code_size);
    nheader_words = HeaderValue(*(lispobj *)code);
    nwords = ncode_words + nheader_words;

    constants_start_addr = (void *)code + 5*4;
    constants_end_addr = (void *)code + nheader_words*4;
    code_start_addr = (void *)code + nheader_words*4;
    code_end_addr = (void *)code + nwords*4;

    /* Work through the unboxed code. */
    for (p = code_start_addr; p < code_end_addr; p++) {
        void *data = *(void **)p;
        unsigned d1 = *((unsigned char *)p - 1);
        unsigned d2 = *((unsigned char *)p - 2);
        unsigned d3 = *((unsigned char *)p - 3);
        unsigned d4 = *((unsigned char *)p - 4);
        unsigned d5 = *((unsigned char *)p - 5);
        unsigned d6 = *((unsigned char *)p - 6);
        /* Check for code references. */
        /* Check for a 32 bit word that looks like an absolute
         * reference to within the code area of the code object. */
        if ((data >= (code_start_addr-displacement))
            && (data < (code_end_addr-displacement))) {
            /* function header */
            if ((d4 == 0x5e)
                && (((unsigned)p - 4 - 4*HeaderValue(*((unsigned *)p-1))) == (unsigned)code)) {
                /* Skip the function header. */
                p += 6*4 - 4 - 1;
                continue;
            }
            /* the case of PUSH imm32 */
            if (d1 == 0x68) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/PUSH $0x%.8x\n", data));
            }
            /* the case of MOV [reg-8],imm32 */
            if ((d3 == 0xc7)
                && (d2==0x40 || d2==0x41 || d2==0x42 || d2==0x43
                    || d2==0x45 || d2==0x46 || d2==0x47)
                && (d1 == 0xf8)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV [reg-8],$0x%.8x\n", data));
            }
            /* the case of LEA reg,[disp32] */
            if ((d2 == 0x8d) && ((d1 & 0xc7) == 5)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/code ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/LEA reg,[$0x%.8x]\n", data));
            }
        }
        /* Check for constant references. */
        /* Check for a 32 bit word that looks like an absolute
         * reference to within the constant vector. Constant references
         * will be aligned. */
        if ((data >= (constants_start_addr-displacement))
            && (data < (constants_end_addr-displacement))
            && (((unsigned)data & 0x3) == 0)) {
            /* the case of MOV eax,m32 */
            if (d1 == 0xa1) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr,"/MOV eax,0x%.8x\n", data));
            }

            /* the case of MOV m32,EAX */
            if (d1 == 0xa3) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/MOV 0x%.8x,eax\n", data));
            }

            /* the case of CMP m32,imm32 */
            if ((d1 == 0x3d) && (d2 == 0x81)) {
                fixup_found = 1;
                FSHOW((stderr,
                       "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                       p, d6, d5, d4, d3, d2, d1, data));
                FSHOW((stderr, "/CMP 0x%.8x,immed32\n", data));
            }

            /* Check for a mod=00, r/m=101 byte. */
            if ((d1 & 0xc7) == 5) {
                /* the case of CMP m32,reg32 */
                if (d2 == 0x39) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr,"/CMP 0x%.8x,reg\n", data));
                }
                /* the case of CMP reg32,m32 */
                if (d2 == 0x3b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/CMP reg32,0x%.8x\n", data));
                }
                /* the case of MOV m32,reg32 */
                if (d2 == 0x89) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV 0x%.8x,reg32\n", data));
                }
                /* the case of MOV reg32,m32 */
                if (d2 == 0x8b) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "/abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/MOV reg32,0x%.8x\n", data));
                }
                /* the case of LEA reg32,m32 */
                if (d2 == 0x8d) {
                    fixup_found = 1;
                    FSHOW((stderr,
                           "abs const ref @%x: %.2x %.2x %.2x %.2x %.2x %.2x (%.8x)\n",
                           p, d6, d5, d4, d3, d2, d1, data));
                    FSHOW((stderr, "/LEA reg32,0x%.8x\n", data));
                }
            }
        }
    }
2083 /* If anything was found, print some information on the code
2087 "/compiled code object at %x: header words = %d, code words = %d\n",
2088 code, nheader_words, ncode_words));
2090 "/const start = %x, end = %x\n",
2091 constants_start_addr, constants_end_addr));
2093 "/code start = %x, end = %x\n",
2094 code_start_addr, code_end_addr));
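2096 /* Apply the fixups recorded in a transported code object, adjusting
2097 * absolute and relative references by the displacement of the move. */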
2099 apply_code_fixups(struct code *old_code, struct code *new_code)
2101 int nheader_words, ncode_words, nwords;
2102 void *constants_start_addr, *constants_end_addr;
2103 void *code_start_addr, *code_end_addr;
2105 lispobj fixups = NIL;
2106 unsigned displacement = (unsigned)new_code - (unsigned)old_code;
2107 struct vector *fixups_vector;
2109 /* It's OK if it's byte compiled code. The trace table offset will
2110 * be a fixnum if it's x86 compiled code - check. */
2111 if (new_code->trace_table_offset & 0x3) {
2112 /* FSHOW((stderr, "/byte compiled code object at %x\n", new_code)); */
2116 /* Else it's x86 machine code. */
2117 ncode_words = fixnum_value(new_code->code_size);
2118 nheader_words = HeaderValue(*(lispobj *)new_code);
2119 nwords = ncode_words + nheader_words;
2121 "/compiled code object at %x: header words = %d, code words = %d\n",
2122 new_code, nheader_words, ncode_words)); */
2123 constants_start_addr = (void *)new_code + 5*4;
2124 constants_end_addr = (void *)new_code + nheader_words*4;
2125 code_start_addr = (void *)new_code + nheader_words*4;
2126 code_end_addr = (void *)new_code + nwords*4;
2129 "/const start = %x, end = %x\n",
2130 constants_start_addr,constants_end_addr));
2132 "/code start = %x; end = %x\n",
2133 code_start_addr,code_end_addr));
2134 */
2136 /* The first constant should be a pointer to the fixups for this
2137 code object. Check. */
2138 fixups = new_code->constants[0];
2140 /* It will be 0 or the unbound-marker if there are no fixups, and
2141 * will be an other pointer if it is valid. */
2142 if ((fixups == 0) || (fixups == type_UnboundMarker) || !Pointerp(fixups)) {
2143 /* Check for possible errors. */
2144 if (check_code_fixups)
2145 sniff_code_object(new_code, displacement);
2147 /*fprintf(stderr,"Fixups for code object not found!?\n");
2148 fprintf(stderr,"*** Compiled code object at %x: header_words=%d code_words=%d .\n",
2149 new_code, nheader_words, ncode_words);
2150 fprintf(stderr,"*** Const. start = %x; end= %x; Code start = %x; end = %x\n",
2151 constants_start_addr,constants_end_addr,
2152 code_start_addr,code_end_addr);*/
2156 fixups_vector = (struct vector *)PTR(fixups);
2158 /* Could be pointing to a forwarding pointer. */
2159 if (Pointerp(fixups) && (find_page_index((void*)fixups_vector) != -1)
2160 && (fixups_vector->header == 0x01)) {
2161 /* If so, then follow it. */
2162 /*SHOW("following pointer to a forwarding pointer");*/
2163 fixups_vector = (struct vector *)PTR((lispobj)fixups_vector->length);
2166 /*SHOW("got fixups");*/
2168 if (TypeOf(fixups_vector->header) == type_SimpleArrayUnsignedByte32) {
2169 /* Got the fixups for the code block. Now work through the vector,
2170 and apply a fixup at each address. */
2171 int length = fixnum_value(fixups_vector->length);
2172 int i;
2173 for (i = 0; i < length; i++) {
2174 unsigned offset = fixups_vector->data[i];
2175 /* Now check the current value of offset. */
2176 unsigned old_value =
2177 *(unsigned *)((unsigned)code_start_addr + offset);
2179 /* If it's within the old_code object then it must be an
2180 * absolute fixup (relative ones are not saved) */
2181 if ((old_value >= (unsigned)old_code)
2182 && (old_value < ((unsigned)old_code + nwords*4)))
2183 /* So add the displacement. */
2184 *(unsigned *)((unsigned)code_start_addr + offset) =
2185 old_value + displacement;
2186 else
2187 /* It is outside the old code object so it must be a
2188 * relative fixup (absolute fixups are not saved). So
2189 * subtract the displacement. */
2190 *(unsigned *)((unsigned)code_start_addr + offset) =
2191 old_value - displacement;
2195 /* Check for possible errors. */
2196 if (check_code_fixups) {
2197 sniff_code_object(new_code,displacement);
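2199 /* Transport a code object: copy it, plant forwarding pointers for the
2200 * object and each function header in it, then reapply the fixups. */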
2201 static struct code *
2202 trans_code(struct code *code)
2204 struct code *new_code;
2205 lispobj l_code, l_new_code;
2206 int nheader_words, ncode_words, nwords;
2207 unsigned long displacement;
2208 lispobj fheaderl, *prev_pointer;
2211 "\n/transporting code object located at 0x%08x\n",
2212 (unsigned long) code)); */
2214 /* If object has already been transported, just return pointer. */
2215 if (*((lispobj *)code) == 0x01)
2216 return (struct code*)(((lispobj *)code)[1]);
2218 gc_assert(TypeOf(code->header) == type_CodeHeader);
2220 /* Prepare to transport the code vector. */
2221 l_code = (lispobj) code | type_OtherPointer;
2223 ncode_words = fixnum_value(code->code_size);
2224 nheader_words = HeaderValue(code->header);
2225 nwords = ncode_words + nheader_words;
2226 nwords = CEILING(nwords, 2);
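2227 /* (Allocation is always in two-word units, hence the rounding.) */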
2228 l_new_code = copy_large_object(l_code, nwords);
2229 new_code = (struct code *) PTR(l_new_code);
2231 /* It may not have been moved. */
2232 if (new_code == code)
2233 return new_code;
2235 displacement = l_new_code - l_code;
2239 "/old code object at 0x%08x, new code object at 0x%08x\n",
2240 (unsigned long) code,
2241 (unsigned long) new_code));
2242 FSHOW((stderr, "/Code object is %d words long.\n", nwords));
2245 /* Set forwarding pointer. */
2246 ((lispobj *)code)[0] = 0x01;
2247 ((lispobj *)code)[1] = l_new_code;
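2248 /* (0x01 in the first word marks a forwarded object; the second word holds the new tagged pointer.) */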
2249 /* Set forwarding pointers for all the function headers in the
2250 * code object. Also fix all self pointers. */
2252 fheaderl = code->entry_points;
2253 prev_pointer = &new_code->entry_points;
2255 while (fheaderl != NIL) {
2256 struct function *fheaderp, *nfheaderp;
2259 fheaderp = (struct function *) PTR(fheaderl);
2260 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2262 /* Calculate the new function pointer and the new */
2263 /* function header. */
2264 nfheaderl = fheaderl + displacement;
2265 nfheaderp = (struct function *) PTR(nfheaderl);
2267 /* Set forwarding pointer. */
2268 ((lispobj *)fheaderp)[0] = 0x01;
2269 ((lispobj *)fheaderp)[1] = nfheaderl;
2271 /* Fix self pointer. */
2272 nfheaderp->self = nfheaderl + RAW_ADDR_OFFSET;
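2273 /* (On the x86 the self slot holds a raw code address, not a tagged pointer, hence the offset.) */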
2274 *prev_pointer = nfheaderl;
2276 fheaderl = fheaderp->next;
2277 prev_pointer = &nfheaderp->next;
2280 /* sniff_code_object(new_code,displacement);*/
2281 apply_code_fixups(code,new_code);
2283 return new_code;
2287 scav_code_header(lispobj *where, lispobj object)
2290 int nheader_words, ncode_words, nwords;
2292 struct function *fheaderp;
2294 code = (struct code *) where;
2295 ncode_words = fixnum_value(code->code_size);
2296 nheader_words = HeaderValue(object);
2297 nwords = ncode_words + nheader_words;
2298 nwords = CEILING(nwords, 2);
2300 /* Scavenge the boxed section of the code data block. */
2301 scavenge(where + 1, nheader_words - 1);
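2302 /* (Only the boxed header words are scavenged; the machine code that follows is unboxed.) */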
2303 /* Scavenge the boxed section of each function object in the */
2304 /* code data block. */
2305 fheaderl = code->entry_points;
2306 while (fheaderl != NIL) {
2307 fheaderp = (struct function *) PTR(fheaderl);
2308 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
2310 scavenge(&fheaderp->name, 1);
2311 scavenge(&fheaderp->arglist, 1);
2312 scavenge(&fheaderp->type, 1);
2314 fheaderl = fheaderp->next;
2315 }
2317 return nwords;
2321 trans_code_header(lispobj object)
2325 ncode = trans_code((struct code *) PTR(object));
2326 return (lispobj) ncode | type_OtherPointer;
2330 size_code_header(lispobj *where)
2333 int nheader_words, ncode_words, nwords;
2335 code = (struct code *) where;
2337 ncode_words = fixnum_value(code->code_size);
2338 nheader_words = HeaderValue(code->header);
2339 nwords = ncode_words + nheader_words;
2340 nwords = CEILING(nwords, 2);
2342 return nwords;
2346 scav_return_pc_header(lispobj *where, lispobj object)
2348 lose("attempted to scavenge a return PC header where=0x%08x object=0x%08x",
2349 (unsigned long) where,
2350 (unsigned long) object);
2351 return 0; /* bogus return value to satisfy static type checking */
2355 trans_return_pc_header(lispobj object)
2357 struct function *return_pc;
2358 unsigned long offset;
2359 struct code *code, *ncode;
2361 SHOW("/trans_return_pc_header: Will this work?");
2363 return_pc = (struct function *) PTR(object);
2364 offset = HeaderValue(return_pc->header) * 4;
2366 /* Transport the whole code object. */
2367 code = (struct code *) ((unsigned long) return_pc - offset);
2368 ncode = trans_code(code);
2370 return ((lispobj) ncode + offset) | type_OtherPointer;
2373 /* On the 386, closures hold a pointer to the raw address instead of the
2374 * function object. */
2377 scav_closure_header(lispobj *where, lispobj object)
2379 struct closure *closure;
2380 lispobj fun;
2382 closure = (struct closure *)where;
2383 fun = closure->function - RAW_ADDR_OFFSET;
2385 /* The function may have moved so update the raw address. But
2386 * don't write unnecessarily. */
2387 if (closure->function != fun + RAW_ADDR_OFFSET)
2388 closure->function = fun + RAW_ADDR_OFFSET;
2390 return 2;
2395 scav_function_header(lispobj *where, lispobj object)
2397 lose("attempted to scavenge a function header where=0x%08x object=0x%08x",
2398 (unsigned long) where,
2399 (unsigned long) object);
2400 return 0; /* bogus return value to satisfy static type checking */
2404 trans_function_header(lispobj object)
2406 struct function *fheader;
2407 unsigned long offset;
2408 struct code *code, *ncode;
2410 fheader = (struct function *) PTR(object);
2411 offset = HeaderValue(fheader->header) * 4;
2413 /* Transport the whole code object. */
2414 code = (struct code *) ((unsigned long) fheader - offset);
2415 ncode = trans_code(code);
2417 return ((lispobj) ncode + offset) | type_FunctionPointer;
2426 scav_instance_pointer(lispobj *where, lispobj object)
2428 if (from_space_p(object)) {
2429 lispobj first, *first_pointer;
2431 /* Object is a pointer into from space. Check to see */
2432 /* whether it has been forwarded. */
2433 first_pointer = (lispobj *) PTR(object);
2434 first = *first_pointer;
2436 if (first == 0x01) {
2438 first = first_pointer[1];
2439 } else {
2440 first = trans_boxed(object);
2441 gc_assert(first != object);
2442 /* Set forwarding pointer. */
2443 first_pointer[0] = 0x01;
2444 first_pointer[1] = first;
2452 scav_instance_pointer(lispobj *where, lispobj object)
2454 lispobj copy, *first_pointer;
2456 /* Object is a pointer into from space - not a FP. */
2457 copy = trans_boxed(object);
2459 gc_assert(copy != object);
2461 first_pointer = (lispobj *) PTR(object);
2463 /* Set forwarding pointer. */
2464 first_pointer[0] = 0x01;
2465 first_pointer[1] = copy;
2476 static lispobj trans_list(lispobj object);
2480 scav_list_pointer(lispobj *where, lispobj object)
2482 /* KLUDGE: There's lots of cut-and-paste duplication between this
2483 * and scav_instance_pointer(..), scav_other_pointer(..), and
2484 * perhaps other functions too. -- WHN 20000620 */
2486 gc_assert(Pointerp(object));
2488 if (from_space_p(object)) {
2489 lispobj first, *first_pointer;
2491 /* Object is a pointer into from space. Check to see whether it has
2492 * been forwarded. */
2493 first_pointer = (lispobj *) PTR(object);
2494 first = *first_pointer;
2496 if (first == 0x01) {
2498 first = first_pointer[1];
2499 } else {
2500 first = trans_list(object);
2502 /* Set forwarding pointer */
2503 first_pointer[0] = 0x01;
2504 first_pointer[1] = first;
2507 gc_assert(Pointerp(first));
2508 gc_assert(!from_space_p(first));
2515 scav_list_pointer(lispobj *where, lispobj object)
2517 lispobj first, *first_pointer;
2519 gc_assert(Pointerp(object));
2521 /* Object is a pointer into from space - not FP. */
2523 first = trans_list(object);
2524 gc_assert(first != object);
2526 first_pointer = (lispobj *) PTR(object);
2528 /* Set forwarding pointer */
2529 first_pointer[0] = 0x01;
2530 first_pointer[1] = first;
2532 gc_assert(Pointerp(first));
2533 gc_assert(!from_space_p(first));
2540 trans_list(lispobj object)
2542 lispobj new_list_pointer;
2543 struct cons *cons, *new_cons;
2544 lispobj cdr;
2547 gc_assert(from_space_p(object));
2549 cons = (struct cons *) PTR(object);
2551 /* Copy 'object'. */
2552 new_cons = (struct cons *) gc_quick_alloc(sizeof(struct cons));
2553 new_cons->car = cons->car;
2554 new_cons->cdr = cons->cdr; /* updated later */
2555 new_list_pointer = (lispobj)new_cons | LowtagOf(object);
2557 /* Grab the cdr before it is clobbered. */
2558 cdr = cons->cdr;
2560 /* Set forwarding pointer (clobbers start of list). */
2561 cons->car = 0x01;
2562 cons->cdr = new_list_pointer;
2564 /* Try to linearize the list in the cdr direction. */
2566 while (1) {
2567 lispobj new_cdr;
2568 struct cons *cdr_cons, *new_cdr_cons;
2570 if (LowtagOf(cdr) != type_ListPointer || !from_space_p(cdr)
2571 || (*((lispobj *)PTR(cdr)) == 0x01))
2572 break;
2574 cdr_cons = (struct cons *) PTR(cdr);
2577 new_cdr_cons = (struct cons*) gc_quick_alloc(sizeof(struct cons));
2578 new_cdr_cons->car = cdr_cons->car;
2579 new_cdr_cons->cdr = cdr_cons->cdr;
2580 new_cdr = (lispobj)new_cdr_cons | LowtagOf(cdr);
2582 /* Grab the cdr before it is clobbered. */
2583 cdr = cdr_cons->cdr;
2585 /* Set forwarding pointer. */
2586 cdr_cons->car = 0x01;
2587 cdr_cons->cdr = new_cdr;
2589 /* Update the cdr of the last cons copied into new space to
2590 * keep the newspace scavenge from having to do it. */
2591 new_cons->cdr = new_cdr;
2593 new_cons = new_cdr_cons;
2594 }
2596 return new_list_pointer;
2601 * scavenging and transporting other pointers
2606 scav_other_pointer(lispobj *where, lispobj object)
2608 gc_assert(Pointerp(object));
2610 if (from_space_p(object)) {
2611 lispobj first, *first_pointer;
2613 /* Object is a pointer into from space. Check to see */
2614 /* whether it has been forwarded. */
2615 first_pointer = (lispobj *) PTR(object);
2616 first = *first_pointer;
2618 if (first == 0x01) {
2620 first = first_pointer[1];
2621 } else {
2623 first = (transother[TypeOf(first)])(object);
2625 if (first != object) {
2626 /* Set forwarding pointer */
2627 first_pointer[0] = 0x01;
2628 first_pointer[1] = first;
2633 gc_assert(Pointerp(first));
2634 gc_assert(!from_space_p(first));
2640 scav_other_pointer(lispobj *where, lispobj object)
2642 lispobj first, *first_pointer;
2644 gc_assert(Pointerp(object));
2646 /* Object is a pointer into from space - not FP. */
2647 first_pointer = (lispobj *) PTR(object);
2649 first = (transother[TypeOf(*first_pointer)])(object);
2651 if (first != object) {
2652 /* Set forwarding pointer. */
2653 first_pointer[0] = 0x01;
2654 first_pointer[1] = first;
2658 gc_assert(Pointerp(first));
2659 gc_assert(!from_space_p(first));
2667 * immediate, boxed, and unboxed objects
2671 size_pointer(lispobj *where)
2673 return 1;
2677 scav_immediate(lispobj *where, lispobj object)
2679 return 1;
2683 trans_immediate(lispobj object)
2685 lose("trying to transport an immediate");
2686 return NIL; /* bogus return value to satisfy static type checking */
2690 size_immediate(lispobj *where)
2692 return 1;
2697 scav_boxed(lispobj *where, lispobj object)
2699 return 1;
2703 trans_boxed(lispobj object)
2706 unsigned long length;
2708 gc_assert(Pointerp(object));
2710 header = *((lispobj *) PTR(object));
2711 length = HeaderValue(header) + 1;
2712 length = CEILING(length, 2);
2714 return copy_object(object, length);
2718 trans_boxed_large(lispobj object)
2721 unsigned long length;
2723 gc_assert(Pointerp(object));
2725 header = *((lispobj *) PTR(object));
2726 length = HeaderValue(header) + 1;
2727 length = CEILING(length, 2);
2729 return copy_large_object(object, length);
2733 size_boxed(lispobj *where)
2735 lispobj header;
2736 unsigned long length;
2738 header = *where;
2739 length = HeaderValue(header) + 1;
2740 length = CEILING(length, 2);
2742 return length;
2746 scav_fdefn(lispobj *where, lispobj object)
2748 struct fdefn *fdefn;
2750 fdefn = (struct fdefn *)where;
2752 /* FSHOW((stderr, "scav_fdefn, function = %p, raw_addr = %p\n",
2753 fdefn->function, fdefn->raw_addr)); */
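2754 /* Only scavenge the fdefn's slots when raw_addr tracks the function slot; afterwards, recompute raw_addr in case the function moved. */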
2755 if ((char *)(fdefn->function + RAW_ADDR_OFFSET) == fdefn->raw_addr) {
2756 scavenge(where + 1, sizeof(struct fdefn)/sizeof(lispobj) - 1);
2758 /* Don't write unnecessarily. */
2759 if (fdefn->raw_addr != (char *)(fdefn->function + RAW_ADDR_OFFSET))
2760 fdefn->raw_addr = (char *)(fdefn->function + RAW_ADDR_OFFSET);
2762 return sizeof(struct fdefn) / sizeof(lispobj);
2763 } else {
2764 return 1;
2765 }
2769 scav_unboxed(lispobj *where, lispobj object)
2771 unsigned long length;
2773 length = HeaderValue(object) + 1;
2774 length = CEILING(length, 2);
2776 return length;
2780 trans_unboxed(lispobj object)
2783 unsigned long length;
2786 gc_assert(Pointerp(object));
2788 header = *((lispobj *) PTR(object));
2789 length = HeaderValue(header) + 1;
2790 length = CEILING(length, 2);
2792 return copy_unboxed_object(object, length);
2796 trans_unboxed_large(lispobj object)
2799 unsigned long length;
2802 gc_assert(Pointerp(object));
2804 header = *((lispobj *) PTR(object));
2805 length = HeaderValue(header) + 1;
2806 length = CEILING(length, 2);
2808 return copy_large_unboxed_object(object, length);
2812 size_unboxed(lispobj *where)
2814 lispobj header;
2815 unsigned long length;
2817 header = *where;
2818 length = HeaderValue(header) + 1;
2819 length = CEILING(length, 2);
2821 return length;
2825 * vector-like objects
2828 #define NWORDS(x,y) (CEILING((x),(y)) / (y))
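2829 /* (NWORDS(x,y) is the number of words needed to hold x elements packed
2830 * y to a word, i.e. the element count rounded up and divided by y.) */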
2831 scav_string(lispobj *where, lispobj object)
2833 struct vector *vector;
2836 /* NOTE: Strings contain one more byte of data than the length */
2837 /* slot indicates. */
2839 vector = (struct vector *) where;
2840 length = fixnum_value(vector->length) + 1;
2841 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2847 trans_string(lispobj object)
2849 struct vector *vector;
2852 gc_assert(Pointerp(object));
2854 /* NOTE: A string contains one more byte of data (a terminating
2855 * '\0' to help when interfacing with C functions) than indicated
2856 * by the length slot. */
2858 vector = (struct vector *) PTR(object);
2859 length = fixnum_value(vector->length) + 1;
2860 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2862 return copy_large_unboxed_object(object, nwords);
2866 size_string(lispobj *where)
2868 struct vector *vector;
2871 /* NOTE: A string contains one more byte of data (a terminating
2872 * '\0' to help when interfacing with C functions) than indicated
2873 * by the length slot. */
2875 vector = (struct vector *) where;
2876 length = fixnum_value(vector->length) + 1;
2877 nwords = CEILING(NWORDS(length, 4) + 2, 2);
2882 /* FIXME: What does this mean? */
2883 int gencgc_hash = 1;
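2884 /* (Judging from scav_vector below: when gencgc_hash is 0, valid-hashing
2885 * vectors are simply re-marked as must-rehash instead of being fixed up here.) */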
2886 scav_vector(lispobj *where, lispobj object)
2888 unsigned int kv_length;
2889 lispobj *kv_vector;
2890 unsigned int length;
2891 lispobj *hash_table;
2892 lispobj empty_symbol;
2893 unsigned int *index_vector, *next_vector, *hash_vector;
2894 lispobj weak_p_obj;
2895 unsigned next_vector_length;
2897 /* FIXME: A comment explaining this would be nice. It looks as
2898 * though SB-VM:VECTOR-VALID-HASHING-SUBTYPE is set for EQ-based
2899 * hash tables in the Lisp HASH-TABLE code, and nowhere else. */
2900 if (HeaderValue(object) != subtype_VectorValidHashing)
2901 return 1;
2903 if (!gencgc_hash) {
2904 /* This is set for backward compatibility. FIXME: Do we need
2905 * this any more? */
2906 *where = (subtype_VectorMustRehash << type_Bits) | type_SimpleVector;
2907 return 1;
2908 }
2910 kv_length = fixnum_value(where[1]);
2911 kv_vector = where + 2; /* Skip the header and length. */
2912 /*FSHOW((stderr,"/kv_length = %d\n", kv_length));*/
2914 /* Scavenge element 0, which may be a hash-table structure. */
2915 scavenge(where+2, 1);
2916 if (!Pointerp(where[2])) {
2917 lose("no pointer at %x in hash table", where[2]);
2919 hash_table = (lispobj *)PTR(where[2]);
2920 /*FSHOW((stderr,"/hash_table = %x\n", hash_table));*/
2921 if (TypeOf(hash_table[0]) != type_InstanceHeader) {
2922 lose("hash table not instance (%x at %x)", hash_table[0], hash_table);
2925 /* Scavenge element 1, which should be some internal symbol that
2926 * the hash table code reserves for marking empty slots. */
2927 scavenge(where+3, 1);
2928 if (!Pointerp(where[3])) {
2929 lose("not #:%EMPTY-HT-SLOT% symbol pointer: %x", where[3]);
2931 empty_symbol = where[3];
2932 /* fprintf(stderr,"* empty_symbol = %x\n", empty_symbol);*/
2933 if (TypeOf(*(lispobj *)PTR(empty_symbol)) != type_SymbolHeader) {
2934 lose("not a symbol where #:%EMPTY-HT-SLOT% expected: %x",
2935 *(lispobj *)PTR(empty_symbol));
2938 /* Scavenge hash table, which will fix the positions of the other
2939 * needed objects. */
2940 scavenge(hash_table, 16);
2942 /* Cross-check the kv_vector. */
2943 if (where != (lispobj *)PTR(hash_table[9])) {
2944 lose("hash_table table!=this table %x", hash_table[9]);
2948 weak_p_obj = hash_table[10];
2952 lispobj index_vector_obj = hash_table[13];
2954 if (Pointerp(index_vector_obj) &&
2955 (TypeOf(*(lispobj *)PTR(index_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2956 index_vector = ((unsigned int *)PTR(index_vector_obj)) + 2;
2957 /*FSHOW((stderr, "/index_vector = %x\n",index_vector));*/
2958 length = fixnum_value(((unsigned int *)PTR(index_vector_obj))[1]);
2959 /*FSHOW((stderr, "/length = %d\n", length));*/
2961 lose("invalid index_vector %x", index_vector_obj);
2967 lispobj next_vector_obj = hash_table[14];
2969 if (Pointerp(next_vector_obj) &&
2970 (TypeOf(*(lispobj *)PTR(next_vector_obj)) == type_SimpleArrayUnsignedByte32)) {
2971 next_vector = ((unsigned int *)PTR(next_vector_obj)) + 2;
2972 /*FSHOW((stderr, "/next_vector = %x\n", next_vector));*/
2973 next_vector_length = fixnum_value(((unsigned int *)PTR(next_vector_obj))[1]);
2974 /*FSHOW((stderr, "/next_vector_length = %d\n", next_vector_length));*/
2976 lose("invalid next_vector %x", next_vector_obj);
2980 /* maybe hash vector */
2982 /* FIXME: This bare "15" offset should become a symbolic
2983 * expression of some sort. And all the other bare offsets
2984 * too. And the bare "16" in scavenge(hash_table, 16). And
2985 * probably other stuff too. Ugh.. */
2986 lispobj hash_vector_obj = hash_table[15];
2988 if (Pointerp(hash_vector_obj) &&
2989 (TypeOf(*(lispobj *)PTR(hash_vector_obj))
2990 == type_SimpleArrayUnsignedByte32)) {
2991 hash_vector = ((unsigned int *)PTR(hash_vector_obj)) + 2;
2992 /*FSHOW((stderr, "/hash_vector = %x\n", hash_vector));*/
2993 gc_assert(fixnum_value(((unsigned int *)PTR(hash_vector_obj))[1])
2994 == next_vector_length);
2997 /*FSHOW((stderr, "/no hash_vector: %x\n", hash_vector_obj));*/
3001 /* These lengths could be different as the index_vector can be a
3002 * different length from the others; a larger index_vector could help
3003 * reduce collisions. */
3004 gc_assert(next_vector_length*2 == kv_length);
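3005 /* (Each entry takes two kv_vector slots: a key and a value.) */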
3006 /* now all set up.. */
3008 /* Work through the KV vector. */
3009 {
3010 int i;
3011 for (i = 1; i < next_vector_length; i++) {
3012 lispobj old_key = kv_vector[2*i];
3013 unsigned int old_index = (old_key & 0x1fffffff)%length;
3015 /* Scavenge the key and value. */
3016 scavenge(&kv_vector[2*i],2);
3018 /* Check whether the key has moved and is EQ based. */
3020 lispobj new_key = kv_vector[2*i];
3021 unsigned int new_index = (new_key & 0x1fffffff)%length;
3023 if ((old_index != new_index) &&
3024 ((!hash_vector) || (hash_vector[i] == 0x80000000)) &&
3025 ((new_key != empty_symbol) ||
3026 (kv_vector[2*i] != empty_symbol))) {
3029 "* EQ key %d moved from %x to %x; index %d to %d\n",
3030 i, old_key, new_key, old_index, new_index));*/
3032 if (index_vector[old_index] != 0) {
3033 /*FSHOW((stderr, "/P1 %d\n", index_vector[old_index]));*/
3035 /* Unlink the key from the old_index chain. */
3036 if (index_vector[old_index] == i) {
3037 /*FSHOW((stderr, "/P2a %d\n", next_vector[i]));*/
3038 index_vector[old_index] = next_vector[i];
3039 /* Link it into the needing rehash chain. */
3040 next_vector[i] = fixnum_value(hash_table[11]);
3041 hash_table[11] = make_fixnum(i);
3043 } else {
3044 unsigned prior = index_vector[old_index];
3045 unsigned next = next_vector[prior];
3047 /*FSHOW((stderr, "/P3a %d %d\n", prior, next));*/
3050 /*FSHOW((stderr, "/P3b %d %d\n", prior, next));*/
3053 next_vector[prior] = next_vector[next];
3054 /* Link it into the needing rehash
3055 * chain. */
3056 next_vector[next] =
3057 fixnum_value(hash_table[11]);
3058 hash_table[11] = make_fixnum(next);
3060 break;
3061 }
3062 prior = next;
3063 next = next_vector[next];
3071 return (CEILING(kv_length + 2, 2));
3075 trans_vector(lispobj object)
3077 struct vector *vector;
3080 gc_assert(Pointerp(object));
3082 vector = (struct vector *) PTR(object);
3084 length = fixnum_value(vector->length);
3085 nwords = CEILING(length + 2, 2);
3087 return copy_large_object(object, nwords);
3091 size_vector(lispobj *where)
3093 struct vector *vector;
3096 vector = (struct vector *) where;
3097 length = fixnum_value(vector->length);
3098 nwords = CEILING(length + 2, 2);
3105 scav_vector_bit(lispobj *where, lispobj object)
3107 struct vector *vector;
3110 vector = (struct vector *) where;
3111 length = fixnum_value(vector->length);
3112 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3118 trans_vector_bit(lispobj object)
3120 struct vector *vector;
3123 gc_assert(Pointerp(object));
3125 vector = (struct vector *) PTR(object);
3126 length = fixnum_value(vector->length);
3127 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3129 return copy_large_unboxed_object(object, nwords);
3133 size_vector_bit(lispobj *where)
3135 struct vector *vector;
3138 vector = (struct vector *) where;
3139 length = fixnum_value(vector->length);
3140 nwords = CEILING(NWORDS(length, 32) + 2, 2);
3147 scav_vector_unsigned_byte_2(lispobj *where, lispobj object)
3149 struct vector *vector;
3152 vector = (struct vector *) where;
3153 length = fixnum_value(vector->length);
3154 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3160 trans_vector_unsigned_byte_2(lispobj object)
3162 struct vector *vector;
3165 gc_assert(Pointerp(object));
3167 vector = (struct vector *) PTR(object);
3168 length = fixnum_value(vector->length);
3169 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3171 return copy_large_unboxed_object(object, nwords);
3175 size_vector_unsigned_byte_2(lispobj *where)
3177 struct vector *vector;
3180 vector = (struct vector *) where;
3181 length = fixnum_value(vector->length);
3182 nwords = CEILING(NWORDS(length, 16) + 2, 2);
3189 scav_vector_unsigned_byte_4(lispobj *where, lispobj object)
3191 struct vector *vector;
3194 vector = (struct vector *) where;
3195 length = fixnum_value(vector->length);
3196 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3202 trans_vector_unsigned_byte_4(lispobj object)
3204 struct vector *vector;
3207 gc_assert(Pointerp(object));
3209 vector = (struct vector *) PTR(object);
3210 length = fixnum_value(vector->length);
3211 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3213 return copy_large_unboxed_object(object, nwords);
3217 size_vector_unsigned_byte_4(lispobj *where)
3219 struct vector *vector;
3222 vector = (struct vector *) where;
3223 length = fixnum_value(vector->length);
3224 nwords = CEILING(NWORDS(length, 8) + 2, 2);
3230 scav_vector_unsigned_byte_8(lispobj *where, lispobj object)
3232 struct vector *vector;
3235 vector = (struct vector *) where;
3236 length = fixnum_value(vector->length);
3237 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3243 trans_vector_unsigned_byte_8(lispobj object)
3245 struct vector *vector;
3248 gc_assert(Pointerp(object));
3250 vector = (struct vector *) PTR(object);
3251 length = fixnum_value(vector->length);
3252 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3254 return copy_large_unboxed_object(object, nwords);
3258 size_vector_unsigned_byte_8(lispobj *where)
3260 struct vector *vector;
3263 vector = (struct vector *) where;
3264 length = fixnum_value(vector->length);
3265 nwords = CEILING(NWORDS(length, 4) + 2, 2);
3272 scav_vector_unsigned_byte_16(lispobj *where, lispobj object)
3274 struct vector *vector;
3277 vector = (struct vector *) where;
3278 length = fixnum_value(vector->length);
3279 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3285 trans_vector_unsigned_byte_16(lispobj object)
3287 struct vector *vector;
3290 gc_assert(Pointerp(object));
3292 vector = (struct vector *) PTR(object);
3293 length = fixnum_value(vector->length);
3294 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3296 return copy_large_unboxed_object(object, nwords);
3300 size_vector_unsigned_byte_16(lispobj *where)
3302 struct vector *vector;
3305 vector = (struct vector *) where;
3306 length = fixnum_value(vector->length);
3307 nwords = CEILING(NWORDS(length, 2) + 2, 2);
3313 scav_vector_unsigned_byte_32(lispobj *where, lispobj object)
3315 struct vector *vector;
3318 vector = (struct vector *) where;
3319 length = fixnum_value(vector->length);
3320 nwords = CEILING(length + 2, 2);
3326 trans_vector_unsigned_byte_32(lispobj object)
3328 struct vector *vector;
3331 gc_assert(Pointerp(object));
3333 vector = (struct vector *) PTR(object);
3334 length = fixnum_value(vector->length);
3335 nwords = CEILING(length + 2, 2);
3337 return copy_large_unboxed_object(object, nwords);
3341 size_vector_unsigned_byte_32(lispobj *where)
3343 struct vector *vector;
3346 vector = (struct vector *) where;
3347 length = fixnum_value(vector->length);
3348 nwords = CEILING(length + 2, 2);
3354 scav_vector_single_float(lispobj *where, lispobj object)
3356 struct vector *vector;
3359 vector = (struct vector *) where;
3360 length = fixnum_value(vector->length);
3361 nwords = CEILING(length + 2, 2);
3367 trans_vector_single_float(lispobj object)
3369 struct vector *vector;
3372 gc_assert(Pointerp(object));
3374 vector = (struct vector *) PTR(object);
3375 length = fixnum_value(vector->length);
3376 nwords = CEILING(length + 2, 2);
3378 return copy_large_unboxed_object(object, nwords);
3382 size_vector_single_float(lispobj *where)
3384 struct vector *vector;
3387 vector = (struct vector *) where;
3388 length = fixnum_value(vector->length);
3389 nwords = CEILING(length + 2, 2);
3395 scav_vector_double_float(lispobj *where, lispobj object)
3397 struct vector *vector;
3400 vector = (struct vector *) where;
3401 length = fixnum_value(vector->length);
3402 nwords = CEILING(length * 2 + 2, 2);
3408 trans_vector_double_float(lispobj object)
3410 struct vector *vector;
3413 gc_assert(Pointerp(object));
3415 vector = (struct vector *) PTR(object);
3416 length = fixnum_value(vector->length);
3417 nwords = CEILING(length * 2 + 2, 2);
3419 return copy_large_unboxed_object(object, nwords);
3423 size_vector_double_float(lispobj *where)
3425 struct vector *vector;
3428 vector = (struct vector *) where;
3429 length = fixnum_value(vector->length);
3430 nwords = CEILING(length * 2 + 2, 2);
3435 #ifdef type_SimpleArrayLongFloat
3437 scav_vector_long_float(lispobj *where, lispobj object)
3439 struct vector *vector;
3442 vector = (struct vector *) where;
3443 length = fixnum_value(vector->length);
3444 nwords = CEILING(length * 3 + 2, 2);
3450 trans_vector_long_float(lispobj object)
3452 struct vector *vector;
3455 gc_assert(Pointerp(object));
3457 vector = (struct vector *) PTR(object);
3458 length = fixnum_value(vector->length);
3459 nwords = CEILING(length * 3 + 2, 2);
3461 return copy_large_unboxed_object(object, nwords);
3465 size_vector_long_float(lispobj *where)
3467 struct vector *vector;
3470 vector = (struct vector *) where;
3471 length = fixnum_value(vector->length);
3472 nwords = CEILING(length * 3 + 2, 2);
3479 #ifdef type_SimpleArrayComplexSingleFloat
3481 scav_vector_complex_single_float(lispobj *where, lispobj object)
3483 struct vector *vector;
3486 vector = (struct vector *) where;
3487 length = fixnum_value(vector->length);
3488 nwords = CEILING(length * 2 + 2, 2);
3494 trans_vector_complex_single_float(lispobj object)
3496 struct vector *vector;
3499 gc_assert(Pointerp(object));
3501 vector = (struct vector *) PTR(object);
3502 length = fixnum_value(vector->length);
3503 nwords = CEILING(length * 2 + 2, 2);
3505 return copy_large_unboxed_object(object, nwords);
3509 size_vector_complex_single_float(lispobj *where)
3511 struct vector *vector;
3514 vector = (struct vector *) where;
3515 length = fixnum_value(vector->length);
3516 nwords = CEILING(length * 2 + 2, 2);
3522 #ifdef type_SimpleArrayComplexDoubleFloat
3524 scav_vector_complex_double_float(lispobj *where, lispobj object)
3526 struct vector *vector;
3529 vector = (struct vector *) where;
3530 length = fixnum_value(vector->length);
3531 nwords = CEILING(length * 4 + 2, 2);
3537 trans_vector_complex_double_float(lispobj object)
3539 struct vector *vector;
3542 gc_assert(Pointerp(object));
3544 vector = (struct vector *) PTR(object);
3545 length = fixnum_value(vector->length);
3546 nwords = CEILING(length * 4 + 2, 2);
3548 return copy_large_unboxed_object(object, nwords);
3552 size_vector_complex_double_float(lispobj *where)
3554 struct vector *vector;
3557 vector = (struct vector *) where;
3558 length = fixnum_value(vector->length);
3559 nwords = CEILING(length * 4 + 2, 2);
3566 #ifdef type_SimpleArrayComplexLongFloat
3568 scav_vector_complex_long_float(lispobj *where, lispobj object)
3570 struct vector *vector;
3573 vector = (struct vector *) where;
3574 length = fixnum_value(vector->length);
3575 nwords = CEILING(length * 6 + 2, 2);
3581 trans_vector_complex_long_float(lispobj object)
3583 struct vector *vector;
3586 gc_assert(Pointerp(object));
3588 vector = (struct vector *) PTR(object);
3589 length = fixnum_value(vector->length);
3590 nwords = CEILING(length * 6 + 2, 2);
3592 return copy_large_unboxed_object(object, nwords);
3596 size_vector_complex_long_float(lispobj *where)
3598 struct vector *vector;
3601 vector = (struct vector *) where;
3602 length = fixnum_value(vector->length);
3603 nwords = CEILING(length * 6 + 2, 2);
3614 /* XX This is a hack adapted from cgc.c. These don't work too well with the
3615 * gencgc, as a list of the weak pointers is maintained within the
3616 * objects, which causes writes to the pages. A limited attempt is made
3617 * to avoid unnecessary writes, but this needs a rethink. */
3619 #define WEAK_POINTER_NWORDS \
3620 CEILING((sizeof(struct weak_pointer) / sizeof(lispobj)), 2)
3623 scav_weak_pointer(lispobj *where, lispobj object)
3625 struct weak_pointer *wp = weak_pointers;
3626 /* Push the weak pointer onto the list of weak pointers.
3627 * Do I have to watch for duplicates? Originally this was
3628 * part of trans_weak_pointer but that didn't work in the
3629 * case where the WP was in a promoted region.
3632 /* Check whether it's already in the list. */
3633 while (wp != NULL) {
3634 if (wp == (struct weak_pointer*)where) {
3635 break;
3636 }
3637 wp = wp->next;
3638 }
3639 if (wp == NULL) {
3640 /* Add it to the start of the list. */
3641 wp = (struct weak_pointer*)where;
3642 if (wp->next != weak_pointers) {
3643 wp->next = weak_pointers;
3645 /*SHOW("avoided write to weak pointer");*/
3650 /* Do not let GC scavenge the value slot of the weak pointer.
3651 * (That is why it is a weak pointer.) */
3653 return WEAK_POINTER_NWORDS;
3657 trans_weak_pointer(lispobj object)
3660 struct weak_pointer *wp;
3662 gc_assert(Pointerp(object));
3664 #if defined(DEBUG_WEAK)
3665 FSHOW((stderr, "Transporting weak pointer from 0x%08x\n", object));
3668 /* Need to remember where all the weak pointers are that have */
3669 /* been transported so they can be fixed up in a post-GC pass. */
3671 copy = copy_object(object, WEAK_POINTER_NWORDS);
3672 /* wp = (struct weak_pointer *) PTR(copy);*/
3675 /* Push the weak pointer onto the list of weak pointers. */
3676 /* wp->next = weak_pointers;
3677 * weak_pointers = wp;*/
3683 size_weak_pointer(lispobj *where)
3685 return WEAK_POINTER_NWORDS;
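3686 /* Walk the list built up by scav_weak_pointer after GC, updating each
3687 * weak pointer through its forwarding pointer or else breaking it. */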
3688 void scan_weak_pointers(void)
3690 struct weak_pointer *wp;
3691 for (wp = weak_pointers; wp != NULL; wp = wp->next) {
3692 lispobj value = wp->value;
3693 lispobj first, *first_pointer;
3695 first_pointer = (lispobj *)PTR(value);
3698 FSHOW((stderr, "/weak pointer at 0x%08x\n", (unsigned long) wp));
3699 FSHOW((stderr, "/value: 0x%08x\n", (unsigned long) value));
3702 if (Pointerp(value) && from_space_p(value)) {
3703 /* Now, we need to check whether the object has been forwarded. If
3704 * it has been, the weak pointer is still good and needs to be
3705 * updated. Otherwise, the weak pointer needs to be nil'ed
3706 * out. */
3707 if (first_pointer[0] == 0x01) {
3708 wp->value = first_pointer[1];
3709 } else {
3710 /* Break it. */
3711 wp->value = NIL;
3712 wp->broken = T;
3713 }
3724 scav_lose(lispobj *where, lispobj object)
3726 lose("no scavenge function for object 0x%08x", (unsigned long) object);
3727 return 0; /* bogus return value to satisfy static type checking */
3731 trans_lose(lispobj object)
3733 lose("no transport function for object 0x%08x", (unsigned long) object);
3734 return NIL; /* bogus return value to satisfy static type checking */
3738 size_lose(lispobj *where)
3740 lose("no size function for object at 0x%08x", (unsigned long) where);
3741 return 1; /* bogus return value to satisfy static type checking */
3745 gc_init_tables(void)
3749 /* Set default value in all slots of scavenge table. */
3750 for (i = 0; i < 256; i++) { /* FIXME: bare constant length, ick! */
3751 scavtab[i] = scav_lose;
3754 /* For each type which can be selected by the low 3 bits of the tag
3755 * alone, set multiple entries in our 8-bit scavenge table (one for each
3756 * possible value of the high 5 bits). */
3757 for (i = 0; i < 32; i++) { /* FIXME: bare constant length, ick! */
3758 scavtab[type_EvenFixnum|(i<<3)] = scav_immediate;
3759 scavtab[type_FunctionPointer|(i<<3)] = scav_function_pointer;
3760 /* OtherImmediate0 */
3761 scavtab[type_ListPointer|(i<<3)] = scav_list_pointer;
3762 scavtab[type_OddFixnum|(i<<3)] = scav_immediate;
3763 scavtab[type_InstancePointer|(i<<3)] = scav_instance_pointer;
3764 /* OtherImmediate1 */
3765 scavtab[type_OtherPointer|(i<<3)] = scav_other_pointer;
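3766 /* (For example, every tag byte whose low three bits equal type_ListPointer
3767 * dispatches to scav_list_pointer, whatever its top five bits are.) */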
3768 /* Other-pointer types (those selected by all eight bits of the tag) get
3769 * one entry each in the scavenge table. */
3770 scavtab[type_Bignum] = scav_unboxed;
3771 scavtab[type_Ratio] = scav_boxed;
3772 scavtab[type_SingleFloat] = scav_unboxed;
3773 scavtab[type_DoubleFloat] = scav_unboxed;
3774 #ifdef type_LongFloat
3775 scavtab[type_LongFloat] = scav_unboxed;
3777 scavtab[type_Complex] = scav_boxed;
3778 #ifdef type_ComplexSingleFloat
3779 scavtab[type_ComplexSingleFloat] = scav_unboxed;
3781 #ifdef type_ComplexDoubleFloat
3782 scavtab[type_ComplexDoubleFloat] = scav_unboxed;
3784 #ifdef type_ComplexLongFloat
3785 scavtab[type_ComplexLongFloat] = scav_unboxed;
3787 scavtab[type_SimpleArray] = scav_boxed;
3788 scavtab[type_SimpleString] = scav_string;
3789 scavtab[type_SimpleBitVector] = scav_vector_bit;
3790 scavtab[type_SimpleVector] = scav_vector;
3791 scavtab[type_SimpleArrayUnsignedByte2] = scav_vector_unsigned_byte_2;
3792 scavtab[type_SimpleArrayUnsignedByte4] = scav_vector_unsigned_byte_4;
3793 scavtab[type_SimpleArrayUnsignedByte8] = scav_vector_unsigned_byte_8;
3794 scavtab[type_SimpleArrayUnsignedByte16] = scav_vector_unsigned_byte_16;
3795 scavtab[type_SimpleArrayUnsignedByte32] = scav_vector_unsigned_byte_32;
3796 #ifdef type_SimpleArraySignedByte8
3797 scavtab[type_SimpleArraySignedByte8] = scav_vector_unsigned_byte_8;
3799 #ifdef type_SimpleArraySignedByte16
3800 scavtab[type_SimpleArraySignedByte16] = scav_vector_unsigned_byte_16;
3802 #ifdef type_SimpleArraySignedByte30
3803 scavtab[type_SimpleArraySignedByte30] = scav_vector_unsigned_byte_32;
3805 #ifdef type_SimpleArraySignedByte32
3806 scavtab[type_SimpleArraySignedByte32] = scav_vector_unsigned_byte_32;
3808 scavtab[type_SimpleArraySingleFloat] = scav_vector_single_float;
3809 scavtab[type_SimpleArrayDoubleFloat] = scav_vector_double_float;
3810 #ifdef type_SimpleArrayLongFloat
3811 scavtab[type_SimpleArrayLongFloat] = scav_vector_long_float;
3813 #ifdef type_SimpleArrayComplexSingleFloat
3814 scavtab[type_SimpleArrayComplexSingleFloat] = scav_vector_complex_single_float;
3816 #ifdef type_SimpleArrayComplexDoubleFloat
3817 scavtab[type_SimpleArrayComplexDoubleFloat] = scav_vector_complex_double_float;
3819 #ifdef type_SimpleArrayComplexLongFloat
3820 scavtab[type_SimpleArrayComplexLongFloat] = scav_vector_complex_long_float;
3822 scavtab[type_ComplexString] = scav_boxed;
3823 scavtab[type_ComplexBitVector] = scav_boxed;
3824 scavtab[type_ComplexVector] = scav_boxed;
3825 scavtab[type_ComplexArray] = scav_boxed;
3826 scavtab[type_CodeHeader] = scav_code_header;
3827 /*scavtab[type_FunctionHeader] = scav_function_header;*/
3828 /*scavtab[type_ClosureFunctionHeader] = scav_function_header;*/
3829 /*scavtab[type_ReturnPcHeader] = scav_return_pc_header;*/
3831 scavtab[type_ClosureHeader] = scav_closure_header;
3832 scavtab[type_FuncallableInstanceHeader] = scav_closure_header;
3833 scavtab[type_ByteCodeFunction] = scav_closure_header;
3834 scavtab[type_ByteCodeClosure] = scav_closure_header;
3836 scavtab[type_ClosureHeader] = scav_boxed;
3837 scavtab[type_FuncallableInstanceHeader] = scav_boxed;
3838 scavtab[type_ByteCodeFunction] = scav_boxed;
3839 scavtab[type_ByteCodeClosure] = scav_boxed;
3841 scavtab[type_ValueCellHeader] = scav_boxed;
3842 scavtab[type_SymbolHeader] = scav_boxed;
3843 scavtab[type_BaseChar] = scav_immediate;
3844 scavtab[type_Sap] = scav_unboxed;
3845 scavtab[type_UnboundMarker] = scav_immediate;
3846 scavtab[type_WeakPointer] = scav_weak_pointer;
3847 scavtab[type_InstanceHeader] = scav_boxed;
3848 scavtab[type_Fdefn] = scav_fdefn;
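3849 /* (fdefns need scav_fdefn, above, because of their raw function address slot.) */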
3850 /* transport other table, initialized same way as scavtab */
3851 for (i = 0; i < 256; i++)
3852 transother[i] = trans_lose;
3853 transother[type_Bignum] = trans_unboxed;
3854 transother[type_Ratio] = trans_boxed;
3855 transother[type_SingleFloat] = trans_unboxed;
3856 transother[type_DoubleFloat] = trans_unboxed;
3857 #ifdef type_LongFloat
3858 transother[type_LongFloat] = trans_unboxed;
3860 transother[type_Complex] = trans_boxed;
3861 #ifdef type_ComplexSingleFloat
3862 transother[type_ComplexSingleFloat] = trans_unboxed;
3864 #ifdef type_ComplexDoubleFloat
3865 transother[type_ComplexDoubleFloat] = trans_unboxed;
3867 #ifdef type_ComplexLongFloat
3868 transother[type_ComplexLongFloat] = trans_unboxed;
3870 transother[type_SimpleArray] = trans_boxed_large;
3871 transother[type_SimpleString] = trans_string;
3872 transother[type_SimpleBitVector] = trans_vector_bit;
3873 transother[type_SimpleVector] = trans_vector;
3874 transother[type_SimpleArrayUnsignedByte2] = trans_vector_unsigned_byte_2;
3875 transother[type_SimpleArrayUnsignedByte4] = trans_vector_unsigned_byte_4;
3876 transother[type_SimpleArrayUnsignedByte8] = trans_vector_unsigned_byte_8;
3877 transother[type_SimpleArrayUnsignedByte16] = trans_vector_unsigned_byte_16;
3878 transother[type_SimpleArrayUnsignedByte32] = trans_vector_unsigned_byte_32;
3879 #ifdef type_SimpleArraySignedByte8
3880 transother[type_SimpleArraySignedByte8] = trans_vector_unsigned_byte_8;
3882 #ifdef type_SimpleArraySignedByte16
3883 transother[type_SimpleArraySignedByte16] = trans_vector_unsigned_byte_16;
3885 #ifdef type_SimpleArraySignedByte30
3886 transother[type_SimpleArraySignedByte30] = trans_vector_unsigned_byte_32;
3888 #ifdef type_SimpleArraySignedByte32
3889 transother[type_SimpleArraySignedByte32] = trans_vector_unsigned_byte_32;
3891 transother[type_SimpleArraySingleFloat] = trans_vector_single_float;
3892 transother[type_SimpleArrayDoubleFloat] = trans_vector_double_float;
3893 #ifdef type_SimpleArrayLongFloat
3894 transother[type_SimpleArrayLongFloat] = trans_vector_long_float;
3896 #ifdef type_SimpleArrayComplexSingleFloat
3897 transother[type_SimpleArrayComplexSingleFloat] = trans_vector_complex_single_float;
3899 #ifdef type_SimpleArrayComplexDoubleFloat
3900 transother[type_SimpleArrayComplexDoubleFloat] = trans_vector_complex_double_float;
3902 #ifdef type_SimpleArrayComplexLongFloat
3903 transother[type_SimpleArrayComplexLongFloat] = trans_vector_complex_long_float;
3905 transother[type_ComplexString] = trans_boxed;
3906 transother[type_ComplexBitVector] = trans_boxed;
3907 transother[type_ComplexVector] = trans_boxed;
3908 transother[type_ComplexArray] = trans_boxed;
3909 transother[type_CodeHeader] = trans_code_header;
3910 transother[type_FunctionHeader] = trans_function_header;
3911 transother[type_ClosureFunctionHeader] = trans_function_header;
3912 transother[type_ReturnPcHeader] = trans_return_pc_header;
3913 transother[type_ClosureHeader] = trans_boxed;
3914 transother[type_FuncallableInstanceHeader] = trans_boxed;
3915 transother[type_ByteCodeFunction] = trans_boxed;
3916 transother[type_ByteCodeClosure] = trans_boxed;
3917 transother[type_ValueCellHeader] = trans_boxed;
3918 transother[type_SymbolHeader] = trans_boxed;
3919 transother[type_BaseChar] = trans_immediate;
3920 transother[type_Sap] = trans_unboxed;
3921 transother[type_UnboundMarker] = trans_immediate;
3922 transother[type_WeakPointer] = trans_weak_pointer;
3923 transother[type_InstanceHeader] = trans_boxed;
3924 transother[type_Fdefn] = trans_boxed;
3926 /* size table, initialized the same way as scavtab */
3927 for (i = 0; i < 256; i++)
3928 sizetab[i] = size_lose;
3929 for (i = 0; i < 32; i++) {
3930 sizetab[type_EvenFixnum|(i<<3)] = size_immediate;
3931 sizetab[type_FunctionPointer|(i<<3)] = size_pointer;
3932 /* OtherImmediate0 */
3933 sizetab[type_ListPointer|(i<<3)] = size_pointer;
3934 sizetab[type_OddFixnum|(i<<3)] = size_immediate;
3935 sizetab[type_InstancePointer|(i<<3)] = size_pointer;
3936 /* OtherImmediate1 */
3937 sizetab[type_OtherPointer|(i<<3)] = size_pointer;
3939 sizetab[type_Bignum] = size_unboxed;
3940 sizetab[type_Ratio] = size_boxed;
3941 sizetab[type_SingleFloat] = size_unboxed;
3942 sizetab[type_DoubleFloat] = size_unboxed;
3943 #ifdef type_LongFloat
3944 sizetab[type_LongFloat] = size_unboxed;
3946 sizetab[type_Complex] = size_boxed;
3947 #ifdef type_ComplexSingleFloat
3948 sizetab[type_ComplexSingleFloat] = size_unboxed;
3950 #ifdef type_ComplexDoubleFloat
3951 sizetab[type_ComplexDoubleFloat] = size_unboxed;
3953 #ifdef type_ComplexLongFloat
3954 sizetab[type_ComplexLongFloat] = size_unboxed;
3956 sizetab[type_SimpleArray] = size_boxed;
3957 sizetab[type_SimpleString] = size_string;
3958 sizetab[type_SimpleBitVector] = size_vector_bit;
3959 sizetab[type_SimpleVector] = size_vector;
3960 sizetab[type_SimpleArrayUnsignedByte2] = size_vector_unsigned_byte_2;
3961 sizetab[type_SimpleArrayUnsignedByte4] = size_vector_unsigned_byte_4;
3962 sizetab[type_SimpleArrayUnsignedByte8] = size_vector_unsigned_byte_8;
3963 sizetab[type_SimpleArrayUnsignedByte16] = size_vector_unsigned_byte_16;
3964 sizetab[type_SimpleArrayUnsignedByte32] = size_vector_unsigned_byte_32;
3965 #ifdef type_SimpleArraySignedByte8
3966 sizetab[type_SimpleArraySignedByte8] = size_vector_unsigned_byte_8;
3968 #ifdef type_SimpleArraySignedByte16
3969 sizetab[type_SimpleArraySignedByte16] = size_vector_unsigned_byte_16;
3971 #ifdef type_SimpleArraySignedByte30
3972 sizetab[type_SimpleArraySignedByte30] = size_vector_unsigned_byte_32;
3974 #ifdef type_SimpleArraySignedByte32
3975 sizetab[type_SimpleArraySignedByte32] = size_vector_unsigned_byte_32;
3977 sizetab[type_SimpleArraySingleFloat] = size_vector_single_float;
3978 sizetab[type_SimpleArrayDoubleFloat] = size_vector_double_float;
3979 #ifdef type_SimpleArrayLongFloat
3980 sizetab[type_SimpleArrayLongFloat] = size_vector_long_float;
3982 #ifdef type_SimpleArrayComplexSingleFloat
3983 sizetab[type_SimpleArrayComplexSingleFloat] = size_vector_complex_single_float;
3985 #ifdef type_SimpleArrayComplexDoubleFloat
3986 sizetab[type_SimpleArrayComplexDoubleFloat] = size_vector_complex_double_float;
3988 #ifdef type_SimpleArrayComplexLongFloat
3989 sizetab[type_SimpleArrayComplexLongFloat] = size_vector_complex_long_float;
3991 sizetab[type_ComplexString] = size_boxed;
3992 sizetab[type_ComplexBitVector] = size_boxed;
3993 sizetab[type_ComplexVector] = size_boxed;
3994 sizetab[type_ComplexArray] = size_boxed;
3995 sizetab[type_CodeHeader] = size_code_header;
3997 /* We shouldn't see these, so just lose if it happens. */
3998 sizetab[type_FunctionHeader] = size_function_header;
3999 sizetab[type_ClosureFunctionHeader] = size_function_header;
4000 sizetab[type_ReturnPcHeader] = size_return_pc_header;
4002 sizetab[type_ClosureHeader] = size_boxed;
4003 sizetab[type_FuncallableInstanceHeader] = size_boxed;
4004 sizetab[type_ValueCellHeader] = size_boxed;
4005 sizetab[type_SymbolHeader] = size_boxed;
4006 sizetab[type_BaseChar] = size_immediate;
4007 sizetab[type_Sap] = size_unboxed;
4008 sizetab[type_UnboundMarker] = size_immediate;
4009 sizetab[type_WeakPointer] = size_weak_pointer;
4010 sizetab[type_InstanceHeader] = size_boxed;
4011 sizetab[type_Fdefn] = size_boxed;
4014 /* Scan an area looking for an object which encloses the given pointer.
4015 * Return the object start on success or NULL on failure. */
4017 search_space(lispobj *start, size_t words, lispobj *pointer)
4019 while (words > 0) {
4020 size_t count = 1;
4021 lispobj thing = *start;
4023 /* If thing is an immediate then this is a cons */
4024 if (Pointerp(thing)
4025 || ((thing & 3) == 0) /* fixnum */
4026 || (TypeOf(thing) == type_BaseChar)
4027 || (TypeOf(thing) == type_UnboundMarker))
4028 count = 2;
4029 else
4030 count = (sizetab[TypeOf(thing)])(start);
4032 /* Check whether the pointer is within this object. */
4033 if ((pointer >= start) && (pointer < (start+count))) {
4035 /*FSHOW((stderr,"/found %x in %x %x\n", pointer, start, thing));*/
4039 /* Round up the count */
4040 count = CEILING(count,2);
4042 start += count;
4043 words -= count;
4044 }
4045 return (NULL);
4049 search_read_only_space(lispobj *pointer)
4051 lispobj* start = (lispobj*)READ_ONLY_SPACE_START;
4052 lispobj* end = (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER);
4053 if ((pointer < start) || (pointer >= end))
4055 return (search_space(start, (pointer+2)-start, pointer));
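4057 /* ((pointer+2)-start words suffices, presumably because any object enclosing the pointer must begin at or before it.) */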
4059 search_static_space(lispobj *pointer)
4061 lispobj* start = (lispobj*)STATIC_SPACE_START;
4062 lispobj* end = (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER);
4063 if ((pointer < start) || (pointer >= end))
4065 return (search_space(start, (pointer+2)-start, pointer));
4068 /* a faster version for searching the dynamic space. This will work even
4069 * if the object is in a current allocation region. */
4071 search_dynamic_space(lispobj *pointer)
4073 int page_index = find_page_index(pointer);
4076 /* Address may be invalid - do some checks. */
4077 if ((page_index == -1) || (page_table[page_index].allocated == FREE_PAGE))
4079 start = (lispobj *)((void *)page_address(page_index)
4080 + page_table[page_index].first_object_offset);
4081 return (search_space(start, (pointer+2)-start, pointer));
4084 /* FIXME: There is a strong family resemblance between this function
4085 * and the function of the same name in purify.c. Would it be possible
4086 * to implement them as exactly the same function? */
4088 valid_dynamic_space_pointer(lispobj *pointer)
4090 lispobj *start_addr;
4092 /* Find the object start address */
4093 if ((start_addr = search_dynamic_space(pointer)) == NULL) {
4097 /* We need to allow raw pointers into Code objects for return
4098 * addresses. This will also pick up pointers to functions in code
4099 * objects. */
4100 if (TypeOf(*start_addr) == type_CodeHeader) {
4101 /* X Could do some further checks here. */
4105 /* If it's not a return address then it needs to be a valid Lisp
4106 * pointer. */
4107 if (!Pointerp((lispobj)pointer)) {
4111 /* Check that the object pointed to is consistent with the pointer
4112 * low tag. */
4113 switch (LowtagOf((lispobj)pointer)) {
4114 case type_FunctionPointer:
4115 /* Start_addr should be the enclosing code object, or a closure
4116 * header. */
4117 switch (TypeOf(*start_addr)) {
4118 case type_CodeHeader:
4119 /* This case is probably caught above. */
4121 case type_ClosureHeader:
4122 case type_FuncallableInstanceHeader:
4123 case type_ByteCodeFunction:
4124 case type_ByteCodeClosure:
4125 if ((unsigned)pointer !=
4126 ((unsigned)start_addr+type_FunctionPointer)) {
4130 pointer, start_addr, *start_addr));
4138 pointer, start_addr, *start_addr));
4142 case type_ListPointer:
4143 if ((unsigned)pointer !=
4144 ((unsigned)start_addr+type_ListPointer)) {
4148 pointer, start_addr, *start_addr));
4151 /* Is it a plausible cons? */
4152 if ((Pointerp(start_addr[0])
4153 || ((start_addr[0] & 3) == 0) /* fixnum */
4154 || (TypeOf(start_addr[0]) == type_BaseChar)
4155 || (TypeOf(start_addr[0]) == type_UnboundMarker))
4156 && (Pointerp(start_addr[1])
4157 || ((start_addr[1] & 3) == 0) /* fixnum */
4158 || (TypeOf(start_addr[1]) == type_BaseChar)
4159 || (TypeOf(start_addr[1]) == type_UnboundMarker)))
4165 pointer, start_addr, *start_addr));
4168 case type_InstancePointer:
4169 if ((unsigned)pointer !=
4170 ((unsigned)start_addr+type_InstancePointer)) {
4174 pointer, start_addr, *start_addr));
4177 if (TypeOf(start_addr[0]) != type_InstanceHeader) {
4181 pointer, start_addr, *start_addr));
4185 case type_OtherPointer:
4186 if ((unsigned)pointer !=
4187 ((int)start_addr+type_OtherPointer)) {
4191 pointer, start_addr, *start_addr));
4194 /* Is it plausible? Not a cons. X should check the headers. */
4195 if (Pointerp(start_addr[0]) || ((start_addr[0] & 3) == 0)) {
4199 pointer, start_addr, *start_addr));
4202 switch (TypeOf(start_addr[0])) {
4203 case type_UnboundMarker:
4208 pointer, start_addr, *start_addr));
4211 /* only pointed to by function pointers? */
4212 case type_ClosureHeader:
4213 case type_FuncallableInstanceHeader:
4214 case type_ByteCodeFunction:
4215 case type_ByteCodeClosure:
4219 pointer, start_addr, *start_addr));
4222 case type_InstanceHeader:
4226 pointer, start_addr, *start_addr));
4229 /* the valid other immediate pointer objects */
4230 case type_SimpleVector:
4233 #ifdef type_ComplexSingleFloat
4234 case type_ComplexSingleFloat:
4236 #ifdef type_ComplexDoubleFloat
4237 case type_ComplexDoubleFloat:
4239 #ifdef type_ComplexLongFloat
4240 case type_ComplexLongFloat:
4242 case type_SimpleArray:
4243 case type_ComplexString:
4244 case type_ComplexBitVector:
4245 case type_ComplexVector:
4246 case type_ComplexArray:
4247 case type_ValueCellHeader:
4248 case type_SymbolHeader:
4250 case type_CodeHeader:
4252 case type_SingleFloat:
4253 case type_DoubleFloat:
4254 #ifdef type_LongFloat
4255 case type_LongFloat:
4257 case type_SimpleString:
4258 case type_SimpleBitVector:
4259 case type_SimpleArrayUnsignedByte2:
4260 case type_SimpleArrayUnsignedByte4:
4261 case type_SimpleArrayUnsignedByte8:
4262 case type_SimpleArrayUnsignedByte16:
4263 case type_SimpleArrayUnsignedByte32:
4264 #ifdef type_SimpleArraySignedByte8
4265 case type_SimpleArraySignedByte8:
4267 #ifdef type_SimpleArraySignedByte16
4268 case type_SimpleArraySignedByte16:
4270 #ifdef type_SimpleArraySignedByte30
4271 case type_SimpleArraySignedByte30:
4273 #ifdef type_SimpleArraySignedByte32
4274 case type_SimpleArraySignedByte32:
4276 case type_SimpleArraySingleFloat:
4277 case type_SimpleArrayDoubleFloat:
4278 #ifdef type_SimpleArrayLongFloat
4279 case type_SimpleArrayLongFloat:
4281 #ifdef type_SimpleArrayComplexSingleFloat
4282 case type_SimpleArrayComplexSingleFloat:
4284 #ifdef type_SimpleArrayComplexDoubleFloat
4285 case type_SimpleArrayComplexDoubleFloat:
4287 #ifdef type_SimpleArrayComplexLongFloat
4288 case type_SimpleArrayComplexLongFloat:
4291 case type_WeakPointer:
4298 pointer, start_addr, *start_addr));
4306 pointer, start_addr, *start_addr));
4314 /* Adjust large bignum and vector objects. This will adjust the allocated
4315 * region if the size has shrunk, and move unboxed objects into unboxed
4316 * pages. The pages are not promoted here, and the promoted region is not
4317 * added to the new_regions; this is really only designed to be called from
4318 * preserve_pointer. Nothing should fail if this is missed; it just may delay the
4319 * moving of objects to unboxed pages, and the freeing of pages. */
4321 maybe_adjust_large_object(lispobj *where)
4325 lispobj *source, *dest;
4329 int remaining_bytes;
4336 /* Check whether it's a vector or bignum object. */
4337 switch (TypeOf(where[0])) {
4338 case type_SimpleVector:
4342 case type_SimpleString:
4343 case type_SimpleBitVector:
4344 case type_SimpleArrayUnsignedByte2:
4345 case type_SimpleArrayUnsignedByte4:
4346 case type_SimpleArrayUnsignedByte8:
4347 case type_SimpleArrayUnsignedByte16:
4348 case type_SimpleArrayUnsignedByte32:
4349 #ifdef type_SimpleArraySignedByte8
4350 case type_SimpleArraySignedByte8:
4352 #ifdef type_SimpleArraySignedByte16
4353 case type_SimpleArraySignedByte16:
4355 #ifdef type_SimpleArraySignedByte30
4356 case type_SimpleArraySignedByte30:
4358 #ifdef type_SimpleArraySignedByte32
4359 case type_SimpleArraySignedByte32:
4361 case type_SimpleArraySingleFloat:
4362 case type_SimpleArrayDoubleFloat:
4363 #ifdef type_SimpleArrayLongFloat
4364 case type_SimpleArrayLongFloat:
4366 #ifdef type_SimpleArrayComplexSingleFloat
4367 case type_SimpleArrayComplexSingleFloat:
4369 #ifdef type_SimpleArrayComplexDoubleFloat
4370 case type_SimpleArrayComplexDoubleFloat:
4372 #ifdef type_SimpleArrayComplexLongFloat
4373 case type_SimpleArrayComplexLongFloat:
4375 boxed = UNBOXED_PAGE;
4381 /* Find its current size. */
4382 nwords = (sizetab[TypeOf(where[0])])(where);
4384 first_page = find_page_index((void *)where);
4385 gc_assert(first_page >= 0);
4387 /* Note: Any page write-protection must be removed, else a later
4388 * scavenge_newspace may incorrectly not scavenge these pages.
4389 * This would not be necessary if they are added to the new areas,
4390 * but let's do it for them all (they'll probably be written
4391 * anyway?). */
4393 gc_assert(page_table[first_page].first_object_offset == 0);
4395 next_page = first_page;
4396 remaining_bytes = nwords*4;
4397 while (remaining_bytes > 4096) {
4398 gc_assert(page_table[next_page].gen == from_space);
4399 gc_assert((page_table[next_page].allocated == BOXED_PAGE)
4400 || (page_table[next_page].allocated == UNBOXED_PAGE));
4401 gc_assert(page_table[next_page].large_object);
4402 gc_assert(page_table[next_page].first_object_offset ==
4403 -4096*(next_page-first_page));
4404 gc_assert(page_table[next_page].bytes_used == 4096);
4406 page_table[next_page].allocated = boxed;
4408 /* Shouldn't be write-protected at this stage. Essential that the
4409 * pages aren't. */
4410 gc_assert(!page_table[next_page].write_protected);
4411 remaining_bytes -= 4096;
4415 /* Now only one page remains, but the object may have shrunk so
4416 * there may be more unused pages which will be freed. */
4418 /* Object may have shrunk but shouldn't have grown - check. */
4419 gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
4421 page_table[next_page].allocated = boxed;
4422 gc_assert(page_table[next_page].allocated ==
4423 page_table[first_page].allocated);
4425 /* Adjust the bytes_used. */
4426 old_bytes_used = page_table[next_page].bytes_used;
4427 page_table[next_page].bytes_used = remaining_bytes;
4429 bytes_freed = old_bytes_used - remaining_bytes;
4431 /* Free any remaining pages; needs care. */
4433 while ((old_bytes_used == 4096) &&
4434 (page_table[next_page].gen == from_space) &&
4435 ((page_table[next_page].allocated == UNBOXED_PAGE)
4436 || (page_table[next_page].allocated == BOXED_PAGE)) &&
4437 page_table[next_page].large_object &&
4438 (page_table[next_page].first_object_offset ==
4439 -(next_page - first_page)*4096)) {
4440 /* It checks out OK, free the page. We don't need to bother
4441 * zeroing the pages as this should have been done before shrinking the
4442 * object. These pages shouldn't be write protected as they
4443 * should be zero filled. */
4444 gc_assert(page_table[next_page].write_protected == 0);
4446 old_bytes_used = page_table[next_page].bytes_used;
4447 page_table[next_page].allocated = FREE_PAGE;
4448 page_table[next_page].bytes_used = 0;
4449 bytes_freed += old_bytes_used;
4453 if ((bytes_freed > 0) && gencgc_verbose)
4454 FSHOW((stderr, "/adjust_large_object freed %d\n", bytes_freed));
4456 generations[from_space].bytes_allocated -= bytes_freed;
4457 bytes_allocated -= bytes_freed;
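
/* (Editorial sketch, not original code: the page-table invariant that
 * the loops above rely on for a large object starting at first_page
 * and occupying nwords words. Only names already used in this file
 * appear here.) */
#if 0
static void
check_large_object_pages(int first_page, int nwords)
{
    int next_page = first_page;
    int remaining_bytes = nwords*4;
    /* Every page but the last is completely full, shares the first
     * page's allocation kind, and records how far back the object
     * starts. */
    while (remaining_bytes > 4096) {
        gc_assert(page_table[next_page].large_object);
        gc_assert(page_table[next_page].bytes_used == 4096);
        gc_assert(page_table[next_page].first_object_offset ==
                  -4096*(next_page - first_page));
        gc_assert(page_table[next_page].allocated ==
                  page_table[first_page].allocated);
        remaining_bytes -= 4096;
        next_page++;
    }
    /* The last page may be only partly used. */
    gc_assert(page_table[next_page].bytes_used >= remaining_bytes);
}
#endif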
4462 /* Take a possible pointer to a list object and mark the page_table
4463 * so that it will not need changing during a GC.
4465 * This involves locating the page it points to, then backing up to
4466 * the first page that has its first object start at offset 0, and
4467 * then marking all pages dont_move from the first up to the page that
4468 * ends the block (not full, or followed by a free or other-gen page).
4470 * This ensures that objects spanning pages are not broken.
4472 * It is assumed that all the page static flags have been cleared at
4473 * the start of a GC.
4475 * It is also assumed that the current gc_alloc region has been flushed and
4476 * the tables updated. */
4478 preserve_pointer(void *addr)
4480 int addr_page_index = find_page_index(addr);
4483 unsigned region_allocation;
4485 /* Address is quite likely to have been invalid - do some checks. */
4486 if ((addr_page_index == -1)
4487 || (page_table[addr_page_index].allocated == FREE_PAGE)
4488 || (page_table[addr_page_index].bytes_used == 0)
4489 || (page_table[addr_page_index].gen != from_space)
4490 /* Skip if already marked dont_move */
4491 || (page_table[addr_page_index].dont_move != 0))
4494 region_allocation = page_table[addr_page_index].allocated;
4496 /* Check the offset within the page.
4498 * FIXME: The mask should have a symbolic name, and ideally should
4499 * be derived from page size instead of hardwired to 0xfff.
4500 * (Also fix other uses of 0xfff, elsewhere.) */
4501 if (((unsigned)addr & 0xfff) > page_table[addr_page_index].bytes_used)
4504 if (enable_pointer_filter && !valid_dynamic_space_pointer(addr))
4507 /* Work backwards to find a page with a first_object_offset of 0.
4508 * The pages should be contiguous with all bytes used in the same
4509 * gen. Assumes the first_object_offset is negative or zero. */
4510 first_page = addr_page_index;
4511 while (page_table[first_page].first_object_offset != 0) {
4513 /* Do some checks. */
4514 gc_assert(page_table[first_page].bytes_used == 4096);
4515 gc_assert(page_table[first_page].gen == from_space);
4516 gc_assert(page_table[first_page].allocated == region_allocation);
4519 /* Adjust any large objects before promotion as they won't be copied
4520 * after promotion. */
4521 if (page_table[first_page].large_object) {
4522 maybe_adjust_large_object(page_address(first_page));
4523 /* If a large object has shrunk then addr may now point to a free
4524 * area in which case it's ignored here. Note it gets through the
4525 * valid pointer test above because the tail looks like conses. */
4526 if ((page_table[addr_page_index].allocated == FREE_PAGE)
4527 || (page_table[addr_page_index].bytes_used == 0)
4528 /* Check the offset within the page. */
4529 || (((unsigned)addr & 0xfff)
4530 > page_table[addr_page_index].bytes_used)) {
4532 "weird? ignore ptr 0x%x to freed area of large object\n",
4536 /* It may have moved to unboxed pages. */
4537 region_allocation = page_table[first_page].allocated;
4540 /* Now work forward until the end of this contiguous area is found,
4541 * marking all pages as dont_move. */
4542 for (i = first_page; ;i++) {
4543 gc_assert(page_table[i].allocated == region_allocation);
4545 /* Mark the page static. */
4546 page_table[i].dont_move = 1;
4548 /* Move the page to the new_space. XX I'd rather not do this but
4549 * the GC logic is not quite able to cope with the static pages
4550 * remaining in the from space. This also requires the generation
4551 * bytes_allocated counters be updated. */
4552 page_table[i].gen = new_space;
4553 generations[new_space].bytes_allocated += page_table[i].bytes_used;
4554 generations[from_space].bytes_allocated -= page_table[i].bytes_used;
4556 /* It is essential that the pages are not write protected as they
4557 * may have pointers into the old-space which need scavenging. They
4558 * shouldn't be write protected at this stage. */
4559 gc_assert(!page_table[i].write_protected);
4561 /* Check whether this is the last page in this contiguous block.. */
4562 if ((page_table[i].bytes_used < 4096)
4563 /* ..or it is 4096 and is the last in the block */
4564 || (page_table[i+1].allocated == FREE_PAGE)
4565 || (page_table[i+1].bytes_used == 0) /* next page free */
4566 || (page_table[i+1].gen != from_space) /* diff. gen */
4567 || (page_table[i+1].first_object_offset == 0))
4571 /* Check that the page is now static. */
4572 gc_assert(page_table[addr_page_index].dont_move != 0);
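
/* (Editorial sketch of how preserve_pointer is driven; the real call
 * site is in garbage_collect_generation below. Every word in an
 * ambiguous root area, here the control stack, is tried as a possible
 * pointer; preserve_pointer itself rejects the junk.) */
#if 0
{
    lispobj **ptr;
    for (ptr = (lispobj **)CONTROL_STACK_END - 1;
         ptr > (lispobj **)&ptr; /* approximate current stack bottom */
         ptr--)
        preserve_pointer(*ptr);
}
#endif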
4577 #ifdef CONTROL_STACKS
4578 /* Scavenge the thread stack conservative roots. */
4580 scavenge_thread_stacks(void)
4582 lispobj thread_stacks = SymbolValue(CONTROL_STACKS);
4583 int type = TypeOf(thread_stacks);
4585 if (LowtagOf(thread_stacks) == type_OtherPointer) {
4586 struct vector *vector = (struct vector *) PTR(thread_stacks);
4588 if (TypeOf(vector->header) != type_SimpleVector)
4590 length = fixnum_value(vector->length);
4591 for (i = 0; i < length; i++) {
4592 lispobj stack_obj = vector->data[i];
4593 if (LowtagOf(stack_obj) == type_OtherPointer) {
4594 struct vector *stack = (struct vector *) PTR(stack_obj);
4596 if (TypeOf(stack->header) !=
4597 type_SimpleArrayUnsignedByte32) {
4600 vector_length = fixnum_value(stack->length);
4601 if ((gencgc_verbose > 1) && (vector_length <= 0))
4603 "/weird? control stack vector length %d\n",
4605 if (vector_length > 0) {
4606 lispobj *stack_pointer = (lispobj*)stack->data[0];
4607 if ((stack_pointer < (lispobj *)CONTROL_STACK_START) ||
4608 (stack_pointer > (lispobj *)CONTROL_STACK_END))
4609 lose("invalid stack pointer %x",
4610 (unsigned)stack_pointer);
4611 if ((stack_pointer > (lispobj *)CONTROL_STACK_START) &&
4612 (stack_pointer < (lispobj *)CONTROL_STACK_END)) {
4614 * (1) hardwired word length = 4; and as usual,
4615 * when fixing this, check for other places
4616 * with the same problem
4617 * (2) calling it 'length' suggests bytes;
4618 * perhaps 'size' instead? */
4619 unsigned int length = ((unsigned)CONTROL_STACK_END -
4620 (unsigned)stack_pointer) / 4;
4622 if (length >= vector_length) {
4623 lose("invalid stack size %d >= vector length %d",
4627 if (gencgc_verbose > 1) {
4629 "scavenging %d words of control stack %d of length %d words.\n",
4630 length, i, vector_length));
4632 for (j = 0; j < length; j++) {
4633 preserve_pointer((void *)stack->data[1+j]);
4644 /* If the given page is not write-protected, then scan it for pointers
4645 * to younger generations or the top temp. generation. If no
4646 * suspicious pointers are found then the page is write-protected.
4648 * Care is taken to check for pointers to the current gc_alloc region
4649 * if it is a younger generation or the temp. generation. This frees
4650 * the caller from doing a gc_alloc_update_page_tables. Actually the
4651 * gc_alloc_generation does not need to be checked as this is only
4652 * called from scavenge_generation when the gc_alloc generation is
4653 * younger, so it just checks for a pointer into the current region.
4656 * We return 1 if the page was write-protected, else 0.
4659 update_page_write_prot(int page)
4661 int gen = page_table[page].gen;
4664 void **page_addr = (void **)page_address(page);
4665 int num_words = page_table[page].bytes_used / 4;
4667 /* Shouldn't be a free page. */
4668 gc_assert(page_table[page].allocated != FREE_PAGE);
4669 gc_assert(page_table[page].bytes_used != 0);
4671 /* Skip if it's already write-protected or an unboxed page. */
4672 if (page_table[page].write_protected
4673 || (page_table[page].allocated == UNBOXED_PAGE))
4676 /* Scan the page for pointers to younger generations or the
4677 * top temp. generation. */
4679 for (j = 0; j < num_words; j++) {
4680 void *ptr = *(page_addr+j);
4681 int index = find_page_index(ptr);
4683 /* Check that it's in the dynamic space */
4685 if (/* Does it point to a younger or the temp. generation? */
4686 ((page_table[index].allocated != FREE_PAGE)
4687 && (page_table[index].bytes_used != 0)
4688 && ((page_table[index].gen < gen)
4689 || (page_table[index].gen == NUM_GENERATIONS)))
4691 /* Or does it point within a current gc_alloc region? */
4692 || ((boxed_region.start_addr <= ptr)
4693 && (ptr <= boxed_region.free_pointer))
4694 || ((unboxed_region.start_addr <= ptr)
4695 && (ptr <= unboxed_region.free_pointer))) {
4702 /* Write-protect the page. */
4703 /*FSHOW((stderr, "/write-protecting page %d gen %d\n", page, gen));*/
4705 os_protect((void *)page_addr,
4707 OS_VM_PROT_READ|OS_VM_PROT_EXECUTE);
4709 /* Note the page as protected in the page tables. */
4710 page_table[page].write_protected = 1;
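
/* (Editorial sketch: the test above pulled out as a predicate, for
 * clarity only; no such helper exists in this file. A word blocks
 * write protection if it points into a younger generation, into the
 * scratch generation NUM_GENERATIONS, or into a live gc_alloc
 * region.) */
#if 0
static int
ptr_blocks_write_protection(void *ptr, int gen)
{
    int index = find_page_index(ptr);
    if ((index != -1)
        && (page_table[index].allocated != FREE_PAGE)
        && (page_table[index].bytes_used != 0)
        && ((page_table[index].gen < gen)
            || (page_table[index].gen == NUM_GENERATIONS)))
        return 1;
    return ((boxed_region.start_addr <= ptr)
            && (ptr <= boxed_region.free_pointer))
        || ((unboxed_region.start_addr <= ptr)
            && (ptr <= unboxed_region.free_pointer));
}
#endif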
4716 /* Scavenge a generation.
4718 * This will not resolve all pointers when generation is the new
4719 * space, as new objects may be added which are not checked here; use
4720 * scavenge_newspace_generation for that.
4722 * Write-protected pages should not have any pointers to the
4723 * from_space so do not need scavenging; thus write-protected pages are
4724 * not always scavenged. There is some code to check that these pages
4725 * are not written; but to check fully, the write-protected pages would
4726 * need to be scavenged too, by disabling the code that skips them.
4728 * Under the current scheme when a generation is GCed the younger
4729 * generations will be empty. So, when a generation is being GCed it
4730 * is only necessary to scavenge the older generations for pointers,
4731 * not the younger ones. So a page that does not have pointers to younger
4732 * generations does not need to be scavenged.
4734 * The write-protection can be used to note pages that don't have
4735 * pointers to younger pages. But pages can be written without having
4736 * pointers to younger generations. After the pages are scavenged here
4737 * they can be scanned for pointers to younger generations and if
4738 * there are none the page can be write-protected.
4740 * One complication is when the newspace is the top temp. generation.
4742 * Enabling SC_GEN_CK scavenges the write-protected pages and checks
4743 * that none were written, which they shouldn't be as they should have
4744 * no pointers to younger generations. This breaks down for weak
4745 * pointers as the objects contain a link to the next and are written
4746 * if a weak pointer is scavenged. Still it's a useful check. */
4748 scavenge_generation(int generation)
4755 /* Clear the write_protected_cleared flags on all pages. */
4756 for (i = 0; i < NUM_PAGES; i++)
4757 page_table[i].write_protected_cleared = 0;
4760 for (i = 0; i < last_free_page; i++) {
4761 if ((page_table[i].allocated == BOXED_PAGE)
4762 && (page_table[i].bytes_used != 0)
4763 && (page_table[i].gen == generation)) {
4766 /* This should be the start of a contiguous block. */
4767 gc_assert(page_table[i].first_object_offset == 0);
4769 /* We need to find the full extent of this contiguous
4770 * block in case objects span pages. */
4772 /* Now work forward until the end of this contiguous area
4773 * is found. A small area is preferred as there is a
4774 * better chance of its pages being write-protected. */
4775 for (last_page = i; ;last_page++)
4776 /* Check whether this is the last page in this contiguous
4778 if ((page_table[last_page].bytes_used < 4096)
4779 /* Or it is 4096 and is the last in the block */
4780 || (page_table[last_page+1].allocated != BOXED_PAGE)
4781 || (page_table[last_page+1].bytes_used == 0)
4782 || (page_table[last_page+1].gen != generation)
4783 || (page_table[last_page+1].first_object_offset == 0))
4786 /* Do a limited check for write_protected pages. If all pages
4787 * are write_protected then there is no need to scavenge. */
4790 for (j = i; j <= last_page; j++)
4791 if (page_table[j].write_protected == 0) {
4799 scavenge(page_address(i), (page_table[last_page].bytes_used
4800 + (last_page-i)*4096)/4);
4802 /* Now scan the pages and write protect those
4803 * that don't have pointers to younger generations. */
4805 if (enable_page_protection) {
4806 for (j = i; j <= last_page; j++) {
4807 num_wp += update_page_write_prot(j);
4816 if ((gencgc_verbose > 1) && (num_wp != 0)) {
4818 "/write protected %d pages within generation %d\n",
4819 num_wp, generation));
4823 /* Check that none of the write_protected pages in this generation
4824 * have been written to. */
4825 for (i = 0; i < NUM_PAGES; i++) {
4826 if ((page_table[i].allocated != FREE_PAGE)
4827 && (page_table[i].bytes_used != 0)
4828 && (page_table[i].gen == generation)
4829 && (page_table[i].write_protected_cleared != 0)) {
4830 FSHOW((stderr, "/scavenge_generation %d\n", generation));
4832 "/page bytes_used=%d first_object_offset=%d dont_move=%d\n",
4833 page_table[i].bytes_used,
4834 page_table[i].first_object_offset,
4835 page_table[i].dont_move));
4836 lose("write-protected page %d written to in scavenge_generation",
4844 /* Scavenge a newspace generation. As it is scavenged new objects may
4845 * be allocated to it; these will also need to be scavenged. This
4846 * repeats until there are no more objects unscavenged in the
4847 * newspace generation.
4849 * To help improve the efficiency, areas written are recorded by
4850 * gc_alloc and only these scavenged. Sometimes a little more will be
4851 * scavenged, but this causes no harm. An easy check is done that the
4852 * scavenged bytes equal the number allocated in the previous scavenge.
4855 * Write-protected pages are not scanned except if they are marked
4856 * dont_move in which case they may have been promoted and still have
4857 * pointers to the from space.
4859 * Write-protected pages could potentially be written by alloc; however,
4860 * to avoid having to handle re-scavenging of write-protected pages,
4861 * gc_alloc does not write to write-protected pages.
4863 * New areas of objects allocated are recorded alternatively in the two
4864 * new_areas arrays below. */
4865 static struct new_area new_areas_1[NUM_NEW_AREAS];
4866 static struct new_area new_areas_2[NUM_NEW_AREAS];
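
/* (Editorial sketch of the recording protocol; add_new_area and
 * offset_within_page are illustrative names, not definitions from
 * this excerpt. While recording is switched on, gc_alloc notes each
 * freshly allocated span so that only those spans need
 * re-scavenging.) */
#if 0
/* in gc_alloc, after carving nbytes out of page first_page: */
if (record_new_objects != 0)
    add_new_area(first_page, offset_within_page, nbytes);
#endif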
4868 /* Do one full scan of the new space generation. This is not enough to
4869 * complete the job as new objects may be added to the generation in
4870 * the process which are not scavenged. */
4872 scavenge_newspace_generation_one_scan(int generation)
4877 "/starting one full scan of newspace generation %d\n",
4880 for (i = 0; i < last_free_page; i++) {
4881 if ((page_table[i].allocated == BOXED_PAGE)
4882 && (page_table[i].bytes_used != 0)
4883 && (page_table[i].gen == generation)
4884 && ((page_table[i].write_protected == 0)
4885 /* (This may be redundant as write_protected is now
4886 * cleared before promotion.) */
4887 || (page_table[i].dont_move == 1))) {
4890 /* The scavenge will start at the first_object_offset of page i.
4892 * We need to find the full extent of this contiguous block in case
4893 * objects span pages.
4895 * Now work forward until the end of this contiguous area is
4896 * found. A small area is preferred as there is a better chance
4897 * of its pages being write-protected. */
4898 for (last_page = i; ;last_page++) {
4899 /* Check whether this is the last page in this contiguous
4901 if ((page_table[last_page].bytes_used < 4096)
4902 /* Or it is 4096 and is the last in the block */
4903 || (page_table[last_page+1].allocated != BOXED_PAGE)
4904 || (page_table[last_page+1].bytes_used == 0)
4905 || (page_table[last_page+1].gen != generation)
4906 || (page_table[last_page+1].first_object_offset == 0))
4910 /* Do a limited check for write_protected pages. If all pages
4911 * are write_protected then no need to scavenge. Except if the
4912 * pages are marked dont_move. */
4915 for (j = i; j <= last_page; j++)
4916 if ((page_table[j].write_protected == 0)
4917 || (page_table[j].dont_move != 0)) {
4927 /* Calculate the size. */
4929 size = (page_table[last_page].bytes_used
4930 - page_table[i].first_object_offset)/4;
4932 size = (page_table[last_page].bytes_used
4933 + (last_page-i)*4096
4934 - page_table[i].first_object_offset)/4;
4938 int a1 = bytes_allocated;
4941 "/scavenge(%x,%d)\n",
4943 + page_table[i].first_object_offset,
4946 new_areas_ignore_page = last_page;
4948 scavenge(page_address(i)+page_table[i].first_object_offset,size);
4951 /* Flush the alloc regions updating the tables. */
4952 gc_alloc_update_page_tables(0, &boxed_region);
4953 gc_alloc_update_page_tables(1, &unboxed_region);
4955 if ((all_wp != 0) && (a1 != bytes_allocated)) {
4957 "alloc'ed over %d to %d\n",
4960 "/page: bytes_used=%d first_object_offset=%d dont_move=%d wp=%d wpc=%d\n",
4961 page_table[i].bytes_used,
4962 page_table[i].first_object_offset,
4963 page_table[i].dont_move,
4964 page_table[i].write_protected,
4965 page_table[i].write_protected_cleared));
4977 /* Do a complete scavenge of the newspace generation. */
4979 scavenge_newspace_generation(int generation)
4983 /* the new_areas array currently being written to by gc_alloc */
4984 struct new_area (*current_new_areas)[] = &new_areas_1;
4985 int current_new_areas_index;
4986 int current_new_areas_allocated;
4988 /* the new_areas created by the previous scavenge cycle */
4989 struct new_area (*previous_new_areas)[] = NULL;
4990 int previous_new_areas_index;
4991 int previous_new_areas_allocated;
4993 #define SC_NS_GEN_CK 0
4995 /* Clear the write_protected_cleared flags on all pages. */
4996 for (i = 0; i < NUM_PAGES; i++)
4997 page_table[i].write_protected_cleared = 0;
5000 /* Flush the current regions updating the tables. */
5001 gc_alloc_update_page_tables(0, &boxed_region);
5002 gc_alloc_update_page_tables(1, &unboxed_region);
5004 /* Turn on the recording of new areas by gc_alloc. */
5005 new_areas = current_new_areas;
5006 new_areas_index = 0;
5008 /* Don't need to record new areas that get scavenged anyway during
5009 * scavenge_newspace_generation_one_scan. */
5010 record_new_objects = 1;
5012 /* Start with a full scavenge. */
5013 scavenge_newspace_generation_one_scan(generation);
5015 /* Record all new areas now. */
5016 record_new_objects = 2;
5018 /* Flush the current regions updating the tables. */
5019 gc_alloc_update_page_tables(0, &boxed_region);
5020 gc_alloc_update_page_tables(1, &unboxed_region);
5022 /* Grab new_areas_index. */
5023 current_new_areas_index = new_areas_index;
5026 "The first scan is finished; current_new_areas_index=%d.\n",
5027 current_new_areas_index));*/
5029 while (current_new_areas_index > 0) {
5030 /* Move the current to the previous new areas */
5031 previous_new_areas = current_new_areas;
5032 previous_new_areas_index = current_new_areas_index;
5034 /* Scavenge all the areas in previous new areas. Any new areas
5035 * allocated are saved in current_new_areas. */
5037 /* Allocate an array for current_new_areas; alternating between
5038 * new_areas_1 and 2 */
5039 if (previous_new_areas == &new_areas_1)
5040 current_new_areas = &new_areas_2;
5042 current_new_areas = &new_areas_1;
5044 /* Set up for gc_alloc. */
5045 new_areas = current_new_areas;
5046 new_areas_index = 0;
5048 /* Check whether previous_new_areas had overflowed. */
5049 if (previous_new_areas_index >= NUM_NEW_AREAS) {
5050 /* New areas of objects allocated have been lost so we need to do a
5051 * full scan to be sure! If this becomes a problem try
5052 * increasing NUM_NEW_AREAS. */
5054 SHOW("new_areas overflow, doing full scavenge");
5056 /* Don't need to record new areas that get scavenged anyway
5057 * during scavenge_newspace_generation_one_scan. */
5058 record_new_objects = 1;
5060 scavenge_newspace_generation_one_scan(generation);
5062 /* Record all new areas now. */
5063 record_new_objects = 2;
5065 /* Flush the current regions updating the tables. */
5066 gc_alloc_update_page_tables(0, &boxed_region);
5067 gc_alloc_update_page_tables(1, &unboxed_region);
5069 /* Work through previous_new_areas. */
5070 for (i = 0; i < previous_new_areas_index; i++) {
5071 int page = (*previous_new_areas)[i].page;
5072 int offset = (*previous_new_areas)[i].offset;
5073 int size = (*previous_new_areas)[i].size / 4;
5074 gc_assert((*previous_new_areas)[i].size % 4 == 0);
5076 /* FIXME: All these bare *4 and /4 should be something
5077 * like BYTES_PER_WORD or WBYTES. */
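/* (Editorial sketch of that cleanup, using the name suggested above:
 *     #define BYTES_PER_WORD 4
 *     int size = (*previous_new_areas)[i].size / BYTES_PER_WORD;
 * so the word size is hardwired in exactly one place.) */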
5080 "/S page %d offset %d size %d\n",
5081 page, offset, size*4));*/
5082 scavenge(page_address(page)+offset, size);
5085 /* Flush the current regions updating the tables. */
5086 gc_alloc_update_page_tables(0, &boxed_region);
5087 gc_alloc_update_page_tables(1, &unboxed_region);
5090 current_new_areas_index = new_areas_index;
5093 "The re-scan has finished; current_new_areas_index=%d.\n",
5094 current_new_areas_index));*/
5097 /* Turn off recording of areas allocated by gc_alloc. */
5098 record_new_objects = 0;
5101 /* Check that none of the write_protected pages in this generation
5102 * have been written to. */
5103 for (i = 0; i < NUM_PAGES; i++) {
5104 if ((page_table[i].allocated != FREE_PAGE)
5105 && (page_table[i].bytes_used != 0)
5106 && (page_table[i].gen == generation)
5107 && (page_table[i].write_protected_cleared != 0)
5108 && (page_table[i].dont_move == 0)) {
5109 lose("write protected page %d written to in scavenge_newspace_generation\ngeneration=%d dont_move=%d",
5110 i, generation, page_table[i].dont_move);
5116 /* Un-write-protect all the pages in from_space. This is done at the
5117 * start of a GC else there may be many page faults while scavenging
5118 * the newspace (I've seen this drive the system time to 99%). These pages
5119 * would need to be unprotected anyway before unmapping in
5120 * free_oldspace; not sure what effect this has on paging.. */
5122 unprotect_oldspace(void)
5124 int bytes_freed = 0;
5127 for (i = 0; i < last_free_page; i++) {
5128 if ((page_table[i].allocated != FREE_PAGE)
5129 && (page_table[i].bytes_used != 0)
5130 && (page_table[i].gen == from_space)) {
5131 void *page_start, *addr;
5133 page_start = (void *)page_address(i);
5135 /* Remove any write-protection. We should be able to rely
5136 * on the write-protect flag to avoid redundant calls. */
5137 if (page_table[i].write_protected) {
5138 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5139 page_table[i].write_protected = 0;
5145 /* Work through all the pages and free any in from_space. This
5146 * assumes that all objects have been copied or promoted to an older
5147 * generation. Bytes_allocated and the generation bytes_allocated
5148 * counter are updated. The number of bytes freed is returned. */
5149 extern void i586_bzero(void *addr, int nbytes);
5153 int bytes_freed = 0;
5154 int first_page, last_page;
5159 /* Find a first page for the next region of pages. */
5160 while ((first_page < last_free_page)
5161 && ((page_table[first_page].allocated == FREE_PAGE)
5162 || (page_table[first_page].bytes_used == 0)
5163 || (page_table[first_page].gen != from_space)))
5166 if (first_page >= last_free_page)
5169 /* Find the last page of this region. */
5170 last_page = first_page;
5173 /* Free the page. */
5174 bytes_freed += page_table[last_page].bytes_used;
5175 generations[page_table[last_page].gen].bytes_allocated -=
5176 page_table[last_page].bytes_used;
5177 page_table[last_page].allocated = FREE_PAGE;
5178 page_table[last_page].bytes_used = 0;
5180 /* Remove any write-protection. We should be able to rely
5181 * on the write-protect flag to avoid redundant calls. */
5183 void *page_start = (void *)page_address(last_page);
5185 if (page_table[last_page].write_protected) {
5186 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5187 page_table[last_page].write_protected = 0;
5192 while ((last_page < last_free_page)
5193 && (page_table[last_page].allocated != FREE_PAGE)
5194 && (page_table[last_page].bytes_used != 0)
5195 && (page_table[last_page].gen == from_space));
5197 /* Zero pages from first_page to (last_page-1).
5199 * FIXME: Why not use os_zero(..) function instead of
5200 * hand-coding this again? (Check other gencgc_unmap_zero
5202 if (gencgc_unmap_zero) {
5203 void *page_start, *addr;
5205 page_start = (void *)page_address(first_page);
5207 os_invalidate(page_start, 4096*(last_page-first_page));
5208 addr = os_validate(page_start, 4096*(last_page-first_page));
5209 if (addr == NULL || addr != page_start) {
5210 /* Is this an error condition? I couldn't really tell from
5211 * the old CMU CL code, which fprintf'ed a message with
5212 * an exclamation point at the end. But I've never seen the
5213 * message, so it must at least be unusual..
5215 * (The same condition is also tested for in gc_free_heap.)
5217 * -- WHN 19991129 */
5218 lose("i586_bzero: page moved, 0x%08x ==> 0x%08x",
5225 page_start = (int *)page_address(first_page);
5226 i586_bzero(page_start, 4096*(last_page-first_page));
5229 first_page = last_page;
5231 } while (first_page < last_free_page);
5233 bytes_allocated -= bytes_freed;
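
/* (Editorial sketch of the os_zero(..) alternative suggested in the
 * FIXME above, assuming the usual os_zero(addr, length) interface
 * from the OS layer:) */
#if 0
os_zero((os_vm_address_t)page_address(first_page),
        (os_vm_size_t)(4096*(last_page-first_page)));
#endif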
5237 /* Print some information about a pointer at the given address. */
5239 print_ptr(lispobj *addr)
5241 /* If addr is in the dynamic space then print out the page information. */
5242 int pi1 = find_page_index((void*)addr);
5245 fprintf(stderr," %x: page %d alloc %d gen %d bytes_used %d offset %d dont_move %d\n",
5248 page_table[pi1].allocated,
5249 page_table[pi1].gen,
5250 page_table[pi1].bytes_used,
5251 page_table[pi1].first_object_offset,
5252 page_table[pi1].dont_move);
5253 fprintf(stderr," %x %x %x %x (%x) %x %x %x %x\n",
5265 extern int undefined_tramp;
5268 verify_space(lispobj *start, size_t words)
5270 int is_in_dynamic_space = (find_page_index((void*)start) != -1);
5271 int is_in_readonly_space =
5272 (READ_ONLY_SPACE_START <= (unsigned)start &&
5273 (unsigned)start < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5277 lispobj thing = *(lispobj*)start;
5279 if (Pointerp(thing)) {
5280 int page_index = find_page_index((void*)thing);
5281 int to_readonly_space =
5282 (READ_ONLY_SPACE_START <= thing &&
5283 thing < SymbolValue(READ_ONLY_SPACE_FREE_POINTER));
5284 int to_static_space =
5285 (STATIC_SPACE_START <= thing &&
5286 thing < SymbolValue(STATIC_SPACE_FREE_POINTER));
5288 /* Does it point to the dynamic space? */
5289 if (page_index != -1) {
5290 /* If it's within the dynamic space it should point to a used
5291 * page. XX Could check the offset too. */
5292 if ((page_table[page_index].allocated != FREE_PAGE)
5293 && (page_table[page_index].bytes_used == 0))
5294 lose ("Ptr %x @ %x sees free page.", thing, start);
5295 /* Check that it doesn't point to a forwarding pointer! */
5296 if (*((lispobj *)PTR(thing)) == 0x01) {
5297 lose("Ptr %x @ %x sees forwarding ptr.", thing, start);
5299 /* Check that it's not in the RO space as it would then be a
5300 * pointer from the RO to the dynamic space. */
5301 if (is_in_readonly_space) {
5302 lose("ptr to dynamic space %x from RO space %x",
5305 /* Does it point to a plausible object? This check slows
5306 * it down a lot (so it's commented out).
5308 * FIXME: Add a variable to enable this dynamically. */
5309 /* if (!valid_dynamic_space_pointer((lispobj *)thing)) {
5310 * lose("ptr %x to invalid object %x", thing, start); */
5312 /* Verify that it points to another valid space. */
5313 if (!to_readonly_space && !to_static_space
5314 && (thing != (unsigned)&undefined_tramp)) {
5315 lose("Ptr %x @ %x sees junk.", thing, start);
5319 if (thing & 0x3) { /* Skip fixnums. FIXME: There should be an
5320 * is_fixnum for this. */
5322 switch(TypeOf(*start)) {
5325 case type_SimpleVector:
5328 case type_SimpleArray:
5329 case type_ComplexString:
5330 case type_ComplexBitVector:
5331 case type_ComplexVector:
5332 case type_ComplexArray:
5333 case type_ClosureHeader:
5334 case type_FuncallableInstanceHeader:
5335 case type_ByteCodeFunction:
5336 case type_ByteCodeClosure:
5337 case type_ValueCellHeader:
5338 case type_SymbolHeader:
5340 case type_UnboundMarker:
5341 case type_InstanceHeader:
5346 case type_CodeHeader:
5348 lispobj object = *start;
5350 int nheader_words, ncode_words, nwords;
5352 struct function *fheaderp;
5354 code = (struct code *) start;
5356 /* Check that it's not in the dynamic space.
5357 * FIXME: Isn't it supposed to be OK for code
5358 * objects to be in the dynamic space these days? */
5359 if (is_in_dynamic_space
5360 /* It's ok if it's byte compiled code. The trace
5361 * table offset will be a fixnum if it's x86
5362 * compiled code - check. */
5363 && !(code->trace_table_offset & 0x3)
5364 /* Only when enabled */
5365 && verify_dynamic_code_check) {
5367 "/code object at %x in the dynamic space\n",
5371 ncode_words = fixnum_value(code->code_size);
5372 nheader_words = HeaderValue(object);
5373 nwords = ncode_words + nheader_words;
5374 nwords = CEILING(nwords, 2);
5375 /* Scavenge the boxed section of the code data block */
5376 verify_space(start + 1, nheader_words - 1);
5378 /* Scavenge the boxed section of each function object in
5379 * the code data block. */
5380 fheaderl = code->entry_points;
5381 while (fheaderl != NIL) {
5382 fheaderp = (struct function *) PTR(fheaderl);
5383 gc_assert(TypeOf(fheaderp->header) == type_FunctionHeader);
5384 verify_space(&fheaderp->name, 1);
5385 verify_space(&fheaderp->arglist, 1);
5386 verify_space(&fheaderp->type, 1);
5387 fheaderl = fheaderp->next;
5393 /* unboxed objects */
5395 case type_SingleFloat:
5396 case type_DoubleFloat:
5397 #ifdef type_LongFloat
5398 case type_LongFloat:
5400 #ifdef type_ComplexSingleFloat
5401 case type_ComplexSingleFloat:
5403 #ifdef type_ComplexDoubleFloat
5404 case type_ComplexDoubleFloat:
5406 #ifdef type_ComplexLongFloat
5407 case type_ComplexLongFloat:
5409 case type_SimpleString:
5410 case type_SimpleBitVector:
5411 case type_SimpleArrayUnsignedByte2:
5412 case type_SimpleArrayUnsignedByte4:
5413 case type_SimpleArrayUnsignedByte8:
5414 case type_SimpleArrayUnsignedByte16:
5415 case type_SimpleArrayUnsignedByte32:
5416 #ifdef type_SimpleArraySignedByte8
5417 case type_SimpleArraySignedByte8:
5419 #ifdef type_SimpleArraySignedByte16
5420 case type_SimpleArraySignedByte16:
5422 #ifdef type_SimpleArraySignedByte30
5423 case type_SimpleArraySignedByte30:
5425 #ifdef type_SimpleArraySignedByte32
5426 case type_SimpleArraySignedByte32:
5428 case type_SimpleArraySingleFloat:
5429 case type_SimpleArrayDoubleFloat:
5430 #ifdef type_SimpleArrayLongFloat
5431 case type_SimpleArrayLongFloat:
5433 #ifdef type_SimpleArrayComplexSingleFloat
5434 case type_SimpleArrayComplexSingleFloat:
5436 #ifdef type_SimpleArrayComplexDoubleFloat
5437 case type_SimpleArrayComplexDoubleFloat:
5439 #ifdef type_SimpleArrayComplexLongFloat
5440 case type_SimpleArrayComplexLongFloat:
5443 case type_WeakPointer:
5444 count = (sizetab[TypeOf(*start)])(start);
5460 /* FIXME: It would be nice to make names consistent so that
5461 * foo_size meant size *in* *bytes* instead of size in some
5462 * arbitrary units. (Yes, this caused a bug, how did you guess?:-)
5463 * Some counts of lispobjs are called foo_count; it might be good
5464 * to grep for all foo_size and rename the appropriate ones to foo_count. */
5466 int read_only_space_size =
5467 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER)
5468 - (lispobj*)READ_ONLY_SPACE_START;
5469 int static_space_size =
5470 (lispobj*)SymbolValue(STATIC_SPACE_FREE_POINTER)
5471 - (lispobj*)STATIC_SPACE_START;
5472 int binding_stack_size =
5473 (lispobj*)SymbolValue(BINDING_STACK_POINTER)
5474 - (lispobj*)BINDING_STACK_START;
5476 verify_space((lispobj*)READ_ONLY_SPACE_START, read_only_space_size);
5477 verify_space((lispobj*)STATIC_SPACE_START , static_space_size);
5478 verify_space((lispobj*)BINDING_STACK_START , binding_stack_size);
5482 verify_generation(int generation)
5486 for (i = 0; i < last_free_page; i++) {
5487 if ((page_table[i].allocated != FREE_PAGE)
5488 && (page_table[i].bytes_used != 0)
5489 && (page_table[i].gen == generation)) {
5491 int region_allocation = page_table[i].allocated;
5493 /* This should be the start of a contiguous block */
5494 gc_assert(page_table[i].first_object_offset == 0);
5496 /* Need to find the full extent of this contiguous block in case
5497 objects span pages. */
5499 /* Now work forward until the end of this contiguous area is
5501 for (last_page = i; ;last_page++)
5502 /* Check whether this is the last page in this contiguous
5504 if ((page_table[last_page].bytes_used < 4096)
5505 /* Or it is 4096 and is the last in the block */
5506 || (page_table[last_page+1].allocated != region_allocation)
5507 || (page_table[last_page+1].bytes_used == 0)
5508 || (page_table[last_page+1].gen != generation)
5509 || (page_table[last_page+1].first_object_offset == 0))
5512 verify_space(page_address(i), (page_table[last_page].bytes_used
5513 + (last_page-i)*4096)/4);
5519 /* Check that all the free space is zero filled. */
5521 verify_zero_fill(void)
5525 for (page = 0; page < last_free_page; page++) {
5526 if (page_table[page].allocated == FREE_PAGE) {
5527 /* The whole page should be zero filled. */
5528 int *start_addr = (int *)page_address(page);
5531 for (i = 0; i < size; i++) {
5532 if (start_addr[i] != 0) {
5533 lose("free page not zero at %x", start_addr + i);
5537 int free_bytes = 4096 - page_table[page].bytes_used;
5538 if (free_bytes > 0) {
5539 int *start_addr = (int *)((unsigned)page_address(page)
5540 + page_table[page].bytes_used);
5541 int size = free_bytes / 4;
5543 for (i = 0; i < size; i++) {
5544 if (start_addr[i] != 0) {
5545 lose("free region not zero at %x", start_addr + i);
5553 /* External entry point for verify_zero_fill */
5555 gencgc_verify_zero_fill(void)
5557 /* Flush the alloc regions updating the tables. */
5558 boxed_region.free_pointer = current_region_free_pointer;
5559 gc_alloc_update_page_tables(0, &boxed_region);
5560 gc_alloc_update_page_tables(1, &unboxed_region);
5561 SHOW("verifying zero fill");
5563 current_region_free_pointer = boxed_region.free_pointer;
5564 current_region_end_addr = boxed_region.end_addr;
5568 verify_dynamic_space(void)
5572 for (i = 0; i < NUM_GENERATIONS; i++)
5573 verify_generation(i);
5575 if (gencgc_enable_verify_zero_fill)
5579 /* Write-protect all the dynamic boxed pages in the given generation. */
5581 write_protect_generation_pages(int generation)
5585 gc_assert(generation < NUM_GENERATIONS);
5587 for (i = 0; i < last_free_page; i++)
5588 if ((page_table[i].allocated == BOXED_PAGE)
5589 && (page_table[i].bytes_used != 0)
5590 && (page_table[i].gen == generation)) {
5593 page_start = (void *)page_address(i);
5595 os_protect(page_start,
5597 OS_VM_PROT_READ | OS_VM_PROT_EXECUTE);
5599 /* Note the page as protected in the page tables. */
5600 page_table[i].write_protected = 1;
5603 if (gencgc_verbose > 1) {
5605 "/write protected %d of %d pages in generation %d\n",
5606 count_write_protect_generation_pages(generation),
5607 count_generation_pages(generation),
5612 /* Garbage collect a generation. If raise is 0 the remains of the
5613 * generation are not raised to the next generation. */
5615 garbage_collect_generation(int generation, int raise)
5617 unsigned long allocated = bytes_allocated;
5618 unsigned long bytes_freed;
5620 unsigned long read_only_space_size, static_space_size;
5622 gc_assert(generation <= (NUM_GENERATIONS-1));
5624 /* The oldest generation can't be raised. */
5625 gc_assert((generation != (NUM_GENERATIONS-1)) || (raise == 0));
5627 /* Initialize the weak pointer list. */
5628 weak_pointers = NULL;
5630 /* When a generation is not being raised it is transported to a
5631 * temporary generation (NUM_GENERATIONS), and lowered when
5632 * done. Set up this new generation. There should be no pages
5633 * allocated to it yet. */
5635 gc_assert(generations[NUM_GENERATIONS].bytes_allocated == 0);
5637 /* Set the global src and dest. generations */
5638 from_space = generation;
5640 new_space = generation+1;
5642 new_space = NUM_GENERATIONS;
5644 /* Change to a new space for allocation, resetting the alloc_start_page */
5645 gc_alloc_generation = new_space;
5646 generations[new_space].alloc_start_page = 0;
5647 generations[new_space].alloc_unboxed_start_page = 0;
5648 generations[new_space].alloc_large_start_page = 0;
5649 generations[new_space].alloc_large_unboxed_start_page = 0;
5651 /* Before any pointers are preserved, the dont_move flags on the
5652 * pages need to be cleared. */
5653 for (i = 0; i < last_free_page; i++)
5654 page_table[i].dont_move = 0;
5656 /* Un-write-protect the old-space pages. This is essential for the
5657 * promoted pages as they may contain pointers into the old-space
5658 * which need to be scavenged. It also helps avoid unnecessary page
5659 * faults as forwarding pointers are written into them. They need to
5660 * be un-protected anyway before unmapping later. */
5661 unprotect_oldspace();
5663 /* Scavenge the stack's conservative roots. */
5666 for (ptr = (lispobj **)CONTROL_STACK_END - 1;
5667 ptr > (lispobj **)&raise;
5669 preserve_pointer(*ptr);
5672 #ifdef CONTROL_STACKS
5673 scavenge_thread_stacks();
5676 if (gencgc_verbose > 1) {
5677 int num_dont_move_pages = count_dont_move_pages();
5679 "/non-movable pages due to conservative pointers = %d (%d bytes)\n",
5680 num_dont_move_pages,
5681 /* FIXME: 4096 should be symbolic constant here and
5682 * prob'ly elsewhere too. */
5683 num_dont_move_pages * 4096));
5686 /* Scavenge all the rest of the roots. */
5688 /* Scavenge the Lisp functions of the interrupt handlers, taking
5689 * care to avoid SIG_DFL, SIG_IGN. */
5690 for (i = 0; i < NSIG; i++) {
5691 union interrupt_handler handler = interrupt_handlers[i];
5692 if (!ARE_SAME_HANDLER(handler.c, SIG_IGN) &&
5693 !ARE_SAME_HANDLER(handler.c, SIG_DFL)) {
5694 scavenge((lispobj *)(interrupt_handlers + i), 1);
5698 /* Scavenge the binding stack. */
5699 scavenge(BINDING_STACK_START,
5700 (lispobj *)SymbolValue(BINDING_STACK_POINTER) -
5701 (lispobj *)BINDING_STACK_START);
5703 if (SymbolValue(SCAVENGE_READ_ONLY_SPACE) != NIL) {
5704 read_only_space_size =
5705 (lispobj*)SymbolValue(READ_ONLY_SPACE_FREE_POINTER) -
5706 (lispobj*)READ_ONLY_SPACE_START;
5708 "/scavenge read only space: %d bytes\n",
5709 read_only_space_size * sizeof(lispobj)));
5710 scavenge(READ_ONLY_SPACE_START, read_only_space_size);
5714 (lispobj *)SymbolValue(STATIC_SPACE_FREE_POINTER) -
5715 (lispobj *)STATIC_SPACE_START;
5716 if (gencgc_verbose > 1)
5718 "/scavenge static space: %d bytes\n",
5719 static_space_size * sizeof(lispobj)));
5720 scavenge(STATIC_SPACE_START, static_space_size);
5722 /* All generations but the generation being GCed need to be
5723 * scavenged. The new_space generation needs special handling as
5724 * objects may be moved in - it is handled separately below. */
5725 for (i = 0; i < NUM_GENERATIONS; i++)
5726 if ((i != generation) && (i != new_space))
5727 scavenge_generation(i);
5729 /* Finally scavenge the new_space generation. Keep going until no
5730 * more objects are moved into the new generation */
5731 scavenge_newspace_generation(new_space);
5733 #define RESCAN_CHECK 0
5735 /* As a check re-scavenge the newspace once; no new objects should
5738 int old_bytes_allocated = bytes_allocated;
5739 int bytes_allocated;
5741 /* Start with a full scavenge. */
5742 scavenge_newspace_generation_one_scan(new_space);
5744 /* Flush the current regions, updating the tables. */
5745 gc_alloc_update_page_tables(0, &boxed_region);
5746 gc_alloc_update_page_tables(1, &unboxed_region);
5748 bytes_allocated = bytes_allocated - old_bytes_allocated;
5750 if (bytes_allocated != 0) {
5751 lose("Rescan of new_space allocated %d more bytes.",
5757 scan_weak_pointers();
5759 /* Flush the current regions, updating the tables. */
5760 gc_alloc_update_page_tables(0, &boxed_region);
5761 gc_alloc_update_page_tables(1, &unboxed_region);
5763 /* Free the pages in oldspace, but not those marked dont_move. */
5764 bytes_freed = free_oldspace();
5766 /* If the GC is not raising the age then lower the generation back
5767 * to its normal generation number */
5769 for (i = 0; i < last_free_page; i++)
5770 if ((page_table[i].bytes_used != 0)
5771 && (page_table[i].gen == NUM_GENERATIONS))
5772 page_table[i].gen = generation;
5773 gc_assert(generations[generation].bytes_allocated == 0);
5774 generations[generation].bytes_allocated =
5775 generations[NUM_GENERATIONS].bytes_allocated;
5776 generations[NUM_GENERATIONS].bytes_allocated = 0;
5779 /* Reset the alloc_start_page for generation. */
5780 generations[generation].alloc_start_page = 0;
5781 generations[generation].alloc_unboxed_start_page = 0;
5782 generations[generation].alloc_large_start_page = 0;
5783 generations[generation].alloc_large_unboxed_start_page = 0;
5785 if (generation >= verify_gens) {
5789 verify_dynamic_space();
5792 /* Set the new gc trigger for the GCed generation. */
5793 generations[generation].gc_trigger =
5794 generations[generation].bytes_allocated
5795 + generations[generation].bytes_consed_between_gc;
5798 generations[generation].num_gc = 0;
5800 ++generations[generation].num_gc;
5803 /* Update last_free_page then ALLOCATION_POINTER */
5805 update_x86_dynamic_space_free_pointer(void)
5810 for (i = 0; i < NUM_PAGES; i++)
5811 if ((page_table[i].allocated != FREE_PAGE)
5812 && (page_table[i].bytes_used != 0))
5815 last_free_page = last_page+1;
5817 SetSymbolValue(ALLOCATION_POINTER,
5818 (lispobj)(((char *)heap_base) + last_free_page*4096));
5821 /* GC all generations below last_gen, raising their objects to the
5822 * next generation until all generations below last_gen are empty.
5823 * Then, if last_gen is due for a GC, GC it. In the special case
5824 * that last_gen==NUM_GENERATIONS, the last generation is always
5825 * GC'ed. The valid range for last_gen is: 0,1,...,NUM_GENERATIONS.
5827 * The oldest generation to be GCed will always be
5828 * gencgc_oldest_gen_to_gc, partly ignoring last_gen if necessary. */
5830 collect_garbage(unsigned last_gen)
5837 boxed_region.free_pointer = current_region_free_pointer;
5839 FSHOW((stderr, "/entering collect_garbage(%d)\n", last_gen));
5841 if (last_gen > NUM_GENERATIONS) {
5843 "/collect_garbage: last_gen = %d, doing a level 0 GC\n",
5848 /* Flush the alloc regions updating the tables. */
5849 gc_alloc_update_page_tables(0, &boxed_region);
5850 gc_alloc_update_page_tables(1, &unboxed_region);
5852 /* Verify the new objects created by Lisp code. */
5853 if (pre_verify_gen_0) {
5854 FSHOW((stderr, "pre-checking generation 0\n"));
5855 verify_generation(0);
5858 if (gencgc_verbose > 1)
5859 print_generation_stats(0);
5862 /* Collect the generation. */
5864 if (gen >= gencgc_oldest_gen_to_gc) {
5865 /* Never raise the oldest generation. */
5870 || (generations[gen].num_gc >= generations[gen].trigger_age);
5873 if (gencgc_verbose > 1) {
5875 "Starting GC of generation %d with raise=%d alloc=%d trig=%d GCs=%d\n",
5878 generations[gen].bytes_allocated,
5879 generations[gen].gc_trigger,
5880 generations[gen].num_gc));
5883 /* If an older generation is being filled, update its memory age. */
5886 generations[gen+1].cum_sum_bytes_allocated +=
5887 generations[gen+1].bytes_allocated;
5890 garbage_collect_generation(gen, raise);
5892 /* Reset the memory age cum_sum. */
5893 generations[gen].cum_sum_bytes_allocated = 0;
5895 if (gencgc_verbose > 1) {
5896 FSHOW((stderr, "GC of generation %d finished:\n", gen));
5897 print_generation_stats(0);
5901 } while ((gen <= gencgc_oldest_gen_to_gc)
5902 && ((gen < last_gen)
5903 || ((gen <= gencgc_oldest_gen_to_gc)
5905 && (generations[gen].bytes_allocated
5906 > generations[gen].gc_trigger)
5907 && (gen_av_mem_age(gen)
5908 > generations[gen].min_av_mem_age))));
5910 /* Now if gen-1 was raised all generations before gen are empty.
5911 * If it wasn't raised then all generations before gen-1 are empty.
5913 * Now objects within this gen's pages cannot point to younger
5914 * generations unless they are written to. This can be exploited
5915 * by write-protecting the pages of gen; then when younger
5916 * generations are GCed only the pages which have been written
5921 gen_to_wp = gen - 1;
5923 /* There's not much point in WPing pages in generation 0 as it is
5924 * never scavenged (except promoted pages). */
5925 if ((gen_to_wp > 0) && enable_page_protection) {
5926 /* Check that they are all empty. */
5927 for (i = 0; i < gen_to_wp; i++) {
5928 if (generations[i].bytes_allocated)
5929 lose("trying to write-protect gen. %d when gen. %d nonempty",
5932 write_protect_generation_pages(gen_to_wp);
5935 /* Set gc_alloc back to generation 0. The current regions should
5936 * be flushed after the above GCs */
5937 gc_assert((boxed_region.free_pointer - boxed_region.start_addr) == 0);
5938 gc_alloc_generation = 0;
5940 update_x86_dynamic_space_free_pointer();
5942 /* This is now done by Lisp SCRUB-CONTROL-STACK in Lisp SUB-GC, so we
5943 * needn't do it here: */
5946 current_region_free_pointer = boxed_region.free_pointer;
5947 current_region_end_addr = boxed_region.end_addr;
5949 SHOW("returning from collect_garbage");
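
/* (Editorial usage sketch: per the comment above collect_garbage, a
 * nursery-only collection and a forced full collection look like
 * this.) */
#if 0
collect_garbage(0);               /* GC generation 0 only */
collect_garbage(NUM_GENERATIONS); /* always GC the last generation too */
#endif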
5952 /* This is called by Lisp PURIFY when it is finished. All live objects
5953 * will have been moved to the RO and Static heaps. The dynamic space
5954 * will need a full re-initialization. We don't bother having Lisp
5955 * PURIFY flush the current gc_alloc region, as the page_tables are
5956 * re-initialized, and every page is zeroed to be sure. */
5962 if (gencgc_verbose > 1)
5963 SHOW("entering gc_free_heap");
5965 for (page = 0; page < NUM_PAGES; page++) {
5966 /* Skip free pages which should already be zero filled. */
5967 if (page_table[page].allocated != FREE_PAGE) {
5968 void *page_start, *addr;
5970 /* Mark the page free. The other slots are assumed invalid
5971 * when it is a FREE_PAGE and bytes_used is 0 and it
5972 * should not be write-protected -- except that the
5973 * generation is used for the current region but it sets
5975 page_table[page].allocated = FREE_PAGE;
5976 page_table[page].bytes_used = 0;
5978 /* Zero the page. */
5979 page_start = (void *)page_address(page);
5981 /* First, remove any write-protection. */
5982 os_protect(page_start, 4096, OS_VM_PROT_ALL);
5983 page_table[page].write_protected = 0;
5985 os_invalidate(page_start,4096);
5986 addr = os_validate(page_start,4096);
5987 if (addr == NULL || addr != page_start) {
5988 lose("gc_free_heap: page moved, 0x%08x ==> 0x%08x",
5992 } else if (gencgc_zero_check_during_free_heap) {
5995 /* Double-check that the page is zero filled. */
5996 gc_assert(page_table[page].allocated == FREE_PAGE);
5997 gc_assert(page_table[page].bytes_used == 0);
5999 page_start = (int *)page_address(page);
6001 for (i=0; i<1024; i++) {
6002 if (page_start[i] != 0) {
6003 lose("free region not zero at %x", page_start + i);
6009 bytes_allocated = 0;
6011 /* Initialize the generations. */
6012 for (page = 0; page < NUM_GENERATIONS; page++) {
6013 generations[page].alloc_start_page = 0;
6014 generations[page].alloc_unboxed_start_page = 0;
6015 generations[page].alloc_large_start_page = 0;
6016 generations[page].alloc_large_unboxed_start_page = 0;
6017 generations[page].bytes_allocated = 0;
6018 generations[page].gc_trigger = 2000000;
6019 generations[page].num_gc = 0;
6020 generations[page].cum_sum_bytes_allocated = 0;
6023 if (gencgc_verbose > 1)
6024 print_generation_stats(0);
6026 /* Initialize gc_alloc */
6027 gc_alloc_generation = 0;
6028 boxed_region.first_page = 0;
6029 boxed_region.last_page = -1;
6030 boxed_region.start_addr = page_address(0);
6031 boxed_region.free_pointer = page_address(0);
6032 boxed_region.end_addr = page_address(0);
6034 unboxed_region.first_page = 0;
6035 unboxed_region.last_page = -1;
6036 unboxed_region.start_addr = page_address(0);
6037 unboxed_region.free_pointer = page_address(0);
6038 unboxed_region.end_addr = page_address(0);
6040 #if 0 /* Lisp PURIFY is currently running on the C stack so don't do this. */
6045 SetSymbolValue(ALLOCATION_POINTER, (lispobj)((char *)heap_base));
6047 current_region_free_pointer = boxed_region.free_pointer;
6048 current_region_end_addr = boxed_region.end_addr;
6050 if (verify_after_free_heap) {
6051 /* Check whether purify has left any bad pointers. */
6053 SHOW("checking after free_heap\n");
6065 heap_base = (void*)DYNAMIC_SPACE_START;
6067 /* Initialize each page structure. */
6068 for (i = 0; i < NUM_PAGES; i++) {
6069 /* Initialize all pages as free. */
6070 page_table[i].allocated = FREE_PAGE;
6071 page_table[i].bytes_used = 0;
6073 /* Pages are not write-protected at startup. */
6074 page_table[i].write_protected = 0;
6077 bytes_allocated = 0;
6079 /* Initialize the generations. */
6080 for (i = 0; i < NUM_GENERATIONS; i++) {
6081 generations[i].alloc_start_page = 0;
6082 generations[i].alloc_unboxed_start_page = 0;
6083 generations[i].alloc_large_start_page = 0;
6084 generations[i].alloc_large_unboxed_start_page = 0;
6085 generations[i].bytes_allocated = 0;
6086 generations[i].gc_trigger = 2000000;
6087 generations[i].num_gc = 0;
6088 generations[i].cum_sum_bytes_allocated = 0;
6089 /* the tune-able parameters */
6090 generations[i].bytes_consed_between_gc = 2000000;
6091 generations[i].trigger_age = 1;
6092 generations[i].min_av_mem_age = 0.75;
6095 /* Initialize gc_alloc. */
6096 gc_alloc_generation = 0;
6097 boxed_region.first_page = 0;
6098 boxed_region.last_page = -1;
6099 boxed_region.start_addr = page_address(0);
6100 boxed_region.free_pointer = page_address(0);
6101 boxed_region.end_addr = page_address(0);
6103 unboxed_region.first_page = 0;
6104 unboxed_region.last_page = -1;
6105 unboxed_region.start_addr = page_address(0);
6106 unboxed_region.free_pointer = page_address(0);
6107 unboxed_region.end_addr = page_address(0);
6111 current_region_free_pointer = boxed_region.free_pointer;
6112 current_region_end_addr = boxed_region.end_addr;
6115 /* Pick up the dynamic space from after a core load.
6117 * The ALLOCATION_POINTER points to the end of the dynamic space.
6119 * XX A scan is needed to identify the closest first objects for pages. */
6121 gencgc_pickup_dynamic(void)
6124 int addr = DYNAMIC_SPACE_START;
6125 int alloc_ptr = SymbolValue(ALLOCATION_POINTER);
6127 /* Initialize the first region. */
6129 page_table[page].allocated = BOXED_PAGE;
6130 page_table[page].gen = 0;
6131 page_table[page].bytes_used = 4096;
6132 page_table[page].large_object = 0;
6133 page_table[page].first_object_offset =
6134 (void *)DYNAMIC_SPACE_START - page_address(page);
6137 } while (addr < alloc_ptr);
6139 generations[0].bytes_allocated = 4096*page;
6140 bytes_allocated = 4096*page;
6142 current_region_free_pointer = boxed_region.free_pointer;
6143 current_region_end_addr = boxed_region.end_addr;
6146 /* a counter for how deep we are in alloc(..) calls */
6147 int alloc_entered = 0;
6149 /* alloc(..) is the external interface for memory allocation. It
6150 * allocates to generation 0. It is not called from within the garbage
6151 * collector as it is only external uses that need the check for heap
6152 * size (GC trigger) and to disable the interrupts (interrupts are
6153 * always disabled during a GC).
6155 * The vops that call alloc(..) assume that the returned space is zero-filled.
6156 * (E.g. the most significant word of a 2-word bignum in MOVE-FROM-UNSIGNED.)
6158 * The check for a GC trigger is only performed when the current
6159 * region is full, so in most cases it's not needed. Further MAYBE-GC
6160 * is only called once because Lisp will remember "need to collect
6161 * garbage" and get around to it when it can. */
6165 /* Check for alignment allocation problems. */
6166 gc_assert((((unsigned)current_region_free_pointer & 0x7) == 0)
6167 && ((nbytes & 0x7) == 0));
6169 if (SymbolValue(PSEUDO_ATOMIC_ATOMIC)) {/* if already in a pseudo atomic */
6171 void *new_free_pointer;
6174 if (alloc_entered) {
6175 SHOW("alloc re-entered in already-pseudo-atomic case");
6179 /* Check whether there is room in the current region. */
6180 new_free_pointer = current_region_free_pointer + nbytes;
6182 /* FIXME: Shouldn't we be doing some sort of lock here, to
6183 * keep from getting screwed if an interrupt service routine
6184 * allocates memory between the time we calculate new_free_pointer
6185 * and the time we write it back to current_region_free_pointer?
6186 * Perhaps I just don't understand pseudo-atomics..
6188 * Perhaps I don't. It looks as though what happens is if we
6189 * were interrupted any time during the pseudo-atomic
6190 * interval (which includes now) we discard the allocated
6191 * memory and try again. So, at least we don't return
6192 * a memory area that was allocated out from underneath us
6193 * by code in an ISR.
6194 * Still, that doesn't seem to prevent
6195 * current_region_free_pointer from getting corrupted:
6196 * We read current_region_free_pointer.
6197 * They read current_region_free_pointer.
6198 * They write current_region_free_pointer.
6199 * We write current_region_free_pointer, scribbling over
6200 * whatever they wrote. */
6202 if (new_free_pointer <= boxed_region.end_addr) {
6203 /* If so then allocate from the current region. */
6204 void *new_obj = current_region_free_pointer;
6205 current_region_free_pointer = new_free_pointer;
6207 return((void *)new_obj);
6210 if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
6211 /* Double the trigger. */
6212 auto_gc_trigger *= 2;
6214 /* Exit the pseudo-atomic. */
6215 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
6216 if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
6217 /* Handle any interrupts that occurred during
6219 do_pending_interrupt();
6221 funcall0(SymbolFunction(MAYBE_GC));
6222 /* Re-enter the pseudo-atomic. */
6223 SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0));
6224 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
6227 /* Call gc_alloc. */
6228 boxed_region.free_pointer = current_region_free_pointer;
6230 void *new_obj = gc_alloc(nbytes);
6231 current_region_free_pointer = boxed_region.free_pointer;
6232 current_region_end_addr = boxed_region.end_addr;
6238 void *new_free_pointer;
6241 /* At least wrap this allocation in a pseudo atomic to prevent
6242 * gc_alloc from being re-entered. */
6243 SetSymbolValue(PSEUDO_ATOMIC_INTERRUPTED, make_fixnum(0));
6244 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(1));
6247 SHOW("alloc re-entered in not-already-pseudo-atomic case");
6250 /* Check whether there is room in the current region. */
6251 new_free_pointer = current_region_free_pointer + nbytes;
6253 if (new_free_pointer <= boxed_region.end_addr) {
6254 /* If so then allocate from the current region. */
6255 void *new_obj = current_region_free_pointer;
6256 current_region_free_pointer = new_free_pointer;
6258 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
6259 if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED)) {
6260 /* Handle any interrupts that occurred during
6262 do_pending_interrupt();
6266 return((void *)new_obj);
6269 /* KLUDGE: There's lots of code around here shared with the
6270 * other branch. Is there some way to factor out the
6271 * duplicate code? -- WHN 19991129 */
6272 if (auto_gc_trigger && bytes_allocated > auto_gc_trigger) {
6273 /* Double the trigger. */
6274 auto_gc_trigger *= 2;
6276 /* Exit the pseudo atomic. */
6277 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
6278 if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
6279 /* Handle any interrupts that occurred during
6281 do_pending_interrupt();
6283 funcall0(SymbolFunction(MAYBE_GC));
6287 /* Else call gc_alloc. */
6288 boxed_region.free_pointer = current_region_free_pointer;
6289 result = gc_alloc(nbytes);
6290 current_region_free_pointer = boxed_region.free_pointer;
6291 current_region_end_addr = boxed_region.end_addr;
6294 SetSymbolValue(PSEUDO_ATOMIC_ATOMIC, make_fixnum(0));
6295 if (SymbolValue(PSEUDO_ATOMIC_INTERRUPTED) != 0) {
6296 /* Handle any interrupts that occurred during
6298 do_pending_interrupt();
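
/* (Editorial usage sketch: per the alignment gc_assert at the top of
 * alloc, callers must pass a size that is a multiple of 8 bytes,
 * e.g. two words for a cons cell.) */
#if 0
void *two_words = alloc(8); /* dual-word aligned, zero-filled */
#endif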
6307 * noise to manipulate the gc trigger stuff
6311 set_auto_gc_trigger(os_vm_size_t dynamic_usage)
6313 auto_gc_trigger += dynamic_usage;
6317 clear_auto_gc_trigger(void)
6319 auto_gc_trigger = 0;
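
/* (Editorial usage sketch: the trigger is armed some amount above the
 * current allocation level, and the doubling logic in alloc keeps
 * raising it until MAYBE-GC runs; headroom_bytes is an illustrative
 * name, not defined in this file.) */
#if 0
set_auto_gc_trigger(bytes_allocated + headroom_bytes);
clear_auto_gc_trigger(); /* disable automatic triggering */
#endif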
6322 /* Find the code object for the given pc, or return NULL on failure. */
6324 component_ptr_from_pc(lispobj *pc)
6326 lispobj *object = NULL;
6328 if ((object = search_read_only_space(pc)) != NULL)
6330 else if ((object = search_static_space(pc)) != NULL)
6333 object = search_dynamic_space(pc);
6335 if (object) /* if we found something */
6336 if (TypeOf(*object) == type_CodeHeader) /* if it's a code object */
6343 * shared support for the OS-dependent signal handlers which
6344 * catch GENCGC-related write-protect violations
6347 /* Depending on which OS we're running under, different signals might
6348 * be raised for a violation of write protection in the heap. This
6349 * function factors out the common generational GC magic which needs
6350 * to be invoked in this case, and should be called from whatever signal
6351 * handler is appropriate for the OS we're running under.
6353 * Return true if this signal is a normal generational GC thing that
6354 * we were able to handle, or false if it was abnormal and control
6355 * should fall through to the general SIGSEGV/SIGBUS/whatever logic. */
6357 gencgc_handle_wp_violation(void* fault_addr)
6359 int page_index = find_page_index(fault_addr);
6361 #if defined QSHOW_SIGNALS
6362 FSHOW((stderr, "heap WP violation? fault_addr=%x, page_index=%d\n",
6363 fault_addr, page_index));
6366 /* Check whether the fault is within the dynamic space. */
6367 if (page_index == (-1)) {
6369 /* not within the dynamic space -- not our responsibility */
6374 /* The only acceptable reason for a signal like this from the
6375 * heap is that the generational GC write-protected the page. */
6376 if (page_table[page_index].write_protected != 1) {
6377 lose("access failure in heap page not marked as write-protected");
6380 /* Unprotect the page. */
6381 os_protect(page_address(page_index), 4096, OS_VM_PROT_ALL);
6382 page_table[page_index].write_protected = 0;
6383 page_table[page_index].write_protected_cleared = 1;
6385 /* Don't worry, we can handle it. */
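
/* (Editorial sketch of an OS-specific caller, assuming a POSIX
 * SA_SIGINFO-style handler; the handler name is illustrative.) */
#if 0
#include <signal.h>
static void
sigsegv_handler(int signal, siginfo_t *info, void *context)
{
    if (gencgc_handle_wp_violation(info->si_addr))
        return; /* a normal GC write-protect fault: handled */
    /* otherwise fall through to the general SIGSEGV logic */
}
#endif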