lowtag-mask))
(new-pointer (+ *static-space-free-pointer* nwords))
(new-free (* new-pointer n-word-bytes)))
+ ;; FIXME: don't signal while in WITHOUT-GCING; the handler
+ ;; risks deadlock with SIG_STOP_FOR_GC.
(unless (> static-space-end new-free)
(error 'simple-storage-condition
:format-control "Not enough memory left in static space to ~
;;; For GENCGC all generations < GEN will be GC'ed.
-(defvar *already-in-gc*
- (sb!thread:make-mutex :name "GC lock") "ID of thread running SUB-GC")
+(defvar *already-in-gc* (sb!thread:make-mutex :name "GC lock"))
;;; A unique GC id. This is supplied for code that needs to detect
;;; whether a GC has happened since some earlier point in time. For
(thread-yield)
(return-from get-spinlock t))))
(if (and (not *interrupts-enabled*) *allow-with-interrupts*)
- ;; If interrupts are enabled, but we are allowed to enabled them,
- ;; check for pending interrupts every once in a while.
+ ;; If interrupts are disabled, but we are allowed to
+ ;; enable them, check for pending interrupts every once
+ ;; in a while.
(loop
(loop repeat 128 do (cas)) ; 128 is arbitrary here
(sb!unix::%check-interrupts))
;; of Allegro's *cl-default-special-bindings*, as that is at
;; least accessible to users to secure their own libraries.
;; --njf, 2006-07-15
+ ;;
+ ;; As it is, this lambda must not cons until we are ready
+ ;; to run GC. Be very careful.
(let* ((*current-thread* thread)
(*restart-clusters* nil)
(*handler-clusters* (sb!kernel::initial-handler-clusters))
(setf (values e-sec e-msec) (system-real-time-values)
c-sec 0
c-msec 0))
- ;; If two threads call this at the same time, we're still safe, I believe,
- ;; as long as NOW is updated before either of C-MSEC or C-SEC. Same applies
- ;; to interrupts. --NS
+ ;; If two threads call this at the same time, we're still safe, I
+ ;; believe, as long as NOW is updated before either of C-MSEC or
+ ;; C-SEC. Same applies to interrupts. --NS
+ ;;
+ ;; I believe this is almost correct with x86/x86-64 cache
+ ;; coherency, but if the new values of C-SEC and C-MSEC can become
+ ;; visible to another CPU without NOW doing the same, then it's
+ ;; unsafe. It's `almost' correct on x86 because writes by other
+ ;; processors may become visible in any order provided transitivity
+ ;; holds. With at least three CPUs, C-MSEC and C-SEC may be from
+ ;; different threads and an incorrect value may be returned.
+ ;; Considering that this failure is not detectable by the caller -
+ ;; it looks like time passes a bit slowly - and that it should be
+ ;; an extremely rare occurrence, I'm inclined to leave it as it is.
+ ;; --MG
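+ ;;
+ ;; To make the three-CPU case concrete (a hypothetical
+ ;; interleaving, not an observed failure): CPU A recomputes and
+ ;; stores NOW, C-MSEC and C-SEC; CPU B does the same a little
+ ;; later; if CPU C then sees A's C-SEC and B's C-MSEC, and they
+ ;; happen to equal the SEC and MSEC it just read, it skips the
+ ;; recomputation below and returns whichever NOW is visible to
+ ;; it, which may be the older one.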
(defun get-internal-real-time ()
(multiple-value-bind (sec msec) (system-real-time-values)
(unless (and (= msec c-msec) (= sec c-sec))
*universal-type*))))
(recurse array-type)))))
-;;; Like CMU CL, we use HEAPSORT. However, other than that, this code
-;;; isn't really related to the CMU CL code, since instead of trying
-;;; to generalize the CMU CL code to allow START and END values, this
-;;; code has been written from scratch following Chapter 7 of
-;;; _Introduction to Algorithms_ by Corman, Rivest, and Shamir.
(define-source-transform sb!impl::sort-vector (vector start end predicate key)
;; Like CMU CL, we use HEAPSORT. However, other than that, this code
;; isn't really related to the CMU CL code, since instead of trying
#ifdef LISP_FEATURE_C_STACK_IS_CONTROL_STACK
lisp_memory_fault_error(context, fault_addr);
#else
+ /* FIXME: maybe_gc() never returns 0 */
if (!maybe_gc(context)) {
interrupt_handle_now(signal, siginfo, context);
}
#include <genesis/simple-fun.h>
/* disabling gc assertions made no discernable difference to GC speed,
- * last I tried it - dan 2003.12.21 */
+ * last I tried it - dan 2003.12.21
+ *
+ * And it's unsafe to do so while things like gc_assert(0 ==
+ * thread_mutex_lock(&allocation_lock)) exist. - MG 2009-01-13
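+ * (If gc_assert expanded to nothing, a thread_mutex_lock call
+ * wrapped in it would be compiled away along with the check, and
+ * the lock would never be taken.)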
+ */
#if 1
# define gc_assert(ex) \
do { \
void
interrupt_handle_pending(os_context_t *context)
{
- /* There are three ways we can get here. First, if an interrupt
+ /* There are four ways we can get here. First, if an interrupt
* occurs within pseudo-atomic, it will be deferred, and we'll
- * trap to here at the end of the pseudo-atomic block. Second, if
+ * trap to here at the end of the pseudo-atomic block. Second, if
* the GC (in alloc()) decides that a GC is required, it will set
- * *GC-PENDING* and pseudo-atomic-interrupted, and alloc() is
- * always called from within pseudo-atomic, and thus we end up
- * here again. Third, when calling GC-ON or at the end of a
- * WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to here if
- * there is a pending GC. */
+ * *GC-PENDING* and pseudo-atomic-interrupted if not *GC-INHIBIT*,
+ * and alloc() is always called from within pseudo-atomic, and
+ * thus we end up here again. Third, when calling GC-ON or at the
+ * end of a WITHOUT-GCING, MAYBE-HANDLE-PENDING-GC will trap to
+ * here if there is a pending GC. Fourth, at the end of
+ * WITHOUT-INTERRUPTS (bar complications with nesting). */
/* Win32 only needs to handle the GC cases (for now?) */
;;; checkins which aren't released. (And occasionally for internal
;;; versions, especially for internal versions off the main CVS
;;; branch, it gets hairier, e.g. "0.pre7.14.flaky4.13".)
-"1.0.25.13"
+"1.0.25.14"