X-Git-Url: http://repo.macrolet.net/gitweb/?a=blobdiff_plain;f=src%2Fcode%2Fcross-float.lisp;h=397e12760d3da49d918d9e667f005836ada83ba2;hb=c8af15e61b030c8d4b0e950bc9b7618530044618;hp=7023302708ecec14befaf88ebc71dd8b3777f5fe;hpb=a530bbe337109d898d5b4a001fc8f1afa3b5dc39;p=sbcl.git

diff --git a/src/code/cross-float.lisp b/src/code/cross-float.lisp
index 7023302..397e127 100644
--- a/src/code/cross-float.lisp
+++ b/src/code/cross-float.lisp
@@ -13,12 +13,9 @@
 
 (in-package "SB!IMPL")
 
-(file-comment
-  "$Header$")
-
-;;; There seems to be no portable way to mask float traps, but we shouldn't
-;;; encounter any float traps when cross-compiling SBCL itself, anyway, so we
-;;; just make this a no-op.
+;;; There seems to be no portable way to mask float traps, but we
+;;; shouldn't encounter any float traps when cross-compiling SBCL
+;;; itself, anyway, so we just make this a no-op.
 (defmacro sb!vm::with-float-traps-masked (traps &body body)
   (declare (ignore traps))
   ;; FIXME: should become STYLE-WARNING?
@@ -39,19 +36,21 @@
         (logior uresult
                 (logand -1 (lognot mask))))))
 
-;;; portable implementations of SINGLE-FLOAT-BITS, DOUBLE-FLOAT-LOW-BITS, and
-;;; DOUBLE-FLOAT-HIGH-BITS
+;;; portable implementations of SINGLE-FLOAT-BITS,
+;;; DOUBLE-FLOAT-LOW-BITS, and DOUBLE-FLOAT-HIGH-BITS
 ;;;
-;;; KLUDGE: These will fail if the target's floating point isn't IEEE, and so
-;;; I'd be more comfortable if there were an assertion "target's floating point
-;;; is IEEE" in the code, but I can't see how to express that.
+;;; KLUDGE: These will fail if the target's floating point isn't IEEE,
+;;; and so I'd be more comfortable if there were an assertion
+;;; "target's floating point is IEEE" in the code, but I can't see how
+;;; to express that.
 ;;;
-;;; KLUDGE: It's sort of weird that these functions return signed 32-bit values
-;;; instead of unsigned 32-bit values. This is the way that the CMU CL
-;;; machine-dependent functions behaved, and I've copied that behavior, but it
-;;; seems to me that it'd be more idiomatic to return unsigned 32-bit values.
-;;; Maybe someday the machine-dependent functions could be tweaked to return
-;;; unsigned 32-bit values?
+;;; KLUDGE: It's sort of weird that these functions return signed
+;;; 32-bit values instead of unsigned 32-bit values. This is the way
+;;; that the CMU CL machine-dependent functions behaved, and I've
+;;; copied that behavior, but it seems to me that it'd be more
+;;; idiomatic to return unsigned 32-bit values. Maybe someday the
+;;; machine-dependent functions could be tweaked to return unsigned
+;;; 32-bit values?
 (defun single-float-bits (x)
   (declare (type single-float x))
   (assert (= (float-radix x) 2))
@@ -82,7 +81,7 @@
        (>= significand (expt 2 23))
        (assert (< 0 significand (expt 2 24)))
        ;; Exponent 0 is reserved for denormalized numbers,
-       ;; and 255 is reserved for specials a la NaN.
+       ;; and 255 is reserved for specials like NaN.
        (assert (< 0 exponent 255))
        (return (logior (ash exponent 23)
                        (logand significand
@@ -127,7 +126,7 @@
        (>= significand (expt 2 52))
        (assert (< 0 significand (expt 2 53)))
        ;; Exponent 0 is reserved for denormalized numbers,
-       ;; and 2047 is reserved for specials a la NaN.
+       ;; and 2047 is reserved for specials like NaN.
        (assert (< 0 exponent 2047))
        (return (logior (ash exponent 52)
                        (logand significand
@@ -174,7 +173,7 @@
 ;;; cross-compilation host Lisps are likely to have exactly the same
 ;;; floating point precision as the target Lisp.  If it turns out to be
 ;;; a problem, there are possible workarounds involving portable
-;;; representations for target floating point numbers, a la
+;;; representations for target floating point numbers, like
 ;;;   (DEFSTRUCT TARGET-SINGLE-FLOAT
 ;;;     (SIGN (REQUIRED-ARGUMENT) :TYPE BIT)
 ;;;     (EXPONENT (REQUIRED-ARGUMENT) :TYPE UNSIGNED-BYTE)
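Note (not part of the patch above): the comments in this diff describe the IEEE single-float bit layout that SINGLE-FLOAT-BITS packs with (logior (ash exponent 23) ...): bit 31 is the sign, bits 30-23 the biased exponent (0 reserved for denormalized numbers, 255 for specials like NaN), and bits 22-0 the significand with its hidden leading bit dropped. A minimal sketch of that packing for normalized numbers only, using the hypothetical helper name ENCODE-IEEE-SINGLE rather than anything defined in cross-float.lisp, might look like:

  ;; Hypothetical sketch, not from the patch: pack SIGN (0 or 1), a
  ;; biased EXPONENT in [1, 254], and a 24-bit SIGNIFICAND (hidden bit
  ;; included) into the 32-bit IEEE single-float format.
  (defun encode-ieee-single (sign exponent significand)
    (logior (ash sign 31)
            (ash exponent 23)
            ;; keep only the low 23 bits, dropping the hidden leading 1
            (ldb (byte 23 0) significand)))

  ;; Example: 1.0f0 has sign 0, biased exponent 127, significand #x800000,
  ;; so (encode-ieee-single 0 127 #x800000) => #x3F800000 (= 1065353216).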