Example #1
/*
 * tan(x) = sin(x) / cos(x)
 */
struct fpn *
fpu_tan(struct fpemu *fe)
{
    struct fpn x;
    struct fpn s;
    struct fpn *r;

    if (ISNAN(&fe->fe_f2))
        return &fe->fe_f2;
    if (ISINF(&fe->fe_f2))
        return fpu_newnan(fe);

    /* if x is +0/-0, return +0/-0 */
    if (ISZERO(&fe->fe_f2))
        return &fe->fe_f2;

    CPYFPN(&x, &fe->fe_f2);

    /* sin(x) */
    CPYFPN(&fe->fe_f2, &x);
    r = fpu_sin(fe);
    CPYFPN(&s, r);

    /* cos(x) */
    CPYFPN(&fe->fe_f2, &x);
    r = fpu_cos(fe);
    CPYFPN(&fe->fe_f2, r);

    CPYFPN(&fe->fe_f1, &s);
    r = fpu_div(fe);
    return r;
}
Example #2
/*
 * arccos(x) = pi/2 - arcsin(x)
 */
struct fpn *
fpu_acos(struct fpemu *fe)
{
    struct fpn *r;

    if (ISNAN(&fe->fe_f2))
        return &fe->fe_f2;
    if (ISINF(&fe->fe_f2))
        return fpu_newnan(fe);

    r = fpu_asin(fe);
    CPYFPN(&fe->fe_f2, r);

    /* pi/2 - asin(x) */
    fpu_const(&fe->fe_f1, FPU_CONST_PI);
    fe->fe_f1.fp_exp--;
    fe->fe_f2.fp_sign = !fe->fe_f2.fp_sign;
    r = fpu_add(fe);

    return r;
}
Example #3
/*
 *                          x
 * arcsin(x) = arctan(---------------)
 *                     sqrt(1 - x^2)
 */
struct fpn *
fpu_asin(struct fpemu *fe)
{
    struct fpn x;
    struct fpn *r;

    if (ISNAN(&fe->fe_f2))
        return &fe->fe_f2;
    if (ISZERO(&fe->fe_f2))
        return &fe->fe_f2;

    if (ISINF(&fe->fe_f2))
        return fpu_newnan(fe);

    CPYFPN(&x, &fe->fe_f2);

    /* x^2 */
    CPYFPN(&fe->fe_f1, &fe->fe_f2);
    r = fpu_mul(fe);

    /* 1 - x^2 */
    CPYFPN(&fe->fe_f2, r);
    fe->fe_f2.fp_sign = 1;
    fpu_const(&fe->fe_f1, FPU_CONST_1);
    r = fpu_add(fe);

    /* sqrt(1-x^2) */
    CPYFPN(&fe->fe_f2, r);
    r = fpu_sqrt(fe);

    /* x/sqrt */
    CPYFPN(&fe->fe_f2, r);
    CPYFPN(&fe->fe_f1, &x);
    r = fpu_div(fe);

    /* arctan */
    CPYFPN(&fe->fe_f2, r);
    return fpu_atan(fe);
}
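
The identities used by fpu_tan, fpu_acos and fpu_asin above can be cross-checked on the host in ordinary double precision. The sketch below is for verification only, not emulator code; it assumes nothing beyond the standard <math.h> functions.

/* Host-side sketch (not emulator code): the three identities above. */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	const double pi = acos(-1.0);
	double x = 0.3;

	/* tan(x) = sin(x) / cos(x) */
	printf("tan:  %.17g  %.17g\n", tan(x), sin(x) / cos(x));

	/* arccos(x) = pi/2 - arcsin(x) */
	printf("acos: %.17g  %.17g\n", acos(x), pi / 2.0 - asin(x));

	/* arcsin(x) = arctan(x / sqrt(1 - x^2)) */
	printf("asin: %.17g  %.17g\n", asin(x), atan(x / sqrt(1.0 - x * x)));

	return 0;
}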
Example #4
/*
 * Our task is to calculate the square root of a floating point number x0.
 * This number x normally has the form:
 *
 *		    exp
 *	x = mant * 2		(where 1 <= mant < 2 and exp is an integer)
 *
 * This can be left as it stands, or the mantissa can be doubled and the
 * exponent decremented:
 *
 *			  exp-1
 *	x = (2 * mant) * 2	(where 2 <= 2 * mant < 4)
 *
 * If the exponent `exp' is even, the square root of the number is best
 * handled using the first form, and is by definition equal to:
 *
 *				exp/2
 *	sqrt(x) = sqrt(mant) * 2
 *
 * If exp is odd, on the other hand, it is convenient to use the second
 * form, giving:
 *
 *				    (exp-1)/2
 *	sqrt(x) = sqrt(2 * mant) * 2
 *
 * In the first case, we have
 *
 *	1 <= mant < 2
 *
 * and therefore
 *
 *	sqrt(1) <= sqrt(mant) < sqrt(2)
 *
 * while in the second case we have
 *
 *	2 <= 2*mant < 4
 *
 * and therefore
 *
 *	sqrt(2) <= sqrt(2*mant) < sqrt(4)
 *
 * so that in any case, we are sure that
 *
 *	sqrt(1) <= sqrt(n * mant) < sqrt(4),	n = 1 or 2
 *
 * or
 *
 *	1 <= sqrt(n * mant) < 2,		n = 1 or 2.
 *
 * This root is therefore a properly formed mantissa for a floating
 * point number.  The exponent of sqrt(x) is either exp/2 or (exp-1)/2
 * as above.  This leaves us with the problem of finding the square root
 * of a fixed-point number in the range [1..4).
 *
 * Though it may not be instantly obvious, the following square root
 * algorithm works for any integer x of an even number of bits, provided
 * that no overflows occur:
 *
 *	let q = 0
 *	for k = NBITS-1 to 0 step -1 do -- for each digit in the answer...
 *		x *= 2			-- multiply by radix, for next digit
 *		if x >= 2q + 2^k then	-- if adding 2^k does not
 *			x -= 2q + 2^k	-- exceed the correct root,
 *			q += 2^k	-- add 2^k and adjust x
 *		fi
 *	done
 *	sqrt = q / 2^(NBITS/2)		-- (and any remainder is in x)
 *
 * If NBITS is odd (so that k is initially even), we can just add another
 * zero bit at the top of x.  Doing so means that q is not going to acquire
 * a 1 bit in the first trip around the loop (since x0 < 2^NBITS).  If the
 * final value in x is not needed, or can be off by a factor of 2, this is
 * equivalent to moving the `x *= 2' step to the bottom of the loop:
 *
 *	for k = NBITS-1 to 0 step -1 do if ... fi; x *= 2; done
 *
 * and the result q will then be sqrt(x0) * 2^floor(NBITS / 2).
 * (Since the algorithm is destructive on x, we will call x's initial
 * value, for which q is some power of two times its square root, x0.)
 *
 * If we insert a loop invariant y = 2q, we can then rewrite this using
 * C notation as:
 *
 *	q = y = 0; x = x0;
 *	for (k = NBITS; --k >= 0;) {
 * #if (NBITS is even)
 *		x *= 2;
 * #endif
 *		t = y + (1 << k);
 *		if (x >= t) {
 *			x -= t;
 *			q += 1 << k;
 *			y += 1 << (k + 1);
 *		}
 * #if (NBITS is odd)
 *		x *= 2;
 * #endif
 *	}
 *
 * If x0 is fixed point, rather than an integer, we can simply alter the
 * scale factor between q and sqrt(x0).  As it happens, we can easily arrange
 * for the scale factor to be 2**0 or 1, so that sqrt(x0) == q.
 *
 * In our case, however, x0 (and therefore x, y, q, and t) are multiword
 * integers, which adds some complication.  But note that q is built one
 * bit at a time, from the top down, and is not used itself in the loop
 * (we use 2q as held in y instead).  This means we can build our answer
 * in an integer, one word at a time, which saves a bit of work.  Also,
 * since 1 << k is always a `new' bit in q, 1 << k and 1 << (k+1) are
 * `new' bits in y and we can set them with an `or' operation rather than
 * a full-blown multiword add.
 *
 * We are almost done, except for one snag.  We must prove that none of our
 * intermediate calculations can overflow.  We know that x0 is in [1..4)
 * and therefore the square root in q will be in [1..2), but what about x,
 * y, and t?
 *
 * We know that y = 2q at the beginning of each loop.  (The relation only
 * fails temporarily while y and q are being updated.)  Since q < 2, y < 4.
 * The sum in t can, in our case, be as much as y+(1<<1) = y+2 < 6.
 * Furthermore, we can prove with a bit of work that x never exceeds y by
 * more than 2, so that even after doubling, 0 <= x < 8.  (This is left as
 * an exercise to the reader, mostly because I have become tired of working
 * on this comment.)
 *
 * If our floating point mantissas (which are of the form 1.frac) occupy
 * B+1 bits, our largest intermediary needs at most B+3 bits, or two extra.
 * In fact, we want even one more bit (for a carry, to avoid compares), or
 * three extra.  There is a comment in fpu_emu.h reminding maintainers of
 * this, so we have some justification in assuming it.
 */
struct fpn *
fpu_sqrt(struct fpemu *fe)
{
	struct fpn *x = &fe->fe_f1;
	u_int bit, q, tt;
	u_int x0, x1, x2, x3;
	u_int y0, y1, y2, y3;
	u_int d0, d1, d2, d3;
	int e;
	FPU_DECL_CARRY;

	/*
	 * Take care of special cases first.  In order:
	 *
	 *	sqrt(NaN) = NaN
	 *	sqrt(+0) = +0
	 *	sqrt(-0) = -0
	 *	sqrt(x < 0) = NaN	(including sqrt(-Inf))
	 *	sqrt(+Inf) = +Inf
	 *
	 * Then all that remains are numbers with mantissas in [1..2).
	 */
	DPRINTF(FPE_REG, ("fpu_sqrt:\n"));
	DUMPFPN(FPE_REG, x);
	DPRINTF(FPE_REG, ("=>\n"));
	if (ISNAN(x)) {
		fe->fe_cx |= FPSCR_VXSNAN;
		DUMPFPN(FPE_REG, x);
		return (x);
	}
	if (ISZERO(x)) {
		DUMPFPN(FPE_REG, x);
		return (x);
	}
	if (x->fp_sign) {
		fe->fe_cx |= FPSCR_VXSQRT;
		return (fpu_newnan(fe));
	}
	if (ISINF(x)) {
		DUMPFPN(FPE_REG, x);
		return (x);
	}

	/*
	 * Calculate result exponent.  As noted above, this may involve
	 * doubling the mantissa.  We will also need to double x each
	 * time around the loop, so we define a macro for this here, and
	 * we break out the multiword mantissa.
	 */
#ifdef FPU_SHL1_BY_ADD
#define	DOUBLE_X { \
	FPU_ADDS(x3, x3, x3); FPU_ADDCS(x2, x2, x2); \
	FPU_ADDCS(x1, x1, x1); FPU_ADDC(x0, x0, x0); \
}
#else
#define	DOUBLE_X { \
	x0 = (x0 << 1) | (x1 >> 31); x1 = (x1 << 1) | (x2 >> 31); \
	x2 = (x2 << 1) | (x3 >> 31); x3 <<= 1; \
}
#endif
#if (FP_NMANT & 1) != 0
# define ODD_DOUBLE	DOUBLE_X
# define EVEN_DOUBLE	/* nothing */
#else
# define ODD_DOUBLE	/* nothing */
# define EVEN_DOUBLE	DOUBLE_X
#endif
	x0 = x->fp_mant[0];
	x1 = x->fp_mant[1];
	x2 = x->fp_mant[2];
	x3 = x->fp_mant[3];
	e = x->fp_exp;
	if (e & 1)		/* exponent is odd; use sqrt(2mant) */
		DOUBLE_X;
	/* THE FOLLOWING ASSUMES THAT RIGHT SHIFT DOES SIGN EXTENSION */
	x->fp_exp = e >> 1;	/* calculates (e&1 ? (e-1)/2 : e/2) */

	/*
	 * Now calculate the mantissa root.  Since x is now in [1..4),
	 * we know that the first trip around the loop will definitely
	 * set the top bit in q, so we can do that manually and start
	 * the loop at the next bit down instead.  We must be sure to
	 * double x correctly while doing the `known q=1.0'.
	 *
	 * We do this one mantissa-word at a time, as noted above, to
	 * save work.  To avoid `(1U << 31) << 1', we also do the top bit
	 * outside of each per-word loop.
	 *
	 * The calculation `t = y + bit' breaks down into `t0 = y0, ...,
	 * t3 = y3, t? |= bit' for the appropriate word.  Since the bit
	 * is always a `new' one, this means that three of the `t?'s are
	 * just the corresponding `y?'; we use `#define's here for this.
	 * The variable `tt' holds the actual `t?' variable.
	 */

	/* calculate q0 */
#define	t0 tt
	bit = FP_1;
	EVEN_DOUBLE;
	/* if (x >= (t0 = y0 | bit)) { */	/* always true */
		q = bit;
		x0 -= bit;
		y0 = bit << 1;
	/* } */
	ODD_DOUBLE;
	while ((bit >>= 1) != 0) {	/* for remaining bits in q0 */
		EVEN_DOUBLE;
		t0 = y0 | bit;		/* t = y + bit */
		if (x0 >= t0) {		/* if x >= t then */
			x0 -= t0;	/*	x -= t */
			q |= bit;	/*	q += bit */
			y0 |= bit << 1;	/*	y += bit << 1 */
		}
		ODD_DOUBLE;
	}
	x->fp_mant[0] = q;
#undef t0

	/* calculate q1.  note (y0&1)==0. */
#define t0 y0
#define t1 tt
	q = 0;
	y1 = 0;
	bit = 1 << 31;
	EVEN_DOUBLE;
	t1 = bit;
	FPU_SUBS(d1, x1, t1);
	FPU_SUBC(d0, x0, t0);		/* d = x - t */
	if ((int)d0 >= 0) {		/* if d >= 0 (i.e., x >= t) then */
		x0 = d0, x1 = d1;	/*	x -= t */
		q = bit;		/*	q += bit */
		y0 |= 1;		/*	y += bit << 1 */
	}
	ODD_DOUBLE;
	while ((bit >>= 1) != 0) {	/* for remaining bits in q1 */
		EVEN_DOUBLE;		/* as before */
		t1 = y1 | bit;
		FPU_SUBS(d1, x1, t1);
		FPU_SUBC(d0, x0, t0);
		if ((int)d0 >= 0) {
			x0 = d0, x1 = d1;
			q |= bit;
			y1 |= bit << 1;
		}
		ODD_DOUBLE;
	}
	x->fp_mant[1] = q;
#undef t1

	/* calculate q2.  note (y1&1)==0; y0 (aka t0) is fixed. */
#define t1 y1
#define t2 tt
	q = 0;
	y2 = 0;
	bit = 1 << 31;
	EVEN_DOUBLE;
	t2 = bit;
	FPU_SUBS(d2, x2, t2);
	FPU_SUBCS(d1, x1, t1);
	FPU_SUBC(d0, x0, t0);
	if ((int)d0 >= 0) {
		x0 = d0, x1 = d1, x2 = d2;
		q |= bit;
		y1 |= 1;		/* now t1, y1 are set in concrete */
	}
	ODD_DOUBLE;
	while ((bit >>= 1) != 0) {
		EVEN_DOUBLE;
		t2 = y2 | bit;
		FPU_SUBS(d2, x2, t2);
		FPU_SUBCS(d1, x1, t1);
		FPU_SUBC(d0, x0, t0);
		if ((int)d0 >= 0) {
			x0 = d0, x1 = d1, x2 = d2;
			q |= bit;
			y2 |= bit << 1;
		}
		ODD_DOUBLE;
	}
	x->fp_mant[2] = q;
#undef t2

	/* calculate q3.  y0, t0, y1, t1 all fixed; y2, t2, almost done. */
#define t2 y2
#define t3 tt
	q = 0;
	y3 = 0;
	bit = 1 << 31;
	EVEN_DOUBLE;
	t3 = bit;
	FPU_SUBS(d3, x3, t3);
	FPU_SUBCS(d2, x2, t2);
	FPU_SUBCS(d1, x1, t1);
	FPU_SUBC(d0, x0, t0);
	ODD_DOUBLE;
	if ((int)d0 >= 0) {
		x0 = d0, x1 = d1, x2 = d2;
		q |= bit;
		y2 |= 1;
	}
	while ((bit >>= 1) != 0) {
		EVEN_DOUBLE;
		t3 = y3 | bit;
		FPU_SUBS(d3, x3, t3);
		FPU_SUBCS(d2, x2, t2);
		FPU_SUBCS(d1, x1, t1);
		FPU_SUBC(d0, x0, t0);
		if ((int)d0 >= 0) {
			x0 = d0, x1 = d1, x2 = d2;
			q |= bit;
			y3 |= bit << 1;
		}
		ODD_DOUBLE;
	}
	x->fp_mant[3] = q;

	/*
	 * The result, which includes guard and round bits, is exact iff
	 * x is now zero; any nonzero bits in x represent sticky bits.
	 */
	x->fp_sticky = x0 | x1 | x2 | x3;
	DUMPFPN(FPE_REG, x);
	return (x);
}
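
As a minimal illustration of the recurrence described in the big comment above, the same loop can be run on a single machine word instead of the four-word mantissa. This is a hypothetical standalone sketch (demo_sqrt is not part of the emulator); it assumes an even NBITS, so the doubling happens at the top of the loop, and x0 < 2^NBITS.

/* Hypothetical demo, not emulator code: the square-root recurrence from
 * the comment above on one small integer.  Returns q, where
 * q = floor(sqrt(x0) * 2^(NBITS/2)); any remainder is left behind in x. */
#include <stdint.h>

#define NBITS	16		/* even, and small enough not to overflow */

uint32_t
demo_sqrt(uint32_t x0)
{
	uint32_t x = x0, q = 0, y = 0, t;
	int k;

	for (k = NBITS - 1; k >= 0; k--) {
		x <<= 1;			/* NBITS is even: double first */
		t = y + (1u << k);		/* t = 2q + 2^k */
		if (x >= t) {
			x -= t;			/* accept this root bit */
			q += 1u << k;
			y += 1u << (k + 1);	/* keep the invariant y = 2q */
		}
	}
	return q;
}

For example, demo_sqrt(9) returns 768, i.e. sqrt(9) = 3 scaled by 2^(NBITS/2) = 256.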
Example #5
struct fpn *
fpu_div(struct fpemu *fe)
{
	struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2;
	u_int q, bit;
	u_int r0, r1, r2, r3, d0, d1, d2, d3, y0, y1, y2, y3;
	FPU_DECL_CARRY

	/*
	 * Since divide is not commutative, we cannot just use ORDER.
	 * Check either operand for NaN first; if there is at least one,
	 * order the signalling one (if only one) onto the right, then
	 * return it.  Otherwise we have the following cases:
	 *
	 *	Inf / Inf = NaN, plus NV exception
	 *	Inf / num = Inf [i.e., return x]
	 *	Inf / 0   = Inf [i.e., return x]
	 *	0 / Inf = 0 [i.e., return x]
	 *	0 / num = 0 [i.e., return x]
	 *	0 / 0   = NaN, plus NV exception
	 *	num / Inf = 0
	 *	num / num = num (do the divide)
	 *	num / 0   = Inf, plus DZ exception
	 */
	DPRINTF(FPE_REG, ("fpu_div:\n"));
	DUMPFPN(FPE_REG, x);
	DUMPFPN(FPE_REG, y);
	DPRINTF(FPE_REG, ("=>\n"));
	if (ISNAN(x) || ISNAN(y)) {
		ORDER(x, y);
		fe->fe_cx |= FPSCR_VXSNAN;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	/*
	 * Need to split the following cases out because they generate
	 * different exceptions.
	 */
	if (ISINF(x)) {
		if (x->fp_class == y->fp_class) {
			fe->fe_cx |= FPSCR_VXIDI;
			return (fpu_newnan(fe));
		}
		DUMPFPN(FPE_REG, x);
		return (x);
	}
	if (ISZERO(x)) {
		fe->fe_cx |= FPSCR_ZX;
		if (x->fp_class == y->fp_class) {
			fe->fe_cx |= FPSCR_VXZDZ;
			return (fpu_newnan(fe));
		}
		DUMPFPN(FPE_REG, x);
		return (x);
	}

	/* all results at this point use XOR of operand signs */
	x->fp_sign ^= y->fp_sign;
	if (ISINF(y)) {
		x->fp_class = FPC_ZERO;
		DUMPFPN(FPE_REG, x);
		return (x);
	}
	if (ISZERO(y)) {
		fe->fe_cx |= FPSCR_ZX;
		x->fp_class = FPC_INF;
		DUMPFPN(FPE_REG, x);
		return (x);
	}

	/*
	 * Macros for the divide.  See comments at top for algorithm.
	 * Note that we expand R, D, and Y here.
	 */

#define	SUBTRACT		/* D = R - Y */ \
	FPU_SUBS(d3, r3, y3); FPU_SUBCS(d2, r2, y2); \
	FPU_SUBCS(d1, r1, y1); FPU_SUBC(d0, r0, y0)

#define	NONNEGATIVE		/* D >= 0 */ \
	((int)d0 >= 0)

#ifdef FPU_SHL1_BY_ADD
#define	SHL1			/* R <<= 1 */ \
	FPU_ADDS(r3, r3, r3); FPU_ADDCS(r2, r2, r2); \
	FPU_ADDCS(r1, r1, r1); FPU_ADDC(r0, r0, r0)
#else
#define	SHL1 \
	r0 = (r0 << 1) | (r1 >> 31), r1 = (r1 << 1) | (r2 >> 31), \
	r2 = (r2 << 1) | (r3 >> 31), r3 <<= 1
#endif

#define	LOOP			/* do ... while (bit >>= 1) */ \
	do { \
		SHL1; \
		SUBTRACT; \
		if (NONNEGATIVE) { \
			q |= bit; \
			r0 = d0, r1 = d1, r2 = d2, r3 = d3; \
		} \
	} while ((bit >>= 1) != 0)

#define	WORD(r, i)			/* calculate r->fp_mant[i] */ \
	q = 0; \
	bit = 1 << 31; \
	LOOP; \
	(x)->fp_mant[i] = q

	/* Setup.  Note that we put our result in x. */
	r0 = x->fp_mant[0];
	r1 = x->fp_mant[1];
	r2 = x->fp_mant[2];
	r3 = x->fp_mant[3];
	y0 = y->fp_mant[0];
	y1 = y->fp_mant[1];
	y2 = y->fp_mant[2];
	y3 = y->fp_mant[3];

	bit = FP_1;
	SUBTRACT;
	if (NONNEGATIVE) {
		x->fp_exp -= y->fp_exp;
		r0 = d0, r1 = d1, r2 = d2, r3 = d3;
		q = bit;
		bit >>= 1;
	} else {
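
The SUBTRACT/SHL1/LOOP macros above implement an ordinary restoring division over the four mantissa words, one quotient bit per iteration. A minimal sketch of the same recurrence on a single 64-bit word (demo_div is a hypothetical name, not emulator code):

/* Hypothetical sketch, not emulator code: restoring division of two
 * fixed-point mantissas held in single 64-bit words.  Both r (dividend)
 * and y (divisor) have their 1.0 bit at position nbits-1, so they lie in
 * [1.0, 2.0); the quotient is returned in the same scale.  Assumes nbits
 * is well below 64 so the left shift cannot overflow. */
#include <stdint.h>

uint64_t
demo_div(uint64_t r, uint64_t y, int nbits)
{
	uint64_t q = 0, bit;

	for (bit = 1ull << (nbits - 1); bit != 0; bit >>= 1) {
		if (r >= y) {		/* trial subtraction succeeded: */
			r -= y;		/* keep the difference and */
			q |= bit;	/* record this quotient bit */
		}
		r <<= 1;		/* bring the next bit into range */
	}
	return q;			/* the emulator also keeps r for sticky bits */
}

When the first trial subtraction fails, the leading quotient bit is zero and the result exponent is one smaller, which is what the if/else around the first SUBTRACT above is distinguishing.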
Example #6
/*
 * The multiplication algorithm for normal numbers is as follows:
 *
 * The fraction of the product is built in the usual stepwise fashion.
 * Each step consists of shifting the accumulator right one bit
 * (maintaining any guard bits) and, if the next bit in y is set,
 * adding the multiplicand (x) to the accumulator.  Then, in any case,
 * we advance one bit leftward in y.  Algorithmically:
 *
 *	A = 0;
 *	for (bit = 0; bit < FP_NMANT; bit++) {
 *		sticky |= A & 1, A >>= 1;
 *		if (Y & (1 << bit))
 *			A += X;
 *	}
 *
 * (X and Y here represent the mantissas of x and y respectively.)
 * The resultant accumulator (A) is the product's mantissa.  It may
 * be as large as 11.11111... in binary and hence may need to be
 * shifted right, but at most one bit.
 *
 * Since we do not have efficient multiword arithmetic, we code the
 * accumulator as four separate words, just like any other mantissa.
 * We use local variables in the hope that this is faster than memory.
 * We keep x->fp_mant in locals for the same reason.
 *
 * In the algorithm above, the bits in y are inspected one at a time.
 * We will pick them up 32 at a time and then deal with those 32, one
 * at a time.  Note, however, that we know several things about y:
 *
 *    - the guard and round bits at the bottom are sure to be zero;
 *
 *    - often many low bits are zero (y is often from a single or double
 *	precision source);
 *
 *    - bit FP_NMANT-1 is set, and FP_1*2 fits in a word.
 *
 * We can also test for 32-zero-bits swiftly.  In this case, the center
 * part of the loop---setting sticky, shifting A, and not adding---will
 * run 32 times without adding X to A.  We can do a 32-bit shift faster
 * by simply moving words.  Since zeros are common, we optimize this case.
 * Furthermore, since A is initially zero, we can omit the shift as well
 * until we reach a nonzero word.
 */
struct fpn *
fpu_mul(struct fpemu *fe)
{
	struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2;
	u_int a3, a2, a1, a0, x3, x2, x1, x0, bit, m;
	int sticky;
	FPU_DECL_CARRY;

	/*
	 * Put the `heavier' operand on the right (see fpu_emu.h).
	 * Then we will have one of the following cases, taken in the
	 * following order:
	 *
	 *  - y = NaN.  Implied: if only one is a signalling NaN, y is.
	 *	The result is y.
	 *  - y = Inf.  Implied: x != NaN (is 0, number, or Inf: the NaN
	 *    case was taken care of earlier).
	 *	If x = 0, the result is NaN.  Otherwise the result
	 *	is y, with its sign reversed if x is negative.
	 *  - x = 0.  Implied: y is 0 or number.
	 *	The result is 0 (with XORed sign as usual).
	 *  - other.  Implied: both x and y are numbers.
	 *	The result is x * y (XOR sign, multiply bits, add exponents).
	 */
	DPRINTF(FPE_REG, ("fpu_mul:\n"));
	DUMPFPN(FPE_REG, x);
	DUMPFPN(FPE_REG, y);
	DPRINTF(FPE_REG, ("=>\n"));

	ORDER(x, y);
	if (ISNAN(y)) {
		y->fp_sign ^= x->fp_sign;
		fe->fe_cx |= FPSCR_VXSNAN;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISINF(y)) {
		if (ISZERO(x)) {
			fe->fe_cx |= FPSCR_VXIMZ;
			return (fpu_newnan(fe));
		}
		y->fp_sign ^= x->fp_sign;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISZERO(x)) {
		x->fp_sign ^= y->fp_sign;
		DUMPFPN(FPE_REG, x);
		return (x);
	}

	/*
	 * Setup.  In the code below, the mask `m' will hold the current
	 * mantissa word from y.  The variable `bit' denotes the bit
	 * within m.  We also define some macros to deal with everything.
	 */
	x3 = x->fp_mant[3];
	x2 = x->fp_mant[2];
	x1 = x->fp_mant[1];
	x0 = x->fp_mant[0];
	sticky = a3 = a2 = a1 = a0 = 0;

#define	ADD	/* A += X */ \
	FPU_ADDS(a3, a3, x3); \
	FPU_ADDCS(a2, a2, x2); \
	FPU_ADDCS(a1, a1, x1); \
	FPU_ADDC(a0, a0, x0)

#define	SHR1	/* A >>= 1, with sticky */ \
	sticky |= a3 & 1, a3 = (a3 >> 1) | (a2 << 31), \
	a2 = (a2 >> 1) | (a1 << 31), a1 = (a1 >> 1) | (a0 << 31), a0 >>= 1

#define	SHR32	/* A >>= 32, with sticky */ \
	sticky |= a3, a3 = a2, a2 = a1, a1 = a0, a0 = 0

#define	STEP	/* each 1-bit step of the multiplication */ \
	SHR1; if (bit & m) { ADD; }; bit <<= 1

	/*
	 * We are ready to begin.  The multiply loop runs once for each
	 * of the four 32-bit words.  Some words, however, are special.
	 * As noted above, the low order bits of Y are often zero.  Even
	 * if not, the first loop can certainly skip the guard bits.
	 * The last word of y has its highest 1-bit in position FP_NMANT-1,
	 * so we stop the loop when we move past that bit.
	 */
	if ((m = y->fp_mant[3]) == 0) {
		/* SHR32; */			/* unneeded since A==0 */
	} else {
		bit = 1 << FP_NG;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[2]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	if ((m = y->fp_mant[1]) == 0) {
		SHR32;
	} else {
		bit = 1;
		do {
			STEP;
		} while (bit != 0);
	}
	m = y->fp_mant[0];		/* definitely != 0 */
	bit = 1;
	do {
		STEP;
	} while (bit <= m);

	/*
	 * Done with mantissa calculation.  Get exponent and handle
	 * 11.111...1 case, then put result in place.  We reuse x since
	 * it already has the right class (FP_NUM).
	 */
	m = x->fp_exp + y->fp_exp;
	if (a0 >= FP_2) {
		SHR1;
		m++;
	}
	x->fp_sign ^= y->fp_sign;
	x->fp_exp = m;
	x->fp_sticky = sticky;
	x->fp_mant[3] = a3;
	x->fp_mant[2] = a2;
	x->fp_mant[1] = a1;
	x->fp_mant[0] = a0;

	DUMPFPN(FPE_REG, x);
	return (x);
}
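
The accumulator loop described in the comment above can likewise be sketched with single 64-bit words. demo_mul below is a hypothetical illustration, not emulator code; it keeps a sticky flag for the bits shifted off the bottom, just as SHR1 does, and assumes nbits is well below 64 so the additions cannot overflow.

/* Hypothetical sketch, not emulator code: shift-and-add multiply of two
 * fixed-point mantissas in [1.0, 2.0), each with its 1.0 bit at position
 * nbits-1.  The product mantissa is returned in the same scale. */
#include <stdint.h>

uint64_t
demo_mul(uint64_t x, uint64_t y, int nbits, int *sticky)
{
	uint64_t a = 0;
	int i;

	*sticky = 0;
	for (i = 0; i < nbits; i++) {
		*sticky |= (int)(a & 1);	/* bit shifted out is sticky */
		a >>= 1;
		if ((y >> i) & 1)
			a += x;			/* add the multiplicand */
	}
	return a;	/* a == x * y / 2^(nbits-1), the product mantissa */
}

The loop result lies in [1.0, 4.0) of the same fixed-point scale, so one final right shift plus an exponent increment may still be needed, matching the a0 >= FP_2 check above.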
Example #7
File: fpu_add.c  Project: MarginC/kame
struct fpn *
fpu_add(struct fpemu *fe)
{
	struct fpn *x = &fe->fe_f1, *y = &fe->fe_f2, *r;
	u_int r0, r1, r2, r3;
	int rd;

	/*
	 * Put the `heavier' operand on the right (see fpu_emu.h).
	 * Then we will have one of the following cases, taken in the
	 * following order:
	 *
	 *  - y = NaN.  Implied: if only one is a signalling NaN, y is.
	 *	The result is y.
	 *  - y = Inf.  Implied: x != NaN (is 0, number, or Inf: the NaN
	 *    case was taken care of earlier).
	 *	If x = -y, the result is NaN.  Otherwise the result
	 *	is y (an Inf of whichever sign).
	 *  - y is 0.  Implied: x = 0.
	 *	If x and y differ in sign (one positive, one negative),
	 *	the result is +0 except when rounding to -Inf.  If same:
	 *	+0 + +0 = +0; -0 + -0 = -0.
	 *  - x is 0.  Implied: y != 0.
	 *	Result is y.
	 *  - other.  Implied: both x and y are numbers.
	 *	Do addition a la Hennessey & Patterson.
	 */
	DPRINTF(FPE_REG, ("fpu_add:\n"));
	DUMPFPN(FPE_REG, x);
	DUMPFPN(FPE_REG, y);
	DPRINTF(FPE_REG, ("=>\n"));
	ORDER(x, y);
	if (ISNAN(y)) {
		fe->fe_cx |= FPSCR_VXSNAN;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISINF(y)) {
		if (ISINF(x) && x->fp_sign != y->fp_sign) {
			fe->fe_cx |= FPSCR_VXISI;
			return (fpu_newnan(fe));
		}
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	rd = ((fe->fe_fpscr) & FPSCR_RN);
	if (ISZERO(y)) {
		if (rd != FSR_RD_RM)	/* only -0 + -0 gives -0 */
			y->fp_sign &= x->fp_sign;
		else			/* any -0 operand gives -0 */
			y->fp_sign |= x->fp_sign;
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	if (ISZERO(x)) {
		DUMPFPN(FPE_REG, y);
		return (y);
	}
	/*
	 * We really have two numbers to add, although their signs may
	 * differ.  Make the exponents match, by shifting the smaller
	 * number right (e.g., 1.011 => 0.1011) and increasing its
	 * exponent (2^3 => 2^4).  Note that we do not alter the exponents
	 * of x and y here.
	 */
	r = &fe->fe_f3;
	r->fp_class = FPC_NUM;
	if (x->fp_exp == y->fp_exp) {
		r->fp_exp = x->fp_exp;
		r->fp_sticky = 0;
	} else {
		if (x->fp_exp < y->fp_exp) {
			/*
			 * Try to avoid subtract case iii (see below).
			 * This also guarantees that x->fp_sticky = 0.
			 */
			SWAP(x, y);
		}
		/* now x->fp_exp > y->fp_exp */
		r->fp_exp = x->fp_exp;
		r->fp_sticky = fpu_shr(y, x->fp_exp - y->fp_exp);
	}
	r->fp_sign = x->fp_sign;
	if (x->fp_sign == y->fp_sign) {
		FPU_DECL_CARRY

		/*
		 * The signs match, so we simply add the numbers.  The result
		 * may be `supernormal' (as big as 1.111...1 + 1.111...1, or
		 * 11.111...0).  If so, a single bit shift-right will fix it
		 * (but remember to adjust the exponent).
		 */
		/* r->fp_mant = x->fp_mant + y->fp_mant */
		FPU_ADDS(r->fp_mant[3], x->fp_mant[3], y->fp_mant[3]);
		FPU_ADDCS(r->fp_mant[2], x->fp_mant[2], y->fp_mant[2]);
		FPU_ADDCS(r->fp_mant[1], x->fp_mant[1], y->fp_mant[1]);
		FPU_ADDC(r0, x->fp_mant[0], y->fp_mant[0]);
		if ((r->fp_mant[0] = r0) >= FP_2) {
			(void) fpu_shr(r, 1);
			r->fp_exp++;
		}
	} else {
		FPU_DECL_CARRY

		/*
		 * The signs differ, so things are rather more difficult.
		 * H&P would have us negate the negative operand and add;
		 * this is the same as subtracting the negative operand.
		 * This is quite a headache.  Instead, we will subtract
		 * y from x, regardless of whether y itself is the negative
		 * operand.  When this is done one of three conditions will
		 * hold, depending on the magnitudes of x and y:
		 *   case i)   |x| > |y|.  The result is just x - y,
		 *	with x's sign, but it may need to be normalized.
		 *   case ii)  |x| = |y|.  The result is 0 (maybe -0)
		 *	so must be fixed up.
		 *   case iii) |x| < |y|.  We goofed; the result should
		 *	be (y - x), with the same sign as y.
		 * We could compare |x| and |y| here and avoid case iii,
		 * but that would take just as much work as the subtract.
		 * We can tell case iii has occurred by an overflow.
		 *
		 * N.B.: since x->fp_exp >= y->fp_exp, x->fp_sticky = 0.
		 */
		/* r->fp_mant = x->fp_mant - y->fp_mant */
		FPU_SET_CARRY(y->fp_sticky);
		FPU_SUBCS(r3, x->fp_mant[3], y->fp_mant[3]);
		FPU_SUBCS(r2, x->fp_mant[2], y->fp_mant[2]);
		FPU_SUBCS(r1, x->fp_mant[1], y->fp_mant[1]);
		FPU_SUBC(r0, x->fp_mant[0], y->fp_mant[0]);
		if (r0 < FP_2) {
			/* cases i and ii */
			if ((r0 | r1 | r2 | r3) == 0) {
				/* case ii */
				r->fp_class = FPC_ZERO;
				r->fp_sign = rd == FSR_RD_RM;
				return (r);
			}
		} else {
			/*
			 * Oops, case iii.  This can only occur when the
			 * exponents were equal, in which case neither
			 * x nor y have sticky bits set.  Flip the sign
			 * (to y's sign) and negate the result to get y - x.
			 */
#ifdef DIAGNOSTIC
			if (x->fp_exp != y->fp_exp || r->fp_sticky)
				panic("fpu_add");
#endif
			r->fp_sign = y->fp_sign;
			FPU_SUBS(r3, 0, r3);
			FPU_SUBCS(r2, 0, r2);
			FPU_SUBCS(r1, 0, r1);
			FPU_SUBC(r0, 0, r0);
		}
		r->fp_mant[3] = r3;
		r->fp_mant[2] = r2;
		r->fp_mant[1] = r1;
		r->fp_mant[0] = r0;
		if (r0 < FP_1)
			fpu_norm(r);
	}
	DUMPFPN(FPE_REG, r);
	return (r);
}
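
fpu_add above relies on fpu_shr() to align the smaller operand: the mantissa is shifted right and any bits that fall off the end are folded into the sticky word so rounding can still observe them. A single-word sketch of that step (demo_align is a hypothetical name, not the emulator's fpu_shr):

/* Hypothetical sketch, not emulator code: align a mantissa held in one
 * 64-bit word by shifting it right `shift' places, and report whether any
 * nonzero bits were shifted out (the sticky bit used for rounding). */
#include <stdint.h>

uint64_t
demo_align(uint64_t mant, unsigned int shift, int *sticky)
{
	if (shift >= 64) {			/* everything shifts out */
		*sticky = (mant != 0);
		return 0;
	}
	*sticky = (mant & ((1ull << shift) - 1)) != 0;
	return mant >> shift;
}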
Example #8
static struct fpn *
__fpu_modrem(struct fpemu *fe, int is_mod)
{
	static struct fpn X, Y;
	struct fpn *x, *y, *r;
	uint32_t signX, signY, signQ;
	int j, k, l, q;
	int cmp;

	if (ISNAN(&fe->fe_f1) || ISNAN(&fe->fe_f2))
		return fpu_newnan(fe);
	if (ISINF(&fe->fe_f1) || ISZERO(&fe->fe_f2))
		return fpu_newnan(fe);

	CPYFPN(&X, &fe->fe_f1);
	CPYFPN(&Y, &fe->fe_f2);
	x = &X;
	y = &Y;
	q = 0;
	r = &fe->fe_f2;

	/*
	 * Step 1
	 */
	signX = x->fp_sign;
	signY = y->fp_sign;
	signQ = (signX ^ signY);
	x->fp_sign = y->fp_sign = 0;

	/* Special cases just return the input value, but Q still must be set */
	if (ISZERO(x) || ISINF(y)) {
		r = &fe->fe_f1;
		goto Step7;
	}

	/*
	 * Step 2
	 */
	l = x->fp_exp - y->fp_exp;
	k = 0;
	CPYFPN(r, x);
	if (l >= 0) {
		r->fp_exp -= l;
		j = l;

		/*
		 * Step 3
		 */
		for (;;) {
			cmp = abscmp3(r, y);

			/* Step 3.1 */
			if (cmp == 0)
				break;

			/* Step 3.2 */
			if (cmp > 0) {
				CPYFPN(&fe->fe_f1, r);
				CPYFPN(&fe->fe_f2, y);
				fe->fe_f2.fp_sign = 1;
				r = fpu_add(fe);
				q++;
			}

			/* Step 3.3 */
			if (j == 0)
				goto Step4;

			/* Step 3.4 */
			k++;
			j--;
			q += q;
			r->fp_exp++;
		}
		/* R == Y */
		q++;
		r->fp_class = FPC_ZERO;
		goto Step7;
	}
 Step4:
	r->fp_sign = signX;

	/*
	 * Step 5
	 */
	if (is_mod)
		goto Step7;

	/*
	 * Step 6
	 */
	/* y = y / 2 */
	y->fp_exp--;
	/* abscmp3 ignores the sign */
	cmp = abscmp3(r, y);
	/* revert y */
	y->fp_exp++;

	if (cmp > 0 || (cmp == 0 && q % 2)) {
		q++;
		CPYFPN(&fe->fe_f1, r);
		CPYFPN(&fe->fe_f2, y);
		fe->fe_f2.fp_sign = !signX;
		r = fpu_add(fe);
	}

	/*
	 * Step 7
	 */
 Step7:
	q &= 0x7f;
	q |= (signQ << 7);
	fe->fe_fpframe->fpf_fpsr =
	fe->fe_fpsr =
	    (fe->fe_fpsr & ~FPSR_QTT) | (q << 16);
	return r;
}
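
Step 7 above packs the low seven quotient bits and the quotient sign into the FPSR quotient byte, which is what the m68k FMOD/FREM instructions report. On a C99 host, remquo() exposes the same idea; the sketch below is only for comparison, not emulator code.

/* Host-side sketch (not emulator code): C99 remquo() returns the IEEE
 * remainder and the low bits of the rounded quotient, with its sign. */
#include <math.h>
#include <stdio.h>

int
main(void)
{
	int quo;
	double rem = remquo(10.0, 3.0, &quo);

	/* rem == 1.0 here; quo holds at least the low three bits of the
	 * rounded quotient (3), analogous to the 7-bit FPSR field above. */
	printf("rem = %g  quo = %d  fmod = %g\n", rem, quo, fmod(10.0, 3.0));
	return 0;
}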
Example #9
/*
 * sin(x):
 *
 *	if (x < 0) {
 *		x = abs(x);
 *		sign = 1;
 *	}
 *	if (x > 2*pi) {
 *		x %= 2*pi;
 *	}
 *	if (x > pi) {
 *		x -= pi;
 *		sign inverse;
 *	}
 *	if (x > pi/2) {
 *		y = cos(x - pi/2);
 *	} else {
 *		y = sin(x);
 *	}
 *	if (sign) {
 *		y = -y;
 *	}
 */
struct fpn *
fpu_sin(struct fpemu *fe)
{
    struct fpn x;
    struct fpn p;
    struct fpn *r;
    int sign;

    if (ISNAN(&fe->fe_f2))
        return &fe->fe_f2;
    if (ISINF(&fe->fe_f2))
        return fpu_newnan(fe);

    /* if x is +0/-0, return +0/-0 */
    if (ISZERO(&fe->fe_f2))
        return &fe->fe_f2;

    CPYFPN(&x, &fe->fe_f2);

    /* x = abs(input) */
    sign = x.fp_sign;
    x.fp_sign = 0;

    /* p <- 2*pi */
    fpu_const(&p, FPU_CONST_PI);
    p.fp_exp++;

    /*
     * if (x > 2*pi*N)
     *  sin(x) is sin(x - 2*pi*N)
     */
    CPYFPN(&fe->fe_f1, &x);
    CPYFPN(&fe->fe_f2, &p);
    r = fpu_cmp(fe);
    if (r->fp_sign == 0) {
        CPYFPN(&fe->fe_f1, &x);
        CPYFPN(&fe->fe_f2, &p);
        r = fpu_mod(fe);
        CPYFPN(&x, r);
    }

    /* p <- pi */
    p.fp_exp--;

    /*
     * if (x > pi)
     *  sin(x) is -sin(x - pi)
     */
    CPYFPN(&fe->fe_f1, &x);
    CPYFPN(&fe->fe_f2, &p);
    fe->fe_f2.fp_sign = 1;
    r = fpu_add(fe);
    if (r->fp_sign == 0) {
        CPYFPN(&x, r);
        sign ^= 1;
    }

    /* p <- pi/2 */
    p.fp_exp--;

    /*
     * if (x > pi/2)
     *  sin(x) is cos(x - pi/2)
     * else
     *  sin(x)
     */
    CPYFPN(&fe->fe_f1, &x);
    CPYFPN(&fe->fe_f2, &p);
    fe->fe_f2.fp_sign = 1;
    r = fpu_add(fe);
    if (r->fp_sign == 0) {
        __fpu_sincos_cordic(fe, r);
        r = &fe->fe_f2;
    } else {
        __fpu_sincos_cordic(fe, &x);
        r = &fe->fe_f1;
    }
    r->fp_sign = sign;
    return r;
}
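
The same argument reduction can be written directly in double precision to check the flow described in the comment at the top of fpu_sin. demo_sin below is a hypothetical host-side sketch, not emulator code.

/* Host-side sketch (not emulator code): the reduction from the comment
 * above, using ordinary double arithmetic. */
#include <math.h>

double
demo_sin(double x)
{
	const double pi = acos(-1.0);
	int sign = 0;
	double y;

	if (x < 0) {			/* sin(-x) = -sin(x) */
		x = -x;
		sign = 1;
	}
	if (x > 2 * pi)			/* strip whole periods */
		x = fmod(x, 2 * pi);
	if (x > pi) {			/* sin(x) = -sin(x - pi) */
		x -= pi;
		sign ^= 1;
	}
	if (x > pi / 2)			/* sin(x) = cos(x - pi/2) */
		y = cos(x - pi / 2);
	else
		y = sin(x);
	return sign ? -y : y;
}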