    boost::shared_ptr<SmileSection>
    SwaptionVolatilityHullWhite::smileSectionImpl(Time optionTime,
                                                  Time swapLength) const {

        calculate();
        Date optionDate = Date(static_cast<BigInteger>(optionInterpolator_(optionTime)));
        // round the swap length in years to a whole number of months
        Rounding rounder(0);
        Period swapTenor(static_cast<Integer>(rounder(swapLength*12.0)), Months);
        return smileSection(optionDate, swapTenor);
    }
Example #2
// Return the size, in bytes, of the type represented by TYPE.
int typeSize(refObject type)
{ switch (toHook(car(type)))
  { case arrayHook:
    { type = cdr(type);
      return toInteger(car(type)) * typeSize(cadr(type)); }
    case char0Hook:
    { return sizeof(char0Type); }
    case char1Hook:
    { return sizeof(char1Type); }
    case int0Hook:
    { return sizeof(int0Type); }
    case int1Hook:
    { return sizeof(int1Type); }
    case int2Hook:
    { return sizeof(int2Type); }
    case nullHook:
    case referHook:
    case rowHook:
    { return sizeof(pointerType); }
    case procHook:
    { return sizeof(procType); }
    case real0Hook:
    { return sizeof(real0Type); }
    case real1Hook:
    { return sizeof(real1Type); }
    case skoHook:
    case varHook:
    { return typeSize(cadr(type)); }
    case strTypeHook:
    { return toInteger(cadddr(type)); }
    case tupleHook:
    { int slotAlign;
      refObject slotType;
      int tupleAlign = 1;
      int tupleSize = 0;
      type = cdr(type);
      while (type != nil)
      { slotType = car(type);
        slotAlign = typeAlign(slotType);
        tupleAlign = (slotAlign > tupleAlign ? slotAlign : tupleAlign);
        tupleSize += typeSize(slotType);
        tupleSize += rounder(tupleSize, slotAlign);
        type = cddr(type); }
      return tupleSize + rounder(tupleSize, tupleAlign); }
    case voidHook:
    { return sizeof(voidType); }
    default:
    { fail("Type has undefined size in typeSize!"); }}}
Example #3
    boost::shared_ptr<SmileSection>
    SwaptionVolCube2::smileSectionImpl(Time optionTime,
                                       Time swapLength) const {

        calculate();
        Date optionDate = optionDateFromTime(optionTime);
        Rounding rounder(0);
        Period swapTenor(static_cast<Integer>(rounder(swapLength*12.0)), Months);
        // ensure that the option date is a valid fixing date
        optionDate =
            swapTenor > shortSwapIndexBase_->tenor()
                ? swapIndexBase_->fixingCalendar().adjust(optionDate, Following)
                : shortSwapIndexBase_->fixingCalendar().adjust(optionDate,
                                                               Following);
        return smileSectionImpl(optionDate, swapTenor);
    }
Example #4
wakeup_queue round_wakeups( const wakeup_queue &q )
{
  wakeup_queue new_wakeups;

  for_each( q.begin(), q.end(),
	    [&new_wakeups]( const Event &x ) {
	      Event ne( rounder( x.time ),
			x.addr,
			x.sort_order );
	      new_wakeups.push( ne );
	    } );

  return new_wakeups;
}
Example #5
bvt float_utilst::from_unsigned_integer(const bvt &src)
{
  unbiased_floatt result;

  result.fraction=src;

  // build an exponent (unbiased) -- this is signed!
  result.exponent=
    bv_utils.build_constant(
      src.size()-1,
      address_bits(src.size()-1).to_long()+1);

  result.sign=const_literal(false);

  return rounder(result);
}
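// For intuition, the same construction on concrete integers rather than
// bit-vectors; the final normalisation is roughly what rounder() /
// normalization_shift() do later. A hand-written sketch, not the
// float_utilst API:
#include <cstdint>
#include <cstdio>

int main()
{
  uint32_t n = 40;        // integer to convert
  int exponent = 32 - 1;  // src.size()-1, as in from_unsigned_integer()
  uint32_t fraction = n;

  // shift the fraction left until its top bit is set,
  // decrementing the exponent once per shift
  while(fraction != 0 && (fraction >> 31) == 0)
  {
    fraction <<= 1;
    exponent--;
  }

  std::printf("unbiased exponent: %d\n", exponent); // 5, since 40 = 1.01b * 2^5
}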
Example #6
bvt float_utilst::mul(const bvt &src1, const bvt &src2)
{
  // unpack
  const unbiased_floatt unpacked1=unpack(src1);
  const unbiased_floatt unpacked2=unpack(src2);

  // zero-extend the fractions
  const bvt fraction1=bv_utils.zero_extension(unpacked1.fraction, unpacked1.fraction.size()*2);
  const bvt fraction2=bv_utils.zero_extension(unpacked2.fraction, unpacked2.fraction.size()*2);

  // multiply fractions
  unbiased_floatt result;
  result.fraction=bv_utils.unsigned_multiplier(fraction1, fraction2);

  // extend exponents to account for overflow
  // add two bits, as we do extra arithmetic on it later
  const bvt exponent1=bv_utils.sign_extension(unpacked1.exponent, unpacked1.exponent.size()+2);
  const bvt exponent2=bv_utils.sign_extension(unpacked2.exponent, unpacked2.exponent.size()+2);

  bvt added_exponent=bv_utils.add(exponent1, exponent2);

  // adjust: we are throwing in an extra fraction bit;
  // it has been extended above
  result.exponent=bv_utils.inc(added_exponent);

  // new sign
  result.sign=prop.lxor(unpacked1.sign, unpacked2.sign);

  // infinity?
  result.infinity=prop.lor(unpacked1.infinity, unpacked2.infinity);

  // NaN?
  {
    bvt NaN_cond;

    NaN_cond.push_back(is_NaN(src1));
    NaN_cond.push_back(is_NaN(src2));

    // infinity * 0 is NaN!
    NaN_cond.push_back(prop.land(unpacked1.zero, unpacked2.infinity));
    NaN_cond.push_back(prop.land(unpacked2.zero, unpacked1.infinity));

    result.NaN=prop.lor(NaN_cond);
  }

  return rounder(result);
}
Example #7
bvt float_utilst::from_signed_integer(const bvt &src)
{
  unbiased_floatt result;

  // we need to convert negative integers
  result.sign=sign_bit(src);

  result.fraction=bv_utils.absolute_value(src);

  // build an exponent (unbiased) -- this is signed!
  result.exponent=
    bv_utils.build_constant(
      src.size()-1,
      address_bits(src.size()-1).to_long()+1);

  return rounder(result);
}
Example #8
//here comes the magic
long privkeyexp(long phiofn){
  srand(time(NULL));
  int r = rand();
  // map a random x, 0 < x < 1, to a candidate pos, 1 < pos < phi of n
  float num = (float)r/(float)RAND_MAX; //random 0 < x < 1
  long possible_priv_key = (rounder(num * phiofn));
  // until priv_key_exp = something repeat this process
  long priv_key_exp = 1;
  while (priv_key_exp == 1){
    // if pos is prime and coprime with phiofn then it is a suitable key
    if(isprime(possible_priv_key)==1){
      if((gcd(possible_priv_key, phiofn)==1)){
	priv_key_exp = possible_priv_key;
      }
    }
    // add one, wrapping so the candidate stays smaller than phiofn
    if((possible_priv_key % (phiofn - 1)) == 0){
      possible_priv_key =  (possible_priv_key + 2) % phiofn;
    }
    possible_priv_key = (possible_priv_key + 1) % phiofn;
  }
  return priv_key_exp;
}
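// privkeyexp() relies on isprime() and gcd() helpers that are not shown
// (and on <stdlib.h> and <time.h> for srand/rand/time). Minimal sketches
// of what they presumably look like -- trial division and Euclid's
// algorithm; the originals may differ:
long gcd(long a, long b){
  while (b != 0){
    long t = b;
    b = a % b;
    a = t;
  }
  return a;
}

int isprime(long n){
  if (n < 2) return 0;
  for (long d = 2; d * d <= n; d++)
    if (n % d == 0) return 0;
  return 1;
}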
Example #9
void
fdct_mm32( short *blk )
{
    static __int64 xt70[2]; // xt7xt6xt5xt4, xt3xt2xt1xt0
    static int a0, a1, a2, a3, b0, b1, b2, b3;
    static short *sptr, *optr, *tf; // tf = table_ptr
    static short *xt = (short *) &xt70[0];
    static int j;
    
    const static short _tg_1_16   = 13036;  //tan(1*pi/16) * (1<<16) + 0.5
    const static short _tg_2_16   = 27146;  //tan(2*pi/16) * (1<<16) + 0.5
    const static short _tg_3_16   =-21746;  //tan(3*pi/16) * (1<<16) + 0.5 - (1<<16)
    const static short _cos_4_16  =-19195;  //cos(4*pi/16) * (1<<16) + 0.5 - (1<<16)
    const static short _ocos_4_16 = 23170;  //cos(4*pi/16) * (1<<15) + 0.5
    const static short _one_corr  =     1;  //rounding compensation

    static short t0, t1, t2, t3, t4, t5, t6, t7;
    static short tp03, tm03, tp12, tm12, tp65, tm65;
    static short tp465, tm465, tp765, tm765;

    __asm {

    ////////////////////////////////////////////////////////////////////////
    //
    // The high-level pseudocode for the fdct_mm32() routine :
    //
    // fdct_mm32()
    // {
    //    forward_dct_col03(); // dct_column transform on cols 0-3
    //    forward_dct_col47(); // dct_column transform on cols 4-7
    //    for ( j = 0; j < 8; j=j+1 )
    //      forward_dct_row1(j); // dct_row transform on row #j
    // }

	mov INP, dword ptr [blk];		;// input data is row 0 of blk[]
    ;// transform the left half of the matrix (4 columns)

    lea TABLEF, dword ptr [tg_all_16];
    mov OUT, INP;

//	lea round_frw_col, dword ptr [r_frw_col]
    // for ( i = 0; i < 2; i = i + 1)
    // the for-loop is executed twice.  We are better off unrolling the 
    // loop to avoid branch misprediction.
	mmx32_fdct_col03: // begin processing columns 0-3
    movq mm0, [x1] ; 0 ; x1
     ;//

    movq mm1, [x6] ; 1 ; x6
     movq mm2, mm0 ; 2 ; x1

    movq mm3, [x2] ; 3 ; x2
     paddsw mm0, mm1 ; t1 = x[1] + x[6]

    movq mm4, [x5] ; 4 ; x5
     psllw mm0, SHIFT_FRW_COL ; t1

    movq mm5, [x0] ; 5 ; x0
     paddsw mm4, mm3 ; t2 = x[2] + x[5]

    paddsw mm5, [x7] ; t0 = x[0] + x[7]
     psllw mm4, SHIFT_FRW_COL ; t2

    movq mm6, mm0 ; 6 ; t1
     psubsw mm2, mm1 ; 1 ; t6 = x[1] - x[6]

    movq mm1, qword ptr [tg_2_16] ; 1 ; tg_2_16
     psubsw mm0, mm4 ; tm12 = t1 - t2

    movq mm7, [x3] ; 7 ; x3
     pmulhw mm1, mm0 ; tm12*tg_2_16

    paddsw mm7, [x4] ; t3 = x[3] + x[4]
     psllw mm5, SHIFT_FRW_COL ; t0

    paddsw mm6, mm4 ; 4 ; tp12 = t1 + t2
     psllw mm7, SHIFT_FRW_COL ; t3

    movq mm4, mm5 ; 4 ; t0
     psubsw mm5, mm7 ; tm03 = t0 - t3

    paddsw mm1, mm5 ; y2 = tm03 + tm12*tg_2_16
     paddsw mm4, mm7 ; 7 ; tp03 = t0 + t3

    por mm1, qword ptr one_corr ; correction y2 +0.5
     psllw mm2, SHIFT_FRW_COL+1 ; t6

    pmulhw mm5, qword ptr [tg_2_16] ; tm03*tg_2_16
     movq mm7, mm4 ; 7 ; tp03

    psubsw mm3, [x5] ; t5 = x[2] - x[5]
     psubsw mm4, mm6 ; y4 = tp03 - tp12

    movq [y2], mm1 ; 1 ; save y2
     paddsw mm7, mm6 ; 6 ; y0 = tp03 + tp12
     
    movq mm1, [x3] ; 1 ; x3
     psllw mm3, SHIFT_FRW_COL+1 ; t5

    psubsw mm1, [x4] ; t4 = x[3] - x[4]
     movq mm6, mm2 ; 6 ; t6
    
    movq [y4], mm4 ; 4 ; save y4
     paddsw mm2, mm3 ; t6 + t5

    pmulhw mm2, qword ptr [ocos_4_16] ; tp65 = (t6 + t5)*cos_4_16
     psubsw mm6, mm3 ; 3 ; t6 - t5

    pmulhw mm6, qword ptr [ocos_4_16] ; tm65 = (t6 - t5)*cos_4_16
     psubsw mm5, mm0 ; 0 ; y6 = tm03*tg_2_16 - tm12

    por mm5, qword ptr one_corr ; correction y6 +0.5
     psllw mm1, SHIFT_FRW_COL ; t4

    por mm2, qword ptr one_corr ; correction tp65 +0.5
     movq mm4, mm1 ; 4 ; t4

    movq mm3, [x0] ; 3 ; x0
     paddsw mm1, mm6 ; tp465 = t4 + tm65

    psubsw mm3, [x7] ; t7 = x[0] - x[7]
     psubsw mm4, mm6 ; 6 ; tm465 = t4 - tm65

    movq mm0, qword ptr [tg_1_16] ; 0 ; tg_1_16
     psllw mm3, SHIFT_FRW_COL ; t7

    movq mm6, qword ptr [tg_3_16] ; 6 ; tg_3_16
     pmulhw mm0, mm1 ; tp465*tg_1_16

    movq [y0], mm7 ; 7 ; save y0
     pmulhw mm6, mm4 ; tm465*tg_3_16

    movq [y6], mm5 ; 5 ; save y6
     movq mm7, mm3 ; 7 ; t7

    movq mm5, qword ptr [tg_3_16] ; 5 ; tg_3_16
     psubsw mm7, mm2 ; tm765 = t7 - tp65

    paddsw mm3, mm2 ; 2 ; tp765 = t7 + tp65
     pmulhw mm5, mm7 ; tm765*tg_3_16

    paddsw mm0, mm3 ; y1 = tp765 + tp465*tg_1_16
     paddsw mm6, mm4 ; tm465*tg_3_16

    pmulhw mm3, qword ptr [tg_1_16] ; tp765*tg_1_16
     ;//

    por mm0, qword ptr one_corr ; correction y1 +0.5
     paddsw mm5, mm7 ; tm765*tg_3_16

    psubsw mm7, mm6 ; 6 ; y3 = tm765 - tm465*tg_3_16
     add INP, 0x08   ; // increment pointer

    movq [y1], mm0 ; 0 ; save y1
     paddsw mm5, mm4 ; 4 ; y5 = tm765*tg_3_16 + tm465

    movq [y3], mm7 ; 7 ; save y3
     psubsw mm3, mm1 ; 1 ; y7 = tp765*tg_1_16 - tp465

    movq [y5], mm5 ; 5 ; save y5


  mmx32_fdct_col47: // begin processing columns 4-7
    movq mm0, [x1] ; 0 ; x1
     ;//
    movq [y7], mm3 ; 3 ; save y7 (columns 0-3)
     ;//

    movq mm1, [x6] ; 1 ; x6
     movq mm2, mm0 ; 2 ; x1

    movq mm3, [x2] ; 3 ; x2
     paddsw mm0, mm1 ; t1 = x[1] + x[6]

    movq mm4, [x5] ; 4 ; x5
     psllw mm0, SHIFT_FRW_COL ; t1

    movq mm5, [x0] ; 5 ; x0
     paddsw mm4, mm3 ; t2 = x[2] + x[5]

    paddsw mm5, [x7] ; t0 = x[0] + x[7]
     psllw mm4, SHIFT_FRW_COL ; t2

    movq mm6, mm0 ; 6 ; t1
     psubsw mm2, mm1 ; 1 ; t6 = x[1] - x[6]

    movq mm1, qword ptr [tg_2_16] ; 1 ; tg_2_16
     psubsw mm0, mm4 ; tm12 = t1 - t2

    movq mm7, [x3] ; 7 ; x3
     pmulhw mm1, mm0 ; tm12*tg_2_16

    paddsw mm7, [x4] ; t3 = x[3] + x[4]
     psllw mm5, SHIFT_FRW_COL ; t0

    paddsw mm6, mm4 ; 4 ; tp12 = t1 + t2
     psllw mm7, SHIFT_FRW_COL ; t3

    movq mm4, mm5 ; 4 ; t0
     psubsw mm5, mm7 ; tm03 = t0 - t3

    paddsw mm1, mm5 ; y2 = tm03 + tm12*tg_2_16
     paddsw mm4, mm7 ; 7 ; tp03 = t0 + t3

    por mm1, qword ptr one_corr ; correction y2 +0.5
     psllw mm2, SHIFT_FRW_COL+1 ; t6

    pmulhw mm5, qword ptr [tg_2_16] ; tm03*tg_2_16
     movq mm7, mm4 ; 7 ; tp03

    psubsw mm3, [x5] ; t5 = x[2] - x[5]
     psubsw mm4, mm6 ; y4 = tp03 - tp12

    movq [y2+8], mm1 ; 1 ; save y2
     paddsw mm7, mm6 ; 6 ; y0 = tp03 + tp12
     
    movq mm1, [x3] ; 1 ; x3
     psllw mm3, SHIFT_FRW_COL+1 ; t5

    psubsw mm1, [x4] ; t4 = x[3] - x[4]
     movq mm6, mm2 ; 6 ; t6
    
    movq [y4+8], mm4 ; 4 ; save y4
     paddsw mm2, mm3 ; t6 + t5

    pmulhw mm2, qword ptr [ocos_4_16] ; tp65 = (t6 + t5)*cos_4_16
     psubsw mm6, mm3 ; 3 ; t6 - t5

    pmulhw mm6, qword ptr [ocos_4_16] ; tm65 = (t6 - t5)*cos_4_16
     psubsw mm5, mm0 ; 0 ; y6 = tm03*tg_2_16 - tm12

    por mm5, qword ptr one_corr ; correction y6 +0.5
     psllw mm1, SHIFT_FRW_COL ; t4

    por mm2, qword ptr one_corr ; correction tp65 +0.5
     movq mm4, mm1 ; 4 ; t4

    movq mm3, [x0] ; 3 ; x0
     paddsw mm1, mm6 ; tp465 = t4 + tm65

    psubsw mm3, [x7] ; t7 = x[0] - x[7]
     psubsw mm4, mm6 ; 6 ; tm465 = t4 - tm65

    movq mm0, qword ptr [tg_1_16] ; 0 ; tg_1_16
     psllw mm3, SHIFT_FRW_COL ; t7

    movq mm6, qword ptr [tg_3_16] ; 6 ; tg_3_16
     pmulhw mm0, mm1 ; tp465*tg_1_16

    movq [y0+8], mm7 ; 7 ; save y0
     pmulhw mm6, mm4 ; tm465*tg_3_16

    movq [y6+8], mm5 ; 5 ; save y6
     movq mm7, mm3 ; 7 ; t7

    movq mm5, qword ptr [tg_3_16] ; 5 ; tg_3_16
     psubsw mm7, mm2 ; tm765 = t7 - tp65

    paddsw mm3, mm2 ; 2 ; tp765 = t7 + tp65
     pmulhw mm5, mm7 ; tm765*tg_3_16

    paddsw mm0, mm3 ; y1 = tp765 + tp465*tg_1_16
     paddsw mm6, mm4 ; tm465*tg_3_16

    pmulhw mm3, qword ptr [tg_1_16] ; tp765*tg_1_16
     ;//

    por mm0, qword ptr one_corr ; correction y1 +0.5
     paddsw mm5, mm7 ; tm765*tg_3_16

    psubsw mm7, mm6 ; 6 ; y3 = tm765 - tm465*tg_3_16
     ;//

    movq [y1+8], mm0 ; 0 ; save y1
     paddsw mm5, mm4 ; 4 ; y5 = tm765*tg_3_16 + tm465

    movq [y3+8], mm7 ; 7 ; save y3
     psubsw mm3, mm1 ; 1 ; y7 = tp765*tg_1_16 - tp465

    movq [y5+8], mm5 ; 5 ; save y5

    movq [y7+8], mm3 ; 3 ; save y7

  //   emms;
  //  }   // end of forward_dct_col07() 
    //  done with dct_col transform


  ////////////////////////////////////////////////////////////////////////
  //
  // fdct_mmx32_rows() --
  // the following subroutine performs the row-transform operation.
  //
  //  The output is stored into blk[], destroying the original
  //  source data.

  //  v1.01 - output is range-clipped to {-2048, +2047}

	mov INP, dword ptr [blk];		;// row 0
	 mov edi, 0x08;	//x = 8

	lea TABLE, dword ptr [tab_frw_01234567]; // row 0
	 mov OUT, INP;

	lea round_frw_row, dword ptr [r_frw_row];
	// for ( x = 8; x > 0; --x )  // transform 1 row per iteration

// ---------- loop begin
  lp_mmx_fdct_row1:
    movd mm5, dword ptr [INP+12]; // mm5 = 7 6

    punpcklwd mm5, dword ptr [INP+8] // mm5 =  5 7 4 6

    movq mm2, mm5;     // mm2 = 5 7 4 6
    psrlq mm5, 32;     // mm5 = _ _ 5 7

    movq mm0, qword ptr [INP]; // mm0 = 3 2 1 0
    punpcklwd mm5, mm2;// mm5 = 4 5 6 7

    movq mm1, mm0;     // mm1 = 3 2 1 0
    paddsw mm0, mm5;   // mm0 = [3+4, 2+5, 1+6, 0+7] (xt3, xt2, xt1, xt0)

    psubsw mm1, mm5;   // mm1 = [3-4, 2-5, 1-6, 0-7] (xt7, xt6, xt5, xt4)
    movq mm2, mm0;     // mm2 = [ xt3 xt2 xt1 xt0 ]

    //movq [ xt3xt2xt1xt0 ], mm0; // debugging
    //movq [ xt7xt6xt5xt4 ], mm1; // debugging

    punpcklwd mm0, mm1;// mm0 = [ xt5 xt1 xt4 xt0 ]

    punpckhwd mm2, mm1;// mm2 = [ xt7 xt3 xt6 xt2 ]
    movq mm1, mm2;     // mm1

    ;// shuffle bytes around

//  movq mm0, qword ptr [INP] ; 0 ; x3 x2 x1 x0

//  movq mm1, qword ptr [INP+8] ; 1 ; x7 x6 x5 x4
    movq mm2, mm0 ; 2 ; x3 x2 x1 x0

    movq mm3, qword ptr [TABLE] ; 3 ; w06 w04 w02 w00
    punpcklwd mm0, mm1 ; x5 x1 x4 x0

    movq mm5, mm0 ; 5 ; x5 x1 x4 x0
    punpckldq mm0, mm0 ; x4 x0 x4 x0  [ xt2 xt0 xt2 xt0 ]

    movq mm4, qword ptr [TABLE+8] ; 4 ; w07 w05 w03 w01
    punpckhwd mm2, mm1 ; 1 ; x7 x3 x6 x2

    pmaddwd mm3, mm0 ; x4*w06+x0*w04 x4*w02+x0*w00
    movq mm6, mm2 ; 6 ; x7 x3 x6 x2

    movq mm1, qword ptr [TABLE+32] ; 1 ; w22 w20 w18 w16
    punpckldq mm2, mm2 ; x6 x2 x6 x2  [ xt3 xt1 xt3 xt1 ]

    pmaddwd mm4, mm2 ; x6*w07+x2*w05 x6*w03+x2*w01
    punpckhdq mm5, mm5 ; x5 x1 x5 x1  [ xt6 xt4 xt6 xt4 ]

    pmaddwd mm0, qword ptr [TABLE+16] ; x4*w14+x0*w12 x4*w10+x0*w08
    punpckhdq mm6, mm6 ; x7 x3 x7 x3  [ xt7 xt5 xt7 xt5 ]

    movq mm7, qword ptr [TABLE+40] ; 7 ; w23 w21 w19 w17
    pmaddwd mm1, mm5 ; x5*w22+x1*w20 x5*w18+x1*w16
//mm3 = a1, a0 (y2,y0)
//mm1 = b1, b0 (y3,y1)
//mm0 = a3,a2  (y6,y4)
//mm5 = b3,b2  (y7,y5)

    paddd mm3, qword ptr [round_frw_row] ; +rounder (y2,y0)
    pmaddwd mm7, mm6 ; x7*w23+x3*w21 x7*w19+x3*w17

    pmaddwd mm2, qword ptr [TABLE+24] ; x6*w15+x2*w13 x6*w11+x2*w09
    paddd mm3, mm4 ; 4 ; a1=sum(even1) a0=sum(even0) // now ( y2, y0)

    pmaddwd mm5, qword ptr [TABLE+48] ; x5*w30+x1*w28 x5*w26+x1*w24
    ;//

    pmaddwd mm6, qword ptr [TABLE+56] ; x7*w31+x3*w29 x7*w27+x3*w25
    paddd mm1, mm7 ; 7 ; b1=sum(odd1) b0=sum(odd0) // now ( y3, y1)

    paddd mm0, qword ptr [round_frw_row] ; +rounder (y6,y4)
    psrad mm3, SHIFT_FRW_ROW_CLIP1 ;// (y2, y0) 

    paddd mm1, qword ptr [round_frw_row] ; +rounder (y3,y1)
    paddd mm0, mm2 ; 2 ; a3=sum(even3) a2=sum(even2) // now (y6, y4)

    paddd mm5, qword ptr [round_frw_row] ; +rounder (y7,y5)
    psrad mm1, SHIFT_FRW_ROW_CLIP1 ;// y1=a1+b1 y0=a0+b0

    paddd mm5, mm6 ; 6 ; b3=sum(odd3) b2=sum(odd2) // now ( y7, y5)
    psrad mm0, SHIFT_FRW_ROW_CLIP1 ;//y3=a3+b3 y2=a2+b2

    add OUT, 16;  // increment row-output address by 1 row
    psrad mm5, SHIFT_FRW_ROW_CLIP1;// y4=a3-b3 y5=a2-b2

    add INP, 16;  // increment row-address by 1 row
    packssdw mm3, mm0 ;// 0 ; y6 y4 y2 y0, saturate {-32768,+32767}

    packssdw mm1, mm5 ;// 3 ; y7 y5 y3 y1, saturate {-32768,+32767}
    movq mm6, mm3;    // mm0 = y6 y4 y2 y0

    punpcklwd mm3, mm1; // y3 y2 y1 y0
    sub edi, 0x01;   // i = i - 1
    
    punpckhwd mm6, mm1; // y7 y6 y5 y4
    add TABLE,64;  // increment to next table

    psraw mm3, SHIFT_FRW_ROW_CLIP2;  // descale [y3 y2 y1 y0] to {-2048,+2047}

    psraw mm6, SHIFT_FRW_ROW_CLIP2;  // descale [y7 y6 y5 y4] to {-2048,+2047}

    movq qword ptr [OUT-16], mm3 ; 1 ; save y3 y2 y1 y0

    movq qword ptr [OUT-8], mm6 ; 7 ; save y7 y6 y5 y4

    cmp edi, 0x00;
    jg lp_mmx_fdct_row1;  // begin fdct processing on next row
    emms;
    }
    
/*
    ////////////////////////////////////////////////////////////////////////
    //
    // DCT_8_FRW_COL(), equivalent c_code
    //
    // This C-code can be substituted for the same __asm block
    //
    // I found several *DISCREPANCIES* between the AP-922 C-listing 
    // and actual corrected code (shown below).
    //
    ////////////////////////////////////////////////////////////////////////

    sptr = (short *) blk;
    optr = (short *) blk; // output will overwrite source data!

    for ( j = 0; j < 8; j=j+1 ) // dct_frw_col1 loop
    {
      // read source-data column #j into xt[0..7]
      xt[7] = sptr[7*8];
      xt[6] = sptr[6*8];
      xt[5] = sptr[5*8];
      xt[4] = sptr[4*8];

      xt[3] = sptr[3*8];
      xt[2] = sptr[2*8];
      xt[1] = sptr[1*8];
      xt[0] = sptr[0*8];
 
#define  LEFT_SHIFT( x ) ((x) << (SHIFT_FRW_COL) )   // left shift
#define LEFT_SHIFT1( x ) ((x) << (SHIFT_FRW_COL+1) ) // left shift+1

      t0 = LEFT_SHIFT ( xt[0] + xt[7] );
      t1 = LEFT_SHIFT ( xt[1] + xt[6] );
      t2 = LEFT_SHIFT ( xt[2] + xt[5] );
      t3 = LEFT_SHIFT ( xt[3] + xt[4] );
      t4 = LEFT_SHIFT ( xt[3] - xt[4] );
      t5 = LEFT_SHIFT1( xt[2] - xt[5] ); // *** DISCREPANCY
      t6 = LEFT_SHIFT1( xt[1] - xt[6] ); // *** DISCREPANCY
      t7 = LEFT_SHIFT ( xt[0] - xt[7] );
 
      tp03 = t0 + t3;
      tm03 = t0 - t3;
      tp12 = t1 + t2;
      tm12 = t1 - t2;

// pmulhw/pmulhrw emulation macros 
#define X86_PMULHW( X ) ((short) ( ((int)X)>>16 ))   //Intel MMX
//#define X86_PMULHRW( X ) ((short) ( ( (((int)X)>>15)+1) >>1) ) //3DNow-MMX

      optr[0*8] = tp03 + tp12;
      optr[4*8] = tp03 - tp12;
      optr[2*8] = tm03 + X86_PMULHW( tm12 * _tg_2_16 );
      optr[2*8] = optr[2*8] | _one_corr; // one_correction
      optr[6*8] = X86_PMULHW( tm03 * _tg_2_16 ) - tm12;
      optr[6*8] = optr[6*8] | _one_corr; // one_correction
 
      tp65 = X86_PMULHW( (t6 +t5 )*_ocos_4_16 ); // *** DISCREPANCY
      tp65 = tp65 | _one_corr; // one_correction
      tm65 = X86_PMULHW( (t6 -t5 )*_ocos_4_16 ); // *** DISCREPANCY
  
      tp765 = t7 + tp65;
      tm765 = t7 - tp65;
      tp465 = t4 + tm65;
      tm465 = t4 - tm65;
 
      optr[1*8]  = tp765 + X86_PMULHW( tp465 * _tg_1_16 );
      optr[1*8]  = optr[1*8] | _one_corr; // one_correction
      optr[7*8] = X86_PMULHW( tp765 * _tg_1_16 ) - tp465;

//    optr[5*8] = X86_PMULHW( tm765 * _tg_3_16 ) + tm465; // *** DISCREPANCY
      // from pg8 of AP-922,  ICONST = [ const*(2^16) + 0.5 ]
      //                      const * x = PMULHW( ICONST,x ) + x
      // The constant "tg_3_16" > 0.5, thus _tg_3_16 is encoded as tg_3_16-1.0
      // optr[5*8] = X86_PMULHW( tm765 * ( tg_3_16 - 1.0 ) ) + tm465
      //           = [tm765*tg_3_16 - tm765] + tm465
      //
      // optr[5*8] + tm765 = [ tm765*tg_3_16 ] + tm465 + tm765
      //                   = [ tm765*tg_3_16 ] + tm465 <-- what we want
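      //
      // Worked numbers: tg_3_16 = tan(3*pi/16) ~= 0.6682, and
      // 0.6682*(2^16) + 0.5 ~= 43790 overflows a signed short, so it is
      // stored as 43790 - 65536 = -21746 (the _tg_3_16 value above);
      // PMULHW by it computes tm765*tg_3_16 - tm765, and the extra
      // "+ tm765" below restores the subtracted term.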

      optr[5*8] = X86_PMULHW( tm765 * _tg_3_16 ) + tm465 + tm765;

//    optr[3*8] = tm765 - X86_PMULHW( tm465 * _tg_3_16 ); // *** DISCREPANCY
      // The following operations must be performed in the shown order!
      // same trick (as shown for optr[5*8]) applies to optr[3*8]

      optr[3*8] = X86_PMULHW( tm465 * _tg_3_16 ) + tm465;
      optr[3*8] = tm765 - optr[3*8];

    ++sptr;   // increment source pointer +1 column
    ++optr;   // increment output pointer +1 column
  } // end for ( j = 0 ..., end of C_equivalent code for forward_dct_col_1
 
    ////////////////////////////////////////////////////////////////////////
    //
    // DCT8_FRW_ROW1(), equivalent c_code
    //
    // This C-code can be substituted for the same __asm block
    // For a derivation of this code, please read fdctmm32.doc
    ////////////////////////////////////////////////////////////////////////

    sptr = (short *) blk;
    optr = (short *) blk; // output will overwrite source data!
    tf = &tab_frw_01234567[ 0 ]; // fdct_row load table_forward_w

    for ( j = 0; j < 8; j=j+1 ) // dct_frw_row1 loop
    {
        // forward_dct_row input arithmetic + shuffle
        xt[3] = sptr[3] + sptr[4];
        xt[2] = sptr[2] + sptr[5];
        xt[1] = sptr[1] + sptr[6];
        xt[0] = sptr[0] + sptr[7];

        xt[7] = sptr[3] - sptr[4];
        xt[6] = sptr[2] - sptr[5];
        xt[5] = sptr[1] - sptr[6];
        xt[4] = sptr[0] - sptr[7];
  

        a3 = ( xt[0]*tf[10]+ xt[2]*tf[11]) + ( xt[1]*tf[14]+ xt[3]*tf[15]);
        a2 = ( xt[0]*tf[8] + xt[2]*tf[9] ) + ( xt[1]*tf[12]+ xt[3]*tf[13]);
        a1 = ( xt[0]*tf[2] + xt[2]*tf[3] ) + ( xt[1]*tf[6] + xt[3]*tf[7] );
        a0 = ( xt[0]*tf[0] + xt[2]*tf[1] ) + ( xt[1]*tf[4] + xt[3]*tf[5] );
        tf += 16;  // increment table pointer

        b3 = ( xt[4]*tf[10]+ xt[6]*tf[11]) + ( xt[5]*tf[14]+ xt[7]*tf[15]);
        b2 = ( xt[4]*tf[8] + xt[6]*tf[9] ) + ( xt[5]*tf[12]+ xt[7]*tf[13]);
        b1 = ( xt[4]*tf[2] + xt[6]*tf[3] ) + ( xt[5]*tf[6] + xt[7]*tf[7] );
        b0 = ( xt[4]*tf[0] + xt[6]*tf[1] ) + ( xt[5]*tf[4] + xt[7]*tf[5] );
        tf += 16;  // increment table pointer

        // apply rounding constants to scaled elements
        // note, in the MMX implementation, the shift&round is done *last.*
        // Here, the C-code applies the shifts 1st, then the clipping.
#define SHIFT_AND_ROUND_FRW_ROW( x )  ( ((x)+RND_FRW_ROW) >> SHIFT_FRW_ROW )

        a3 = SHIFT_AND_ROUND_FRW_ROW( a3 );
        a2 = SHIFT_AND_ROUND_FRW_ROW( a2 );
        a1 = SHIFT_AND_ROUND_FRW_ROW( a1 );
        a0 = SHIFT_AND_ROUND_FRW_ROW( a0 );

        b3 = SHIFT_AND_ROUND_FRW_ROW( b3 );
        b2 = SHIFT_AND_ROUND_FRW_ROW( b2 );
        b1 = SHIFT_AND_ROUND_FRW_ROW( b1 );
        b0 = SHIFT_AND_ROUND_FRW_ROW( b0 );

        // v1.01, clip output results to range {-2048, +2047}

        // In the MMX implementation, the "clipper" is integrated into
        // the shift&round operation (thanks to packssdw)
        a3 = (a3 > 2047) ?  2047 : a3; // ceiling @ +2047
        a2 = (a2 > 2047) ?  2047 : a2; // ceiling @ +2047
        a1 = (a1 > 2047) ?  2047 : a1; // ceiling @ +2047
        a0 = (a0 > 2047) ?  2047 : a0; // ceiling @ +2047
        b3 = (b3 > 2047) ?  2047 : b3; // ceiling @ +2047
        b2 = (b2 > 2047) ?  2047 : b2; // ceiling @ +2047
        b1 = (b1 > 2047) ?  2047 : b1; // ceiling @ +2047
        b0 = (b0 > 2047) ?  2047 : b0; // ceiling @ +2047

        a3 = (a3 <-2048) ? -2048 : a3; // floor   @ -2048
        a2 = (a2 <-2048) ? -2048 : a2; // floor   @ -2048
        a1 = (a1 <-2048) ? -2048 : a1; // floor   @ -2048
        a0 = (a0 <-2048) ? -2048 : a0; // floor   @ -2048
        b3 = (b3 <-2048) ? -2048 : b3; // floor   @ -2048
        b2 = (b2 <-2048) ? -2048 : b2; // floor   @ -2048
        b1 = (b1 <-2048) ? -2048 : b1; // floor   @ -2048
        b0 = (b0 <-2048) ? -2048 : b0; // floor   @ -2048


        // forward_dct_row, assign outputs
        optr[ 3 ] = b1;
        optr[ 2 ] = a1;
        optr[ 1 ] = b0;
        optr[ 0 ] = a0;

        optr[ 7 ] = b3;
        optr[ 6 ] = a3;
        optr[ 5 ] = b2;
        optr[ 4 ] = a2;

        sptr += 8;   // increment source pointer +1 row
        optr += 8;   // increment output pointer +1 row
    } // end for ( j = 0 ..., end of C_equivalent code for forward_dct_row_1
  */  
} // fdct_mm32( short *blk )
Example #10
bvt float_utilst::div(const bvt &src1, const bvt &src2)
{
  // unpack
  const unbiased_floatt unpacked1=unpack(src1);
  const unbiased_floatt unpacked2=unpack(src2);

  std::size_t div_width=unpacked1.fraction.size()*2+1;

  // pad fraction1 with zeros
  bvt fraction1=unpacked1.fraction;
  fraction1.reserve(div_width);
  while(fraction1.size()<div_width)
    fraction1.insert(fraction1.begin(), const_literal(false));

  // zero-extend fraction2
  const bvt fraction2=
    bv_utils.zero_extension(unpacked2.fraction, div_width);

  // divide fractions
  unbiased_floatt result;
  bvt rem;
  bv_utils.unsigned_divider(fraction1, fraction2, result.fraction, rem);

  // is there a remainder?
  literalt have_remainder=bv_utils.is_not_zero(rem);

  // we throw this into the result, as one additional bit,
  // to get the right rounding decision
  result.fraction.insert(
    result.fraction.begin(), have_remainder);
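  // (if the remainder is non-zero, the true quotient is strictly above
  // the truncated one, so a pattern that would otherwise be an exact
  // tie must round up rather than to nearest-even)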

  // We will subtract the exponents;
  // to account for overflow, we add a bit,
  // and a second bit for the adjustment by the extra fraction bits.
  const bvt exponent1=bv_utils.sign_extension(unpacked1.exponent, unpacked1.exponent.size()+2);
  const bvt exponent2=bv_utils.sign_extension(unpacked2.exponent, unpacked2.exponent.size()+2);

  // subtract exponents
  bvt added_exponent=bv_utils.sub(exponent1, exponent2);

  // adjust, as we have thrown in extra fraction bits
  result.exponent=bv_utils.add(
    added_exponent,
    bv_utils.build_constant(spec.f, added_exponent.size()));

  // new sign
  result.sign=prop.lxor(unpacked1.sign, unpacked2.sign);

  // Infinity? This happens when
  // 1) dividing a non-nan/non-zero by zero, or
  // 2) first operand is inf and second is non-nan and non-zero
  // In particular, inf/0=inf.
  result.infinity=
    prop.lor(
      prop.land(!unpacked1.zero,
      prop.land(!unpacked1.NaN,
                unpacked2.zero)),
      prop.land(unpacked1.infinity,
      prop.land(!unpacked2.NaN,
                !unpacked2.zero)));

  // NaN?
  result.NaN=prop.lor(unpacked1.NaN,
             prop.lor(unpacked2.NaN,
             prop.lor(prop.land(unpacked1.zero, unpacked2.zero),
                      prop.land(unpacked1.infinity, unpacked2.infinity))));

  // Division by infinity produces zero, unless we have NaN
  literalt force_zero=
    prop.land(!unpacked1.NaN, unpacked2.infinity);

  result.fraction=bv_utils.select(force_zero,
    bv_utils.zeros(result.fraction.size()), result.fraction);

  return rounder(result);
}
Example #11
bvt float_utilst::add_sub(
  const bvt &src1,
  const bvt &src2,
  bool subtract)
{
  unbiased_floatt unpacked1=unpack(src1);
  unbiased_floatt unpacked2=unpack(src2);

  // subtract?
  if(subtract)
    unpacked2.sign=!unpacked2.sign;

  // figure out which operand has the bigger exponent
  const bvt exponent_difference=subtract_exponents(unpacked1, unpacked2);
  literalt src2_bigger=exponent_difference.back();

  const bvt bigger_exponent=
    bv_utils.select(src2_bigger, unpacked2.exponent, unpacked1.exponent);

  // swap fractions as needed
  const bvt new_fraction1=
    bv_utils.select(src2_bigger, unpacked2.fraction, unpacked1.fraction);

  const bvt new_fraction2=
    bv_utils.select(src2_bigger, unpacked1.fraction, unpacked2.fraction);

  // compute distance
  const bvt distance=bv_utils.absolute_value(exponent_difference);

  // limit the distance: shifting more than f+3 bits is unnecessary
  const bvt limited_dist=limit_distance(distance, spec.f+3);

  // pad fractions with 3 zeros from below (guard, round, and sticky bits)
  const bvt fraction1_padded=bv_utils.concatenate(bv_utils.zeros(3), new_fraction1);
  const bvt fraction2_padded=bv_utils.concatenate(bv_utils.zeros(3), new_fraction2);

  // shift new_fraction2
  literalt sticky_bit;
  const bvt fraction1_shifted=fraction1_padded;
  const bvt fraction2_shifted=sticky_right_shift(
    fraction2_padded, limited_dist, sticky_bit);

  // sticky bit: or of the bits lost by the right-shift
  bvt fraction2_stickied=fraction2_shifted;
  fraction2_stickied[0]=prop.lor(fraction2_shifted[0], sticky_bit);

  // need to have two extra fraction bits for addition and rounding
  const bvt fraction1_ext=bv_utils.zero_extension(fraction1_shifted, fraction1_shifted.size()+2);
  const bvt fraction2_ext=bv_utils.zero_extension(fraction2_stickied, fraction2_stickied.size()+2);

  unbiased_floatt result;

  // now add/sub them
  literalt subtract_lit=prop.lxor(unpacked1.sign, unpacked2.sign);
  result.fraction=
    bv_utils.add_sub(fraction1_ext, fraction2_ext, subtract_lit);

  // sign of result
  literalt fraction_sign=result.fraction.back();
  result.fraction=bv_utils.absolute_value(result.fraction);

  result.exponent=bigger_exponent;

  // adjust the exponent for the fact that we added two bits to the fraction
  result.exponent=
    bv_utils.add(bv_utils.sign_extension(result.exponent, result.exponent.size()+1),
      bv_utils.build_constant(2, result.exponent.size()+1));

  // NaN?
  result.NaN=prop.lor(
      prop.land(prop.land(unpacked1.infinity, unpacked2.infinity),
                prop.lxor(unpacked1.sign, unpacked2.sign)),
      prop.lor(unpacked1.NaN, unpacked2.NaN));

  // infinity?
  result.infinity=prop.land(
      !result.NaN,
      prop.lor(unpacked1.infinity, unpacked2.infinity));

  // zero?
  // Note that:
  //  1. The zero flag isn't used apart from in divide and
  //     is only set on unpack
  //  2. Subnormals mean that addition or subtraction can't round to 0,
  //     thus we can perform this test now
  //  3. The rules for sign are different for zero
  result.zero = prop.land(
      !prop.lor(result.infinity, result.NaN),
      !prop.lor(result.fraction));


  // sign
  literalt add_sub_sign=
    prop.lxor(prop.lselect(src2_bigger, unpacked2.sign, unpacked1.sign),
              fraction_sign);

  literalt infinity_sign=
    prop.lselect(unpacked1.infinity, unpacked1.sign, unpacked2.sign);

  #if 1
  literalt zero_sign=
    prop.lselect(rounding_mode_bits.round_to_minus_inf,
                 prop.lor(unpacked1.sign, unpacked2.sign),
                 prop.land(unpacked1.sign, unpacked2.sign));
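  // e.g. (+0)+(-0) is -0 when rounding towards -infinity, +0 otherwise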

  result.sign=prop.lselect(
    result.infinity,
    infinity_sign,
    prop.lselect(result.zero,
                 zero_sign,
                 add_sub_sign));
  #else
  result.sign=prop.lselect(
    result.infinity,
    infinity_sign,
    add_sub_sign);
  #endif

  #if 0
  result.sign=const_literal(false);
  result.fraction.resize(spec.f+1, const_literal(true));
  result.exponent.resize(spec.e, const_literal(false));
  result.NaN=const_literal(false);
  result.infinity=const_literal(false);
  //for(std::size_t i=0; i<result.fraction.size(); i++)
  //  result.fraction[i]=const_literal(true);

  for(std::size_t i=0; i<result.fraction.size(); i++)
    result.fraction[i]=new_fraction2[i];

  return pack(bias(result));
  #endif

  return rounder(result);
}
Example #12
bvt float_utilst::conversion(
  const bvt &src,
  const ieee_float_spect &dest_spec)
{
  assert(src.size()==spec.width());

  #if 1
  // Catch the special case in which we extend,
  // e.g. single to double.
  // In this case, rounding can be avoided,
  // but a denormal number may become normal.
  // Be careful to exclude the difficult case
  // when denormalised numbers in the old format
  // can be converted to denormalised numbers in the
  // new format.  Note that this is rare and will only
  // happen with very non-standard formats.
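  //
  // Worked numbers for single -> double (e=8, f=23 -> e=11, f=52):
  //   sourceSmallestNormalExponent   = -((1<<7)-1)  = -127
  //   sourceSmallestDenormalExponent = -127 - 23    = -150
  //   destSmallestNormalExponent     = -((1<<10)-1) = -1023
  // -150 is not below -1023, so the no-rounding path below applies.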

  int sourceSmallestNormalExponent = -((1 << (spec.e - 1)) - 1);
  int sourceSmallestDenormalExponent =
    sourceSmallestNormalExponent - spec.f;

  // Using the fact that f doesn't include the hidden bit

  int destSmallestNormalExponent = -((1 << (dest_spec.e - 1)) - 1);

  if(dest_spec.e>=spec.e &&
     dest_spec.f>=spec.f &&
     !(sourceSmallestDenormalExponent < destSmallestNormalExponent))
  {
    unbiased_floatt unpacked_src=unpack(src);
    unbiased_floatt result;

    // the fraction gets zero-padded
    std::size_t padding=dest_spec.f-spec.f;
    result.fraction=
      bv_utils.concatenate(bv_utils.zeros(padding), unpacked_src.fraction);

    // the exponent gets sign-extended
    result.exponent=
      bv_utils.sign_extension(unpacked_src.exponent, dest_spec.e);

    // if the number was denormal and is normal in the new format,
    // normalise it!
    if(dest_spec.e > spec.e)
    {
      normalization_shift(result.fraction,result.exponent);
    }

    // the flags get copied
    result.sign=unpacked_src.sign;
    result.NaN=unpacked_src.NaN;
    result.infinity=unpacked_src.infinity;

    // no rounding needed!
    spec=dest_spec;
    return pack(bias(result));
  }
  else
  #endif
  {
    // we actually need to round
    unbiased_floatt result=unpack(src);
    spec=dest_spec;
    return rounder(result);
  }
}
Example #13
void drawLine(bsCurve cv) {
    float x1, x2, y1, y2, sp, sp2;
    float slope;
    float newx, newy;
    y1 = y2 = x1 = x2 = 0;
    Vector3 red(1,0,0);
    Vector3 green(0,1,0);
    Vector3 color;
    if (cv.ctype == 0)
        color = red;
    else
        color = green;
    for (int i = 0; i < cv.numctrlpts - 1; i++)
    {        
        x1 = cv.ctrlpts[i].x;
        y1 = cv.ctrlpts[i].y;
        x2 = cv.ctrlpts[i + 1].x;
        y2 = cv.ctrlpts[i + 1].y;

        
        
        if(x1 == x2) {
            if (y2 < y1) {
                sp = y1;
                y1 = y2;
                y2 = sp;
            }
            for (int i = y1; i < y2; i++) {
                drawPoint(x1, i, color);
            }
        }
        else if(y1 == y2) {
            if (x2 < x1) {
                sp = x1;
                x1 = x2;
                x2 = sp;
            }
            for (int i = x1; i < x2; i++) {
                drawPoint(i, y1, color);
            }
        }
        else if ((y2-y1)/(x2-x1) <= 1 && (y2-y1)/(x2-x1) >= -1) {
            if(x2 < x1) {
                sp = x1;
                sp2 = y1;
                x1 = x2;
                y1 = y2;
                x2 = sp;
                y2 = sp2;
            }
            slope = (y2-y1)/(x2-x1);
            newy = y1;
            for (int i = x1+1; i < x2; i++) {
                newy += slope;
                drawPoint(i, rounder(newy), color);
            }
        }
        else if ((float)(y2-y1)/(x2-x1) > 1 || (float)(y2-y1)/(x2-x1) < -1) {
            if(y2 < y1) {
                sp = x1;
                sp2 = y1;
                x1 = x2;
                y1 = y2;
                x2 = sp;
                y2 = sp2;
            }
            slope = (x2-x1)/(y2-y1);
            newx = x1;
            for (int i = y1 + 1; i < y2; i++) {
                newx += slope;
                drawPoint(rounder(newx), i, color);
            }
        }
    }
}
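// drawLine() depends on a nearest-integer rounder() that is not shown;
// a minimal sketch (hypothetical -- the original may round differently):
#include <cmath>

static int rounder(float v) {
    return static_cast<int>(std::lround(v));
}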