/* Kaiser window `beta' for a given stop-band attenuation `att' (dB) and
 * transition bandwidth `tr_bw' (as a fraction of Nyquist).
 * For att >= 60 dB, beta comes from a piece-wise cubic fit in att, indexed
 * by log2(tr_bw / .0005) and linearly interpolated between adjacent fits;
 * below that, the classic Kaiser closed-form expressions apply. */
static double lsx_kaiser_beta(double att, double tr_bw)
{
  if (att >= 60) {
    static const double coefs[][4] = {
      {-6.784957e-10,1.02856e-05,0.1087556,-0.8988365+.001},
      {-6.897885e-10,1.027433e-05,0.10876,-0.8994658+.002},
      {-1.000683e-09,1.030092e-05,0.1087677,-0.9007898+.003},
      {-3.654474e-10,1.040631e-05,0.1087085,-0.8977766+.006},
      {8.106988e-09,6.983091e-06,0.1091387,-0.9172048+.015},
      {9.519571e-09,7.272678e-06,0.1090068,-0.9140768+.025},
      {-5.626821e-09,1.342186e-05,0.1083999,-0.9065452+.05},
      {-9.965946e-08,5.073548e-05,0.1040967,-0.7672778+.085},
      {1.604808e-07,-5.856462e-05,0.1185998,-1.34824+.1},
      {-1.511964e-07,6.363034e-05,0.1064627,-0.9876665+.18},
    };
    double octaves = log(tr_bw / .0005) / log(2.);  /* table position */
    int i0 = range_limit((int)octaves, 0, (int)array_length(coefs) - 1);
    int i1 = range_limit(1 + (int)octaves, 0, (int)array_length(coefs) - 1);
    double const * lo = coefs[i0];
    double const * hi = coefs[i1];
    /* Evaluate both cubics in att (Horner form) and interpolate: */
    double beta0 = ((lo[0] * att + lo[1]) * att + lo[2]) * att + lo[3];
    double beta1 = ((hi[0] * att + hi[1]) * att + hi[2]) * att + hi[3];
    return beta0 + (beta1 - beta0) * (octaves - (int)octaves);
  }
  if (att > 50)
    return .1102 * (att - 8.7);
  if (att > 20.96)
    return .58417 * pow(att - 20.96, .4) + .07886 * (att - 20.96);
  return 0;
}
/* Open and configure the ALSA PCM device for capture ('r') or playback.
 * Negotiates access mode, sample format, rate, channel count and
 * buffer/period sizes (updating ft->signal with what was actually granted),
 * then allocates the transfer buffer.
 * Returns SOX_SUCCESS, or SOX_EOF after freeing any partially-acquired
 * resources.  The `_' macro wraps each ALSA call with error checking and
 * jumps to `error' on failure. */
static int setup(sox_format_t * ft)
{
  priv_t * p = (priv_t *)ft->priv;
  snd_pcm_hw_params_t * params = NULL;
  snd_pcm_format_mask_t * mask = NULL;
  snd_pcm_uframes_t min, max;
  unsigned n;
  int err;

  _(snd_pcm_open, (&p->pcm, ft->filename, ft->mode == 'r'?
        SND_PCM_STREAM_CAPTURE : SND_PCM_STREAM_PLAYBACK, 0));
  _(snd_pcm_hw_params_malloc, (&params)); /* fixed: was mojibake `¶ms' */
  _(snd_pcm_hw_params_any, (p->pcm, params));
#if SND_LIB_VERSION >= 0x010009               /* Disable alsa-lib resampling: */
  _(snd_pcm_hw_params_set_rate_resample, (p->pcm, params, 0));
#endif
  _(snd_pcm_hw_params_set_access, (p->pcm, params, SND_PCM_ACCESS_RW_INTERLEAVED));

  _(snd_pcm_format_mask_malloc, (&mask));                      /* Set format: */
  snd_pcm_hw_params_get_format_mask(params, mask);
  _(select_format, (&ft->encoding.encoding, &ft->encoding.bits_per_sample,
        mask, &p->format));
  _(snd_pcm_hw_params_set_format, (p->pcm, params, p->format));
  snd_pcm_format_mask_free(mask), mask = NULL;

  n = ft->signal.rate;                                           /* Set rate: */
  _(snd_pcm_hw_params_set_rate_near, (p->pcm, params, &n, 0));
  ft->signal.rate = n;
  n = ft->signal.channels;                                   /* Set channels: */
  _(snd_pcm_hw_params_set_channels_near, (p->pcm, params, &n));
  ft->signal.channels = n;

  /* Set buf_len well above sox_globals.bufsiz for no underrun: */
  p->buf_len = sox_globals.bufsiz * 8 / NBYTES / ft->signal.channels;
  _(snd_pcm_hw_params_get_buffer_size_min, (params, &min));
  _(snd_pcm_hw_params_get_buffer_size_max, (params, &max));
  p->period = range_limit(p->buf_len, min, max) / 8;
  p->buf_len = p->period * 8;      /* 8 periods per buffer */
  _(snd_pcm_hw_params_set_period_size_near, (p->pcm, params, &p->period, 0));
  _(snd_pcm_hw_params_set_buffer_size_near, (p->pcm, params, &p->buf_len));
  if (p->period * 2 > p->buf_len) {      /* need at least double-buffering */
    lsx_fail_errno(ft, SOX_EPERM, "buffer too small");
    goto error;
  }

  _(snd_pcm_hw_params, (p->pcm, params));                 /* Configure ALSA */
  snd_pcm_hw_params_free(params), params = NULL;
  _(snd_pcm_prepare, (p->pcm));
  p->buf_len *= ft->signal.channels;              /* No longer in `frames' */
  p->buf = lsx_malloc(p->buf_len * NBYTES);
  return SOX_SUCCESS;

error:
  if (mask) snd_pcm_format_mask_free(mask);
  if (params) snd_pcm_hw_params_free(params);
  return SOX_EOF;
}
/* Design a Kaiser-windowed low-pass FIR filter.
 * Fn: Nyquist frequency; Fc: cut-off (absolute; normalised to Fn here);
 * tbw: transition bandwidth (0 => default of 5% of Nyquist);
 * *num_taps: 0 => choose automatically from att & tbw (clamped, reported);
 * att: stop-band attenuation in dB (0 => 120); *beta: < 0 => derive from att;
 * round: snap the tap count so the cut-off lands between taps.
 * Returns the coefficient array (caller frees) or NULL when Fc is out of
 * (0, Fn); *num_taps is forced odd before the filter is generated. */
static double * lpf(double Fn, double Fc, double tbw, int * num_taps,
    double att, double * beta, sox_bool round)
{
  double tr_bw;

  if ((Fc /= Fn) <= 0 || Fc >= 1) {
    *num_taps = 0;
    return NULL;
  }
  att = att? att : 120;
  /* Half the normalised transition width; shared by the beta and the
   * tap-count calculations: */
  tr_bw = (tbw? tbw / Fn : .05) * .5;
  /* Fix: lsx_kaiser_beta takes the transition bandwidth as well as the
   * attenuation (see its definition); the original call passed only att. */
  *beta = *beta < 0? lsx_kaiser_beta(att, tr_bw) : *beta;
  if (!*num_taps) {
    int n = lsx_lpf_num_taps(att, tr_bw, 0);
    *num_taps = range_limit(n, 11, 32767);
    if (round)
      *num_taps = 1 + 2 * (int)((int)((*num_taps / 2) * Fc + .5) / Fc + .5);
    lsx_report("num taps = %i (from %i)", *num_taps, n);
  }
  return lsx_make_lpf(*num_taps |= 1, Fc, *beta, 1., sox_false);
}
/* Slow-but-accurate 8x8 inverse DCT, fixed-point integer arithmetic
 * (IJG-style LLM algorithm).  Dequantizes `inbuf' with `quantptr' and
 * performs the IDCT in place: pass 1 transforms columns into `workspace'
 * (results scaled up by sqrt(8) * 2**PASS1_BITS relative to a true IDCT),
 * pass 2 transforms rows from `workspace' back into `inbuf', descaling and
 * range-limiting the final samples. */
void jpeg_idct_islow (SHORT *inbuf, WORD *quantptr)
{
  LONG tmp0, tmp1, tmp2, tmp3;
  LONG tmp10, tmp11, tmp12, tmp13;
  LONG z1, z2, z3, z4, z5;
  BYTE ctr;
  SHORT *inptr = inbuf, *outptr;
  DCTELEM *wsptr;
  DCTELEM workspace[DCTSIZE2];  /* buffers data between passes */

  wsptr = workspace;

  /* Pass 1: process columns from input, store into work array. */
  /* Note results are scaled up by sqrt(8) compared to a true IDCT; */
  /* furthermore, we scale the results by 2**PASS1_BITS. */
  for (ctr = DCTSIZE; ctr > 0; ctr--) {
    /* Due to quantization, we will usually find that many of the input
     * coefficients are zero, especially the AC terms.  We can exploit this
     * by short-circuiting the IDCT calculation for any column in which all
     * the AC terms are zero.  In that case each output is equal to the
     * DC coefficient (with scale factor as needed).
     * With typical images and quantization tables, half or more of the
     * column DCT calculations can be simplified this way.
     */
    if (inptr[DCTSIZE*1] == 0 && inptr[DCTSIZE*2] == 0 &&
        inptr[DCTSIZE*3] == 0 && inptr[DCTSIZE*4] == 0 &&
        inptr[DCTSIZE*5] == 0 && inptr[DCTSIZE*6] == 0 &&
        inptr[DCTSIZE*7] == 0) {
      /* AC terms all zero */
      LONG dcval = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]) << PASS1_BITS;

      wsptr[DCTSIZE*0] = dcval;
      wsptr[DCTSIZE*1] = dcval;
      wsptr[DCTSIZE*2] = dcval;
      wsptr[DCTSIZE*3] = dcval;
      wsptr[DCTSIZE*4] = dcval;
      wsptr[DCTSIZE*5] = dcval;
      wsptr[DCTSIZE*6] = dcval;
      wsptr[DCTSIZE*7] = dcval;

      inptr++;   /* advance pointers to next column */
      quantptr++;
      wsptr++;
      continue;
    }

    /* Even part: reverse the even part of the forward DCT. */
    /* The rotator is sqrt(2)*c(-6). */

    z2 = DEQUANTIZE(inptr[DCTSIZE*2], quantptr[DCTSIZE*2]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*6], quantptr[DCTSIZE*6]);

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
    tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
    tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);

    z2 = DEQUANTIZE(inptr[DCTSIZE*0], quantptr[DCTSIZE*0]);
    z3 = DEQUANTIZE(inptr[DCTSIZE*4], quantptr[DCTSIZE*4]);

    tmp0 = (z2 + z3) << CONST_BITS;
    tmp1 = (z2 - z3) << CONST_BITS;

    tmp10 = tmp0 + tmp3;
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */
    tmp0 = DEQUANTIZE(inptr[DCTSIZE*7], quantptr[DCTSIZE*7]);
    tmp1 = DEQUANTIZE(inptr[DCTSIZE*5], quantptr[DCTSIZE*5]);
    tmp2 = DEQUANTIZE(inptr[DCTSIZE*3], quantptr[DCTSIZE*3]);
    tmp3 = DEQUANTIZE(inptr[DCTSIZE*1], quantptr[DCTSIZE*1]);

    z1 = tmp0 + tmp3;
    z2 = tmp1 + tmp2;
    z3 = tmp0 + tmp2;
    z4 = tmp1 + tmp3;
    z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */

    tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
    z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
    z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
    z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
    z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */

    z3 += z5;
    z4 += z5;

    tmp0 += z1 + z3;
    tmp1 += z2 + z4;
    tmp2 += z2 + z3;
    tmp3 += z1 + z4;

    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
    wsptr[DCTSIZE*0] = (LONG) DESCALE((tmp10 + tmp3), (CONST_BITS-PASS1_BITS));
    wsptr[DCTSIZE*7] = (LONG) DESCALE((tmp10 - tmp3), (CONST_BITS-PASS1_BITS));
    wsptr[DCTSIZE*1] = (LONG) DESCALE((tmp11 + tmp2), (CONST_BITS-PASS1_BITS));
    wsptr[DCTSIZE*6] = (LONG) DESCALE((tmp11 - tmp2), (CONST_BITS-PASS1_BITS));
    wsptr[DCTSIZE*2] = (LONG) DESCALE((tmp12 + tmp1), (CONST_BITS-PASS1_BITS));
    wsptr[DCTSIZE*5] = (LONG) DESCALE((tmp12 - tmp1), (CONST_BITS-PASS1_BITS));
    wsptr[DCTSIZE*3] = (LONG) DESCALE((tmp13 + tmp0), (CONST_BITS-PASS1_BITS));
    wsptr[DCTSIZE*4] = (LONG) DESCALE((tmp13 - tmp0), (CONST_BITS-PASS1_BITS));

    inptr++;   /* advance pointers to next column */
    quantptr++;
    wsptr++;
  }

  /* Pass 2: process rows from work array, store into output array. */
  /* Note that we must descale the results by a factor of 8 == 2**3, */
  /* and also undo the PASS1_BITS scaling. */
  wsptr = workspace;
  outptr = &inbuf[0];
  for (ctr = 0; ctr < DCTSIZE; ctr++) {
    /* Rows of zeroes can be exploited in the same way as we did with columns.
     * However, the column calculation has created many nonzero AC terms, so
     * the simplification applies less often (typically 5% to 10% of the time).
     * On machines with very fast multiplication, it's possible that the
     * test takes more time than it's worth.  In that case this section
     * may be commented out.
     */
#ifndef NO_ZERO_ROW_TEST
    if (wsptr[1] == 0 && wsptr[2] == 0 && wsptr[3] == 0 && wsptr[4] == 0 &&
        wsptr[5] == 0 && wsptr[6] == 0 && wsptr[7] == 0) {
      /* AC terms all zero */
      /* NOTE(review): range_limit is indexed as a table here but called as a
       * function in the final output stage below — confirm which form this
       * build actually provides. */
      JSAMPLE dcval = range_limit[(LONG) DESCALE((INT32) wsptr[0],
                                                 PASS1_BITS+3) & RANGE_MASK];

      outptr[0] = dcval;
      outptr[1] = dcval;
      outptr[2] = dcval;
      outptr[3] = dcval;
      outptr[4] = dcval;
      outptr[5] = dcval;
      outptr[6] = dcval;
      outptr[7] = dcval;

      /* Fix: the original `continue'd without advancing outptr, so the next
       * row's samples would overwrite this row (outptr is a running pointer
       * here, advanced only at the bottom of the loop). */
      outptr += DCTSIZE; /* advance pointer to next row */
      wsptr += DCTSIZE;  /* advance pointer to next row */
      continue;
    }
#endif

    /* Even part: reverse the even part of the forward DCT. */
    /* The rotator is sqrt(2)*c(-6). */

    z2 = (INT32) wsptr[2];
    z3 = (INT32) wsptr[6];

    z1 = MULTIPLY(z2 + z3, FIX_0_541196100);
    tmp2 = z1 + MULTIPLY(z3, - FIX_1_847759065);
    tmp3 = z1 + MULTIPLY(z2, FIX_0_765366865);

    tmp0 = ((INT32) wsptr[0] + (INT32) wsptr[4]) << CONST_BITS;
    tmp1 = ((INT32) wsptr[0] - (INT32) wsptr[4]) << CONST_BITS;

    tmp10 = tmp0 + tmp3;
    tmp13 = tmp0 - tmp3;
    tmp11 = tmp1 + tmp2;
    tmp12 = tmp1 - tmp2;

    /* Odd part per figure 8; the matrix is unitary and hence its
     * transpose is its inverse.  i0..i3 are y7,y5,y3,y1 respectively.
     */
    tmp0 = (INT32) wsptr[7];
    tmp1 = (INT32) wsptr[5];
    tmp2 = (INT32) wsptr[3];
    tmp3 = (INT32) wsptr[1];

    z1 = tmp0 + tmp3;
    z2 = tmp1 + tmp2;
    z3 = tmp0 + tmp2;
    z4 = tmp1 + tmp3;
    z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */

    tmp0 = MULTIPLY(tmp0, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
    tmp1 = MULTIPLY(tmp1, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
    tmp2 = MULTIPLY(tmp2, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
    tmp3 = MULTIPLY(tmp3, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
    z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
    z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
    z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
    z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */

    z3 += z5;
    z4 += z5;

    tmp0 += z1 + z3;
    tmp1 += z2 + z4;
    tmp2 += z2 + z3;
    tmp3 += z1 + z4;

    /* Final output stage: inputs are tmp10..tmp13, tmp0..tmp3 */
    outptr[0] = (SHORT)range_limit((LONG)
                                   DESCALE(tmp10 + tmp3,
                                           CONST_BITS+PASS1_BITS+3));
    outptr[7] = (SHORT)range_limit((LONG)
                                   DESCALE(tmp10 - tmp3,
                                           CONST_BITS+PASS1_BITS+3));
    outptr[1] = (SHORT)range_limit((LONG)
                                   DESCALE(tmp11 + tmp2,
                                           CONST_BITS+PASS1_BITS+3));
    outptr[6] = (SHORT)range_limit((LONG)
                                   DESCALE(tmp11 - tmp2,
                                           CONST_BITS+PASS1_BITS+3));
    outptr[2] = (SHORT)range_limit((LONG)
                                   DESCALE(tmp12 + tmp1,
                                           CONST_BITS+PASS1_BITS+3));
    outptr[5] = (SHORT)range_limit((LONG)
                                   DESCALE(tmp12 - tmp1,
                                           CONST_BITS+PASS1_BITS+3));
    outptr[3] = (SHORT)range_limit((LONG)
                                   DESCALE(tmp13 + tmp0,
                                           CONST_BITS+PASS1_BITS+3));
    outptr[4] = (SHORT)range_limit((LONG)
                                   DESCALE(tmp13 - tmp0,
                                           CONST_BITS+PASS1_BITS+3));

    outptr += DCTSIZE; /* advance pointer to next row */
    wsptr += DCTSIZE;  /* advance pointer to next row */
  }
}
/* Choose a DFT length for overlap-add filtering with `num_taps' taps:
 * roughly 4 x the nearest power of 2 (log2(num_taps) + 2.77, rounded down),
 * or half of that when the length would risk too many cache misses; the
 * result is clamped to at least 2**10 and capped by max(log2+1.77, 17). */
int _lsx_set_dft_length(int num_taps)
{
  int const min_log2 = 10 /* sox_globals.log2_dft_min_size */;
  double log2_taps = log((double)num_taps) / log(2.);
  int upper = max((int)(log2_taps + 1.77), 17);
  int log2_len = range_limit((int)(log2_taps + 2.77), min_log2, upper);
  return 1 << log2_len;
}