// Render a PLY mesh/cloud into a 16-bit depth image using a fixed VGA
// pinhole camera model and write it next to the input as <name>_depth.png.
// Usage: prog <input.ply>; returns 1 on missing argument or load failure.
int main(int argc, char *argv[]) {
	
	if (argc < 2) {
		printf("%s << ERROR! No input PLY (mesh/cloud) file has been provided.\n", __FUNCTION__);
		return 1;
	}

	// Mesh holds the raw PLY contents; the vertex cloud is extracted from it.
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
	pcl::PolygonMesh::Ptr mesh(new pcl::PolygonMesh());

	char input_ply_name[256];
	// snprintf (rather than sprintf) prevents overflow for long paths.
	snprintf(input_ply_name, sizeof(input_ply_name), "%s", argv[1]);

	if (pcl::io::loadPLYFile(input_ply_name, *mesh) < 0) {
		printf("%s << ERROR! Loading of file (%s) failed.\n", __FUNCTION__, input_ply_name);
		return 1;
	}
	fromPCLPointCloud2(mesh->cloud, *cloud);

	const int cols = 640, rows = 480;
	cv::Mat depth = cv::Mat::zeros(rows, cols, CV_16UC1);

	const double fx = 570.6, fy = 558.8; // focal lengths (pixels)
	const double cx = 320.7, cy = 243.0; // principal point (pixels)

	for (size_t xxx = 0; xxx < cloud->points.size(); xxx++) {

		// The projection divides by -z, so the camera is assumed to look down
		// the -z axis. z == 0 would divide by zero and z > 0 would flip the
		// projected sign, so points at or behind the camera plane are skipped.
		if (cloud->points[xxx].z >= 0.0f) {
			printf("%s << Dodgy point: (%d, %d) from (%f, %f, %f)\n", __FUNCTION__, -1, -1, cloud->points[xxx].x, cloud->points[xxx].y, cloud->points[xxx].z);
			continue;
		}

		int iii = float2int((cloud->points[xxx].x*fx)/float(-cloud->points[xxx].z) + cx);
		int jjj = float2int((-cloud->points[xxx].y*fy)/float(-cloud->points[xxx].z) + cy);
	
		if ((iii < cols) && (iii >= 0) && (jjj < rows) && (jjj >= 0)) {
			int depth_val = -cloud->points[xxx].z*DEFAULT_LEVELS_PER_MM;
			// Clamp to the 16-bit range so large distances cannot wrap around
			// when stored into the unsigned short image.
			if (depth_val > 65535) depth_val = 65535;
			if (depth_val < 0) depth_val = 0;
			// Only fill pixel if current point lies closer to the camera than a previous one.
			if ((depth.at<unsigned short>(jjj,iii) == 0) || (depth_val < depth.at<unsigned short>(jjj,iii))) {
				depth.at<unsigned short>(jjj,iii) = (unsigned short)depth_val;
			}
			
		} else {
			printf("%s << Dodgy point: (%d, %d) from (%f, %f, %f)\n", __FUNCTION__, iii, jjj, cloud->points[xxx].x, cloud->points[xxx].y, cloud->points[xxx].z);
		}
	}

	char output_png_name[256];
	snprintf(output_png_name, sizeof(output_png_name), "%s_depth.png", input_ply_name);
	cv::imwrite(output_png_name, depth);
	
	return 0;
	
}
Пример #2
0
/* Precompute the fixed-point YUV->RGB matrix coefficients (K0..K4 scaled
 * by 2^16) and, when enabled, the luma offset / chroma midpoint for the
 * configured output bit depth. */
void init_YUVtoRGB(ImageParameters *p_Img, InputParameters *p_Inp)
{
  const float scale = 65536.0f; /* 2^16 fixed-point scale factor */

  p_Img->wka0 = float2int( scale * K0);
  p_Img->wka1 = float2int( scale * K1);
  p_Img->wka2 = float2int(-scale * K2);
  p_Img->wka3 = float2int(-scale * K3);
  p_Img->wka4 = float2int( scale * K4);

#ifdef YUV2RGB_YOFFSET
  p_Img->offset_y  = OFFSET_Y << (p_Inp->output.bit_depth[0] - 8);
  p_Img->offset_cr = 1 << (p_Inp->output.bit_depth[0] - 1);
#endif
}
Пример #3
0
/*
 * Float variant of the PGM writer: quantize the float table to an integer
 * table, write that as a PGM, then free the temporary table.
 */
void writePGMfloat ( char* filename, float** tabla, int size ){
	int** quantized = float2int(tabla, size);
	writePGMint(filename, quantized, size);
	freeTablaInt(quantized, size);
}
Пример #4
0
/* Load a FLOAT constant (instruction field 0) onto the top of the stack,
 * stored through the float->int bit reinterpretation helper. */
void Mepa::CRCF()
{
    ++s;
    float value = p->get_field(0);
    m[s] = float2int(&value);
}
Пример #5
0
/* Scale a float sample to the 16-bit range, saturate to [-32768, 32767],
 * and round to a signed 16-bit integer. */
static __inline celt_int16_t FLOAT2INT16(float x)
{
   float scaled = x*CELT_SIG_SCALE;
   scaled = MIN32(MAX32(scaled, -32768), 32767);
   return (celt_int16_t)float2int(scaled);
}
Пример #6
0
// Update the player's ground-contact state and apply fall damage on landing.
// While airborne it tracks the jump apex and treats non-air blocks at the
// player's position as ground; climbable / liquid blocks reset the fall
// reference height so no damage accrues in them.
void cPlayer::SetTouchGround(bool a_bTouchGround)
{
	// Record the state reported by the caller; may be overridden below.
	m_bTouchGround = a_bTouchGround;

	if (!m_bTouchGround)
	{
		// Airborne: remember the highest point reached during the jump.
		if (GetPosY() > m_LastJumpHeight)
		{
			m_LastJumpHeight = (float)GetPosY();
		}
		cWorld * World = GetWorld();
		if ((GetPosY() >= 0) && (GetPosY() < 256))
		{
			// Any non-air block at the player's position counts as ground,
			// even though the caller claimed the player is airborne.
			BLOCKTYPE BlockType = World->GetBlock( float2int(GetPosX()), float2int(GetPosY()), float2int(GetPosZ()) );
			if (BlockType != E_BLOCK_AIR)
			{
				// LOGD("TouchGround set to true by server");
				m_bTouchGround = true;
			}
			if (
				(BlockType == E_BLOCK_WATER) ||
				(BlockType == E_BLOCK_STATIONARY_WATER) ||
				(BlockType == E_BLOCK_LADDER) ||
				(BlockType == E_BLOCK_VINES)
			)
			{
				// Liquid / climbable blocks reset the fall reference height,
				// so no fall damage accumulates while inside them.
				// LOGD("Water / Ladder / Torch");
				m_LastGroundHeight = (float)GetPosY();
			}
		}
	}

	if (m_bTouchGround)
	{
		// Landed: damage is the distance fallen beyond a 3-block grace,
		// plus one if the jump apex was above the take-off height.
		float Dist = (float)(m_LastGroundHeight - floor(GetPosY()));
		int Damage = (int)(Dist - 3.f);
		if(m_LastJumpHeight > m_LastGroundHeight) Damage++;
		m_LastJumpHeight = (float)GetPosY();
		if (Damage > 0)
		{
			TakeDamage(dtFalling, NULL, Damage, Damage, 0);
		}

		m_LastGroundHeight = (float)GetPosY();
	}
}
Пример #7
0
/* Quantize n floats over [min,max] to 8-bit levels and store one unsigned
 * byte per sample into c. */
void float2char(float *f, int n, float min, float max, unsigned char *c) /*includefile*/
{
	int k;
	int *levels = (int *) malloc(sizeof(int)*n);
	float2int(f,n,8,min,max,levels);
	for (k=0;k<n;k++) c[k]=(unsigned char) levels[k];
	free(levels);
}
Пример #8
0
/* Read a FLOAT value from stdin (with a red prompt) and push its bit
 * pattern onto the top of the stack. */
void Mepa::LEIF()
{
    float value;
    ++s;
    cout << "\033[31m" "Entre com um numero: " "\033[0m";
    scanf("%f", &value);
    m[s] = float2int(&value);
}
Пример #9
0
/* Quantize n floats over [min,max] to 16-bit levels and store one unsigned
 * short per sample into s. */
void float2short(float *f, int n, float min, float max, unsigned short *s) /*includefile*/
{
	int k;
	int *levels = (int *) malloc(sizeof(int)*n);
	float2int(f,n,16,min,max,levels);
	for (k=0;k<n;k++) s[k]=(unsigned short) levels[k];
	free(levels);
}
Пример #10
0
/* Multiply the value below the stack top by the top value (FLOAT).
 * Pops one slot: m[s-1] = m[s-1] * m[s]; s decreases by one. */
void Mepa::MULF()
{
    /* Two operands are needed: m[s] and m[s-1]. m[s-1] is valid for any
     * s > 0; the previous assert((s-1) > 0) wrongly rejected s == 1,
     * inconsistent with the matching SOMF/SUBF operations. */
    assert(s > 0);
    float f1 = int2float(&m[s-1]);
    float f2 = int2float(&m[s]);
    float prod = f1 * f2;
    m[s-1] = float2int(&prod);
    s = s - 1;
}
Пример #11
0
/* Add the top two stack values (FLOAT): the sum replaces m[s-1] and the
 * stack shrinks by one slot. */
void Mepa::SOMF()
{
    assert(s > 0);
    float rhs = int2float(&m[s]);
    float lhs = int2float(&m[s-1]);
    float total = lhs + rhs;
    m[s-1] = float2int(&total);
    --s;
}
Пример #12
0
/* Quantize n floats over [min,max], zero the output buffer, then pack each
 * complete group of eight quantized values into one output byte. */
void float2one(float *f, int n, float min, float max, unsigned char *c) /*includefile*/
{
	int k;
	int *quant = (int *) malloc(sizeof(int)*n);
	float2int(f,n,4,min,max,quant);
	for (k=0;k<n;k++) c[k]=0;
	for (k=0;k+7<n;k+=8) c[k/8]=charof8ints(quant+k);
	free(quant);
}
Пример #13
0
/* Quantize n floats over [min,max] to 2-bit levels, zero the output buffer,
 * then pack each complete group of four values into one output byte. */
void float2two(float *f, int n, float min, float max, unsigned char *c) /*includefile*/
{
	int k;
	int *quant = (int *) malloc(sizeof(int)*n);
	float2int(f,n,2,min,max,quant);
	for (k=0;k<n;k++) c[k]=0;
	for (k=0;k+3<n;k+=4) c[k/4]=charof4ints(quant[k],quant[k+1],quant[k+2],quant[k+3]);
	free(quant);
}
Пример #14
0
/* Quantize n floats over [min,max] to 4-bit levels, zero the output buffer,
 * then pack each complete pair of values into one output byte. */
void float2four(float *f, int n, float min, float max, unsigned char *c) /*includefile*/
{
	int k;
	int *quant = (int *) malloc(sizeof(int)*n);
	float2int(f,n,4,min,max,quant);
	for (k=0;k<n;k++) c[k]=0;
	for (k=0;k+1<n;k+=2) c[k/2]=charof2ints(quant[k],quant[k+1]);
	free(quant);
}
Пример #15
0
/* Subtract the stack-top value from the value below it (FLOAT): the
 * difference replaces m[s-1] and the stack shrinks by one slot. */
void Mepa::SUBF()
{
    assert(s > 0);
    float subtrahend = int2float(&m[s]);
    float minuend = int2float(&m[s-1]);
    float diff = minuend - subtrahend;
    m[s-1] = float2int(&diff);
    --s;
}
Пример #16
0
// Update the player's ground-contact state and apply fall damage on landing.
// While airborne, the jump apex is tracked and non-air blocks at the player's
// position count as ground; climbable / liquid blocks reset the fall
// reference height. Creative-mode players never take fall damage.
void cPlayer::SetTouchGround(bool a_bTouchGround)
{
    m_bTouchGround = a_bTouchGround;

    if (m_bTouchGround)
    {
        // Landed: damage is the distance fallen beyond a 3-block grace,
        // plus one if the jump apex was above the take-off height.
        float Dist = (float)(m_LastGroundHeight - floor(GetPosY()));
        int Damage = (int)(Dist - 3.f);
        if (m_LastJumpHeight > m_LastGroundHeight)
        {
            Damage++;
        }
        m_LastJumpHeight = (float)GetPosY();

        if ((Damage > 0) && (!IsGameModeCreative()))
        {
            TakeDamage(dtFalling, NULL, Damage, Damage, 0);
        }

        m_LastGroundHeight = (float)GetPosY();
    }
    else
    {
        // Airborne: remember the highest point reached during the jump.
        if (GetPosY() > m_LastJumpHeight)
        {
            m_LastJumpHeight = (float)GetPosY();
        }
        cWorld * World = GetWorld();
        if ((GetPosY() >= 0) && (GetPosY() < cChunkDef::Height))
        {
            BLOCKTYPE BlockType = World->GetBlock(float2int(GetPosX()), float2int(GetPosY()), float2int(GetPosZ()));
            if (BlockType != E_BLOCK_AIR)
            {
                // Standing inside a solid block counts as touching ground.
                m_bTouchGround = true;
            }
            switch (BlockType)
            {
                case E_BLOCK_WATER:
                case E_BLOCK_STATIONARY_WATER:
                case E_BLOCK_LADDER:
                case E_BLOCK_VINES:
                {
                    // No fall damage accrues in liquids or on climbables.
                    m_LastGroundHeight = (float)GetPosY();
                    break;
                }
                default: break;
            }
        }
    }
}
/* This implements a 16 bit quantization with full triangular dither
   and IIR noise shaping. The noise shaping filters were designed by
   Sebastian Gesemann based on the LAME ATH curves with flattening
   to limit their peak gain to 20 dB.
   (Everyone elses' noise shaping filters are mildly crazy)
   The 48kHz version of this filter is just a warped version of the
   44.1kHz filter and probably could be improved by shifting the
   HF shelf up in frequency a little bit since 48k has a bit more
   room and being more conservative against bat-ears is probably
   more important than more noise suppression.
   This process can increase the peak level of the signal (in theory
   by the peak error of 1.5 +20 dB though this much is unobservable rare)
   so to avoid clipping the signal is attenuated by a couple thousandths
   of a dB. Initially the approach taken here was to only attenuate by
   the 99.9th percentile, making clipping rare but not impossible (like
   SoX) but the limited gain of the filter means that the worst case was
   only two thousandths of a dB more, so this just uses the worst case.
   The attenuation is probably also helpful to prevent clipping in the DAC
   reconstruction filters or downstream resampling in any case.*/
/* Quantize _n interleaved frames of _CC-channel float audio (_i) to 16-bit
 * shorts (_o) with triangular dither and IIR noise shaping; filter state and
 * the silence counter live in _ss. See the comment block above for design
 * rationale. */
static inline void shape_dither_toshort(shapestate *_ss, short *_o, float *_i, int _n, int _CC)
{
  /* Per-rate gains: slightly below 32768 to absorb dither/shaping overshoot. */
  const float gains[3]={32768.f-15.f,32768.f-15.f,32768.f-3.f};
  const float fcoef[3][8] =
  {
    {2.2374f, -.7339f, -.1251f, -.6033f, 0.9030f, .0116f, -.5853f, -.2571f}, /* 48.0kHz noise shaping filter sd=2.34*/
    {2.2061f, -.4706f, -.2534f, -.6214f, 1.0587f, .0676f, -.6054f, -.2738f}, /* 44.1kHz noise shaping filter sd=2.51*/
    {1.0000f, 0.0000f, 0.0000f, 0.0000f, 0.0000f,0.0000f, 0.0000f, 0.0000f}, /* lowpass noise shaping filter sd=0.65*/
  };
  int i;
  /* Select filter row: 0 = 48 kHz, 1 = 44.1 kHz, 2 = plain lowpass. */
  int rate=_ss->fs==44100?1:(_ss->fs==48000?0:2);
  float gain=gains[rate];
  float *b_buf;
  float *a_buf;
  int mute=_ss->mute;
  b_buf=_ss->b_buf;
  a_buf=_ss->a_buf;
  /*In order to avoid replacing digital silence with quiet dither noise
    we mute if the output has been silent for a while*/
  if(mute>64)
    memset(a_buf,0,sizeof(float)*_CC*4);
  for(i=0;i<_n;i++)
  {
    int c;
    int pos = i*_CC;
    int silent=1;
    for(c=0;c<_CC;c++)
    {
      int j, si;
      float r,s,err=0;
      silent&=_i[pos+c]==0;
      s=_i[pos+c]*gain;
      /* IIR error feedback: 4 feed-forward (b) and 4 feedback (a) taps per channel. */
      for(j=0;j<4;j++)
        err += fcoef[rate][j]*b_buf[c*4+j] - fcoef[rate][j+4]*a_buf[c*4+j];
      memmove(&a_buf[c*4+1],&a_buf[c*4],sizeof(float)*3);
      memmove(&b_buf[c*4+1],&b_buf[c*4],sizeof(float)*3);
      a_buf[c*4]=err;
      s = s - err;
      /* Triangular dither from two uniform draws; disabled while muted. */
      r=(float)fast_rand()*(1/(float)UINT_MAX) - (float)fast_rand()*(1/(float)UINT_MAX);
      if (mute>16)r=0;
      /*Clamp in float out of paranoia that the input will be >96 dBFS and wrap if the
        integer is clamped.*/
      _o[pos+c] = si = float2int(fmaxf(-32768,fminf(s + r,32767)));
      /*Including clipping in the noise shaping is generally disastrous:
        the futile effort to restore the clipped energy results in more clipping.
        However, small amounts-- at the level which could normally be created by
        dither and rounding-- are harmless and can even reduce clipping somewhat
        due to the clipping sometimes reducing the dither+rounding error.*/
      b_buf[c*4] = (mute>16)?0:fmaxf(-1.5f,fminf(si - s,1.5f));
    }
    mute++;
    if(!silent)mute=0;
  }
  /* Cap the silence counter so it cannot grow without bound. */
  _ss->mute=MINI(mute,960);
}
Пример #18
0
/*
 * Demo driver: for every command-line argument, parse it as a float and
 * print the plain C cast alongside float2int(), plus whether float2int
 * signalled overflow via errno == ERANGE.
 */
int main(int argc, char* argv[]) {
	for (int i=1; i < argc; i++) {
		const float value  = atof(argv[i]);
		const int   casted = (int)value;

		/* Reset errno before the conversion: atof (or a previous iteration)
		 * may have left a stale ERANGE, producing a false overflow report. */
		errno = 0;
		const int   converted = float2int(value);
		const int   overflow  = (errno == ERANGE);

		printf("input: '%s'\n", argv[i]);
		printf("\tvalue     : %0.9f\n", value);
		printf("\tcasted    : %d\n", casted);
		printf("\tfloat2int : %d (overflow=%s)\n", converted, overflow ? "yes" : "no");
	}
	return 0;
}
Пример #19
0
// Filter data through filter.
// Converts the incoming block 'data' from its current sample format
// (c->format) to the filter's configured output format (l->format):
// endianness is normalized to native first, then float<->int, signedness
// and bit-depth conversions are applied, and the target endianness is
// restored last. Returns the converted block (aliasing l's buffer), or
// NULL if the local buffer could not be resized.
static struct mp_audio* play(struct af_instance* af, struct mp_audio* data)
{
  struct mp_audio*   l   = af->data;	// Local data
  struct mp_audio*   c   = data;	// Current working data
  int 	       len = c->len/c->bps; // Length in samples of current audio block

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  // Change to cpu native endian format
  if((c->format&AF_FORMAT_END_MASK)!=AF_FORMAT_NE)
    endian(c->audio,c->audio,len,c->bps);

  // Conversion table
  if((c->format & AF_FORMAT_POINT_MASK) == AF_FORMAT_F) {
      // Float input: quantize into the local buffer, then flip to unsigned
      // if the output format requires it.
      float2int(c->audio, l->audio, len, l->bps);
      if((l->format&AF_FORMAT_SIGN_MASK) == AF_FORMAT_US)
	si2us(l->audio,len,l->bps);
  } else {
    // Input must be int

    // Change signed/unsigned
    if((c->format&AF_FORMAT_SIGN_MASK) != (l->format&AF_FORMAT_SIGN_MASK)){
      si2us(c->audio,len,c->bps);
    }
    // Convert to special formats
    switch(l->format&AF_FORMAT_POINT_MASK){
    case(AF_FORMAT_F):
      int2float(c->audio, l->audio, len, c->bps);
      break;
    default:
      // Change the number of bits
      if(c->bps != l->bps)
	change_bps(c->audio,l->audio,len,c->bps,l->bps);
      else
	memcpy(l->audio,c->audio,len*c->bps);
      break;
    }
  }

  // Switch from cpu native endian to the correct endianness
  if((l->format&AF_FORMAT_END_MASK)!=AF_FORMAT_NE)
    endian(l->audio,l->audio,len,l->bps);

  // Set output data: the returned block aliases the local buffer.
  c->audio  = l->audio;
  c->len    = len*l->bps;
  c->bps    = l->bps;
  c->format = l->format;
  return c;
}
Пример #20
0
/* Divide the value below the stack top by the top value (FLOAT).
 * Pops one slot: m[s-1] = m[s-1] / m[s]; s decreases by one.
 * Aborts on a (near-)zero denominator. */
void Mepa::DIVF()
{
    /* m[s-1] is valid for any s > 0; the previous assert((s-1) > 0)
     * wrongly rejected s == 1, unlike the matching SOMF/SUBF ops. */
    assert(s > 0);
    float fnum = int2float(&m[s-1]); /* numerator */
    float fden = int2float(&m[s]);   /* denominator */
    /* Reject (near-)zero denominators BEFORE dividing — the original code
     * computed the quotient first, performing the very division by zero
     * the guard was meant to prevent. */
    if( fabs(fden) < 1e-10 )
	{
        perror("Divisao por zero\n");
        abort();
    }
    float quoc = fnum / fden;
    m[s-1] = float2int(&quoc);
    s = s - 1;
}
Пример #21
0
/* JACK process callback: convert each input channel from float to 32-bit
 * integers in place, interleave all channels into s->tmp, and push the
 * interleaved frame onto the capture ring buffer. Always returns 0 so the
 * callback stays registered. */
static int jack_process_callback(jack_nframes_t nframes, void *arg)
{
        struct state_jack_capture *s = (struct state_jack_capture *) arg;
        int channel_size = nframes * sizeof(int32_t);
        int ch;

        for (ch = 0; ch < s->frame.ch_count; ++ch) {
                jack_default_audio_sample_t *in = jack_port_get_buffer(s->input_ports[ch], nframes);
                float2int((char *) in, (char *) in, channel_size);
                mux_channel(s->tmp, (char *) in, sizeof(int32_t), channel_size, s->frame.ch_count, ch);
        }

        ring_buffer_write(s->data, s->tmp, channel_size * s->frame.ch_count);

        return 0;
}
Пример #22
0
// Convert a block of 32-bit float samples to signed 16-bit integers,
// writing into the filter's local buffer and returning the block with its
// audio pointer, format and length updated accordingly.
static struct mp_audio* play_float_s16(struct af_instance* af, struct mp_audio* data)
{
  struct mp_audio* out = af->data;   // Local (converted) data
  struct mp_audio* in  = data;       // Incoming audio block
  int nsamples = in->len/4;          // 4 bytes per float sample

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  float2int(in->audio, out->audio, nsamples, 2);

  in->audio = out->audio;
  mp_audio_set_format(in, out->format);
  in->len = nsamples*2;              // 2 bytes per s16 sample

  return in;
}
Пример #23
0
// Convert a block of 32-bit float samples to signed 16-bit integers,
// writing into the filter's local buffer and returning the block with its
// audio pointer, byte length, bps and format updated accordingly.
static af_data_t* play_float_s16(struct af_instance_s* af, af_data_t* data)
{
  af_data_t* out = af->data;   // Local (converted) data
  af_data_t* in  = data;       // Incoming audio block
  int nsamples = in->len/4;    // 4 bytes per float sample

  if(AF_OK != RESIZE_LOCAL_BUFFER(af,data))
    return NULL;

  float2int(in->audio, out->audio, nsamples, 2);

  in->audio  = out->audio;
  in->len    = nsamples*2;     // 2 bytes per s16 sample
  in->bps    = 2;
  in->format = out->format;

  return in;
}
Пример #24
0
    // Point the member pointers idx / interpw at the precomputed entry for
    // direction N = (nx, ny, nz). The dominant axis of N selects one of six
    // faces; the entry offset is 3*(face + 6*(a + b*size)) — 3 values per
    // cell, 6 faces per grid position — into 'indices' and 'barycoords'.
    // NOTE(review): the grid coordinates are float2int of raw components;
    // presumably N is pre-scaled to [0, size) per axis — confirm at callers.
    inline void getInterpolation(vnl_vector_fixed<double, 3> N)
    {
        float nx = N[0];
        float ny = N[1];
        float nz = N[2];

        if (nz > 0.5)
        {
            // Face 0: +z dominant.
            int x = float2int(nx);
            int y = float2int(ny);
            int i = 3*6*(x+y*size);  // face 0 entry at (x, y)
            idx = indices+i;
            interpw = barycoords +i;
            return;
        }
        if (nz < -0.5)
        {
            // Face 1: -z dominant.
            int x = float2int(nx);
            int y = float2int(ny);
            int i = 3*(1+6*(x+y*size));  // face 1 entry at (x, y)
            idx = indices+i;
            interpw = barycoords +i;
            return;
        }
        if (nx > 0.5)
        {
            // Face 2: +x dominant.
            int z = float2int(nz);
            int y = float2int(ny);
            int i = 3*(2+6*(z+y*size));  // face 2 entry at (z, y)
            idx = indices+i;
            interpw = barycoords +i;
            return;
        }
        if (nx < -0.5)
        {
            // Face 3: -x dominant.
            int z = float2int(nz);
            int y = float2int(ny);
            int i = 3*(3+6*(z+y*size));  // face 3 entry at (z, y)
            idx = indices+i;
            interpw = barycoords +i;
            return;
        }
        if (ny > 0)
        {
            // Face 4: +y dominant.
            int x = float2int(nx);
            int z = float2int(nz);
            int i = 3*(4+6*(x+z*size));  // face 4 entry at (x, z)
            idx = indices+i;
            interpw = barycoords +i;
            return;
        }
        else
        {
            // Face 5: -y dominant.
            int x = float2int(nx);
            int z = float2int(nz);
            int i = 3*(5+6*(x+z*size));  // face 5 entry at (x, z)
            idx = indices+i;
            interpw = barycoords +i;
            return;
        }

    }
Пример #25
0
/* Negate the FLOAT value at the top of the stack in place. */
inline void Mepa::INVF()
{
    float value = int2float(&m[s]);
    value = -value;
    m[s] = float2int(&value);
}
Пример #26
0
// Update the formant filters for a new position along the vowel sequence.
// 'input' selects the position (stretched by sequencestretch and wrapped to
// [0,1)); the two neighbouring vowels p1/p2 are cross-faded with weight
// 'pos', and frequency/amplitude/Q are additionally smoothed over time by
// 'formantslowness'. On the first call the targets are applied immediately.
void FormantFilter::setpos(float input)
{
    int p1, p2;

    // Low-pass the control input so formants glide rather than jump.
    if (firsttime != 0)
        slowinput = input;
    else
        slowinput = slowinput * (1.0f - formantslowness) + input * formantslowness;

    // Skip the update entirely when input and Q have settled (saves work
    // and avoids needless filter-coefficient recomputation).
    if ((fabsf(oldinput-input) < 0.001f) && (fabsf(slowinput - input) < 0.001f) &&
            (fabsf(Qfactor - oldQfactor) < 0.001f))
    {
        //	oldinput=input; if this is set, it causes problems on very slow changes
        firsttime = 0;
        return;
    } else
        oldinput = input;

    // Wrap the stretched position into [0, 1).
    float pos = fmodf(input * sequencestretch, 1.0f);
    if (pos < 0.0f)
        pos += 1.0f;

    // p2 is the current sequence slot, p1 the previous one (wrapping).
    p2 = float2int(pos * sequencesize);
    p1 = p2 - 1;
    if (p1 < 0)
        p1 += sequencesize;

    // Fractional position between the two slots, clamped to [0, 1] and
    // shaped by vowelclearness (larger values sharpen the transition).
    pos = fmodf(pos * sequencesize, 1.0f);
    if (pos < 0.0f)
        pos = 0.0f;
    else if (pos > 1.0f)
        pos = 1.0f;
    pos = (atanf((pos * 2.0f - 1.0f) * vowelclearness) / atanf(vowelclearness) + 1.0f) * 0.5f;

    p1 = sequence[p1].nvowel;
    p2 = sequence[p2].nvowel;

    if (firsttime != 0)
    {
        // First call: jump straight to the interpolated targets.
        for (int i = 0; i < numformants; ++i)
        {
            currentformants[i].freq =
                formantpar[p1][i].freq * (1.0f - pos) + formantpar[p2][i].freq * pos;
            currentformants[i].amp =
                formantpar[p1][i].amp * (1.0f - pos) + formantpar[p2][i].amp * pos;
            currentformants[i].q =
                formantpar[p1][i].q * (1.0f - pos) + formantpar[p2][i].q * pos;
            formant[i]->setfreq_and_q(currentformants[i].freq,
                                      currentformants[i].q * Qfactor);
            oldformantamp[i] = currentformants[i].amp;
        }
        firsttime = 0;
    } else {
        // Subsequent calls: move toward the interpolated targets at a rate
        // set by formantslowness (one-pole smoothing per parameter).
        for (int i = 0; i < numformants; ++i)
        {
            currentformants[i].freq =
                currentformants[i].freq * (1.0f - formantslowness)
                + (formantpar[p1][i].freq
                    * (1.0f - pos) + formantpar[p2][i].freq * pos)
                * formantslowness;

            currentformants[i].amp =
                currentformants[i].amp * (1.0f - formantslowness)
                + (formantpar[p1][i].amp * (1.0f - pos)
                   + formantpar[p2][i].amp * pos) * formantslowness;

            currentformants[i].q =
                currentformants[i].q * (1.0f - formantslowness)
                    + (formantpar[p1][i].q * (1.0f - pos)
                        + formantpar[p2][i].q * pos) * formantslowness;

            formant[i]->setfreq_and_q(currentformants[i].freq,
                                      currentformants[i].q * Qfactor);
        }
    }
    oldQfactor = Qfactor;
}
Пример #27
0
static void tonality_analysis(TonalityAnalysisState *tonal, const CELTMode *celt_mode, const void *x, int len, int offset, int c1, int c2, int C, int lsb_depth, downmix_func downmix)
{
    int i, b;
    const kiss_fft_state *kfft;
    VARDECL(kiss_fft_cpx, in);
    VARDECL(kiss_fft_cpx, out);
    int N = 480, N2=240;
    float * OPUS_RESTRICT A = tonal->angle;
    float * OPUS_RESTRICT dA = tonal->d_angle;
    float * OPUS_RESTRICT d2A = tonal->d2_angle;
    VARDECL(float, tonality);
    VARDECL(float, noisiness);
    float band_tonality[NB_TBANDS];
    float logE[NB_TBANDS];
    float BFCC[8];
    float features[25];
    float frame_tonality;
    float max_frame_tonality;
    /*float tw_sum=0;*/
    float frame_noisiness;
    const float pi4 = (float)(M_PI*M_PI*M_PI*M_PI);
    float slope=0;
    float frame_stationarity;
    float relativeE;
    float frame_probs[2];
    float alpha, alphaE, alphaE2;
    float frame_loudness;
    float bandwidth_mask;
    int bandwidth=0;
    float maxE = 0;
    float noise_floor;
    int remaining;
    AnalysisInfo *info;
    float hp_ener;
    float tonality2[240];
    float midE[8];
    float spec_variability=0;
    float band_log2[NB_TBANDS+1];
    float leakage_from[NB_TBANDS+1];
    float leakage_to[NB_TBANDS+1];
    SAVE_STACK;

    alpha = 1.f/IMIN(10, 1+tonal->count);
    alphaE = 1.f/IMIN(25, 1+tonal->count);
    alphaE2 = 1.f/IMIN(500, 1+tonal->count);

    if (tonal->Fs == 48000)
    {
       /* len and offset are now at 24 kHz. */
       len/= 2;
       offset /= 2;
    } else if (tonal->Fs == 16000) {
       len = 3*len/2;
       offset = 3*offset/2;
    }

    if (tonal->count<4) {
       if (tonal->application == OPUS_APPLICATION_VOIP)
          tonal->music_prob = .1f;
       else
          tonal->music_prob = .625f;
    }
    kfft = celt_mode->mdct.kfft[0];
    if (tonal->count==0)
       tonal->mem_fill = 240;
    tonal->hp_ener_accum += (float)downmix_and_resample(downmix, x,
          &tonal->inmem[tonal->mem_fill], tonal->downmix_state,
          IMIN(len, ANALYSIS_BUF_SIZE-tonal->mem_fill), offset, c1, c2, C, tonal->Fs);
    if (tonal->mem_fill+len < ANALYSIS_BUF_SIZE)
    {
       tonal->mem_fill += len;
       /* Don't have enough to update the analysis */
       RESTORE_STACK;
       return;
    }
    hp_ener = tonal->hp_ener_accum;
    info = &tonal->info[tonal->write_pos++];
    if (tonal->write_pos>=DETECT_SIZE)
       tonal->write_pos-=DETECT_SIZE;

    ALLOC(in, 480, kiss_fft_cpx);
    ALLOC(out, 480, kiss_fft_cpx);
    ALLOC(tonality, 240, float);
    ALLOC(noisiness, 240, float);
    for (i=0;i<N2;i++)
    {
       float w = analysis_window[i];
       in[i].r = (kiss_fft_scalar)(w*tonal->inmem[i]);
       in[i].i = (kiss_fft_scalar)(w*tonal->inmem[N2+i]);
       in[N-i-1].r = (kiss_fft_scalar)(w*tonal->inmem[N-i-1]);
       in[N-i-1].i = (kiss_fft_scalar)(w*tonal->inmem[N+N2-i-1]);
    }
    OPUS_MOVE(tonal->inmem, tonal->inmem+ANALYSIS_BUF_SIZE-240, 240);
    remaining = len - (ANALYSIS_BUF_SIZE-tonal->mem_fill);
    tonal->hp_ener_accum = (float)downmix_and_resample(downmix, x,
          &tonal->inmem[240], tonal->downmix_state, remaining,
          offset+ANALYSIS_BUF_SIZE-tonal->mem_fill, c1, c2, C, tonal->Fs);
    tonal->mem_fill = 240 + remaining;
    opus_fft(kfft, in, out, tonal->arch);
#ifndef FIXED_POINT
    /* If there's any NaN on the input, the entire output will be NaN, so we only need to check one value. */
    if (celt_isnan(out[0].r))
    {
       info->valid = 0;
       RESTORE_STACK;
       return;
    }
#endif

    for (i=1;i<N2;i++)
    {
       float X1r, X2r, X1i, X2i;
       float angle, d_angle, d2_angle;
       float angle2, d_angle2, d2_angle2;
       float mod1, mod2, avg_mod;
       X1r = (float)out[i].r+out[N-i].r;
       X1i = (float)out[i].i-out[N-i].i;
       X2r = (float)out[i].i+out[N-i].i;
       X2i = (float)out[N-i].r-out[i].r;

       angle = (float)(.5f/M_PI)*fast_atan2f(X1i, X1r);
       d_angle = angle - A[i];
       d2_angle = d_angle - dA[i];

       angle2 = (float)(.5f/M_PI)*fast_atan2f(X2i, X2r);
       d_angle2 = angle2 - angle;
       d2_angle2 = d_angle2 - d_angle;

       mod1 = d2_angle - (float)float2int(d2_angle);
       noisiness[i] = ABS16(mod1);
       mod1 *= mod1;
       mod1 *= mod1;

       mod2 = d2_angle2 - (float)float2int(d2_angle2);
       noisiness[i] += ABS16(mod2);
       mod2 *= mod2;
       mod2 *= mod2;

       avg_mod = .25f*(d2A[i]+mod1+2*mod2);
       /* This introduces an extra delay of 2 frames in the detection. */
       tonality[i] = 1.f/(1.f+40.f*16.f*pi4*avg_mod)-.015f;
       /* No delay on this detection, but it's less reliable. */
       tonality2[i] = 1.f/(1.f+40.f*16.f*pi4*mod2)-.015f;

       A[i] = angle2;
       dA[i] = d_angle2;
       d2A[i] = mod2;
    }
    for (i=2;i<N2-1;i++)
    {
       float tt = MIN32(tonality2[i], MAX32(tonality2[i-1], tonality2[i+1]));
       tonality[i] = .9f*MAX32(tonality[i], tt-.1f);
    }
    frame_tonality = 0;
    max_frame_tonality = 0;
    /*tw_sum = 0;*/
    info->activity = 0;
    frame_noisiness = 0;
    frame_stationarity = 0;
    if (!tonal->count)
    {
       for (b=0;b<NB_TBANDS;b++)
       {
          tonal->lowE[b] = 1e10;
          tonal->highE[b] = -1e10;
       }
    }
    relativeE = 0;
    frame_loudness = 0;
    /* The energy of the very first band is special because of DC. */
    {
       float E = 0;
       float X1r, X2r;
       X1r = 2*(float)out[0].r;
       X2r = 2*(float)out[0].i;
       E = X1r*X1r + X2r*X2r;
       for (i=1;i<4;i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
          E += binE;
       }
       E = SCALE_ENER(E);
       band_log2[0] = .5f*1.442695f*(float)log(E+1e-10f);
    }
    for (b=0;b<NB_TBANDS;b++)
    {
       float E=0, tE=0, nE=0;
       float L1, L2;
       float stationarity;
       for (i=tbands[b];i<tbands[b+1];i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
          binE = SCALE_ENER(binE);
          E += binE;
          tE += binE*MAX32(0, tonality[i]);
          nE += binE*2.f*(.5f-noisiness[i]);
       }
#ifndef FIXED_POINT
       /* Check for extreme band energies that could cause NaNs later. */
       if (!(E<1e9f) || celt_isnan(E))
       {
          info->valid = 0;
          RESTORE_STACK;
          return;
       }
#endif

       tonal->E[tonal->E_count][b] = E;
       frame_noisiness += nE/(1e-15f+E);

       frame_loudness += (float)sqrt(E+1e-10f);
       logE[b] = (float)log(E+1e-10f);
       band_log2[b+1] = .5f*1.442695f*(float)log(E+1e-10f);
       tonal->logE[tonal->E_count][b] = logE[b];
       if (tonal->count==0)
          tonal->highE[b] = tonal->lowE[b] = logE[b];
       if (tonal->highE[b] > tonal->lowE[b] + 7.5)
       {
          if (tonal->highE[b] - logE[b] > logE[b] - tonal->lowE[b])
             tonal->highE[b] -= .01f;
          else
             tonal->lowE[b] += .01f;
       }
       if (logE[b] > tonal->highE[b])
       {
          tonal->highE[b] = logE[b];
          tonal->lowE[b] = MAX32(tonal->highE[b]-15, tonal->lowE[b]);
       } else if (logE[b] < tonal->lowE[b])
       {
          tonal->lowE[b] = logE[b];
          tonal->highE[b] = MIN32(tonal->lowE[b]+15, tonal->highE[b]);
       }
       relativeE += (logE[b]-tonal->lowE[b])/(1e-15f + (tonal->highE[b]-tonal->lowE[b]));

       L1=L2=0;
       for (i=0;i<NB_FRAMES;i++)
       {
          L1 += (float)sqrt(tonal->E[i][b]);
          L2 += tonal->E[i][b];
       }

       stationarity = MIN16(0.99f,L1/(float)sqrt(1e-15+NB_FRAMES*L2));
       stationarity *= stationarity;
       stationarity *= stationarity;
       frame_stationarity += stationarity;
       /*band_tonality[b] = tE/(1e-15+E)*/;
       band_tonality[b] = MAX16(tE/(1e-15f+E), stationarity*tonal->prev_band_tonality[b]);
#if 0
       if (b>=NB_TONAL_SKIP_BANDS)
       {
          frame_tonality += tweight[b]*band_tonality[b];
          tw_sum += tweight[b];
       }
#else
       frame_tonality += band_tonality[b];
       if (b>=NB_TBANDS-NB_TONAL_SKIP_BANDS)
          frame_tonality -= band_tonality[b-NB_TBANDS+NB_TONAL_SKIP_BANDS];
#endif
       max_frame_tonality = MAX16(max_frame_tonality, (1.f+.03f*(b-NB_TBANDS))*frame_tonality);
       slope += band_tonality[b]*(b-8);
       /*printf("%f %f ", band_tonality[b], stationarity);*/
       tonal->prev_band_tonality[b] = band_tonality[b];
    }

    leakage_from[0] = band_log2[0];
    leakage_to[0] = band_log2[0] - LEAKAGE_OFFSET;
    for (b=1;b<NB_TBANDS+1;b++)
    {
       float leak_slope = LEAKAGE_SLOPE*(tbands[b]-tbands[b-1])/4;
       leakage_from[b] = MIN16(leakage_from[b-1]+leak_slope, band_log2[b]);
       leakage_to[b] = MAX16(leakage_to[b-1]-leak_slope, band_log2[b]-LEAKAGE_OFFSET);
    }
    for (b=NB_TBANDS-2;b>=0;b--)
    {
       float leak_slope = LEAKAGE_SLOPE*(tbands[b+1]-tbands[b])/4;
       leakage_from[b] = MIN16(leakage_from[b+1]+leak_slope, leakage_from[b]);
       leakage_to[b] = MAX16(leakage_to[b+1]-leak_slope, leakage_to[b]);
    }
    celt_assert(NB_TBANDS+1 <= LEAK_BANDS);
    for (b=0;b<NB_TBANDS+1;b++)
    {
       /* leak_boost[] is made up of two terms. The first, based on leakage_to[],
          represents the boost needed to overcome the amount of analysis leakage
          cause in a weaker band b by louder neighbouring bands.
          The second, based on leakage_from[], applies to a loud band b for
          which the quantization noise causes synthesis leakage to the weaker
          neighbouring bands. */
       float boost = MAX16(0, leakage_to[b] - band_log2[b]) +
             MAX16(0, band_log2[b] - (leakage_from[b]+LEAKAGE_OFFSET));
       info->leak_boost[b] = IMIN(255, (int)floor(.5 + 64.f*boost));
    }
    for (;b<LEAK_BANDS;b++) info->leak_boost[b] = 0;

    for (i=0;i<NB_FRAMES;i++)
    {
       int j;
       float mindist = 1e15f;
       for (j=0;j<NB_FRAMES;j++)
       {
          int k;
          float dist=0;
          for (k=0;k<NB_TBANDS;k++)
          {
             float tmp;
             tmp = tonal->logE[i][k] - tonal->logE[j][k];
             dist += tmp*tmp;
          }
          if (j!=i)
             mindist = MIN32(mindist, dist);
       }
       spec_variability += mindist;
    }
    spec_variability = (float)sqrt(spec_variability/NB_FRAMES/NB_TBANDS);
    bandwidth_mask = 0;
    bandwidth = 0;
    maxE = 0;
    noise_floor = 5.7e-4f/(1<<(IMAX(0,lsb_depth-8)));
    noise_floor *= noise_floor;
    for (b=0;b<NB_TBANDS;b++)
    {
       float E=0;
       int band_start, band_end;
       /* Keep a margin of 300 Hz for aliasing */
       band_start = tbands[b];
       band_end = tbands[b+1];
       for (i=band_start;i<band_end;i++)
       {
          float binE = out[i].r*(float)out[i].r + out[N-i].r*(float)out[N-i].r
                     + out[i].i*(float)out[i].i + out[N-i].i*(float)out[N-i].i;
          E += binE;
       }
       E = SCALE_ENER(E);
       maxE = MAX32(maxE, E);
       tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
       E = MAX32(E, tonal->meanE[b]);
       /* Use a simple follower with 13 dB/Bark slope for spreading function */
       bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
       /* Consider the band "active" only if all these conditions are met:
          1) less than 10 dB below the simple follower
          2) less than 90 dB below the peak band (maximal masking possible considering
             both the ATH and the loudness-dependent slope of the spreading function)
          3) above the PCM quantization noise floor
          We use b+1 because the first CELT band isn't included in tbands[]
       */
       if (E>.1*bandwidth_mask && E*1e9f > maxE && E > noise_floor*(band_end-band_start))
          bandwidth = b+1;
    }
    /* Special case for the last two bands, for which we don't have spectrum but only
       the energy above 12 kHz. */
    if (tonal->Fs == 48000) {
       float ratio;
       float E = hp_ener*(1.f/(240*240));
       ratio = tonal->prev_bandwidth==20 ? 0.03f : 0.07f;
#ifdef FIXED_POINT
       /* silk_resampler_down2_hp() shifted right by an extra 8 bits. */
       E *= 256.f*(1.f/Q15ONE)*(1.f/Q15ONE);
#endif
       maxE = MAX32(maxE, E);
       tonal->meanE[b] = MAX32((1-alphaE2)*tonal->meanE[b], E);
       E = MAX32(E, tonal->meanE[b]);
       /* Use a simple follower with 13 dB/Bark slope for spreading function */
       bandwidth_mask = MAX32(.05f*bandwidth_mask, E);
       if (E>ratio*bandwidth_mask && E*1e9f > maxE && E > noise_floor*160)
          bandwidth = 20;
       /* This detector is unreliable, so if the bandwidth is close to SWB, assume it's FB. */
       if (bandwidth >= 17)
          bandwidth = 20;
    }
    if (tonal->count<=2)
       bandwidth = 20;
    frame_loudness = 20*(float)log10(frame_loudness);
    tonal->Etracker = MAX32(tonal->Etracker-.003f, frame_loudness);
    tonal->lowECount *= (1-alphaE);
    if (frame_loudness < tonal->Etracker-30)
       tonal->lowECount += alphaE;

    for (i=0;i<8;i++)
    {
       float sum=0;
       for (b=0;b<16;b++)
          sum += dct_table[i*16+b]*logE[b];
       BFCC[i] = sum;
    }
    for (i=0;i<8;i++)
    {
       float sum=0;
       for (b=0;b<16;b++)
          sum += dct_table[i*16+b]*.5f*(tonal->highE[b]+tonal->lowE[b]);
       midE[i] = sum;
    }

    frame_stationarity /= NB_TBANDS;
    relativeE /= NB_TBANDS;
    if (tonal->count<10)
       relativeE = .5f;
    frame_noisiness /= NB_TBANDS;
#if 1
    info->activity = frame_noisiness + (1-frame_noisiness)*relativeE;
#else
    info->activity = .5*(1+frame_noisiness-frame_stationarity);
#endif
    frame_tonality = (max_frame_tonality/(NB_TBANDS-NB_TONAL_SKIP_BANDS));
    frame_tonality = MAX16(frame_tonality, tonal->prev_tonality*.8f);
    tonal->prev_tonality = frame_tonality;

    slope /= 8*8;
    info->tonality_slope = slope;

    tonal->E_count = (tonal->E_count+1)%NB_FRAMES;
    tonal->count = IMIN(tonal->count+1, ANALYSIS_COUNT_MAX);
    info->tonality = frame_tonality;

    for (i=0;i<4;i++)
       features[i] = -0.12299f*(BFCC[i]+tonal->mem[i+24]) + 0.49195f*(tonal->mem[i]+tonal->mem[i+16]) + 0.69693f*tonal->mem[i+8] - 1.4349f*tonal->cmean[i];

    for (i=0;i<4;i++)
       tonal->cmean[i] = (1-alpha)*tonal->cmean[i] + alpha*BFCC[i];

    for (i=0;i<4;i++)
        features[4+i] = 0.63246f*(BFCC[i]-tonal->mem[i+24]) + 0.31623f*(tonal->mem[i]-tonal->mem[i+16]);
    for (i=0;i<3;i++)
        features[8+i] = 0.53452f*(BFCC[i]+tonal->mem[i+24]) - 0.26726f*(tonal->mem[i]+tonal->mem[i+16]) -0.53452f*tonal->mem[i+8];

    if (tonal->count > 5)
    {
       for (i=0;i<9;i++)
          tonal->std[i] = (1-alpha)*tonal->std[i] + alpha*features[i]*features[i];
    }
    for (i=0;i<4;i++)
       features[i] = BFCC[i]-midE[i];

    for (i=0;i<8;i++)
    {
       tonal->mem[i+24] = tonal->mem[i+16];
       tonal->mem[i+16] = tonal->mem[i+8];
       tonal->mem[i+8] = tonal->mem[i];
       tonal->mem[i] = BFCC[i];
    }
    for (i=0;i<9;i++)
       features[11+i] = (float)sqrt(tonal->std[i]) - std_feature_bias[i];
    features[18] = spec_variability - 0.78f;
    features[20] = info->tonality - 0.154723f;
    features[21] = info->activity - 0.724643f;
    features[22] = frame_stationarity - 0.743717f;
    features[23] = info->tonality_slope + 0.069216f;
    features[24] = tonal->lowECount - 0.067930f;

    mlp_process(&net, features, frame_probs);
    frame_probs[0] = .5f*(frame_probs[0]+1);
    /* Curve fitting between the MLP probability and the actual probability */
    /*frame_probs[0] = .01f + 1.21f*frame_probs[0]*frame_probs[0] - .23f*(float)pow(frame_probs[0], 10);*/
    /* Probability of active audio (as opposed to silence) */
    frame_probs[1] = .5f*frame_probs[1]+.5f;
    frame_probs[1] *= frame_probs[1];

    /* Probability of speech or music vs noise */
    info->activity_probability = frame_probs[1];

    /*printf("%f %f\n", frame_probs[0], frame_probs[1]);*/
    {
       /* Probability of state transition */
       float tau;
       /* Represents independence of the MLP probabilities, where
          beta=1 means fully independent. */
       float beta;
       /* Denormalized probability of speech (p0) and music (p1) after update */
       float p0, p1;
       /* Probabilities for "all speech" and "all music" */
       float s0, m0;
       /* Probability sum for renormalisation */
       float psum;
       /* Instantaneous probability of speech and music, with beta pre-applied. */
       float speech0;
       float music0;
       float p, q;

       /* More silence transitions for speech than for music. */
       tau = .001f*tonal->music_prob + .01f*(1-tonal->music_prob);
       p = MAX16(.05f,MIN16(.95f,frame_probs[1]));
       q = MAX16(.05f,MIN16(.95f,tonal->vad_prob));
       beta = .02f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
       /* p0 and p1 are the probabilities of speech and music at this frame
          using only information from previous frame and applying the
          state transition model */
       p0 = (1-tonal->vad_prob)*(1-tau) +    tonal->vad_prob *tau;
       p1 =    tonal->vad_prob *(1-tau) + (1-tonal->vad_prob)*tau;
       /* We apply the current probability with exponent beta to work around
          the fact that the probability estimates aren't independent. */
       p0 *= (float)pow(1-frame_probs[1], beta);
       p1 *= (float)pow(frame_probs[1], beta);
       /* Normalise the probabilities to get the Marokv probability of music. */
       tonal->vad_prob = p1/(p0+p1);
       info->vad_prob = tonal->vad_prob;
       /* Consider that silence has a 50-50 probability of being speech or music. */
       frame_probs[0] = tonal->vad_prob*frame_probs[0] + (1-tonal->vad_prob)*.5f;

       /* One transition every 3 minutes of active audio */
       tau = .0001f;
       /* Adapt beta based on how "unexpected" the new prob is */
       p = MAX16(.05f,MIN16(.95f,frame_probs[0]));
       q = MAX16(.05f,MIN16(.95f,tonal->music_prob));
       beta = .02f+.05f*ABS16(p-q)/(p*(1-q)+q*(1-p));
       /* p0 and p1 are the probabilities of speech and music at this frame
          using only information from previous frame and applying the
          state transition model */
       p0 = (1-tonal->music_prob)*(1-tau) +    tonal->music_prob *tau;
       p1 =    tonal->music_prob *(1-tau) + (1-tonal->music_prob)*tau;
       /* We apply the current probability with exponent beta to work around
          the fact that the probability estimates aren't independent. */
       p0 *= (float)pow(1-frame_probs[0], beta);
       p1 *= (float)pow(frame_probs[0], beta);
       /* Normalise the probabilities to get the Marokv probability of music. */
       tonal->music_prob = p1/(p0+p1);
       info->music_prob = tonal->music_prob;

       /*printf("%f %f %f %f\n", frame_probs[0], frame_probs[1], tonal->music_prob, tonal->vad_prob);*/
       /* This chunk of code deals with delayed decision. */
       psum=1e-20f;
       /* Instantaneous probability of speech and music, with beta pre-applied. */
       speech0 = (float)pow(1-frame_probs[0], beta);
       music0  = (float)pow(frame_probs[0], beta);
       if (tonal->count==1)
       {
          if (tonal->application == OPUS_APPLICATION_VOIP)
             tonal->pmusic[0] = .1f;
          else
             tonal->pmusic[0] = .625f;
          tonal->pspeech[0] = 1-tonal->pmusic[0];
       }
       /* Updated probability of having only speech (s0) or only music (m0),
          before considering the new observation. */
       s0 = tonal->pspeech[0] + tonal->pspeech[1];
       m0 = tonal->pmusic [0] + tonal->pmusic [1];
       /* Updates s0 and m0 with instantaneous probability. */
       tonal->pspeech[0] = s0*(1-tau)*speech0;
       tonal->pmusic [0] = m0*(1-tau)*music0;
       /* Propagate the transition probabilities */
       for (i=1;i<DETECT_SIZE-1;i++)
       {
          tonal->pspeech[i] = tonal->pspeech[i+1]*speech0;
          tonal->pmusic [i] = tonal->pmusic [i+1]*music0;
       }
       /* Probability that the latest frame is speech, when all the previous ones were music. */
       tonal->pspeech[DETECT_SIZE-1] = m0*tau*speech0;
       /* Probability that the latest frame is music, when all the previous ones were speech. */
       tonal->pmusic [DETECT_SIZE-1] = s0*tau*music0;

       /* Renormalise probabilities to 1 */
       for (i=0;i<DETECT_SIZE;i++)
          psum += tonal->pspeech[i] + tonal->pmusic[i];
       psum = 1.f/psum;
       for (i=0;i<DETECT_SIZE;i++)
       {
          tonal->pspeech[i] *= psum;
          tonal->pmusic [i] *= psum;
       }
       psum = tonal->pmusic[0];
       for (i=1;i<DETECT_SIZE;i++)
          psum += tonal->pspeech[i];

       /* Estimate our confidence in the speech/music decisions */
       if (frame_probs[1]>.75)
       {
          if (tonal->music_prob>.9)
          {
             float adapt;
             adapt = 1.f/(++tonal->music_confidence_count);
             tonal->music_confidence_count = IMIN(tonal->music_confidence_count, 500);
             tonal->music_confidence += adapt*MAX16(-.2f,frame_probs[0]-tonal->music_confidence);
          }
          if (tonal->music_prob<.1)
          {
             float adapt;
             adapt = 1.f/(++tonal->speech_confidence_count);
             tonal->speech_confidence_count = IMIN(tonal->speech_confidence_count, 500);
             tonal->speech_confidence += adapt*MIN16(.2f,frame_probs[0]-tonal->speech_confidence);
          }
       }
    }
    tonal->last_music = tonal->music_prob>.5f;
#ifdef MLP_TRAINING
    for (i=0;i<25;i++)
       printf("%f ", features[i]);
    printf("\n");
#endif

    info->bandwidth = bandwidth;
    tonal->prev_bandwidth = bandwidth;
    /*printf("%d %d\n", info->bandwidth, info->opus_bandwidth);*/
    info->noisiness = frame_noisiness;
    info->valid = 1;
    RESTORE_STACK;
}
Пример #28
0
/**
 * Exercises the 32-bit-float RGBA colour space (rgb32f):
 * opacity get/set, round-trips to/from 16-bit RGBA and QColor,
 * the mix-colours op, and the OVER composite op under various
 * mask/opacity/alpha combinations. Expected values are exact
 * float comparisons, so any change to the conversion maths
 * will show up here.
 */
void KoRgb32fTest::testConversion()
{
    const KoColorSpace* rgb32f = KoColorSpaceRegistry::instance()->colorSpace(RGBAColorModelID.id(), Float32BitsColorDepthID.id(), 0);
    QVERIFY(rgb32f);
    // Working pixels; the quint8* aliases are what the colorspace API takes.
    KoRgbTraits<float>::Pixel p32f;
    quint8* p32fPtr = reinterpret_cast<quint8*>(&p32f);
    KoRgbTraits<float>::Pixel p32f1;
    quint8* p32fPtr1 = reinterpret_cast<quint8*>(&p32f1);
    KoRgbTraits<float>::Pixel p32f2;
    quint8* p32fPtr2 = reinterpret_cast<quint8*>(&p32f2);
    KoRgbTraits<float>::Pixel p32f3;
    quint8* p32fPtr3 = reinterpret_cast<quint8*>(&p32f3);
    KoRgbU16Traits::Pixel p16u;
    quint8* p16uPtr = reinterpret_cast<quint8*>(&p16u);

    // Test alpha function: float alpha in [0,1] maps onto the U8 [0,255] range.
    p32f.alpha = 1.0;
    QCOMPARE(qint32(rgb32f->opacityU8(p32fPtr)), 255);
    p32f.alpha = 0.5;
    // Both sides go through float2int, so the rounding mode cancels out.
    QCOMPARE(qint32(rgb32f->opacityU8(p32fPtr)), qint32(float2int(255 * 0.5)));

    // Test setAlpha: the inverse mapping, U8 [0,255] back to float [0,1].
    rgb32f->setOpacity(p32fPtr, quint8(255), 1);
    QCOMPARE(p32f.alpha, 1.0f);
    rgb32f->setOpacity(p32fPtr, quint8(0), 1);
    QCOMPARE(p32f.alpha, 0.0f);
    rgb32f->setOpacity(p32fPtr, quint8(127), 1);
    QCOMPARE(p32f.alpha, float(127 / 255.0));

    // Test conversion of black from 32f to 16u back to 32f.
    p32f.red = 0.0;
    p32f.green = 0.0;
    p32f.blue = 0.0;
    p32f.alpha = 1.0;
    // Randomize the destination so the conversion must overwrite every channel
    // (stale zero bytes can't fake a pass).
    randomizator<quint16>(p16u);
    rgb32f->toRgbA16(p32fPtr, p16uPtr, 1);
    QCOMPARE(p16u.red, quint16(0));
    QCOMPARE(p16u.green, quint16(0));
    QCOMPARE(p16u.blue, quint16(0));
    QCOMPARE(p16u.alpha, quint16(65535));
    rgb32f->fromRgbA16(p16uPtr, p32fPtr, 1);
    QCOMPARE(p32f.red, 0.0f);
    QCOMPARE(p32f.green, 0.0f);
    QCOMPARE(p32f.blue, 0.0f);
    QCOMPARE(p32f.alpha, 1.0f);

    // Test conversion to QColor and back (p32f still holds opaque black here).
    QColor color;
    rgb32f->toQColor(p32fPtr, &color, 0);
    QCOMPARE(color.red(), 0);
    QCOMPARE(color.green(), 0);
    QCOMPARE(color.blue(), 0);
    QCOMPARE(color.alpha(), 255);
    rgb32f->fromQColor(color, p32fPtr, 0);
    QCOMPARE(p32f.red, 0.0f);
    QCOMPARE(p32f.green, 0.0f);
    QCOMPARE(p32f.blue, 0.0f);
    QCOMPARE(p32f.alpha, 1.0f);

    // Test conversion of white from 32f to 16u back to 32f.
    p32f.red = 1.0;
    p32f.green = 1.0;
    p32f.blue = 1.0;
    p32f.alpha = 1.0;
    randomizator<quint16>(p16u);
    rgb32f->toRgbA16(p32fPtr, p16uPtr, 1);
    // NOTE(review): colour 1.0f maps to 47803, not 65535, while alpha maps to
    // the full 65535 — presumably the u16 colour range keeps headroom for
    // values above 1.0; confirm against the scale factor in the conversion code.
    QCOMPARE(p16u.red, quint16(47803));
    QCOMPARE(p16u.green, quint16(47803));
    QCOMPARE(p16u.blue, quint16(47803));
    QCOMPARE(p16u.alpha, quint16(65535));
    rgb32f->fromRgbA16(p16uPtr, p32fPtr, 1);
    QCOMPARE(p32f.red, 1.0f);
    QCOMPARE(p32f.green, 1.0f);
    QCOMPARE(p32f.blue, 1.0f);
    QCOMPARE(p32f.alpha, 1.0f);

    // Test mix op: three pixels, equal weights 85/255 each (sums to 255),
    // so the expected output is the plain average of each channel.
    quint8* colors[3];
    colors[0] = p32fPtr;
    colors[1] = p32fPtr1;
    colors[2] = p32fPtr2;
    p32f.red = 0.5; p32f.green = 0.1; p32f.blue = 0.6; p32f.alpha = 1.0;
    p32f1.red = 0.3; p32f1.green = 0.5; p32f1.blue = 0.8; p32f1.alpha = 1.0;
    p32f2.red = 0.7; p32f2.green = 0.6; p32f2.blue = 0.7; p32f2.alpha = 1.0;
    // Pre-poison the destination with out-of-range values.
    p32f3.red = -1.0; p32f3.green = -1.0; p32f3.blue = -1.0; p32f3.alpha = -1.0;
    qint16 weights[3];
    weights[0] = qint16(255 / 3);
    weights[1] = qint16(255 / 3);
    weights[2] = qint16(255 / 3);
    rgb32f->mixColorsOp()->mixColors(colors, weights, 3, p32fPtr3);
    QCOMPARE(p32f3.red, 0.5f);
    QCOMPARE(p32f3.green, 0.4f);
    QCOMPARE(p32f3.blue, 0.7f);
    QCOMPARE(p32f3.alpha, 1.0f);

    // Test composite op: p32f is the source, p32f1 the destination.
    const KoCompositeOp* over = rgb32f->compositeOp(COMPOSITE_OVER);
    QVERIFY(over);
    // Test no mask, full opacity: opaque-over-opaque fully replaces the dst.
    p32f.red = 0.5; p32f.green = 0.1; p32f.blue = 0.6; p32f.alpha = 1.0;
    p32f1.red = 0.3; p32f1.green = 0.5; p32f1.blue = 0.8; p32f1.alpha = 1.0;
    over->composite(p32fPtr1, rgb32f->pixelSize(), p32fPtr, rgb32f->pixelSize(), 0, 0, 1, 1, 255);
    QCOMPARE(p32f1.red, 0.5f);
    QCOMPARE(p32f1.green, 0.1f);
    QCOMPARE(p32f1.blue, 0.6f);
    QCOMPARE(p32f1.alpha, 1.0f);

    // Test no mask, half opacity: lerp by 127/255, e.g. red 0.3 + 0.2*(127/255).
    p32f.red = 0.5; p32f.green = 0.1; p32f.blue = 0.6; p32f.alpha = 1.0;
    p32f1.red = 0.3; p32f1.green = 0.5; p32f1.blue = 0.8; p32f1.alpha = 1.0;
    over->composite(p32fPtr1, rgb32f->pixelSize(), p32fPtr, rgb32f->pixelSize(), 0, 0, 1, 1, 127);
    QCOMPARE(p32f1.red, 0.399608f);
    QCOMPARE(p32f1.green, 0.300784f);
    QCOMPARE(p32f1.blue, 0.700392f);
    QCOMPARE(p32f1.alpha, 1.0f);

    // Test mask (127), full opacity: same blend factor as above, so same result.
    quint8 mask; mask = 127;
    p32f.red = 0.5; p32f.green = 0.1; p32f.blue = 0.6; p32f.alpha = 1.0;
    p32f1.red = 0.3; p32f1.green = 0.5; p32f1.blue = 0.8; p32f1.alpha = 1.0;
    over->composite(p32fPtr1, rgb32f->pixelSize(), p32fPtr, rgb32f->pixelSize(), &mask, 1, 1, 1, 255);
    QCOMPARE(p32f1.red, 0.399608f);
    QCOMPARE(p32f1.green, 0.300784f);
    QCOMPARE(p32f1.blue, 0.700392f);
    QCOMPARE(p32f1.alpha, 1.0f);

    // Test mask (127) combined with half opacity (127): both factors multiply,
    // giving a (127/255)^2 blend, e.g. red 0.3 + 0.2*(127/255)^2.
    p32f.red = 0.5; p32f.green = 0.1; p32f.blue = 0.6; p32f.alpha = 1.0;
    p32f1.red = 0.3; p32f1.green = 0.5; p32f1.blue = 0.8; p32f1.alpha = 1.0;
    over->composite(p32fPtr1, rgb32f->pixelSize(), p32fPtr, rgb32f->pixelSize(), &mask, 1, 1, 1, 127);
    QCOMPARE(p32f1.red, 0.349609f);
    QCOMPARE(p32f1.green, 0.400783f);
    QCOMPARE(p32f1.blue, 0.750391f);
    QCOMPARE(p32f1.alpha, 1.0f);

    // Test no mask, full opacity, transparent source: dst must be untouched.
    p32f.red = 0.5; p32f.green = 0.1; p32f.blue = 0.6; p32f.alpha = 0.0;
    p32f1.red = 0.3; p32f1.green = 0.5; p32f1.blue = 0.8; p32f1.alpha = 1.0;
    over->composite(p32fPtr1, rgb32f->pixelSize(), p32fPtr, rgb32f->pixelSize(), 0, 0, 1, 1, 255);
    QCOMPARE(p32f1.red, 0.3f);
    QCOMPARE(p32f1.green, 0.5f);
    QCOMPARE(p32f1.blue, 0.8f);
    QCOMPARE(p32f1.alpha, 1.0f);

    // Test no mask, full opacity, transparent dst: src fully replaces it.
    p32f.red = 0.5; p32f.green = 0.1; p32f.blue = 0.6; p32f.alpha = 1.0;
    p32f1.red = 0.3; p32f1.green = 0.5; p32f1.blue = 0.8; p32f1.alpha = 0.0;
    over->composite(p32fPtr1, rgb32f->pixelSize(), p32fPtr, rgb32f->pixelSize(), 0, 0, 1, 1, 255);
    QCOMPARE(p32f1.red, 0.5f);
    QCOMPARE(p32f1.green, 0.1f);
    QCOMPARE(p32f1.blue, 0.6f);
    QCOMPARE(p32f1.alpha, 1.0f);

    // Test no mask, full opacity, half-transparent dst: an opaque source
    // still wins completely under OVER.
    p32f.red = 0.5; p32f.green = 0.1; p32f.blue = 0.6; p32f.alpha = 1.0;
    p32f1.red = 0.3; p32f1.green = 0.5; p32f1.blue = 0.8; p32f1.alpha = 0.5;
    over->composite(p32fPtr1, rgb32f->pixelSize(), p32fPtr, rgb32f->pixelSize(), 0, 0, 1, 1, 255);
    QCOMPARE(p32f1.red, 0.5f);
    QCOMPARE(p32f1.green, 0.1f);
    QCOMPARE(p32f1.blue, 0.6f);
    QCOMPARE(p32f1.alpha, 1.0f);
}
Пример #29
0
/*
 * Read one ADC channel and convert its sample to a voltage, split into
 * integer and fractional parts via float2int().
 *
 * channel   - index into the shared adc_vals[] sample buffer
 * intpart   - out: integer portion of the voltage
 * floatpart - out: fractional portion of the voltage
 *
 * The whole read-and-convert sequence runs while holding adc_mutex, so
 * the sample cannot be updated by the ADC task mid-conversion.
 */
void get_voltage(int channel, int *intpart, int *floatpart) {
    xSemaphoreTake(adc_mutex, portMAX_DELAY);
    {
        /* Scale the raw count to volts: count / RESOLUTION * MAX_VOLTAGE. */
        const float volts = ((float) adc_vals[channel] / RESOLUTION) * MAX_VOLTAGE;
        float2int(volts, intpart, floatpart);
    }
    xSemaphoreGive(adc_mutex);
}
/* Convert decoded float PCM to 16-bit samples and deliver them to the
 * output (a file, or the sound device on Win32/sndio builds), optionally
 * resampling first.
 *
 * pcm        - interleaved float samples, scaled to [-1, 1] (multiplied
 *              by 32768 before clamping to short range)
 * channels   - number of interleaved channels
 * frame_size - samples per channel available in pcm
 * fout       - destination stream when writing to a file
 * resampler  - optional rate converter; NULL passes samples through
 * skip       - in/out: samples per channel still to be dropped from the
 *              start of the stream (decoder pre-skip, presumably); may be NULL
 * shapemem   - optional noise-shaping dither state; NULL means plain
 *              clamp-and-truncate conversion
 * file       - nonzero when writing to fout rather than playing audio
 * maxout     - cap on samples per channel to emit; negative is treated as 0
 *
 * Returns the number of samples per channel actually written/played.
 */
opus_int64 audio_write(float *pcm, int channels, int frame_size, FILE *fout, SpeexResamplerState *resampler,
                       int *skip, shapestate *shapemem, int file, opus_int64 maxout)
{
   opus_int64 sampout=0;
   int i,ret,tmp_skip;
   unsigned out_len;
   short *out;
   float *buf;
   float *output;
   /* Stack-allocated scratch buffers sized for the largest possible frame. */
   out=alloca(sizeof(short)*MAX_FRAME_SIZE*channels);
   buf=alloca(sizeof(float)*MAX_FRAME_SIZE*channels);
   maxout=maxout<0?0:maxout;
   do {
     /* Consume as much of the remaining skip as this frame can cover. */
     if (skip){
       tmp_skip = (*skip>frame_size) ? (int)frame_size : *skip;
       *skip -= tmp_skip;
     } else {
       tmp_skip = 0;
     }
     if (resampler){
       unsigned in_len;
       output=buf;
       in_len = frame_size-tmp_skip;
       /* Resample in chunks of at most 1024 output samples; the loop below
          iterates until the input frame is fully consumed. */
       out_len = 1024<maxout?1024:maxout;
       speex_resampler_process_interleaved_float(resampler, pcm+channels*tmp_skip, &in_len, buf, &out_len);
       /* in_len/out_len now hold the counts actually consumed/produced. */
       pcm += channels*(in_len+tmp_skip);
       frame_size -= in_len+tmp_skip;
     } else {
       /* No resampler: emit the frame (minus skip) directly in one pass. */
       output=pcm+channels*tmp_skip;
       out_len=frame_size-tmp_skip;
       frame_size=0;
     }

     /*Convert to short and save to output file*/
     if (shapemem){
       /* Noise-shaped dither conversion. */
       shape_dither_toshort(shapemem,out,output,out_len,channels);
     }else{
       /* Plain conversion: scale to 16-bit range, clamp, round. */
       for (i=0;i<(int)out_len*channels;i++)
         out[i]=(short)float2int(fmaxf(-32768,fminf(output[i]*32768.f,32767)));
     }
     /* File output is little-endian; byte-swap when running big-endian
        (le_short(1)!=1 detects a big-endian host). */
     if ((le_short(1)!=1)&&file){
       for (i=0;i<(int)out_len*channels;i++)
         out[i]=le_short(out[i]);
     }

     if(maxout>0)
     {
#if defined WIN32 || defined _WIN32
       if(!file){
         ret=WIN_Play_Samples (out, sizeof(short) * channels * (out_len<maxout?out_len:maxout));
         if(ret>0)ret/=sizeof(short)*channels;
         else fprintf(stderr, "Error playing audio.\n");
       }else
#elif defined HAVE_LIBSNDIO
       if(!file){
         ret=sio_write (hdl, out, sizeof(short) * channels * (out_len<maxout?out_len:maxout));
         if(ret>0)ret/=sizeof(short)*channels;
         else fprintf(stderr, "Error playing audio.\n");
       }else
#endif
         /* 2*channels bytes per sample-frame; never emit more than maxout. */
         ret=fwrite(out, 2*channels, out_len<maxout?out_len:maxout, fout);
       sampout+=ret;
       maxout-=ret;
     }
   } while (frame_size>0 && maxout>0);
   return sampout;
}