Example #1
void buffer_setup(t_HoaConvolve *x)
{
    int vectorSize = 0;
    if (x->f_buffer != NULL)
    {
        if (vectorSize < x->f_buffer->b_frames)
            vectorSize = x->f_buffer->b_frames;
        float* datas = new float[vectorSize];

        ATOMIC_INCREMENT(&x->f_buffer->b_inuse);
        if (!x->f_buffer->b_valid)
        {
            ATOMIC_DECREMENT(&x->f_buffer->b_inuse);
        }
        else
        {
            for (long i = 0; i < x->f_buffer->b_frames; i++)
            {
                datas[i] = x->f_buffer->b_samples[i * x->f_buffer->b_nchans + (x->f_channel - 1)];
            }
            ATOMIC_DECREMENT(&x->f_buffer->b_inuse);
            // Hand the impulse response over only after the copy actually succeeded.
            x->f_ambiConvolve->setImpulseResponse(datas, x->f_buffer->b_frames);
        }
        delete[] datas; // memory from new[] must be released with delete[], not free()
    }
}
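Most of the examples in this list repeat one guard idiom for the classic (pre-buffer_ref) Max t_buffer API: mark the buffer as in use with ATOMIC_INCREMENT(&b->b_inuse), bail out if b_valid is clear, and balance every increment with exactly one ATOMIC_DECREMENT on every exit path. A minimal sketch of just that idiom, assuming the classic SDK headers; the peak computation is only a placeholder payload:

// Sketch: the b_inuse / b_valid guard idiom, isolated from any real external.
#include "ext.h"      // classic Max SDK: ATOMIC_INCREMENT / ATOMIC_DECREMENT
#include "buffer.h"   // t_buffer: b_inuse, b_valid, b_samples, b_frames, b_nchans

static float safe_buffer_peak(t_buffer *b)
{
    float peak = 0.f;
    if (!b)
        return peak;                        // no buffer bound at all

    ATOMIC_INCREMENT(&b->b_inuse);          // tell the owner we are reading
    if (!b->b_valid) {                      // buffer is being resized or reloaded
        ATOMIC_DECREMENT(&b->b_inuse);      // balance before every early return
        return peak;
    }

    for (long i = 0; i < b->b_frames; i++) {    // placeholder payload: peak of channel 0
        float v = b->b_samples[i * b->b_nchans];
        if (v < 0) v = -v;
        if (v > peak) peak = v;
    }

    ATOMIC_DECREMENT(&b->b_inuse);          // exactly one decrement per increment
    return peak;
}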
Example #2
void bed_fadeout(t_bed *x, double fadetime)
{
    if (!bed_attach_buffer(x)) {
        return;
    }

    t_buffer *b;
    b = x->buffer;

    ATOMIC_INCREMENT(&b->b_inuse);

    if (!b->b_valid) {
        ATOMIC_DECREMENT(&b->b_inuse);
        post("bed • Not a valid buffer!");
        return;
    }

    long fadeframes = fadetime * 0.001 * b->b_sr;
    if (fadetime <= 0 || fadeframes > b->b_frames) {
        post("bed • %.0fms is not a valid fade-out time", fadetime);
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    }

    long chunksize = fadeframes * b->b_nchans * sizeof(float);
    if (x->undo_samples == NULL) {
        x->undo_samples = (float *)sysmem_newptr(chunksize);
    } else {
        x->undo_samples = (float *)sysmem_resizeptr(x->undo_samples, chunksize);
    }

    if (x->undo_samples == NULL) {
        error("bed • Cannot allocate memory for undo");
        x->can_undo = 0;
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    } else {
        x->can_undo = 1;
        x->undo_start = b->b_frames - fadeframes;
        x->undo_frames = fadeframes;
        x->undo_resize = 0;
        x->undo_cut = 0;
        sysmem_copyptr(b->b_samples + (x->undo_start * b->b_nchans), // offset in samples, not frames
                       x->undo_samples, chunksize);
    }

    for (int ii = (int)x->undo_start; ii < x->undo_start + fadeframes; ii++) {
        for (int jj = 0; jj < b->b_nchans; jj++) {
            b->b_samples[(ii * b->b_nchans) + jj] *=
                1 - (float)(ii - x->undo_start) / (float)fadeframes;
        }
    }

    object_method(&b->b_obj, gensym("dirty"));
    ATOMIC_DECREMENT(&b->b_inuse);
}
Example #3
void bed_reverse(t_bed *x)
{
    if (!bed_attach_buffer(x)) {
        return;
    }

    t_buffer *b;
    b = x->buffer;

    ATOMIC_INCREMENT(&b->b_inuse);

    if (!b->b_valid) {
        ATOMIC_DECREMENT(&b->b_inuse);
        post("bed • Not a valid buffer!");
        return;
    }

    long chunksize = b->b_frames * b->b_nchans * sizeof(float);
    if (x->undo_samples == NULL) {
        x->undo_samples = (float *)sysmem_newptr(chunksize);
    } else {
        x->undo_samples = (float *)sysmem_resizeptr(x->undo_samples, chunksize);
    }

    if (x->undo_samples == NULL) {
        error("bed • Cannot allocate memory for undo");
        x->can_undo = 0;
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    } else {
        x->can_undo = 1;
        x->undo_start = 0;
        x->undo_frames = b->b_frames;
        x->undo_resize = 0;
        x->undo_cut = 0;
        sysmem_copyptr(b->b_samples, x->undo_samples, chunksize);
    }

    float temp;
    for (int ii = 0; ii < b->b_frames / 2; ii++) { // swap frame ii with its mirror frame
        for (int jj = 0; jj < b->b_nchans; jj++) {
            temp = b->b_samples[(ii * b->b_nchans) + jj];
            b->b_samples[(ii * b->b_nchans) + jj] =
                b->b_samples[((b->b_frames - 1 - ii) * b->b_nchans) + jj];
            b->b_samples[((b->b_frames - 1 - ii) * b->b_nchans) + jj] = temp;
        }
    }

    object_method(&b->b_obj, gensym("dirty"));
    ATOMIC_DECREMENT(&b->b_inuse);
}
Example #4
void bed_ring_modulation(t_bed *x, double frequency)
{
    if (!bed_attach_buffer(x)) {
        return;
    }

    t_buffer *b;
    b = x->buffer;

    ATOMIC_INCREMENT(&b->b_inuse);

    if (!b->b_valid) {
        ATOMIC_DECREMENT(&b->b_inuse);
        post("bed • Not a valid buffer!");
        return;
    }

    long chunksize = b->b_frames * b->b_nchans * sizeof(float);
    if (x->undo_samples == NULL) {
        x->undo_samples = (float *)sysmem_newptr(chunksize);
    } else {
        x->undo_samples = (float *)sysmem_resizeptr(x->undo_samples, chunksize);
    }

    if (x->undo_samples == NULL) {
        error("bed • Cannot allocate memory for undo");
        x->can_undo = 0;
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    } else {
        x->can_undo = 1;
        x->undo_start = 0;
        x->undo_frames = b->b_frames;
        x->undo_resize = 0;
        x->undo_cut = 0;
        sysmem_copyptr(b->b_samples, x->undo_samples, chunksize);
    }

    float twopi = 8.0 * atan(1.0);
    float oneoversr = 1.0 / b->b_sr;
    for (int ii = 0; ii < b->b_frames; ii++) {
        for (int jj = 0; jj < b->b_nchans; jj++) {
            b->b_samples[(ii * b->b_nchans) + jj] *=
                sin(twopi * frequency * ii * oneoversr);
        }
    }

    object_method(&b->b_obj, gensym("dirty"));
    ATOMIC_DECREMENT(&b->b_inuse);
}
Example #5
void bed_paste (t_bed *x, t_symbol *destname)
{
    if (x->can_undo && x->undo_cut) {
        if (!bed_attach_buffer(x)) {
            return;
        }

        t_buffer *destbuf = NULL;
        if (bed_attach_any_buffer(&destbuf, destname)) {
            if (x->buffer->b_nchans != destbuf->b_nchans) {
                post("bed • Different number of channels of origin (%d) "
                     "and number of channel of destination (%d)",
                     x->buffer->b_nchans, destbuf->b_nchans);
                return;
            }

            t_atom rv;
            object_method_long(&destbuf->b_obj, gensym("sizeinsamps"),
                               x->undo_frames, &rv);
            ATOMIC_INCREMENT(&destbuf->b_inuse);
            long chunksize = x->undo_frames * destbuf->b_nchans * sizeof(float);
            sysmem_copyptr(x->undo_samples, destbuf->b_samples, chunksize);
            ATOMIC_DECREMENT(&destbuf->b_inuse);

        } else {
            post("bed • \"%s\" is not a valid buffer", destname->s_name);
            return;
        }
    } else {
        post("bed • Nothing to paste");
        return;
    }
}
Example #6
t_int *index_perform(t_int *w)
{
    t_index *x = (t_index *)(w[1]);
    t_float *in = (t_float *)(w[2]);
    t_float *out = (t_float *)(w[3]);
    int n = (int)(w[4]);
	t_buffer *b = x->l_buf;
	float *tab;
	double temp;
	double f;
	long index,chan,frames,nc;
	
	if (x->l_obj.z_disabled)
		goto out;
	if (!b)
		goto zero;
	ATOMIC_INCREMENT(&b->b_inuse);
	if (!b->b_valid) {
		ATOMIC_DECREMENT(&b->b_inuse);
		goto zero;
	}
	tab = b->b_samples;
	chan = x->l_chan;
	frames = b->b_frames;
	nc = b->b_nchans;
	while (n--) {
		temp = *in++;
		f = temp + 0.5;
		index = f;
		if (index < 0)
			index = 0;
		else if (index >= frames)
			index = frames - 1;
		if (nc > 1)
			index = index * nc + chan;
		*out++ = tab[index];
	}
	ATOMIC_DECREMENT(&b->b_inuse);
	return w + 5;
zero:
	while (n--) *out++ = 0.;
out:
	return w + 5;
}
Example #7
GradientEffect::~GradientEffect()
{
    RETAILMSG(ZONE_OBJECT | ZONE_VERBOSE, "\t~GradientEffect( %4d )", m_ID);

    // Delete the shaders when the last instance is freed
    if (0 == ATOMIC_DECREMENT( GradientEffect::s_NumInstances ))
    {
        RETAILMSG(ZONE_INFO, "~GradientEffect: last instance deleted, freeing Color shaders");
        Deinit();
    }
}
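Examples #7 and #9 (below) share a teardown pattern: the constructor bumps a static instance counter, the destructor atomically decrements it, and shared resources (here, shaders) are freed only when the decrement reports that the last instance is gone. The same shape with standard C11 atomics; the effect_* and effects_deinit() names are placeholders, not the original API:

#include <stdatomic.h>

static atomic_int s_num_instances;          /* counts live effect instances */

static void effects_deinit(void)
{
    /* free the shared GPU state exactly once (placeholder) */
}

void effect_create(void)
{
    atomic_fetch_add(&s_num_instances, 1);  /* one more live instance */
}

void effect_destroy(void)
{
    /* fetch_sub returns the previous value, so the last instance sees 1:
       the same "did I just hit zero?" test as the 0 == ATOMIC_DECREMENT above */
    if (atomic_fetch_sub(&s_num_instances, 1) == 1)
        effects_deinit();
}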
Example #8
void buffer_setup(t_HoaConvolve *x)
{
	if(x->f_check)
	{
		int vectorSize = 0;
        if(x->f_buffer != NULL)
		{
			if(vectorSize < x->f_buffer->b_frames)
				vectorSize = x->f_buffer->b_frames;
		}
		double* datas = new double[vectorSize];		   
		for(int j = 0; j < x->f_numberOfHarmonics; j++)
		{
			if(x->f_buffer != NULL)
			{
				ATOMIC_INCREMENT(&x->f_buffer->b_inuse);
				if (!x->f_buffer->b_valid) 
				{
					ATOMIC_DECREMENT(&x->f_buffer->b_inuse);
                    object_error(x, "hoa.convolve~ can't load this buffer.");
				}
				else
				{
                    double offset = x->f_offset[j] * x->f_buffer->b_msr * 100.;
					long size = x->f_buffer->b_frames - offset;
				
					for(long i = 0; i < x->f_buffer->b_frames; i++)
					{
						datas[i] = x->f_buffer->b_samples[i * x->f_buffer->b_nchans + (x->f_channel - 1)];
					}
					ATOMIC_DECREMENT(&x->f_buffer->b_inuse);

					x->f_ambiConvolve->setImpulseResponse(j, datas, size, 0);
				}
			}
		}
		delete[] datas; // memory from new[] must be released with delete[], not free()
	}
}
Example #9
RippleEffect::~RippleEffect()
{
    RETAILMSG(ZONE_OBJECT | ZONE_VERBOSE, "\t~RippleEffect( %4d )", m_ID);

    SAFE_ARRAY_DELETE(m_pRippledVertices);

    // Delete the shaders when the last instance is freed
    if (0 == ATOMIC_DECREMENT( RippleEffect::s_NumInstances ))
    {
        RETAILMSG(ZONE_INFO, "~RippleEffect: last instance deleted, freeing Ripple shaders");
        Deinit();
    }
}
Example #10
void descriptor_compute(t_buf *x, t_window *w)
{
	int				i, j, k, l, m, hopSize;
	double			frequencyBand, ratio;
	double			*real;
	fftw_complex	*complex;
	fftw_plan		plan;	

	if(x->f_buffer != NULL)
	{
		real		= (double *)fftw_malloc(x->f_windowSize * sizeof(double));
		complex		= (fftw_complex *)fftw_malloc(x->f_windowSize * sizeof(fftw_complex));
		plan		= fftw_plan_dft_r2c_1d(x->f_windowSize, real, complex, FFTW_ESTIMATE);
		if(real && complex && plan)
		{
			ATOMIC_INCREMENT(&x->f_buffer->b_inuse);
			if (!x->f_buffer->b_valid) 
			{
				ATOMIC_DECREMENT(&x->f_buffer->b_inuse);
			}
			else
			{
				hopSize = (x->f_windowSize / x->f_overlapping) * (x->f_overlapping - 1);
				frequencyBand = (double)x->f_buffer->b_sr / (double)x->f_windowSize;
				window_setup(w, x->f_windowSize, w->f_mode);
				
				for(j = 0; j < x->f_nChannels; j++)
				{
					for(i = 0, k = 0, m = 0; m < x->f_nFrames; i++)
					{
						if(i < x->f_nSamples)
						{
							real[k] = x->f_buffer->b_samples[i * x->f_buffer->b_nchans + j] * w->f_envelope[k];
						}
						else
						{
							real[k] = 0.;
						}
						k++;
						if(k == x->f_windowSize)
						{
							fftw_execute(plan);
							descriptor_sonogram(x, complex, j, m);
							descriptor_energy(x, j, m);
							descriptor_moment(x, j, m, frequencyBand);
							descriptor_gradient(x, j, m, frequencyBand, (double)x->f_buffer->b_sr);
							k = 0;
							m++;
							i -= hopSize;
						}
					}
				}
				ATOMIC_DECREMENT(&x->f_buffer->b_inuse);
			}
			if(plan)
				fftw_destroy_plan(plan);
			if(real)
				fftw_free(real);
			if(complex)
				fftw_free(complex);
		}
	}
}
Example #11
int release_object(struct objc_object ** obj)
{
	return ATOMIC_DECREMENT(REFCOUNT(*obj));
}
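Example #11 works because this runtime's ATOMIC_DECREMENT returns the new count, so the caller can tell when the last reference was dropped (REFCOUNT and struct objc_object are specific to that Objective-C runtime). The general shape of such a retain/release pair, sketched with C11 atomics:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct object {
    atomic_int refcount;    /* starts at 1 for the creating owner */
    /* ... payload ... */
} object_t;

void retain_object_sketch(object_t *obj)
{
    atomic_fetch_add(&obj->refcount, 1);
}

int release_object_sketch(object_t *obj)
{
    /* fetch_sub returns the old value; old == 1 means we dropped the last ref */
    int remaining = atomic_fetch_sub(&obj->refcount, 1) - 1;
    if (remaining == 0)
        free(obj);
    return remaining;   /* mirrors Example #11: hand the new count back */
}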
Example #12
t_int *max_jit_peek_perform(t_int *w)
{
	t_max_jit_peek *x = (t_max_jit_peek *)(w[1]);
	long n = (int)(w[2]);
	long i,j,dimcount;
	char *bp,*p;
	float *out_val=x->vectors[0];
	float **in_dim=x->vectors+1;
	long tmp,outofbounds,typesize;
	long dim_int[JIT_MATRIX_MAX_DIMCOUNT];
	float dim_frak[JIT_MATRIX_MAX_DIMCOUNT];
	long mult[JIT_MATRIX_MAX_DIMCOUNT]; // added to perform routine for normalization

	ATOMIC_INCREMENT(&x->inperform);

	if (x->ob.z_disabled)
		goto out;

	if (x->mvalid&&x->mdata) {

		bp = x->mdata;

		if ((!bp)||(x->plane>=x->minfo.planecount)||(x->plane<0)) {
			goto zero;
		}

		dimcount = MIN(x->dimcount,x->minfo.dimcount);

		if (x->normalize) // set the multiplication factor for the input vectors to the matrix dim if 'normalize' is 1
		{
			for(j=0; j<dimcount; j++)
			{
				mult[j]=(x->minfo.dim[j]-1);
			}

		}
		else
		{
			for(j=0; j<dimcount; j++)
			{
				mult[j] = 1;
			}
		}


		if (x->interp) {
			if (x->minfo.type==_jit_sym_char) {
				typesize = 1;
			} else if (x->minfo.type==_jit_sym_long) {
				typesize = 4;
			} else if (x->minfo.type==_jit_sym_float32) {
				typesize = 4;
			} else if (x->minfo.type==_jit_sym_float64) {
				typesize = 8;
			}
			bp += x->plane*typesize;


			for (i=0; i<n; i++) {
				for (j=0; j<dimcount; j++) {
					dim_int[j] = in_dim[j][i]*mult[j];
					dim_frak[j] = in_dim[j][i]*mult[j] - (float) dim_int[j];
					dim_int[j] = dim_int[j]%x->minfo.dim[j];
				}

				*out_val++ = recursive_interp(bp,dimcount,&x->minfo,dim_int,dim_frak);

			}

		}
		else {
			if (x->minfo.type==_jit_sym_char) {
				bp += x->plane;
				for (i=0; i<n; i++) {
					p = bp;
					outofbounds = FALSE;
					for (j=0; j<dimcount; j++) {
						tmp = in_dim[j][i]*mult[j];
						if ((tmp<0)||(tmp>=x->minfo.dim[j])) {
							outofbounds = TRUE;
						}
						p += tmp * x->minfo.dimstride[j];
					}
					if (outofbounds) {
						*out_val++ = 0.;
					} else {
						*out_val++ = (float)(*((uchar *)p)) * (1./255.);
					}
				}
			} else if (x->minfo.type==_jit_sym_long) {
				bp += x->plane*4;
				for (i=0; i<n; i++) {
					p = bp;
					outofbounds = FALSE;
					for (j=0; j<dimcount; j++) {
						tmp = in_dim[j][i]*mult[j];
						if ((tmp<0)||(tmp>=x->minfo.dim[j])) {
							outofbounds = TRUE;
						}
						p += tmp * x->minfo.dimstride[j];
					}
					if (outofbounds) {
						*out_val++ = 0.;
					} else {
						*out_val++ = (float)(*((t_int32 *)p));
					}
				}
			} else if (x->minfo.type==_jit_sym_float32) {
				bp += x->plane*4;
				for (i=0; i<n; i++) {
					p = bp;
					outofbounds = FALSE;
					for (j=0; j<dimcount; j++) {
						tmp = in_dim[j][i]*mult[j];
						if ((tmp<0)||(tmp>=x->minfo.dim[j])) {
							outofbounds = TRUE;
						}
						p += tmp * x->minfo.dimstride[j];
					}
					if (outofbounds) {
						*out_val++ = 0.;
					} else {
						*out_val++ = (*((float *)p));
					}
				}
			} else if (x->minfo.type==_jit_sym_float64) {
				bp += x->plane*8;
				for (i=0; i<n; i++) {
					p = bp;
					outofbounds = FALSE;
					for (j=0; j<dimcount; j++) {
						tmp = in_dim[j][i]*mult[j];
						if ((tmp<0)||(tmp>=x->minfo.dim[j])) {
							outofbounds = TRUE;
						}
						p += tmp * x->minfo.dimstride[j];
					}
					if (outofbounds) {
						*out_val++ = 0.;
					} else {
						*out_val++ = (float)(*((double *)p));
					}
				}
			}

		}
	}

out:
	ATOMIC_DECREMENT(&x->inperform);
	return (w+3);

zero:
	while (n--) *out_val++ = 0.;
	ATOMIC_DECREMENT(&x->inperform);
	return (w+3);
}
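The normalize branch in Example #12 maps inputs in [0, 1] onto matrix coordinates by scaling with dim - 1; the interpolating path then splits the scaled value into an integer cell and a fractional remainder. A worked micro-example of that split (values chosen for illustration):

/* normalize on, x->minfo.dim[j] == 320, incoming coordinate == 0.5f */
float scaled = 0.5f * (320 - 1);        /* mult[j] = dim - 1  ->  159.5      */
long  cell   = (long)scaled;            /* dim_int[j]  = 159                 */
float frac   = scaled - (float)cell;    /* dim_frak[j] = 0.5                 */
cell = cell % 320;                      /* wrap into range, as in the loop   */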
Example #13
t_int *poki_perform(t_int *w)
  {
  t_float * in = (t_float *)(w[1]);
  t_float * out = (t_float *)(w[2]);
  t_poki * x = (t_poki *)(w[3]);
  t_float * index = x->p_connected ? (t_float *)(w[4]) : NULL;
	int n = (int)(w[5]);
	
	if (index == NULL)
    goto out;
	
	t_buffer *b = x->p_buf;
	float *tab;
  long frames,nc;
	
	if (x->p_obj.z_disabled)
		goto out;
  
	if (!b)
		goto out;
  
  // buffer data structure exists
	ATOMIC_INCREMENT(&b->b_inuse);
	if (!b->b_valid) {
		ATOMIC_DECREMENT(&b->b_inuse);
		goto out;
	}
  
	tab = b->b_samples;
	frames = b->b_frames;
	nc = b->b_nchans;
	if (nc != 1)
	{
    ATOMIC_DECREMENT(&b->b_inuse);
    goto zero;
	}
	else
	{
		while (n--)
    {
			const long idx = (long)(*index + 0.5f) % frames;
        
			const int step = (idx - x->p_idx0) % frames;
			int interpCount = step-1;
      float input = *in;

			if (x->p_preFadeFlag)
      {
        x->p_preLevel += x->p_preFadeInc;
        if (absDif(x->p_preLevel, x->p_preLevelTarget) < 0.001)
        {
          x->p_preLevel = x->p_preLevelTarget;
          x->p_preFadeFlag = 0;
        }
      }

      if (x->p_recFadeFlag)
      {
        x->p_recLevel += x->p_recFadeInc;
        if (absDif(x->p_recLevel, x->p_recLevelTarget) < 0.001)
        {
          x->p_recLevel = x->p_recLevelTarget;
          x->p_recFadeFlag = 0;
        }
      }
      // these macros are weirdly undefined in deployment builds for some target architectures
      //if (FIX_DENORM_DOUBLE(x->p_recLevel) > 0.00001)
      if (absDif(x->p_recLevel, 0.0) > 1e-6) // -120dB
      { // recording level is non-zero
        input *= x->p_recLevel;
        //if (FIX_DENORM_DOUBLE(x->p_preLevel) > 0.0)
        if (absDif(x->p_preLevel, 0.0) > 1e-6)
        {
          if (absDif(x->p_preLevel, 1.0) < 0.001)
          {
            input += tab[idx];
          }
          else
          { // pre-level is non-unity
            input += (tab[idx] * x->p_preLevel);
          }
        }
      }
      else
      { 
        /* // FIXME: this behavior is potentially useful, should be an option
        // no recording, use overdub level only
        input = 0.0;
        if (absDif(x->p_preLevel, 1.0) < 0.001)
        { // pre level is unity
          input = tab[idx]; // TODO: should just skip this sample
        }
        else
        { // pre level is non-unity
          input = tab[idx] * FIX_DENORM_DOUBLE(x->p_preLevel);
        }
         */
        // with no recording, we don't change the buffer
        input = tab[idx];
        interpCount =  x->p_interpThresholdSamps + 1; // this should cause the interp steps to be skipped
      }
      
      // perform interpolation
			if (interpCount <= x->p_interpThresholdSamps)
			{
				// FIXME: get higher-order interpolation working and add an option to switch.
				// usually there is no real audible improvement,
				// but i can imagine someone wanting it for some extreme purpose. -emb
        /*
				const float y3 = x->p_y0;
				const float y2 = tab[(x->p_idx0 - step) % frames];
				const float y1 = tab[(idx - (step*2)) % frames];
				const float y0 = tab[(idx - (step*3)) % frames];		
         */
				const float phaseInc = 1.f / ((float)step);
				float phase=phaseInc;
				int interpIdx = (x->p_idx0 + 1) % frames;
				
				while (interpCount > 0)
				{
					// 3rd-order:
					// tab[interpIdx] = hermite_interp(phase, y0, y1, y2, y3);
					// linear:
					tab[interpIdx] = x->p_y0 + (phase * (input - x->p_y0));
					phase += phaseInc;
					interpIdx = ((interpIdx + 1) % frames);
					interpCount--;
				}
			} // interp count < thresh
      
			// no interpolation
			//*out = tab[idx];
      
      // location and float offset 
      const long iIdx = (long)(*index);
      const float fIdx = *index - (long)(*index);
      
      // 3rd-order:
      // *out = hermite_interp(fIdx,
      //                      tab[(iIdx+1) % frames],
      //                      tab[(iIdx+2) % frames], 
      //                      tab[(iIdx+3) % frames],
      //                      tab[(iIdx+4) % frames]
      //                      );
      
      // linear:
      const float tab0 = tab[(iIdx+1) % frames];
      *out =  tab0 + fIdx * (tab[(iIdx + 2) % frames] - tab0);
    
			tab[idx] = input;
			x->p_y0 = input;
      x->p_idx0 = idx;
			out++;
			in++;
			index++;
		} // sample loop
    object_method(b, gensym("dirty"));
    ATOMIC_DECREMENT(&b->b_inuse);
	} // test for mono
	return w + 6;
zero:
  while(n--) *out++ = 0.0;
out:
	return w + 6;
}
Example #14
void bed_undo(t_bed *x)
{
    if (!x->can_undo) {
        post("bed • Nothing to undo");
        return;
    }

    if (!bed_attach_buffer(x)) {
        return;
    }

    t_buffer *b;
    b = x->buffer;

    ATOMIC_INCREMENT(&b->b_inuse);

    if (!b->b_valid) {
        ATOMIC_DECREMENT(&b->b_inuse);
        post("bed • Not a valid buffer!");
        return;
    }

    if (x->undo_cut) {
        long bufferframes = b->b_frames;
        long buffersize = bufferframes * b->b_nchans * sizeof(float);
        float *local_buffer = (float *)sysmem_newptr(buffersize);
        if (local_buffer == NULL) {
            error("bed • Cannot allocate memory for undo");
            x->can_undo = 0;
            ATOMIC_DECREMENT(&b->b_inuse);
            return;
        } else {
            sysmem_copyptr(b->b_samples, local_buffer, buffersize);
        }

        ATOMIC_DECREMENT(&b->b_inuse);
        t_atom rv;
        object_method_long(&b->b_obj, gensym("sizeinsamps"),
                           (bufferframes + x->undo_frames), &rv);
        ATOMIC_INCREMENT(&b->b_inuse);

        long chunksize = x->undo_start * b->b_nchans * sizeof(float);
        sysmem_copyptr(local_buffer, b->b_samples, chunksize);

        chunksize = x->undo_frames * b->b_nchans * sizeof(float);
        sysmem_copyptr(x->undo_samples,
                       b->b_samples + (x->undo_start * b->b_nchans),
                       chunksize);
        chunksize = (bufferframes - x->undo_start) * b->b_nchans * sizeof(float);
        sysmem_copyptr(local_buffer + (x->undo_start * b->b_nchans),
                       b->b_samples + (x->undo_start + x->undo_frames) * b->b_nchans,
                       chunksize);

        sysmem_freeptr(local_buffer);

        x->undo_cut = 0;
        
        object_method(&b->b_obj, gensym("dirty"));
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    }

    if (x->undo_resize) {
        ATOMIC_DECREMENT(&b->b_inuse);
        t_atom rv;
        object_method_long(&b->b_obj, gensym("sizeinsamps"), x->undo_frames, &rv);
        ATOMIC_INCREMENT(&b->b_inuse);
    }

    long chunksize = x->undo_frames * b->b_nchans * sizeof(float);
    sysmem_copyptr(x->undo_samples,
                   b->b_samples + (x->undo_start * b->b_nchans), // offset in samples, not frames
                   chunksize);
    x->can_undo = 0;

    object_method(&b->b_obj, gensym("dirty"));
    ATOMIC_DECREMENT(&b->b_inuse);
}
Example #15
	/**@public @memberof t_OMax_learn
	 * @brief Add state in both FO and Data Structure
	 * @remarks Input message in Max5: @c add*/
	void OMax_learn_add(t_OMax_learn *x, t_symbol *s, short ac, t_atom * av)
	{
		/// Check for binding
		if (OMax_learn_bind(x))
		{
			int out = 0; // initialize: not every path below assigns it
			if (ac>0)
			{
				/// Create new Data state from the input message
				switch (x->datatype)
				{
					case LETTERS:
					{
						if (av->a_type == A_SYM)
						{
							int statenb;
							O_char newdata (*atom_getsym(av)->s_name);
							statenb = x->builder.get_data()->get_size()-1;
							if (statenb>0)
								newdata.set_bufferef(statenb);
							else
								newdata.set_bufferef(0);
							newdata.set_duration(1);
							
							ATOMIC_INCREMENT(&((t_OMax_oracle *)(x->oname->s_thing))->wflag);
							ATOMIC_INCREMENT(&((t_OMax_data *)(x->dataname->s_thing))->wflag);
							if (!(((t_OMax_oracle *)(x->oname->s_thing))->readcount)
								&& !(((t_OMax_data *)(x->dataname->s_thing))->readcount))
							{
								/// Add state to both structures
								out = x->builder.add(newdata);
							}
							else
								object_error((t_object *)x,"Oracle %s being read (%d, %d)",x->oname->s_name, ((t_OMax_oracle *)(x->oname->s_thing))->readcount, ((t_OMax_data *)(x->dataname->s_thing))->readcount);
							ATOMIC_DECREMENT(&((t_OMax_oracle *)(x->oname->s_thing))->wflag);
							ATOMIC_DECREMENT(&((t_OMax_data *)(x->dataname->s_thing))->wflag);
						}
						else
							object_error((t_object *)x,"Wrong type of data");
						break;
						
					}
					case MIDI_MONO:
					{
						O_MIDI_mono * newdata = new O_MIDI_mono();
						bool valid = TRUE;
						switch(ac)
						{
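							// NOTE: the cases below fall through on purpose: each arity
							// fills in its own field, then continues into the shorter ones.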
							case 6:
								if ((av+5)->a_type == A_LONG)
									((O_label*)newdata)->set_duration(atom_getlong(av+5));
								else
								{
									object_error((t_object *)x, "Error in input, duration must be int");
									valid = FALSE;
									break;
								}
							case 5:
								if ((av+4)->a_type == A_LONG)
									((O_label*)newdata)->set_bufferef(atom_getlong(av+4));
								else
								{
									object_error((t_object *)x, "Error in input, buffer reference must be int");
									valid = FALSE;
									break;
								}
							case 4:
								if ((av+3)->a_type == A_LONG)
									((O_label*)newdata)->set_section(atom_getlong(av+3));
								else
								{
									object_error((t_object *)x, "Error in input, section must be int");
									valid = FALSE;
									break;
								}
							case 3:
								if ((av+2)->a_type == A_LONG)
									((O_label*)newdata)->set_phrase(atom_getlong(av+2));
								else
								{
									object_error((t_object *)x, "Error in input, phrase must be int");
									valid = FALSE;
									break;
								}
							case 2:
								if ((av+1)->a_type == A_LONG)
									newdata->set_velocity(atom_getlong(av+1));
								else
								{
									object_error((t_object *)x, "Error in input, velocity must be int");
									valid = FALSE;
									break;
								}
							case 1:
								if (av->a_type == A_LONG)
									newdata->set_pitch(atom_getlong(av));
								else
								{
									object_error((t_object *)x, "Error in input, pitch must be int");
									valid = FALSE;
								}
								break;
							default:
								object_error((t_object *)x, "Error in input, too many arguments");
								valid = FALSE;
								break;
						}
						ATOMIC_INCREMENT(&((t_OMax_oracle *)(x->oname->s_thing))->wflag);
						ATOMIC_INCREMENT(&((t_OMax_data *)(x->dataname->s_thing))->wflag);
						if (!(((t_OMax_oracle *)(x->oname->s_thing))->readcount)
							&& !(((t_OMax_data *)(x->dataname->s_thing))->readcount))
						{
							/// Add state to both structures
							out = x->builder.add(*newdata);
						}
						else
							object_error((t_object *)x,"Oracle %s being read (%d, %d)",x->oname->s_name, ((t_OMax_oracle *)(x->oname->s_thing))->readcount, ((t_OMax_data *)(x->dataname->s_thing))->readcount);
						ATOMIC_DECREMENT(&((t_OMax_oracle *)(x->oname->s_thing))->wflag);
						ATOMIC_DECREMENT(&((t_OMax_data *)(x->dataname->s_thing))->wflag);
						break;
					}
					case SPECTRAL:
						int pitchin;
						int coeffcount;
						bool valid = TRUE;
						O_spectral * newdata;
						if(ac < (x->nbcoeffs+1)) {
							object_error((t_object *)x, "Missing coefficients");
							valid = FALSE;
						}
						else
						{
							if ((av)->a_type == A_LONG)
								pitchin = atom_getlong(av);
							else
								valid = FALSE;
							list<float> coeffsin;
							for (coeffcount = 0; coeffcount < x->nbcoeffs; coeffcount++)
							{
								if((av+coeffcount+1)->a_type == A_FLOAT)
									coeffsin.push_back(atom_getfloat(av+coeffcount+1));
								else {
									object_error((t_object *)x, "Wrong types in coefficents");
									valid = FALSE;
								}
							}
							newdata = new O_spectral(pitchin, coeffsin);
							if (ac >= x->nbcoeffs+2) {
								if ((av+x->nbcoeffs+1)->a_type == A_LONG)
									((O_label*)newdata)->set_phrase(atom_getlong(av+x->nbcoeffs+1));
								else
								{
									object_error((t_object *)x, "Error in input, phrase must be int");
									valid = FALSE;
								}
								if (ac >= x->nbcoeffs+3) {
									if ((av+x->nbcoeffs+2)->a_type == A_LONG)
										((O_label*)newdata)->set_section(atom_getlong(av+x->nbcoeffs+2));
									else
									{
										object_error((t_object *)x, "Error in input, section must be int");
										valid = FALSE;
									}
									if (ac >= x->nbcoeffs+4) {
										if ((av+x->nbcoeffs+3)->a_type == A_LONG)
											((O_label*)newdata)->set_bufferef(atom_getlong(av+x->nbcoeffs+3));
										else
										{
											object_error((t_object *)x, "Error in input, buffer reference must be int");
											valid = FALSE;
										}
										if (ac == x->nbcoeffs+5) {
											if ((av+x->nbcoeffs+4)->a_type == A_LONG)
												((O_label*)newdata)->set_duration(atom_getlong(av+x->nbcoeffs+4));
											else
											{
												object_error((t_object *)x, "Error in input, duration must be int");
												valid = FALSE;
											}
											
										}
										else {
											object_error((t_object *)x, "Error in input, too many arguments");
											valid = FALSE;
										}
									}
								}
							}
						}
						ATOMIC_INCREMENT(&((t_OMax_oracle *)(x->oname->s_thing))->wflag);
						ATOMIC_INCREMENT(&((t_OMax_data *)(x->dataname->s_thing))->wflag);
						if (!(((t_OMax_oracle *)(x->oname->s_thing))->readcount)
							&& !(((t_OMax_data *)(x->dataname->s_thing))->readcount))
						{
							/// Add state to both structures
							out = x->builder.add(*newdata);
						}
						else
							object_error((t_object *)x,"Oracle %s being read (%d, %d)",x->oname->s_name, ((t_OMax_oracle *)(x->oname->s_thing))->readcount, ((t_OMax_data *)(x->dataname->s_thing))->readcount);
						ATOMIC_DECREMENT(&((t_OMax_oracle *)(x->oname->s_thing))->wflag);
						ATOMIC_DECREMENT(&((t_OMax_data *)(x->dataname->s_thing))->wflag);
						break;		
				}
				/// Output the index of the added state (identical in both structures)
				outlet_int(x->stateout, out);
			}
			else
				object_error((t_object *)x,"Error in input, too few arguments");
		}
	}
Example #16
int rqueue_write(rqueue_t *rb, void *value) {
    int retries = 0;
    int did_update = 0;
    int did_move_head = 0;

    rqueue_page_t *temp_page = NULL;
    rqueue_page_t *next_page = NULL;
    rqueue_page_t *tail = NULL;
    rqueue_page_t *head = NULL;
    rqueue_page_t *commit;
    ATOMIC_INCREMENT(rb->num_writers);
    do {
        temp_page = ATOMIC_READ(rb->tail);
        commit = ATOMIC_READ(rb->commit);
        next_page = RQUEUE_FLAG_OFF(ATOMIC_READ(temp_page->next), RQUEUE_FLAG_ALL);
        head = ATOMIC_READ(rb->head);
        if (rb->mode == RQUEUE_MODE_BLOCKING) {
            if (temp_page == commit && next_page == head) {
                if (ATOMIC_READ(rb->writes) - ATOMIC_READ(rb->reads) != 0) {
                    //fprintf(stderr, "No buffer space\n");
                    if (ATOMIC_READ(rb->num_writers) == 1)
                        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
                    ATOMIC_DECREMENT(rb->num_writers);
                    return -2;
                }
            } else if (next_page == head) {
                if (ATOMIC_READ(rb->num_writers) == 1) {
                    tail = temp_page;
                    break;
                } else {
                    if (ATOMIC_READ(rb->num_writers) == 1)
                        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
                    ATOMIC_DECREMENT(rb->num_writers);
                    return -2;
                }
            }
        }
        tail = ATOMIC_CAS_RETURN(rb->tail, temp_page, next_page);
    } while (tail != temp_page && !(RQUEUE_CHECK_FLAG(ATOMIC_READ(tail->next), RQUEUE_FLAG_UPDATE)) && retries++ < RQUEUE_MAX_RETRIES);

    if (!tail) {
        if (ATOMIC_READ(rb->num_writers) == 1)
            ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), ATOMIC_READ(rb->tail));
        ATOMIC_DECREMENT(rb->num_writers);
        return -1;
    } 

    rqueue_page_t *nextp = RQUEUE_FLAG_OFF(ATOMIC_READ(tail->next), RQUEUE_FLAG_ALL);

    if (ATOMIC_CAS(tail->next, RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_HEAD), RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_UPDATE))) {
        did_update = 1;
        //fprintf(stderr, "Did update head pointer\n");
        if (rb->mode == RQUEUE_MODE_OVERWRITE) {
            // we need to advance the head if in overwrite mode ...otherwise we must stop
            //fprintf(stderr, "Will advance head and overwrite old data\n");
            rqueue_page_t *nextpp = RQUEUE_FLAG_OFF(ATOMIC_READ(nextp->next), RQUEUE_FLAG_ALL);
            if (ATOMIC_CAS(nextp->next, nextpp, RQUEUE_FLAG_ON(nextpp, RQUEUE_FLAG_HEAD))) {
                if (ATOMIC_READ(rb->tail) != next_page) {
                    ATOMIC_CAS(nextp->next, RQUEUE_FLAG_ON(nextpp, RQUEUE_FLAG_HEAD), nextpp);
                } else {
                    ATOMIC_CAS(rb->head, head, nextpp);
                    did_move_head = 1;
                }
            }
        }
    }

    void *old_value = ATOMIC_READ(tail->value);
    ATOMIC_CAS(tail->value, old_value, value);
    if (old_value && rb->free_value_cb)
        rb->free_value_cb(old_value);



    if (did_update) {
        //fprintf(stderr, "Try restoring head pointer\n");

        ATOMIC_CAS(tail->next,
                       RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_UPDATE),
                       did_move_head
                       ? RQUEUE_FLAG_OFF(nextp, RQUEUE_FLAG_ALL)
                       : RQUEUE_FLAG_ON(nextp, RQUEUE_FLAG_HEAD));

        //fprintf(stderr, "restored head pointer\n");
    }

    ATOMIC_INCREMENT(rb->writes);
    if (ATOMIC_READ(rb->num_writers) == 1)
        ATOMIC_CAS(rb->commit, ATOMIC_READ(rb->commit), tail);
    ATOMIC_DECREMENT(rb->num_writers);
    return 0;
}
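rqueue_write() above is the producer half of libhl's lock-free ring buffer. A usage sketch, assuming the companion API declared in rqueue.h (rqueue_create(), rqueue_read(), rqueue_destroy(); the names come from libhl, but treat the exact signatures as assumptions):

#include <stdio.h>
#include "rqueue.h"   /* libhl; assumed to declare the calls used below */

int main(void)
{
    /* In RQUEUE_MODE_BLOCKING a full ring makes rqueue_write() fail (-2);
       in RQUEUE_MODE_OVERWRITE the head is advanced and old data overwritten,
       per the head-advancing branch in rqueue_write() above. */
    rqueue_t *rb = rqueue_create(4, RQUEUE_MODE_OVERWRITE);

    static int values[6] = { 0, 1, 2, 3, 4, 5 };
    for (int i = 0; i < 6; i++)
        rqueue_write(rb, &values[i]);       /* oldest entries get overwritten */

    void *v;
    while ((v = rqueue_read(rb)) != NULL)   /* NULL once the ring is drained */
        printf("%d\n", *(int *)v);

    rqueue_destroy(rb);
    return 0;
}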
Example #17
void bed_normalize(t_bed *x, t_symbol *msg, short argc, t_atom *argv)
{
    if (argc > 1) {
        error("bed • The message must have at most two members");
        return;
    }

    float newmax = 1.0;
    if (argc == 1) {
        newmax = atom_getfloat(argv);
    }

    if (!bed_attach_buffer(x)) {
        return;
    }

    t_buffer *b;
    b = x->buffer;

    ATOMIC_INCREMENT(&b->b_inuse);

    if (!b->b_valid) {
        ATOMIC_DECREMENT(&b->b_inuse);
        post("bed • Not a valid buffer!");
        return;
    }

    long chunksize = b->b_frames * b->b_nchans * sizeof(float);
    if (x->undo_samples == NULL) {
        x->undo_samples = (float *)sysmem_newptr(chunksize);
    } else {
        x->undo_samples = (float *)sysmem_resizeptr(x->undo_samples, chunksize);
    }

    if (x->undo_samples == NULL) {
        error("bed • Cannot allocate memory for undo");
        x->can_undo = 0;
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    } else {
        x->can_undo = 1;
        x->undo_start = 0;
        x->undo_frames = b->b_frames;
        x->undo_resize = 0;
        x->undo_cut = 0;
        sysmem_copyptr(b->b_samples, x->undo_samples, chunksize);
    }

    float maxamp = 0.0;
    for (int ii = 0; ii < b->b_frames * b->b_nchans; ii++) {
        if (maxamp < fabs(b->b_samples[ii])) {
            maxamp = fabs(b->b_samples[ii]);
        }
    }

    float rescale;
    if (maxamp > 1e-6) {
        rescale = newmax / maxamp;
    } else {
        post("bed • Amplitude is too low to rescale: %.2f", maxamp);
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    }

    for (int ii = 0; ii < b->b_frames * b->b_nchans; ii++) {
        b->b_samples[ii] *= rescale;
    }

    object_method(&b->b_obj, gensym("dirty"));
    ATOMIC_DECREMENT(&b->b_inuse);
}
Example #18
void bed_cut(t_bed *x, double start, double end)
{
    if (!bed_attach_buffer(x)) {
        return;
    }

    t_buffer *b;
    b = x->buffer;

    ATOMIC_INCREMENT(&b->b_inuse);

    if (!b->b_valid) {
        ATOMIC_DECREMENT(&b->b_inuse);
        post("bed • Not a valid buffer!");
        return;
    }

    long startframe = start * 0.001 * b->b_sr;
    long endframe = end * 0.001 * b->b_sr;
    long cutframes = endframe - startframe;

    if (startframe < 0 || endframe > b->b_frames || startframe > endframe) {
        post("bed • %.0fms and %.0fms are not valid cut times", start, end);
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    }

    long chunksize = cutframes * b->b_nchans * sizeof(float);
    if (x->undo_samples == NULL) {
        x->undo_samples = (float *)sysmem_newptr(chunksize);
    } else {
        x->undo_samples = (float *)sysmem_resizeptr(x->undo_samples, chunksize);
    }

    if (x->undo_samples == NULL) {
        error("bed • Cannot allocate memory for undo");
        x->can_undo = 0;
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    } else {
        x->can_undo = 1;
        x->undo_start = startframe;
        x->undo_frames = cutframes;
        x->undo_resize = 1;
        x->undo_cut = 1;
        sysmem_copyptr(b->b_samples + (startframe * b->b_nchans),
                       x->undo_samples, chunksize);
    }

    long bufferframes = b->b_frames;
    long buffersize = bufferframes * b->b_nchans * sizeof(float);
    float *local_buffer = (float *)sysmem_newptr(buffersize);
    if (local_buffer == NULL) {
        error("bed • Cannot allocate memory for undo");
        x->can_undo = 0;
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    } else {
        sysmem_copyptr(b->b_samples, local_buffer, buffersize);
    }

    ATOMIC_DECREMENT(&b->b_inuse);
    t_atom rv;
    object_method_long(&b->b_obj, gensym("sizeinsamps"),
                       (b->b_frames - cutframes), &rv);
    ATOMIC_INCREMENT(&b->b_inuse);

    chunksize = startframe * b->b_nchans * sizeof(float);
    sysmem_copyptr(local_buffer, b->b_samples, chunksize);
    chunksize = (bufferframes - endframe) * b->b_nchans * sizeof(float);
    sysmem_copyptr(local_buffer + (endframe * b->b_nchans),
                   b->b_samples + (startframe * b->b_nchans),
                   chunksize);

    sysmem_freeptr(local_buffer);

    object_method(&b->b_obj, gensym("dirty"));
    ATOMIC_DECREMENT(&b->b_inuse);
}
Example #19
void bed_shuffle_n_segments(t_bed *x, long segments)
{
    if (!bed_attach_buffer(x)) {
        return;
    }

    t_buffer *b;
    b = x->buffer;

    ATOMIC_INCREMENT(&b->b_inuse);

    if (!b->b_valid) {
        ATOMIC_DECREMENT(&b->b_inuse);
        post("bed • Not a valid buffer!");
        return;
    }

    long chunksize = b->b_frames * b->b_nchans * sizeof(float);
    if (x->undo_samples == NULL) {
        x->undo_samples = (float *)sysmem_newptr(chunksize);
    } else {
        x->undo_samples = (float *)sysmem_resizeptr(x->undo_samples, chunksize);
    }

    if (x->undo_samples == NULL) {
        error("bed • Cannot allocate memory for undo");
        x->can_undo = 0;
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    } else {
        x->can_undo = 1;
        x->undo_start = 0;
        x->undo_frames = b->b_frames;
        x->undo_resize = 0;
        x->undo_cut = 0;
        sysmem_copyptr(b->b_samples, x->undo_samples, chunksize);
    }

    if (segments <= 0) {
        post("bed • %ld is not a valid number of segments", segments);
        ATOMIC_DECREMENT(&b->b_inuse);
        return;
    }

    long totallength = b->b_frames;
    long basesegmentlength = (totallength + segments - 1) / segments; // round up; ceil() on integer division would truncate first

    long randomsegment;
    long start;
    long end;
    long bufferlength;
    long buffersize;
    float *local_buffer = NULL;

    while (segments > 0) {
        randomsegment = rand() % segments;
        start = randomsegment * basesegmentlength;
        end = start + basesegmentlength;

        if (end > totallength) {
            end = totallength;
        }

        bufferlength = (end - start);
        buffersize = bufferlength * b->b_nchans * sizeof(float);
        if (local_buffer == NULL) {
            local_buffer = (float *)sysmem_newptr(buffersize);
        } else {
            local_buffer = (float *)sysmem_resizeptr(local_buffer, buffersize);
        }
        sysmem_copyptr(b->b_samples + (start * b->b_nchans),
                       local_buffer,
                       buffersize);

        for (long ii = end; ii < totallength; ii++) {
            for (int jj = 0; jj < b->b_nchans; jj++) {
                b->b_samples[((ii - bufferlength) * b->b_nchans) + jj] =
                    b->b_samples[(ii * b->b_nchans) + jj];
            }
        }
        sysmem_copyptr(local_buffer,
                       b->b_samples + (totallength - bufferlength) * b->b_nchans,
                       buffersize);

        totallength -= bufferlength;
        segments--;
    }

    sysmem_freeptr(local_buffer);

    object_method(&b->b_obj, gensym("dirty"));
    ATOMIC_DECREMENT(&b->b_inuse);
}
Example #20
/* Move the object to the given state. If the state transition requires it,
 * fetch, evict, or destroy the object. */
static inline int
arc_move(arc_t *cache, arc_object_t *obj, arc_state_t *state)
{
    // In the first conditional we check if the object is locked,
    // which means someone is fetching its value and we don't want
    // to mess with it. Whoever is fetching will also take care of moving it
    // to one of the lists (or dropping it).
    // NOTE: while the object is being fetched it doesn't belong
    //       to any list, so there is no point in going ahead;
    //       arc_balance() also never visits this object
    //       (since it's in none of the lists), so it won't be affected.
    //       The only call which would silently fail is arc_remove(),
    //       but whether an object that is being fetched needs to be
    //       removed will be determined by whoever is fetching it or by the
    //       next call to arc_balance() (which will happen anyway if
    //       the fetcher puts the object into the cache).
    //
    // The second conditional handles a specific corner case which
    // happens when concurrent threads access an item that has just been fetched
    // but also dropped (so its state is NULL).
    // If a thread entering arc_lookup() manages to get the object out of the hashtable
    // before it is deleted, it would try to move the object to the mfu list without first
    // checking whether it was already in a list (new objects must first be moved to the
    // mru list, not the mfu one).
    if (UNLIKELY(obj->locked || (state == &cache->mfu && ATOMIC_READ(obj->state) == NULL)))
        return 0;

    MUTEX_LOCK(&cache->lock);

    arc_state_t *obj_state = ATOMIC_READ(obj->state);

    if (LIKELY(obj_state != NULL)) {

        if (LIKELY(obj_state == state)) {
            // short path for recurring keys
            // (those in the mfu list being hit again)
            if (LIKELY(state->head.next != &obj->head))
                arc_list_move_to_head(&obj->head, &state->head);
            MUTEX_UNLOCK(&cache->lock);
            return 0;
        }

        // if the target state is not NULL
        // (i.e. the object is not about to be removed)
        // move the ^ (p) marker
        if (LIKELY(state != NULL)) {
            if (obj_state == &cache->mrug) {
                size_t csize = cache->mrug.size
                             ? (cache->mfug.size / cache->mrug.size)
                             : cache->mfug.size / 2;
                cache->p = MIN(cache->c, cache->p + MAX(csize, 1));
            } else if (obj_state == &cache->mfug) {
                size_t csize = cache->mfug.size
                             ? (cache->mrug.size / cache->mfug.size)
                             : cache->mrug.size / 2;
                cache->p = MAX(0, cache->p - MAX(csize, 1));
            }
        }

        ATOMIC_DECREASE(obj_state->size, obj->size);
        arc_list_remove(&obj->head);
        ATOMIC_DECREMENT(obj_state->count);
        ATOMIC_SET(obj->state, NULL);
    }

    if (state == NULL) {
        if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
            release_ref(cache->refcnt, obj->node);
    } else if (state == &cache->mrug || state == &cache->mfug) {
        obj->async = 0;
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    } else if (obj_state == NULL) {

        obj->locked = 1;
        
        // unlock the cache while the backend is fetching the data
        // (the object has been locked while being fetched so nobody
        // will change its state)
        MUTEX_UNLOCK(&cache->lock);
        size_t size = 0;
        int rc = cache->ops->fetch(obj->ptr, &size, cache->ops->priv);
        switch (rc) {
            case 1:
            case -1:
            {
                if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                    release_ref(cache->refcnt, obj->node);
                return rc;
            }
            default:
            {
                if (size >= cache->c) {
                    // the (single) object doesn't fit in the cache, let's return it
                    // to the getter without (re)adding it to the cache
                    if (ht_delete_if_equals(cache->hash, (void *)obj->key, obj->klen, obj, sizeof(arc_object_t)) == 0)
                        release_ref(cache->refcnt, obj->node);
                    return 1;
                }
                MUTEX_LOCK(&cache->lock);
                obj->size = ARC_OBJ_BASE_SIZE(obj) + cache->cos + size;
                arc_list_prepend(&obj->head, &state->head);
                ATOMIC_INCREMENT(state->count);
                ATOMIC_SET(obj->state, state);
                ATOMIC_INCREASE(state->size, obj->size);
                ATOMIC_INCREMENT(cache->needs_balance);
                break;
            }
        }
        // since this object is going to be put back into the cache,
        // we need to unmark it so that it won't be ignored next time
        // it's going to be moved to another list
        obj->locked = 0;
    } else {
        arc_list_prepend(&obj->head, &state->head);
        ATOMIC_INCREMENT(state->count);
        ATOMIC_SET(obj->state, state);
        ATOMIC_INCREASE(state->size, obj->size);
    }
    MUTEX_UNLOCK(&cache->lock);
    return 0;
}
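Stripped of the fetch logic, the list bookkeeping in arc_move() always follows the same mirrored pairing, with the cache mutex held. A reduced sketch of that invariant, reusing the macros and list helpers from above:

/* detach from the old list ... */
ATOMIC_DECREASE(old_state->size, obj->size);   /* bytes accounted to the old list */
arc_list_remove(&obj->head);
ATOMIC_DECREMENT(old_state->count);            /* one object fewer in it */
ATOMIC_SET(obj->state, NULL);                  /* object briefly belongs nowhere */

/* ... then attach to the new one, in the reverse order */
arc_list_prepend(&obj->head, &new_state->head);
ATOMIC_INCREMENT(new_state->count);
ATOMIC_SET(obj->state, new_state);
ATOMIC_INCREASE(new_state->size, obj->size);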