__LIBC_HIDDEN__
int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__likely(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_unlock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
        return EPERM;

    /* If the counter is > 0, we can simply decrement it atomically.
     * Since other threads can mutate the lower state bits (and only the
     * lower state bits), use a cmpxchg to do it.
     */
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        for (;;) {
            int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
            if (__likely(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
                /* success: we still own the mutex, so no memory barrier */
                return 0;
            }
            /* the value changed, so reload and loop */
            mvalue = mutex->value;
        }
    }

    /* the counter is 0, so we're going to unlock the mutex by resetting
     * its value to 'unlocked'. We need to perform a swap in order
     * to read the current state, which will be 2 if there are waiters
     * to awake.
     *
     * TODO: Change this to __bionic_swap_release when we implement it
     *       to get rid of the explicit memory barrier below.
     */
    ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
    mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);

    /* Wake one waiting thread, if any */
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(&mutex->value, shared, 1);
    }
    return 0;
}
char*strtok_r(char*s,const char*delim,char**ptrptr) {
  char*tmp=0;

  if (s==0) s=*ptrptr;
  s+=strspn(s,delim);		/* skip leading delimiters */
  if (__likely(*s)) {
    tmp=s;
    s+=strcspn(s,delim);
    if (__likely(*s)) *s++=0;	/* not the end ? => terminate it */
  }
  *ptrptr=s;
  return tmp;
}
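/* Illustrative only (not part of the library source): a minimal sketch of how
 * the reentrant tokenizer above is typically driven. The input string and the
 * delimiter set are made up; 'saveptr' is the caller-provided state that
 * strtok_r updates between calls. */
#include <stdio.h>
#include <string.h>

int main(void) {
  char line[] = "alpha,beta,,gamma";   /* hypothetical input, modified in place */
  char *saveptr;
  char *tok;
  for (tok = strtok_r(line, ",", &saveptr); tok; tok = strtok_r(NULL, ",", &saveptr))
    printf("token: %s\n", tok);        /* runs of delimiters yield no empty tokens */
  return 0;
}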
int pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
    int value = 0;

    if (mutex == NULL)
        return EINVAL;

    if (__likely(attr == NULL)) {
        mutex->value = MUTEX_TYPE_BITS_NORMAL;
        return 0;
    }

    if ((*attr & MUTEXATTR_SHARED_MASK) != 0)
        value |= MUTEX_SHARED_MASK;

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_BITS_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_BITS_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_BITS_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    mutex->value = value;
    return 0;
}
/* This common inlined function is used to increment the counter of an
 * errorcheck or recursive mutex.
 *
 * For errorcheck mutexes, it will return EDEADLK.
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0
 * after providing an acquire barrier.
 *
 * mtype is the current mutex type
 * mvalue is the current mutex value (already loaded)
 * mutex points to the mutex.
 */
static __inline__ __attribute__((always_inline)) int
_recursive_increment(pthread_mutex_t* mutex, int mvalue, int mtype)
{
    if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
        /* trying to re-lock a mutex we already acquired */
        return EDEADLK;
    }

    /* Detect recursive lock overflow and return EAGAIN.
     * This is safe because only the owner thread can modify the
     * counter bits in the mutex value.
     */
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
        return EAGAIN;
    }

    /* We own the mutex, but other threads are able to change
     * the lower bits (e.g. promoting it to "contended"), so we
     * need to use an atomic cmpxchg loop to update the counter.
     */
    for (;;) {
        /* increment counter, overflow was already checked */
        int newval = mvalue + MUTEX_COUNTER_BITS_ONE;
        if (__likely(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
            /* mutex is still locked, no need for a memory barrier */
            return 0;
        }
        /* the value was changed, this happens when another thread changes
         * the lower state bits from 1 to 2 to indicate contention. This
         * cannot change the counter, so simply reload and try again.
         */
        mvalue = mutex->value;
    }
}
int
timer_delete( timer_t  id )
{
    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) )
        return __timer_delete( id );
    else
    {
        thr_timer_table_t*  table = __timer_table_get();
        thr_timer_t*        timer = thr_timer_table_from_id(table, id, 1);

        if (timer == NULL) {
            errno = EINVAL;
            return -1;
        }

        /* tell the timer's thread to stop */
        thr_timer_lock(timer);
        timer->done = 1;
        pthread_cond_signal( &timer->cond );
        thr_timer_unlock(timer);

        /* NOTE: the thread will call __timer_table_free() to free the
         * timer object. the '1' parameter to thr_timer_table_from_id
         * above ensured that the object and its timer_id cannot be
         * reused before that.
         */
        return 0;
    }
}
static int dprintf_helper( void* data, char c ) {
    if ( __likely( debug != NULL ) ) {
        debug->ops->putchar( debug, c );
    }

    return 0;
}
/**
 * Get the next (valid) huffman code in the stream.
 *
 * To speed up decoding, we first look at HUFFMAN_HASH_NBITS bits: if the code
 * is no longer than HUFFMAN_HASH_NBITS, two lookup tables give us its length
 * and value directly. Otherwise we fall back to a linear search of the slow
 * tables, one per additional code length, to check whether the code is present.
 *
 * If the code is not found, 0 is returned.
 */
static int get_next_huffman_code(struct jdec_private *priv, struct huffman_table *huffman_table)
{
  int value, hcode;
  unsigned int extra_nbits, nbits;
  uint16_t *slowtable;

  look_nbits(priv->reservoir, priv->nbits_in_reservoir, priv->stream, HUFFMAN_HASH_NBITS, hcode);
  value = huffman_table->lookup[hcode];
  if (__likely(value >= 0))
  {
     unsigned int code_size = huffman_table->code_size[value];
     skip_nbits(priv->reservoir, priv->nbits_in_reservoir, priv->stream, code_size);
     return value;
  }

  /* Decode more bits each time ... */
  for (extra_nbits=0; extra_nbits<16-HUFFMAN_HASH_NBITS; extra_nbits++)
  {
     nbits = HUFFMAN_HASH_NBITS + 1 + extra_nbits;

     look_nbits(priv->reservoir, priv->nbits_in_reservoir, priv->stream, nbits, hcode);
     slowtable = huffman_table->slowtable[extra_nbits];
     /* Search if the code is in this array */
     while (slowtable[0]) {
        if (slowtable[0] == hcode) {
           skip_nbits(priv->reservoir, priv->nbits_in_reservoir, priv->stream, nbits);
           return slowtable[1];
        }
        slowtable+=2;
     }
  }
  return 0;
}
__LIBC_HIDDEN__
int pthread_mutex_trylock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if ( __likely(mtype == MUTEX_TYPE_BITS_NORMAL) )
    {
        if (__bionic_cmpxchg(shared|MUTEX_STATE_BITS_UNLOCKED,
                             shared|MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
                             &mutex->value) == 0) {
            ANDROID_MEMBAR_FULL();
            return 0;
        }
        return EBUSY;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
        return _recursive_increment(mutex, mvalue, mtype);

    /* Same as pthread_mutex_lock, except that we don't want to wait, and
     * the only operation that can succeed is a single cmpxchg to acquire the
     * lock if it is released / not owned by anyone. No need for a complex loop.
     */
    mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    if (__likely(__bionic_cmpxchg(mtype, mvalue, &mutex->value) == 0)) {
        ANDROID_MEMBAR_FULL();
        return 0;
    }

    return EBUSY;
}
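/* Illustrative only (not from bionic): a sketch of the non-blocking pattern the
 * public pthread_mutex_trylock() wrapper around the implementation above
 * enables. The mutex, counter, and function name here are hypothetical. */
#include <pthread.h>
#include <errno.h>

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static long dropped_updates;

void record_update_nonblocking(long *counter) {
    int rc = pthread_mutex_trylock(&stats_lock);
    if (rc == 0) {
        (*counter)++;                       /* got the lock without waiting */
        pthread_mutex_unlock(&stats_lock);
    } else if (rc == EBUSY) {
        dropped_updates++;                  /* contended: skip rather than block */
    }
}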
// Write block
FXival FXIOBuffer::writeBlock(const void* ptr,FXival count){
  if(__likely(access&WriteOnly)){
    FXival remaining=space-pointer;
    if(count>remaining) count=remaining;
    memcpy(&buffer[pointer],ptr,count);
    pointer+=count;
    return count;
    }
  return 0;
  }
// Read block
FXival FXIOBuffer::readBlock(void* ptr,FXival count){
  if(__likely(access&ReadOnly)){
    FXival remaining=space-pointer;
    if(count>remaining) count=remaining;
    memcpy(ptr,&buffer[pointer],count);
    pointer+=count;
    return count;
    }
  return 0;
  }
void* iarray_allocate(iarray* ia,size_t pos) {
  size_t y;
  /* first the easy case without locking */
  if (__likely((y=pos/ia->elemperpage) < ia->pagefence && ia->pages[y]))
    return ia->pages[y]+(pos%ia->elemperpage)*ia->elemsize;
  /* the case where ia->pages == NULL is implicit */

#ifdef __MINGW32__
  EnterCriticalSection(&ia->cs);
#else
  pthread_mutex_lock(&ia->m);
#endif

  if (__unlikely(y >= ia->pagefence)) {
    char** np;
    /* The data structure is an array of pointers to pages.
     * Each page holds at least one element of the array.
     * Here we realloc the array of pointers.  Each element in this
     * array is only 4 or 8 bytes, so we should allocate a few more than
     * we need to cut down on future reallocs. */
    size_t z=(y+512)&-512;	/* round up to multiple of 512 */

    /* It may seem as if there can be no integer overflow in the
     * indirect index, because then the array would not fit into the
     * address space in the first place, but remember that this is a
     * sparse array.  Someone might just pass in an unreasonably large
     * index and have large elements, too. */
    if (z==0) goto unlockandfail;	/* integer overflow */

    np=realloc(ia->pages,z*ia->bytesperpage);
    if (!np) goto unlockandfail;
    /* zero the newly added page pointers so the NULL check below is valid */
    memset(np+ia->pagefence,0,(z-ia->pagefence)*sizeof(*np));
    ia->pagefence=z;
    ia->pages=np;
  }

  /* at this point we know the slot exists */
  /* through a race between the early-out above and the
   * pthread_mutex_lock, the page pointer to it could be non-NULL,
   * however */
  if (__unlikely(ia->pages[y]==0 && (ia->pages[y]=malloc(ia->bytesperpage))==0)) {
unlockandfail:
#ifdef __MINGW32__
    LeaveCriticalSection(&ia->cs);
#else
    pthread_mutex_unlock(&ia->m);
#endif
    return 0;
  }

#ifdef __MINGW32__
  LeaveCriticalSection(&ia->cs);
#else
  pthread_mutex_unlock(&ia->m);
#endif

  return ia->pages[y] + (pos%ia->elemperpage)*ia->elemsize;
}
void GMDBTracks::insert(GMTrack & track,FXint & path_index) {
  if (__likely(track.index==0)) {
    FXASSERT(!track.url.empty());
    FXASSERT(path_index>=0);

    if (path_index==0) {
      path_index = insertPath(FXPath::directory(track.url));
      FXASSERT(path_index);
      if (!path_index) fxwarning("pid==0 for %s\n",FXPath::directory(track.url).text());
      }

    /// Artist
    FXint album_artist_id = insertArtist(track.getAlbumArtist(default_artist));
    FXint artist_id       = insertArtist(track.getArtist(default_artist));
    FXint composer_id     = insertArtist(track.composer);
    FXint conductor_id    = insertArtist(track.conductor);
    FXint album_id        = insertAlbum(track,album_artist_id);

    FXASSERT(artist_id);
    FXASSERT(album_id);

    /// Insert Track
    insert_track.set(0,path_index);
    insert_track.set(1,path_index ? FXPath::name(track.url) : track.url);
    insert_track.set(2,track.title.empty() ? FXPath::title(track.url) : track.title);
    insert_track.set(3,track.time);
    insert_track.set(4,track.no);
    insert_track.set(5,track.year);
    insert_track.set(6,(track.sampleformat) ? -track.sampleformat : track.bitrate);
    insert_track.set(7,album_id);
    insert_track.set(8,artist_id);
    insert_track.set_null(9,composer_id);
    insert_track.set_null(10,conductor_id);
    insert_track.set(11,FXThread::time());
    insert_track.set(12,track.samplerate);
    insert_track.set(13,track.channels);
    insert_track.set(14,track.filetype);
    track.index = insert_track.insert();

    /// Tags
    if (track.tags.no())
      insertTags(track.index,track.tags);
    }

  /// Add to playlist
  if (playlist) {
    insert_playlist_track_by_id.set(0,playlist);
    insert_playlist_track_by_id.set(1,track.index);
    insert_playlist_track_by_id.set(2,playlist_queue);
    insert_playlist_track_by_id.execute();
    }
  }
// Move to position
FXlong FXIOBuffer::position(FXlong offset,FXuint from){
  if(__likely(access&ReadWrite)){
    if(from==Current) offset=pointer+offset;
    else if(from==End) offset=space+offset;
    if(0<=offset && offset<=(FXlong)space){
      pointer=offset;
      return pointer;
      }
    }
  return -1;
  }
int fgetc_orig(FILE *file)
{
	struct _IO_file_pvt *f = stdio_pvt(file);
	unsigned char ch;

	if (__likely(f->ibytes)) {
		f->ibytes--;
		return (unsigned char) *f->data++;
	} else {
		return _fread(&ch, 1, file) == 1 ? ch : EOF;
	}
}
/* gcc is broken and has a non-SUSv2 compliant internal prototype.
 * This causes it to warn about a type mismatch here.  Ignore it. */
int memcmp(const void *dst, const void *src, size_t count) {
  register int r;
  register const unsigned char *d=dst;
  register const unsigned char *s=src;
  ++count;
  while (__likely(--count)) {
    if (__unlikely(r=(*d - *s)))
      return r;
    ++d;
    ++s;
  }
  return 0;
}
// invoke (compiling if necessary) the jlcall function pointer for a method template
STATIC_INLINE jl_value_t *jl_call_staged(jl_svec_t *sparam_vals, jl_lambda_info_t *meth,
                                         jl_value_t **args, uint32_t nargs)
{
    if (__unlikely(meth->fptr == NULL)) {
        jl_compile_linfo(meth);
        jl_generate_fptr(meth);
    }
    assert(jl_svec_len(meth->sparam_syms) == jl_svec_len(sparam_vals));
    if (__likely(meth->jlcall_api == 0))
        return meth->fptr(args[0], &args[1], nargs-1);
    else
        return ((jl_fptr_sparam_t)meth->fptr)(sparam_vals, args[0], &args[1], nargs-1);
}
char *strstr(const char *haystack, const char *needle) {
  size_t nl=strlen(needle);
  size_t hl=strlen(haystack);
  size_t i;	/* size_t: the counter is derived from string lengths */
  if (!nl) goto found;
  if (nl>hl) return 0;
  for (i=hl-nl+1; __likely(i); --i) {
    if (*haystack==*needle && !memcmp(haystack,needle,nl))
found:
      return (char*)haystack;
    ++haystack;
  }
  return 0;
}
FXival HttpInput::icy_read(void*ptr,FXival count){
  FXchar * out = static_cast<FXchar*>(ptr);
  FXival nread=0,n=0;

  if (icy_count<count) {

    /// Read up to icy buffer
    nread=client.readBody(out,icy_count);
    if (__unlikely(nread!=icy_count)) {
      if (nread>0) {
        icy_count-=nread;
        }
      return nread;
      }

    // Adjust output
    out+=nread;
    count-=nread;

    /// Read icy buffer size
    FXuchar b=0;
    n=client.readBody(&b,1);
    if (__unlikely(n!=1)) return -1;

    /// Read icy buffer
    if (b) {
      FXushort icy_size=((FXushort)b)*16;
      FXString icy_buffer;
      icy_buffer.length(icy_size);
      n=client.readBody(&icy_buffer[0],icy_size);
      if (__unlikely(n!=icy_size)) return -1;
      icy_parse(icy_buffer);
      }

    /// reset icy count
    icy_count=icy_interval;

    /// Read remaining bytes
    n=client.readBody(out,count);
    if (__unlikely(n!=count)) return -1;
    nread+=n;
    icy_count-=n;
    }
  else {
    nread=client.readBody(out,count);
    if (__likely(nread>0)) {
      icy_count-=nread;
      }
    }
  return nread;
  }
int
timer_settime( timer_t                   id,
               int                       flags,
               const struct itimerspec*  spec,
               struct itimerspec*        ospec )
{
    if (spec == NULL) {
        errno = EINVAL;
        return -1;
    }

    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
        return __timer_settime( id, flags, spec, ospec );
    } else {
        thr_timer_t*     timer = thr_timer_from_id(id);
        struct timespec  expires, now;

        if (timer == NULL) {
            errno = EINVAL;
            return -1;
        }
        thr_timer_lock(timer);

        /* return current timer value if ospec isn't NULL */
        if (ospec != NULL) {
            timer_gettime_internal(timer, ospec );
        }

        /* compute next expiration time. note that if the
         * new it_value is 0, we should disarm the timer
         */
        expires = spec->it_value;
        if (!timespec_is_zero(&expires)) {
            clock_gettime( timer->clock, &now );
            if (!(flags & TIMER_ABSTIME)) {
                timespec_add(&expires, &now);
            } else {
                if (timespec_cmp(&expires, &now) < 0)
                    expires = now;
            }
        }
        timer->expires = expires;
        timer->period  = spec->it_interval;
        thr_timer_unlock( timer );

        /* signal the change to the thread */
        pthread_cond_signal( &timer->cond );
    }
    return 0;
}
void *bsearch(const void *key, const void *base, size_t nmemb, size_t size,
              int (*compar)(const void*, const void*)) {
  size_t m;
  while (__likely(nmemb)) {
    int tmp;
    void *p;
    m=nmemb/2;
    p=(void *) (((const char *) base) + (m * size));
    if ((tmp=(*compar)(key,p))<0) {
      nmemb=m;
    } else if (tmp>0) {
      base=(const char *)p+size;	/* avoid non-standard void* arithmetic */
      nmemb-=m+1;
    } else
      return p;
  }
  return 0;
}
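/* Illustrative only (not part of the library source): a small usage sketch for
 * bsearch() above. The integer table and comparator are made up; the array must
 * already be sorted in the order the comparator defines. */
#include <stdio.h>
#include <stdlib.h>

static int cmp_int(const void *a, const void *b) {
  int x = *(const int *)a, y = *(const int *)b;
  return (x > y) - (x < y);
}

int main(void) {
  int table[] = {2, 3, 5, 7, 11, 13};
  int key = 7;
  int *hit = bsearch(&key, table, sizeof table / sizeof table[0],
                     sizeof table[0], cmp_int);
  if (hit) printf("found %d at index %td\n", *hit, hit - table);
  else printf("%d not found\n", key);
  return 0;
}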
unsigned long int strtoul(const char *ptr, char **endptr, int base)
{
  int neg = 0;
  unsigned long int v=0;
  const char* orig;
  const char* nptr=ptr;

  while(*nptr == ' ') ++nptr;

  if (*nptr == '-') { neg=1; nptr++; }
  else if (*nptr == '+') ++nptr;
  orig=nptr;
  if (base==16 && nptr[0]=='0') goto skip0x;
  if (base) {
    register unsigned int b=base-2;
    if (__unlikely(b>34)) { return 0; }
  } else {
    if (*nptr=='0') {
      base=8;
skip0x:
      if ((nptr[1]=='x'||nptr[1]=='X')) { nptr+=2; base=16; }
    } else
      base=10;
  }

  while(__likely(*nptr)) {
    register unsigned char c=*nptr;
    c=(c>='a'?c-'a'+10:c>='A'?c-'A'+10:c<='9'?c-'0':0xff);
    if (__unlikely(c>=base)) break;	/* out of base */
    {
      register unsigned long x=(v&0xff)*base+c;
      register unsigned long w=(v>>8)*base+(x>>8);
      v=(w<<8)+(x&0xff);
    }
    ++nptr;
  }
  if (__unlikely(nptr==orig)) {	/* no conversion done */
    nptr=ptr;
    v=0;
  }
  if (endptr) *endptr=(char *)nptr;
  return (neg?-v:v);
}
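/* Illustrative only (not part of the library source): the endptr convention
 * implemented above. The input string is made up; base 0 asks the function to
 * auto-detect the 0x/0 prefix. */
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  const char *input = "0x1f4 trailing";
  char *end;
  unsigned long v = strtoul(input, &end, 0);
  printf("value=%lu rest=\"%s\"\n", v, end);  /* value=500 rest=" trailing" */
  return 0;
}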
int
timer_getoverrun(timer_t  id)
{
    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
        return __timer_getoverrun( id );
    } else {
        thr_timer_t*  timer = thr_timer_from_id(id);
        int           result;

        if (timer == NULL) {
            errno = EINVAL;
            return -1;
        }

        thr_timer_lock(timer);
        result = timer->overruns;
        thr_timer_unlock(timer);

        return result;
    }
}
// Change number of items in list
FXbool FXArrayBase::resize(FXival num,FXival sz){
  register FXival old=*(((FXival*)ptr)-1);
  if(__likely(old!=num)){
    register FXptr p;
    if(0<num){
      if(ptr!=EMPTY){
        if(__unlikely((p=::realloc(((FXival*)ptr)-1,sizeof(FXival)+num*sz))==NULL)) return false;
        }
      else{
        if(__unlikely((p=::malloc(sizeof(FXival)+num*sz))==NULL)) return false;
        }
      ptr=((FXival*)p)+1;
      *(((FXival*)ptr)-1)=num;
      }
    else{
      if(ptr!=EMPTY){
        ::free(((FXival*)ptr)-1);
        ptr=EMPTY;
        }
      }
    }
  return true;
  }
static int kprintf_helper( void* data, char c ) {
    int loglevel;

    loglevel = *( ( int* )data );

    if ( __likely( ( loglevel > DEBUG ) && ( screen != NULL ) ) ) {
        screen->ops->putchar( screen, c );
    }

    if ( debug != NULL ) {
        debug->ops->putchar( debug, c );
    }

    if ( kernel_console_size < KERNEL_CONSOLE_SIZE ) {
        kernel_console[ kernel_write_pos ] = c;
        kernel_write_pos = ( kernel_write_pos + 1 ) % KERNEL_CONSOLE_SIZE;
        kernel_console_size++;
    }

    return 0;
}
int
timer_gettime( timer_t  id, struct itimerspec*  ospec )
{
    if (ospec == NULL) {
        errno = EINVAL;
        return -1;
    }

    if ( __likely(!TIMER_ID_IS_WRAPPED(id)) ) {
        return __timer_gettime( id, ospec );
    } else {
        thr_timer_t*  timer = thr_timer_from_id(id);

        if (timer == NULL) {
            errno = EINVAL;
            return -1;
        }
        thr_timer_lock(timer);
        timer_gettime_internal( timer, ospec );
        thr_timer_unlock(timer);
    }
    return 0;
}
/*@ requires file == \null || file == &(stdio_pvt(file)->pub);
    requires valid_IO_file_pvt(stdio_pvt(file));
    assigns stdio_pvt(file)->ibytes, stdio_pvt(file)->pub._IO_eof,
            stdio_pvt(file)->pub._IO_error, stdio_pvt(file)->obytes, errno;
 @*/
int fflush(FILE *file)
{
	struct _IO_file_pvt *f;

	if (__likely(file)) {
		f = stdio_pvt(file);
		return __fflush(f);
	} else {
		int err = 0;

		/*@ loop invariant valid_IO_file_pvt(f);
		    loop invariant f != &__stdio_headnode;
		    loop assigns f, err;
		 @*/
		for (f = __stdio_headnode.next;
		     f != &__stdio_headnode;
		     f = f->next) {
			if (f->obytes)
				err |= __fflush(f);
		}
		return err;
	}
}
char *fgets_unlocked(char *s, int size, FILE *stream) {
  int l;
  for (l=0; l<size-1; ) {	/* leave room for the terminating NUL */
    register int c;
    if (l && __likely(stream->bm<stream->bs)) {
      /* try common case first */
      c=(unsigned char)stream->buf[stream->bm++];
    } else {
      c=fgetc_unlocked(stream);
      if (__unlikely(c==EOF)) {
        if (!l) return 0;
        break;
      }
    }
    s[l]=c;
    ++l;
    if (c=='\n') break;
  }
  s[l]=0;	/* always NUL-terminate, even if no newline was seen */
  return s;
}
int fputc_unlocked(int c, FILE *stream) {
  if (!__likely(stream->flags&CANWRITE) || __fflush4(stream,0)) {
kaputt:
    stream->flags|=ERRORINDICATOR;
    return EOF;
  }
  if (__unlikely(stream->bm>=stream->buflen-1))
    if (fflush_unlocked(stream)) goto kaputt;
  if (stream->flags&NOBUF) {
#if __BYTE_ORDER == __LITTLE_ENDIAN
    if (__libc_write(stream->fd,&c,1) != 1)
#else
    if (__libc_write(stream->fd,(char*)&c+sizeof(c)-1,1) != 1)
#endif
      goto kaputt;
    return (unsigned char)c;	/* fputc returns the character written */
  }
  stream->buf[stream->bm]=c;
  ++stream->bm;
  if (((stream->flags&BUFLINEWISE) && c=='\n') ||
      ((stream->flags&NOBUF)))	/* puke */
    if (fflush_unlocked(stream)) goto kaputt;
  return (unsigned char)c;	/* fputc returns the character written */
}
esvmHogPyr *computeHogScale(cv::Mat img,const int cellWidth,
		const int maxLevels,const int minDimension,const int interval,
		const float minScale,const bool enablePadding,const int padding,
		const int userTasks,const bool useMexResize)
{
	int numRows = img.rows;
	int numCols = img.cols;
	int numChannels = 3;

	float sc = pow(2,1.0/interval);

	esvmHog **pyr = (esvmHog **)esvmCalloc(maxLevels,sizeof(esvmHog *));
	float *scaleArr = (float *)esvmCalloc(maxLevels,sizeof(float));

	//do the first level outside the loop. So that you don't have to make a call to resize
	//OpenCV resize does some weird stuff on resize factor 1.0
	int *tmpim = RgbtoIm(img,numRows,numCols,numChannels);
	scaleArr[0]=1.0f;
	pyr[0] = computeHog(tmpim, numRows, numCols, numChannels, cellWidth,
			enablePadding, padding, userTasks);
	free(tmpim);

	int counter = 1;
	float *flIm;
	cv::Mat dst;

	if(useMexResize==true) {
		flIm = RgbtoImFlTranspose(img,numRows,numCols,numChannels);
	}

	for(int i=2;i<=maxLevels;i++) {
		float scale = 1.0 / pow(sc,i-1);
		scaleArr[i-1] = scale;
		int nr = round((float)numRows * scale);
		int nc = round((float)numCols * scale);

		if(scale < minScale) {
			break;
		}
		if(min(nr,nc)<=minDimension) {
			break;
		}

		int *im;
		if(useMexResize==false) {
			dst.create((int)nc,(int)nr,img.type());
			cv::resize(img,dst,dst.size(), 0, 0, ESVM_INTERP);
			im = RgbtoIm(dst,(int)nr,(int)nc,numChannels);
		}
		else {
			//im = mexResize(flIm,numRows,numCols,numChannels,numRows*scale,numCols*scale);
			im = mexResize(flIm,numRows,numCols,numChannels,nr,nc);
		}

		pyr[i-1] = computeHog(im, nr, nc, numChannels, cellWidth,
				enablePadding, padding, userTasks);
		if(__unlikely(pyr[i-1]==NULL))
			break;

		counter++;

		//if(useMexResize==false)
		//	cvReleaseImage(&dst);
		free(im);

		if(__likely(enablePadding==true)) {
			if( max(pyr[i-1]->rows-(padding<<1),pyr[i-1]->cols-(padding<<1)) <= minDimension) {
				break;
			}
		}
		else {
			if(max(pyr[i-1]->rows,pyr[i-1]->cols) <= minDimension) {
				break;
			}
		}
	}

	scaleArr = (float *)realloc((void *)scaleArr,counter*sizeof(float));

	esvmHogPyr *hogpyr = (esvmHogPyr *)esvmMalloc(sizeof(esvmHogPyr));
	hogpyr->num = counter;
	hogpyr->hogs = pyr;
	hogpyr->scale = scaleArr;

	//FIXME:: We can shrink pyr from maxLevels to counter, using realloc.
	//It is not a serious problem since pyr actually contains maxLevels*sizeof(esvmHog *) bytes
	//cvReleaseImage(&img);

	if(useMexResize==true)
		free(flIm);

	return hogpyr;
}
static void set_io_wait_begin(int v)
{
    if (__likely(ios_set_io_wait_func)) {
        ios_set_io_wait_func(v);
    }
}