Example #1
/*
 * Gets the sheet's parent scroll adjustments.
 * Returns TRUE on success.
 */
gboolean
sheet_get_adjustments (const Sheet *sheet, GtkAdjustment **hadj, GtkAdjustment **vadj)
{
	GtkWidget *parent;
	GtkScrolledWindow *scrolled;

	if (__unlikely (!sheet))
		return FALSE;
	if (__unlikely (!vadj || !hadj))
		return FALSE;

	parent = gtk_widget_get_parent (GTK_WIDGET (sheet));
	if (__unlikely (!parent || !GTK_IS_SCROLLED_WINDOW (parent)))
		return FALSE;
	scrolled = GTK_SCROLLED_WINDOW (parent);

	*hadj = gtk_scrolled_window_get_hadjustment (scrolled);
	if (__unlikely (!*hadj || !GTK_IS_ADJUSTMENT (*hadj)))
		return FALSE;

	*vadj = gtk_scrolled_window_get_vadjustment (scrolled);
	if (__unlikely (!*vadj || !GTK_IS_ADJUSTMENT (*vadj)))
		return FALSE;

	return TRUE;
}
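
All of the examples on this page lean on the __unlikely/__likely branch-prediction hints, which are not standard C. A minimal sketch of the usual definitions, assuming GCC/Clang's __builtin_expect (dietlibc and friends define them along these lines; elsewhere they degrade to plain expressions):

/* Hint which way a branch usually goes; harmless no-ops without GCC. */
#if defined(__GNUC__) && (__GNUC__ >= 3)
#define __likely(x)   __builtin_expect(!!(x), 1)
#define __unlikely(x) __builtin_expect(!!(x), 0)
#else
#define __likely(x)   (x)
#define __unlikely(x) (x)
#endif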
Example #2
void* array_get(array* x,uint64 membersize,int64 pos) {
  uint64 wanted;
  /* reject negative positions (pos+1<1 catches pos<0) */
  if (__unlikely(pos+1<1)) return 0;
  /* overflow-checked multiply: wanted = membersize*pos */
  if (__unlikely(!umult64(membersize,pos,&wanted))) return 0;
  /* the offset must lie inside both the allocated and initialized regions */
  if (__unlikely((int64)wanted >= x->allocated || wanted >= x->initialized)) return 0;
  return x->p + pos*membersize;
}
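
array_get guards the index-to-byte-offset multiplication with umult64, an overflow-checked 64-bit multiply. A minimal sketch of such a helper, assuming GCC/Clang's __builtin_mul_overflow (the real library routine may be written differently):

#include <stdint.h>

/* Sketch: store a*b in *c and return 1, or 0 if the product overflows 64 bits. */
static int umult64_sketch(uint64_t a, uint64_t b, uint64_t *c) {
  return !__builtin_mul_overflow(a, b, c);
}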
Example #3
JL_DLLEXPORT void jl_array_del_end(jl_array_t *a, size_t dec)
{
    size_t n = jl_array_nrows(a);
    if (__unlikely(n < dec))
        jl_bounds_error_int((jl_value_t*)a, 0);
    if (__unlikely(a->flags.isshared))
        array_try_unshare(a);
    jl_array_del_at_end(a, n - dec, dec, n);
}
Example #4
char *strrchr(const char *t, int c) {
  register char ch;
  register const char *l=0;

  ch = c;
  for (;;) {
    if (__unlikely(*t == ch)) l=t;        /* remember the most recent match */
    if (__unlikely(!*t)) return (char*)l; /* end of string: return last match */
    ++t;
  }
  return (char*)l;	/* not reached */
}
Example #5
void* iarray_allocate(iarray* ia,size_t pos) {
  size_t y;
  /* first the easy case without locking */
  if (__likely((y=pos/ia->elemperpage) < ia->pagefence && ia->pages[y]))
    return ia->pages[y]+(pos%ia->elemperpage)*ia->elemsize;
  /* the case where ia->pages == NULL is implicit */

#ifdef __MINGW32__
  EnterCriticalSection(&ia->cs);
#else
  pthread_mutex_lock(&ia->m);
#endif

  if (__unlikely(y >= ia->pagefence)) {
    char** np;
    /* The data structure is an array of pointer to pages.
     * Each page holds at least one element of the array.
     * Here we realloc the array of pointers.  Each element in this
     * array is only 4 or 8 bytes, so we should allocate a few more than
     * we need to cut down on future reallocs. */
    size_t z=(y+512)&-512;		/* round up to multiple of 512 */
    /* It may seem as if there can be no integer overflow in the
     * indirect index, because then the array would not fit into the
     * address space in the first place, but remember that this is a
     * sparse array.  Someone might just pass in an unreasonable large
     * index and have large elements, too */
    if (z==0) goto unlockandfail;	/* integer overflow */
    np=realloc(ia->pages,z*sizeof(char*));	/* z page *pointers*, not z pages */
    if (!np) goto unlockandfail;
    /* realloc does not zero the tail; the NULL test below relies on it */
    memset(np+ia->pagefence,0,(z-ia->pagefence)*sizeof(char*));
    ia->pagefence=z;
    ia->pages=np;
  }

  /* at this point we know the slot exists */
  /* through a race between the early-out above and the
   * pthread_mutex_lock, the page pointer to it could be non-NULL,
   * however */
  if (__unlikely(ia->pages[y]==0 && (ia->pages[y]=malloc(ia->bytesperpage))==0)) {
unlockandfail:
#ifdef __MINGW32__
    LeaveCriticalSection(&ia->cs);
#else
    pthread_mutex_unlock(&ia->m);
#endif
    return 0;
  }

#ifdef __MINGW32__
  LeaveCriticalSection(&ia->cs);
#else
  pthread_mutex_unlock(&ia->m);
#endif

  return ia->pages[y] + (pos%ia->elemperpage)*ia->elemsize;
}
Example #6
// Convert string to signed int
FXint __strtol(const FXchar *beg,const FXchar** end,FXint base,FXbool* ok){
  register FXlong value=__strtoll(beg,end,base,ok);
  if(__unlikely(value<INT_MIN)){
    if(ok) *ok=false;
    return INT_MIN;
    }
  if(__unlikely(value>INT_MAX)){
    if(ok) *ok=false;
    return INT_MAX;
    }
  return (FXint)value;
  }
Example #7
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;

    assert(0 <= st->fielddesc_type && st->fielddesc_type <= 2);

    // fielddesc_type 0/1/2 selects 8-, 16- or 32-bit field descriptors,
    // so offsets must fit in 2^8-1, 2^16-1 or 2^32-1 respectively
    uint64_t max_offset = (((uint64_t)1) <<
                           (1 << (3 + st->fielddesc_type))) - 1;
    uint64_t max_size = max_offset >> 1;

    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            // Should never happen
            if (__unlikely(fsz > max_size))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            jl_field_setisptr(st, i, 0);
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            jl_field_setisptr(st, i, 1);
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (sz & (al - 1))
                st->haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        jl_field_setoffset(st, i, sz);
        jl_field_setsize(st, i, fsz);
        if (__unlikely(max_offset - sz < fsz))
            jl_throw(jl_overflow_exception);
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    if (st->size > sz)
        st->haspadding = 1;
    st->pointerfree = ptrfree && !st->abstract;
}
Example #8
void Signal::set() {
#if defined(_WIN32)
  SetEvent(device);
#elif defined(HAVE_EVENTFD)
  const FXlong value=1;
  if (__unlikely(write(device,&value,sizeof(FXlong))!=sizeof(FXlong) && errno!=EAGAIN))
    fxerror("gap: failed to set signal, write to eventfd failed");
#else
  const FXuchar value=1;
  if (__unlikely(write(wrptr,&value,sizeof(FXuchar))!=sizeof(FXuchar) && errno!=EAGAIN))
    fxerror("gap: failed to set signal, write to pipe failed");
#endif
  }
Example #9
int __fflush4(FILE *stream,int next) {
    if (__unlikely(!__stdio_atexit)) {
        __stdio_atexit=1;
        atexit(__stdio_flushall);
    }
    if (__unlikely((stream->flags&BUFINPUT)!=next)) {
        int res=fflush_unlocked(stream);
        stream->flags=(stream->flags&~BUFINPUT)|next;
        return res;
    }
    if (stream->fd==0 && __stdin_is_tty()) __fflush_stdout();
    return 0;
}
Example #10
FXival HttpInput::icy_read(void*ptr,FXival count){
  FXchar * out = static_cast<FXchar*>(ptr);
  FXival nread=0,n=0;
  if (icy_count<count) {

    /// Read up to icy buffer
    nread=client.readBody(out,icy_count);
    if (__unlikely(nread!=icy_count)) {
      if (nread>0) {
        icy_count-=nread;
        }
      return nread;
      }

    // Adjust output
    out+=nread;
    count-=nread;

    /// Read icy buffer size
    FXuchar b=0;
    n=client.readBody(&b,1);
    if (__unlikely(n!=1)) return -1;

    /// Read icy buffer
    if (b) {
      FXushort icy_size=((FXushort)b)*16;
      FXString icy_buffer;
      icy_buffer.length(icy_size);
      n=client.readBody(&icy_buffer[0],icy_size);
      if (__unlikely(n!=icy_size)) return -1;
      icy_parse(icy_buffer);
      }

    /// reset icy count
    icy_count=icy_interval;

    /// Read remaining bytes
    n=client.readBody(out,count);
    if (__unlikely(n!=count)) return -1;
    nread+=n;
    icy_count-=n;
    }
  else {
    nread=client.readBody(out,count);
    if (__likely(nread>0)) {
      icy_count-=nread;
      }
    }
  return nread;
  }
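
For context, the stream framing this function walks, as implied by the code (icy_interval audio bytes between metadata chunks, one length byte scaled by 16):

  [ audio: icy_interval bytes ][ len ][ metadata: len*16 bytes ][ audio ... ]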
Example #11
STATIC_INLINE void jl_array_grow_at_end(jl_array_t *a, size_t idx,
                                        size_t inc, size_t n)
{
    // optimized for the case of only growing and shrinking at the end
    if (__unlikely(a->flags.isshared)) {
        if (a->flags.how != 3)
            jl_error("cannot resize array with shared data");
        if (inc == 0) {
            // If inc > 0, it will always trigger the slow path and unshare the
            // buffer
            array_try_unshare(a);
            return;
        }
    }
    size_t elsz = a->elsize;
    char *data = (char*)a->data;
    int has_gap = n > idx;
    if (__unlikely((n + inc) > a->maxsize - a->offset)) {
        size_t nb1 = idx * elsz;
        size_t nbinc = inc * elsz;
        size_t newlen = a->maxsize == 0 ? (inc < 4 ? 4 : inc) : a->maxsize * 2;
        while ((n + inc) > newlen - a->offset)
            newlen *= 2;
        newlen = limit_overallocation(a, n, newlen, inc);
        int newbuf = array_resize_buffer(a, newlen);
        char *newdata = (char*)a->data + a->offset * elsz;
        if (newbuf) {
            memcpy(newdata, data, nb1);
            if (has_gap) {
                memcpy(newdata + nb1 + nbinc, data + nb1, n * elsz - nb1);
            }
        }
        else if (has_gap) {
            memmove(newdata + nb1 + nbinc, newdata + nb1, n * elsz - nb1);
        }
        a->data = data = newdata;
    }
    else if (has_gap) {
        size_t nb1 = idx * elsz;
        memmove(data + nb1 + inc * elsz, data + nb1, n * elsz - nb1);
    }
    size_t newnrows = n + inc;
#ifdef STORE_ARRAY_LEN
    a->length = newnrows;
#endif
    a->nrows = newnrows;
    if (a->flags.ptrarray) {
        memset(data + idx * elsz, 0, inc * elsz);
    }
}
Example #12
static kmalloc_block_t* kmalloc_block_create(uint32_t pages) {
    kmalloc_block_t* block;
    kmalloc_chunk_t* chunk;

    block = (kmalloc_block_t*)alloc_pages(pages, MEM_COMMON);

    if (__unlikely(block == NULL)) {
        return NULL;
    }

    used_pages += pages;

    block->magic = KMALLOC_BLOCK_MAGIC;
    block->pages = pages;
    block->next = NULL;

    chunk = (kmalloc_chunk_t*)(block + 1);

    chunk->magic = KMALLOC_CHUNK_MAGIC;
    kmalloc_chunk_set_free(chunk, 1);
    chunk->block = block;
    chunk->size = (pages * PAGE_SIZE) - sizeof(kmalloc_block_t) - sizeof(kmalloc_chunk_t);
    chunk->prev = NULL;
    chunk->next = NULL;

    block->biggest_free = chunk->size;

    return block;
}
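
The layout this sets up: the block header is immediately followed by the first chunk header (block + 1), whose size covers the remainder of the allocated pages:

  [ kmalloc_block_t ][ kmalloc_chunk_t ][ chunk->size bytes of free payload ]
  |<-------------------- pages * PAGE_SIZE bytes total -------------------->|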
Example #13
void __vesacon_copy_to_screen(size_t dst, const uint32_t * src, size_t npixels)
{
    size_t win_pos, win_off;
    size_t win_size = wi.win_size;
    size_t omask = win_size - 1;
    char *win_base = wi.win_base;
    size_t l;
    size_t bytes = npixels * __vesacon_bytes_per_pixel;
    char rowbuf[bytes + 4] __aligned(4);
    const char *s;

    s = (const char *)__vesacon_format_pixels(rowbuf, src, npixels);

    while (bytes) {
	win_off = dst & omask;
	win_pos = dst & ~omask;

	if (__unlikely(win_pos != wi.win_pos))
	    set_window_pos(win_pos);

	l = min(bytes, win_size - win_off);
	memcpy(win_base + win_off, s, l);

	bytes -= l;
	s += l;
	dst += l;
    }
}
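
Note the power-of-two assumption: omask = win_size - 1 splits dst into a window base and offset only when win_size is a power of two. For example, with a 64 KiB window, dst = 0x2A340 gives win_pos = 0x20000 and win_off = 0xA340.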
Example #14
// Unsafe, assume inbounds and that dest and src have the same eltype
JL_DLLEXPORT void jl_array_ptr_copy(jl_array_t *dest, void **dest_p,
                                    jl_array_t *src, void **src_p, ssize_t n)
{
    assert(dest->flags.ptrarray && src->flags.ptrarray);
    jl_value_t *owner = jl_array_owner(dest);
    // Destination is old and doesn't refer to any young object
    if (__unlikely(jl_astaggedvalue(owner)->bits.gc == GC_OLD_MARKED)) {
        jl_value_t *src_owner = jl_array_owner(src);
        // Source is young or being promoted or might refer to young objects
        // (i.e. source is not an old object that doesn't have wb triggered)
        if (jl_astaggedvalue(src_owner)->bits.gc != GC_OLD_MARKED) {
            ssize_t done;
            if (dest_p < src_p || dest_p > src_p + n) {
                done = jl_array_ptr_copy_forward(owner, src_p, dest_p, n);
                dest_p += done;
                src_p += done;
            }
            else {
                done = jl_array_ptr_copy_backward(owner, src_p, dest_p, n);
            }
            n -= done;
        }
    }
    memmove(dest_p, src_p, n * sizeof(void*));
}
Example #15
/* This function is used by pthread_cond_broadcast and
 * pthread_cond_signal to atomically decrement the counter
 * then wake-up 'counter' threads.
 */
static int
__pthread_cond_pulse(pthread_cond_t *cond, int  counter)
{
    long flags;

    if (__unlikely(cond == NULL))
        return EINVAL;

    flags = (cond->value & ~COND_COUNTER_MASK);
    for (;;) {
        long oldval = cond->value;
        long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
                      | flags;
        if (__bionic_cmpxchg(oldval, newval, &cond->value) == 0)
            break;
    }

    /*
     * Ensure that all memory accesses previously made by this thread are
     * visible to the woken thread(s).  On the other side, the "wait"
     * code will issue any necessary barriers when locking the mutex.
     *
     * This may not strictly be necessary -- if the caller follows
     * recommended practice and holds the mutex before signaling the cond
     * var, the mutex ops will provide correct semantics.  If they don't
     * hold the mutex, they're subject to race conditions anyway.
     */
    ANDROID_MEMBAR_FULL();

    __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
    return 0;
}
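
The retry loop above is the classic compare-and-swap update shape; the same pattern in portable C11 atomics looks roughly like this (a hedged sketch with a hypothetical dec_counter, not bionic's code):

#include <stdatomic.h>

static void dec_counter(_Atomic long *value, long mask, long flags) {
    long oldval = atomic_load(value);
    /* retry until no other thread raced our read-modify-write */
    while (!atomic_compare_exchange_weak(value, &oldval,
                                         ((oldval - 1) & mask) | flags)) {
        /* oldval was reloaded by the failed CAS; just loop */
    }
}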
Example #16
int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
{
    int thread_id, ret = 0;

    if (rwlock == NULL)
        return EINVAL;

    pthread_mutex_lock(&rwlock->lock);
    thread_id = __get_thread_id();
    if (__unlikely(!write_precondition(rwlock, thread_id))) {
        /* If we can't acquire the write lock yet, wait until the rwlock
         * is unlocked and try again. Increment pendingWriters to get the
         * cond broadcast when that happens.
         */
        rwlock->pendingWriters += 1;
        do {
            ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
        } while (ret == 0 && !write_precondition(rwlock, thread_id));
        rwlock->pendingWriters -= 1;
        if (ret != 0)
            goto EXIT;
    }
    rwlock->numLocks ++;
    rwlock->writerThreadId = thread_id;
EXIT:
    pthread_mutex_unlock(&rwlock->lock);
    return ret;
}
Example #17
STATIC_INLINE void jl_array_del_at_beg(jl_array_t *a, size_t idx, size_t dec,
                                       size_t n)
{
    // no error checking
    // assume inbounds, assume unshared
    assert(!a->flags.isshared);
    size_t elsz = a->elsize;
    size_t offset = a->offset;
    offset += dec;
#ifdef STORE_ARRAY_LEN
    a->length = n - dec;
#endif
    a->nrows = n - dec;
    size_t newoffs = jl_array_limit_offset(a, offset);
    assert(newoffs <= offset);
    size_t nbdec = dec * elsz;
    if (__unlikely(newoffs != offset) || idx > 0) {
        char *olddata = (char*)a->data;
        char *newdata = olddata - (a->offset - newoffs) * elsz;
        size_t nb1 = idx * elsz; // size in bytes of the first block
        size_t nbtotal = a->nrows * elsz; // size in bytes of the new array
        // Implicit '\0' for byte arrays
        if (elsz == 1)
            nbtotal++;
        if (idx > 0)
            memmove(newdata, olddata, nb1);
        memmove(newdata + nb1, olddata + nb1 + nbdec, nbtotal - nb1);
        a->data = newdata;
    }
    else {
        a->data = (char*)a->data + nbdec;
    }
    a->offset = newoffs;
}
Example #18
int strcasecmp(const char* s1, const char* s2)
{
    register unsigned int  x2;
    register unsigned int  x1;

    while (1) {
        /* fold 'A'..'Z' (offsets 0..25 from 'A') to lower case */
        x2 = *s2 - 'A'; if (__unlikely(x2 < 26u)) x2 += 32;
        x1 = *s1 - 'A'; if (__unlikely(x1 < 26u)) x1 += 32;
        s1++; s2++;
        if (__unlikely(x2 != x1))
            break;
        /* x1 == (unsigned int)-'A' means the character just read was '\0' */
        if (__unlikely(x1 == (unsigned int)-'A'))
            break;
    }

    return x1 - x2;
}
Example #19
unsigned long int strtoul(const char *ptr, char **endptr, int base)
{
  int neg = 0;
  unsigned long int v=0;
  const char* orig;
  const char* nptr=ptr;

  while(*nptr == ' ') ++nptr;

  if (*nptr == '-') { neg=1; nptr++; }
  else if (*nptr == '+') ++nptr;
  orig=nptr;
  if (base==16 && nptr[0]=='0') goto skip0x;
  if (base) {
    register unsigned int b=base-2;
    if (__unlikely(b>34)) { return 0; }
  } else {
    if (*nptr=='0') {
      base=8;
skip0x:
      if ((nptr[1]=='x'||nptr[1]=='X'))  {
	nptr+=2;
	base=16;
      }
    } else
      base=10;
  }
  while(__likely(*nptr)) {
    register unsigned char c=*nptr;
    /* map a digit character to its value; non-digits map out of range */
    c=(c>='a'?c-'a'+10:c>='A'?c-'A'+10:c<='9'?c-'0':0xff);
    if (__unlikely(c>=base)) break;	/* out of base */
    {
      /* v = v*base + c, computed in two byte-wide halves */
      register unsigned long x=(v&0xff)*base+c;
      register unsigned long w=(v>>8)*base+(x>>8);
      v=(w<<8)+(x&0xff);
    }
    ++nptr;
  }
  if (__unlikely(nptr==orig)) {		/* no conversion done */
    nptr=ptr;
    v=0;
  }
  if (endptr) *endptr=(char *)nptr;
  return (neg?-v:v);
}
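
A quick usage sketch of the base-autodetection path above (plain ISO C, nothing assumed beyond the function itself):

#include <stdio.h>

int main(void) {
  char *end;
  /* base 0: the "0x" prefix selects base 16 */
  unsigned long v = strtoul("0x1Fgg", &end, 0);
  printf("%lu rest=\"%s\"\n", v, end);  /* prints: 31 rest="gg" */
  return 0;
}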
Example #20
JL_DLLEXPORT void jl_array_del_at(jl_array_t *a, ssize_t idx, size_t dec)
{
    size_t n = jl_array_nrows(a);
    size_t last = idx + dec;
    if (__unlikely(idx < 0))
        jl_bounds_error_int((jl_value_t*)a, idx + 1);
    if (__unlikely(last > n))
        jl_bounds_error_int((jl_value_t*)a, last);
    // The unsharing needs to happen before we modify the buffer
    if (__unlikely(a->flags.isshared))
        array_try_unshare(a);
    if (idx < n - last) {
        jl_array_del_at_beg(a, idx, dec, n);
    }
    else {
        jl_array_del_at_end(a, idx, dec, n);
    }
}
Example #21
JL_DLLEXPORT JL_CONST_FUNC jl_tls_states_t *(jl_get_ptls_states)(void)
{
    void *ptls = pthread_getspecific(jl_tls_key);
    if (__unlikely(!ptls)) {
        ptls = calloc(1, sizeof(jl_tls_states_t));
        pthread_setspecific(jl_tls_key, ptls);
    }
    return (jl_tls_states_t*)ptls;
}
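
jl_tls_key must have been created before the first call; a minimal sketch of that one-time setup, assuming plain pthreads (the destructor choice here is a guess, not Julia's actual code):

#include <pthread.h>
#include <stdlib.h>

static pthread_key_t jl_tls_key;

static void init_tls_key(void) {
    /* free() as the destructor matches the calloc() above; the real
     * runtime may install a more elaborate destructor */
    pthread_key_create(&jl_tls_key, free);
}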
Example #22
/**
 * Decode a single block that contains the DCT coefficients.
 * The coefficient table is de-zigzagged at the end of the operation.
 */
static void process_Huffman_data_unit(struct jdec_private *priv, int component)
{
  unsigned char j;
  unsigned int huff_code;
  unsigned char size_val, count_0;

  struct component *c = &priv->component_infos[component];
  short int DCT[64];


  /* Initialize the DCT coef table */
  memset(DCT, 0, sizeof(DCT));

  /* DC coefficient decoding */
  huff_code = get_next_huffman_code(priv, c->DC_table);
  //trace("+ %x\n", huff_code);
  if (huff_code) {
     get_nbits(priv->reservoir, priv->nbits_in_reservoir, priv->stream, huff_code, DCT[0]);
     DCT[0] += c->previous_DC;
     c->previous_DC = DCT[0];
  } else {
     DCT[0] = c->previous_DC;
  }

  /* AC coefficient decoding */
  j = 1;
  while (j<64)
   {
     huff_code = get_next_huffman_code(priv, c->AC_table);
     //trace("- %x\n", huff_code);

     size_val = huff_code & 0xF;
     count_0 = huff_code >> 4;

     if (size_val == 0)
      { /* RLE */
	if (count_0 == 0)
	  break;	/* EOB found, go out */
	else if (count_0 == 0xF)
	  j += 16;	/* skip 16 zeros */
      }
     else
      {
	j += count_0;	/* skip count_0 zeroes */
	if (__unlikely(j >= 64))
	 {
	   //snprintf(error_string, sizeof(error_string), "Bad huffman data (buffer overflow)");
	   break;
	 }
	get_nbits(priv->reservoir, priv->nbits_in_reservoir, priv->stream, size_val, DCT[j]);
	j++;
      }
   }

  for (j = 0; j < 64; j++)
    c->DCT[j] = DCT[zigzag[j]];
}
Example #23
__LIBC_HIDDEN__
int pthread_mutex_unlock_impl(pthread_mutex_t *mutex)
{
    int mvalue, mtype, tid, shared;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mvalue = mutex->value;
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    /* Handle common case first */
    if (__likely(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_unlock(mutex, shared);
        return 0;
    }

    /* Do we already own this recursive or error-check mutex ? */
    tid = __get_thread()->tid;
    if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
        return EPERM;

    /* If the counter is > 0, we can simply decrement it atomically.
     * Since other threads can mutate the lower state bits (and only the
     * lower state bits), use a cmpxchg to do it.
     */
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        for (;;) {
            int newval = mvalue - MUTEX_COUNTER_BITS_ONE;
            if (__likely(__bionic_cmpxchg(mvalue, newval, &mutex->value) == 0)) {
                /* success: we still own the mutex, so no memory barrier */
                return 0;
            }
            /* the value changed, so reload and loop */
            mvalue = mutex->value;
        }
    }

    /* the counter is 0, so we're going to unlock the mutex by resetting
     * its value to 'unlocked'. We need to perform a swap in order
     * to read the current state, which will be 2 if there are waiters
     * to awake.
     *
     * TODO: Change this to __bionic_swap_release when we implement it
     *        to get rid of the explicit memory barrier below.
     */
    ANDROID_MEMBAR_FULL();  /* RELEASE BARRIER */
    mvalue = __bionic_swap(mtype | shared | MUTEX_STATE_BITS_UNLOCKED, &mutex->value);

    /* Wake one waiting thread, if any */
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(&mutex->value, shared, 1);
    }
    return 0;
}
Example #24
void jl_compute_field_offsets(jl_datatype_t *st)
{
    size_t sz = 0, alignm = 1;
    int ptrfree = 1;

    for(size_t i=0; i < jl_datatype_nfields(st); i++) {
        jl_value_t *ty = jl_field_type(st, i);
        size_t fsz, al;
        if (jl_isbits(ty) && jl_is_leaf_type(ty)) {
            fsz = jl_datatype_size(ty);
            if (__unlikely(fsz > JL_FIELD_MAX_SIZE))
                jl_throw(jl_overflow_exception);
            al = ((jl_datatype_t*)ty)->alignment;
            st->fields[i].isptr = 0;
            if (((jl_datatype_t*)ty)->haspadding)
                st->haspadding = 1;
        }
        else {
            fsz = sizeof(void*);
            if (fsz > MAX_ALIGN)
                fsz = MAX_ALIGN;
            al = fsz;
            st->fields[i].isptr = 1;
            ptrfree = 0;
        }
        if (al != 0) {
            size_t alsz = LLT_ALIGN(sz, al);
            if (alsz > sz)
                st->haspadding = 1;
            sz = alsz;
            if (al > alignm)
                alignm = al;
        }
        if (__unlikely(sz > JL_FIELD_MAX_OFFSET))
            jl_throw(jl_overflow_exception);
        st->fields[i].offset = sz;
        st->fields[i].size = fsz;
        sz += fsz;
    }
    st->alignment = alignm;
    st->size = LLT_ALIGN(sz, alignm);
    st->pointerfree = ptrfree && !st->abstract;
}
Example #25
JL_DLLEXPORT void jl_pop_handler(int n)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    if (__unlikely(n <= 0))
        return;
    jl_handler_t *eh = ptls->current_task->eh;
    while (--n > 0)
        eh = eh->prev;
    jl_eh_restore_state(eh);
}
Example #26
/*
NAME    {* bdd\_extvarnum *}
SECTION {* kernel *}
SHORT   {* add extra BDD variables *}
PROTO   {* int bdd_extvarnum(int num) *}
DESCR   {* Extends the current number of allocated BDD variables with
	   {\tt num} extra variables. *}
RETURN  {* The old number of allocated variables or a negative error code. *}
ALSO    {* bdd\_setvarnum, bdd\_ithvar, bdd\_nithvar *}
*/
int bdd_extvarnum(int num)
{
   int start = bddvarnum;

   if (__unlikely(num < 0  ||  num > 0x3FFFFFFF))
      return bdd_error(BDD_RANGE);

   bdd_setvarnum(bddvarnum+num);
   return start;
}
Example #27
JL_DLLEXPORT jl_array_t *jl_ptr_to_array(jl_value_t *atype, void *data,
                                         jl_value_t *_dims, int own_buffer)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    size_t elsz, nel = 1;
    jl_array_t *a;
    size_t ndims = jl_nfields(_dims);
    wideint_t prod;
    assert(is_ntuple_long(_dims));
    size_t *dims = (size_t*)_dims;
    for (size_t i = 0; i < ndims; i++) {
        prod = (wideint_t)nel * (wideint_t)dims[i];
        if (prod > (wideint_t)MAXINTVAL)
            jl_error("invalid Array dimensions");
        nel = prod;
    }
    if (__unlikely(ndims == 1))
        return jl_ptr_to_array_1d(atype, data, nel, own_buffer);

    jl_value_t *el_type = jl_tparam0(atype);

    int isunboxed = store_unboxed(el_type);
    if (isunboxed)
        elsz = jl_datatype_size(el_type);
    else
        elsz = sizeof(void*);

    int ndimwords = jl_array_ndimwords(ndims);
    int tsz = JL_ARRAY_ALIGN(sizeof(jl_array_t) + ndimwords*sizeof(size_t), JL_CACHE_BYTE_ALIGNMENT);
    a = (jl_array_t*)jl_gc_alloc(ptls, tsz, atype);
    // No allocation or safepoint allowed after this
    a->flags.pooled = tsz <= GC_MAX_SZCLASS;
    a->data = data;
#ifdef STORE_ARRAY_LEN
    a->length = nel;
#endif
    a->elsize = elsz;
    a->flags.ptrarray = !isunboxed;
    a->flags.ndims = ndims;
    a->offset = 0;
    a->flags.isshared = 1;
    a->flags.isaligned = 0;
    if (own_buffer) {
        a->flags.how = 2;
        jl_gc_track_malloced_array(ptls, a);
        jl_gc_count_allocd(nel*elsz + (elsz == 1 ? 1 : 0));
    }
    else {
        a->flags.how = 0;
    }

    assert(ndims != 1); // handled above
    memcpy(&a->nrows, dims, ndims * sizeof(size_t));
    return a;
}
Example #28
/* gcc is broken and has a non-SUSv2 compliant internal prototype.
 * This causes it to warn about a type mismatch here.  Ignore it. */
char *strncat(char *s, const char *t, size_t n) {
  char *dest=s;
  register char *max;
  s+=strlen(s);
  if (__unlikely((max=s+n)==s)) goto fini;
  for (;;) {
    if (__unlikely(!(*s = *t))) break; if (__unlikely(++s==max)) break; ++t;
#ifndef WANT_SMALL_STRING_ROUTINES
    if (__unlikely(!(*s = *t))) break; if (__unlikely(++s==max)) break; ++t;
    if (__unlikely(!(*s = *t))) break; if (__unlikely(++s==max)) break; ++t;
    if (__unlikely(!(*s = *t))) break; if (__unlikely(++s==max)) break; ++t;
#endif
  }
  *s=0;
fini:
  return dest;
}
Example #29
void Signal::clear() {
#if defined(_WIN32)
  ResetEvent(device);
#elif defined(HAVE_EVENTFD)
  FXlong value;
  if (__unlikely(read(device,&value,sizeof(FXlong))!=sizeof(FXlong) && errno!=EAGAIN))
    fxerror("gap: failed to clear signal, read from eventfd failed");
#else
  FXuchar value[16];
  while(read(device,value,16)>0);
#endif
  }
Example #30
// invoke (compiling if necessary) the jlcall function pointer for a method template
STATIC_INLINE jl_value_t *jl_call_staged(jl_svec_t *sparam_vals, jl_lambda_info_t *meth,
                                         jl_value_t **args, uint32_t nargs)
{
    if (__unlikely(meth->fptr == NULL)) {
        jl_compile_linfo(meth);
        jl_generate_fptr(meth);
    }
    assert(jl_svec_len(meth->sparam_syms) == jl_svec_len(sparam_vals));
    if (__likely(meth->jlcall_api == 0))
        return meth->fptr(args[0], &args[1], nargs-1);
    else
        return ((jl_fptr_sparam_t)meth->fptr)(sparam_vals, args[0], &args[1], nargs-1);
}