Example #1
/*
 * new_name creates a new dictionary (name) entry and returns it
 */
static struct dict_name *new_name(
    struct dict_name *link, char *name, int length, int hidden)
{
    struct dict_name *pnm;  /* the new name */

    assert(ALIGNED(ph) == (intptr_t)ph, "misaligned (new_name)");

    /*
     * Since we're using the high bit of the length as a "hidden" or
     * "deleted" flag, cap the length at 127.
     */
    length = MIN(length, 127);

    /* Allot space for name + length byte so that suffix is aligned. */
    pnm = (struct dict_name *)ALIGNED((intptr_t)ph + length - SUFFIX_LEN);

    /* copy name string */
    memcpy(pnm->suffix + SUFFIX_LEN - length, name, length);
    pnm->length = length + (hidden ? 128 : 0);

    /* set link pointer */
    pnm->link = link;

    /* Allot entry */
    ph = (cell *)(pnm + 1);

#if defined(BEING_DEFINED)
    fprintf(stderr, "%p %p %.*s\n", pnm, link, length, name);
#endif

    return pnm;
}
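Neither ALIGNED, SUFFIX_LEN, nor ph is defined in this excerpt; in this Forth-style dictionary code, ALIGNED rounds an address up to a cell boundary and ph is the dictionary allocation pointer. A minimal sketch of plausible supporting definitions (assumptions, not the project's actual header):

/* Hypothetical supporting definitions (sketch only). The assert above is
 * also assumed to be a project macro taking (condition, message). */
typedef intptr_t cell;
#define ALIGN_SIZE  sizeof(cell)
#define ALIGNED(x)  (((intptr_t)(x) + ALIGN_SIZE - 1) & -ALIGN_SIZE)
#define SUFFIX_LEN  4                  /* trailing name bytes kept inline */

static cell *ph;                       /* dictionary "here" pointer */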
Example #2
/* Compact the custom stack: move the number at r (header + mantissa)
   down to org_stack. */
static long *
dummy_compact (long *r, char *org_stack)
{
  memmove (org_stack, r,
           ALIGNED (2*sizeof (long)) + ALIGNED ((mpfr_custom_get_size) (p)));
  return (long *) org_stack;
}
Example #3
/* a[0] is the kind, a[1] is the exponent, &a[2] is the mantissa */
static long *
dummy_new (void)
{
  long *r;

  r = (long *) new_st (ALIGNED (2 * sizeof (long)) +
                       ALIGNED (mpfr_custom_get_size (p)));
  (mpfr_custom_init) (&r[2], p);
  r[0] = (int) MPFR_NAN_KIND;
  r[1] = 0;
  return r;
}
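dummy_new and dummy_compact come from MPFR custom-interface test code. A hedged usage sketch, assuming <mpfr.h> is included and that the global precision p and the allocator new_st are as in Example #28:

/* Usage sketch (assumptions: globals p and new_st as elsewhere here). */
mpfr_t x;
long *r = dummy_new ();
mpfr_custom_init_set (x, (int) r[0], r[1], p, &r[2]);
/* x is now a NaN of precision p living entirely on the custom stack. */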
Example #4
char *
strcpy (char *to, const char *from)
{
  char *return_value = to;
  if (to == from)
    return to;
  else if (ALIGNED (to) && ALIGNED (from))
    {
      unsigned long *to1 = (unsigned long *) to;
      const unsigned long *from1 = (const unsigned long *) from;
      unsigned long c;
      unsigned long magic = MAGIC;
      unsigned long not_magic = ~magic;
/*      unsigned long hi_bit = 0x80000000; */

      while ((c = *from1) != 0)
        {
          if (HAS_ZERO(c)) 
            {
              to = (char *) to1;
              from = (const char *) from1;
              goto slow_loop;
            }
          else
            {
              *to1 = c;
              to1++; 
              from1++;
            }
        }

      to = (char *) to1;
      *to = (char) 0;
      return return_value;
    }
  else
    {
      char c;

    slow_loop:

      while ((c = *from) != 0)
        {
          *to = c;
          to++;
          from++;
        }
      *to = (char) 0;
    }
  return return_value;
}
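MAGIC and HAS_ZERO are not defined in this excerpt. A common formulation of the word-at-a-time zero-byte test, assuming 32-bit unsigned long (an assumption, not necessarily this codebase's exact definitions):

/* Assumed definitions (sketch, 32-bit long): subtracting 1 from every
 * byte borrows exactly where a byte was 0x00, and masking with ~c
 * keeps only genuine zero bytes. */
#define MAGIC        0x01010101UL
#define HAS_ZERO(c)  ((((c) - MAGIC) & ~(c)) & 0x80808080UL)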
Example #5
/* Word-at-a-time memcpy: copies 32-bit words while both pointers are
 * word-aligned, falling back to byte copies otherwise. */
void *n64_memcpy(void *dst, const void *src, size_t size)
{
    uint8_t *bdst = (uint8_t *)dst;
    const uint8_t *bsrc = (const uint8_t *)src;
    uint32_t *wdst = (uint32_t *)dst;
    const uint32_t *wsrc = (const uint32_t *)src;

    int size_to_copy = size;

    if (ALIGNED(bdst) && ALIGNED(bsrc))
    {
        int words_to_copy = size_to_copy / 4;
        int bytes_to_copy = size_to_copy % 4;

        while (words_to_copy--)
        {
            *wdst++ = *wsrc++;
        }

        bdst = (uint8_t *)wdst;
        bsrc = (const uint8_t *)wsrc;

        while (bytes_to_copy--)
        {
            *bdst++ = *bsrc++;
        }
    }
    else
    {
        int w_to_copy = size_to_copy / 4;
        int b_to_copy = size_to_copy % 4;

        while (w_to_copy > 0)
        {
            *bdst++ = *bsrc++;
            *bdst++ = *bsrc++;
            *bdst++ = *bsrc++;
            *bdst++ = *bsrc++;

            w_to_copy--;
        }

        while(b_to_copy--)
        {
            *bdst++ = *bsrc++;
        }
    }

    return dst;
}
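A hypothetical self-check (not part of the original) that exercises both paths; the offset pointers force the byte-loop fallback:

#include <assert.h>
#include <string.h>

/* Illustrative test harness only. */
static void check_copy(void)
{
    char src[32] = "alignment test payload";
    char dst[32] = {0};

    n64_memcpy(dst, src, 24);             /* word path (if arrays align) */
    assert(memcmp(dst, src, 24) == 0);

    n64_memcpy(dst + 1, src + 1, 20);     /* misaligned byte path */
    assert(memcmp(dst + 1, src + 1, 20) == 0);
}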
Example #6
/* Garbage-collect the stack, keeping only x. */
static mpfr_ptr
return_mpfr (mpfr_ptr x, char *old_stack)
{
  void *mantissa       = mpfr_custom_get_significand (x);
  size_t size_mantissa = mpfr_custom_get_size (mpfr_get_prec (x));
  mpfr_ptr newx;

  memmove (old_stack, x, sizeof (mpfr_t));
  memmove (old_stack + ALIGNED (sizeof (mpfr_t)), mantissa, size_mantissa);
  newx = (mpfr_ptr) old_stack;
  mpfr_custom_move (newx, old_stack + ALIGNED (sizeof (mpfr_t)));
  stack = old_stack + ALIGNED (sizeof (mpfr_t)) + ALIGNED (size_mantissa);
  return newx;
}
Example #7
static int hc2cfv_okp(const R *Rp, const R *Ip, const R *Rm, const R *Im, 
		      INT rs, INT mb, INT me, INT ms, 
		      const planner *plnr)
{
     return (1
	     && !NO_SIMDP(plnr)
	     && SIMD_STRIDE_OK(rs)
	     && SIMD_VSTRIDE_OK(ms)
             && ((me - mb) % VL) == 0
             && ((mb - 1) % VL) == 0 /* twiddle factors alignment */
	     && ALIGNED(Rp)
	     && ALIGNED(Rm)
	     && Ip == Rp + 1
	     && Im == Rm + 1);
}
Example #8
void
CVMmemDisableWriteNotify(CVMMemHandle *h)
{
    CVMMemPrivateData *c;

    CVMassert(wnlLock != NULL);

    CVMmutexLock(wnlLock);

    c = writeNotifyList;
    while (c != NULL) {
        if (d2h(c) == h) {
            CVMMemPrivateData *region;
            CVMAddr start = c->dataStart.start;
            CVMAddr end = c->end;

            CVMAddr alignedStart = ALIGNED(start);
            CVMAddr alignedEnd = ALIGNEDNEXT(end);

            /* Found the region. Unprotect it. */

            /* Now traverse the list again to check if part of the
             * aligned pages (first page and last page) belong to 
             * other regions.
             */
            region = writeNotifyList;
            while (region != NULL) {
		if (region->end < start &&
                    region->end >= alignedStart) {
                    alignedStart = ALIGNEDNEXT(start);
		}
                if (region->dataStart.start > end &&
                    region->dataStart.start <= alignedEnd) {
                    alignedEnd = ALIGNED(end);
                }
                region = region->next;
            }
            /* Unprotect the adjusted aligned region, so the next
             * write within the range would not cause a signal.
             */
            CVMmprotect((void*)alignedStart, (void*)alignedEnd, CVM_FALSE);
            CVMmutexUnlock(wnlLock);
            return;
        }
        c = c->nextWriteNotify;
    }
    CVMmutexUnlock(wnlLock);
}
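In this CVM code ALIGNED and ALIGNEDNEXT work at page granularity: round an address down to the start of its page, or up to the next page boundary. Plausible definitions (assumed, not taken from the CVM sources):

/* Assumed page-granularity helpers (sketch): */
#define ALIGNED(a)      ((CVMAddr)(a) & ~((CVMAddr)CVMgetPagesize() - 1))
#define ALIGNEDNEXT(a)  (((CVMAddr)(a) + CVMgetPagesize() - 1) & \
                         ~((CVMAddr)CVMgetPagesize() - 1))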
Example #9
void *Z_Calloc(size_t size, int tag, void *user)
{
    void *ptr = Z_Malloc(size, tag, user);

    memset(ptr, 0, ALIGNED(size));
    return ptr;
}
Example #10
/* Our fastpath can't handle OP_movs of uninit, which is common
 * w/ realloc, so we use a regular OP_mov loop.
 * XXX: share w/ drmem's replace_memcpy
 */
DO_NOT_OPTIMIZE
static void *
memcpy_no_movs(void *dst, const void *src, size_t size)
{
    register unsigned char *d = (unsigned char *) dst;
    register unsigned char *s = (unsigned char *) src;
    if (((ptr_uint_t)dst & 3) == ((ptr_uint_t)src & 3)) {
        /* same alignment, so we can do 4 aligned bytes at a time and stay
         * on fastpath
         */
        while (!ALIGNED(d, 4) && size > 0) {
            *d++ = *s++;
            size--;
        }
        while (size > 3) {
            *((unsigned int *)d) = *((unsigned int *)s);
            s += 4;
            d += 4;
            size -= 4;
        }
        while (size > 0) {
            *d++ = *s++;
            size--;
        }
    } else {
        while (size-- > 0) /* loop will terminate before underflow */
            *d++ = *s++;
    }
    return dst;
}
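Unlike the rounding macros in earlier examples, ALIGNED here takes the alignment as a second argument and is a pure predicate. A DynamoRIO-style definition (assumed):

/* Assumed predicate form (sketch): true iff x sits on the given
 * power-of-two boundary. */
#define ALIGNED(x, alignment) ((((ptr_uint_t)(x)) & ((alignment) - 1)) == 0)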
Example #11
int do_layer1(mpg123_handle *fr)
{
  int clip=0;
  int i,stereo = fr->stereo;
  unsigned int balloc[2*SBLIMIT];
  unsigned int scale_index[2][SBLIMIT];
  ALIGNED(16) real fraction[2][SBLIMIT];
  int single = fr->single;

  fr->jsbound = (fr->mode == MPG_MD_JOINT_STEREO) ? (fr->mode_ext<<2)+4 : 32;

  if(stereo == 1 || single == SINGLE_MIX) /* I don't see mixing handled here */
    single = SINGLE_LEFT;

  I_step_one(balloc,scale_index,fr);

  for (i=0;i<SCALE_BLOCK;i++)
  {
    I_step_two(fraction,balloc,scale_index,fr);

    if(single != SINGLE_STEREO)
    {
      clip += (fr->synth_mono)( (real *) fraction[single], fr);
    }
    else
    {
      clip += (fr->synth)( (real *) fraction[0], 0, fr, 0);
      clip += (fr->synth)( (real *) fraction[1], 1, fr, 1);
    }
  }

  return clip;
}
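Here ALIGNED(16) is an alignment attribute on a declaration, not a predicate: the SIMD synth routines need the fraction buffer on a 16-byte boundary. A plausible definition (assumption, GCC-style):

/* Assumed attribute macro (sketch): */
#ifdef __GNUC__
#define ALIGNED(a) __attribute__((aligned(a)))
#else
#define ALIGNED(a)
#endif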
Example #12
static void convert_command_line(int argc, char *argv[])
{
    char *pline;

    /* skip arg[0] */
    argc--;
    argv++;

    pcmd_line = (struct counted_string *)ph;
    pline = pcmd_line->data;

    while (argc--)
    {
        pline = str_copy(pline, *argv++);
        *pline++ = ' ';
    }
    pcmd_line->length = pline - pcmd_line->data;

    /* 
     * No need to null-terminate! This string is evaluated by the Forth
     * parser, not C code. Any pieces - like filenames - that get passed to
     * C are copied out of this string into the dictionary and
     * null-terminated first - just like input from _any other_ source.
     */
    ph = (cell *)ALIGNED(pline);
}
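pcmd_line and struct counted_string are external to this excerpt; a sketch of the layout the code assumes (hypothetical):

/* Assumed layout (sketch): a cell-sized count followed by inline text. */
struct counted_string
{
    cell length;        /* character count, no terminator */
    char data[];        /* command-line text, laid down in the dictionary */
};

static struct counted_string *pcmd_line;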
Example #13
void *Z_Realloc(void *ptr, size_t n, int mallocTag)
{
    int     tag = ptr ? Z_GetTag(ptr) : mallocTag;
    void   *p;

    lockZone();

    n = ALIGNED(n);
    p = Z_Malloc(n, tag, 0);    // User always 0;

    if (ptr)
    {
        size_t bsize;

        // Has old data; copy it.
        memblock_t *block = Z_GetBlock(ptr);
#ifdef LIBDENG_FAKE_MEMORY_ZONE
        bsize = block->areaSize;
#else
        bsize = block->size - sizeof(memblock_t);
#endif
        memcpy(p, ptr, MIN_OF(n, bsize));
        Z_Free(ptr);
    }

    unlockZone();
    return p;
}
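ALIGNED here rounds the requested size up so every zone block payload stays suitably aligned. A plausible definition (assumed, not from the zone sources):

/* Assumed size rounding (sketch): pad to a multiple of the pointer size. */
#define ALIGNED(x) (((x) + sizeof(void *) - 1) & ~(sizeof(void *) - 1))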
Example #14
/* We use volatile int rather than bool since these are used as futexes.
 * 0 is unset, 1 is set, and no other value is used.
 */
bool
ksynch_init_var(volatile int *futex)
{
    ASSERT(ALIGNED(futex, sizeof(int)));
    *futex = 0;
    return true;
}
Example #15
_CACHED
void *
memset(void *dest_p, int c, size_t n)
{
  void *orig_dest = dest_p;
  char *dst;

  /* fill with longs if applicable */
  if (ALIGNED(dest_p) && n > sizeof(uint32_t))
    {
      uint32_t lc;
      uint32_t *dstl = dest_p;
      c &= 0xff;
      lc = (c<<24)|(c<<16)|(c<<8)|c;
      while (n >= sizeof(uint32_t))
	{
	  *dstl++ = lc;
	  n -= sizeof(uint32_t);
	}
      dest_p = dstl;
    }

  dst = dest_p;
  while (n > 0) {
    *dst++ = c;
    --n;
  }

  return orig_dest;
}
Example #16
END_DO_NOT_OPTIMIZE

IN_REPLACE_SECTION void *
replace_memcpy(void *dst, const void *src, size_t size)
{
    register unsigned char *d = (unsigned char *) dst;
    register unsigned char *s = (unsigned char *) src;
    if (((ptr_uint_t)dst & 3) == ((ptr_uint_t)src & 3)) {
        /* same alignment, so we can do 4 aligned bytes at a time and stay
         * on fastpath.  when not same alignment, I'm assuming it's faster
         * to have all 1-byte moves on fastpath rather than half 4-byte
         * (aligned) on fastpath and half 4-byte (unaligned) on slowpath.
         */
        while (!ALIGNED(d, 4) && size > 0) {
            *d++ = *s++;
            size--;
        }
        while (size > 3) {
            *((unsigned int *)d) = *((unsigned int *)s);
            s += 4;
            d += 4;
            size -= 4;
        }
        while (size > 0) {
            *d++ = *s++;
            size--;
        }
    } else {
        while (size-- > 0) /* loop will terminate before underflow */
            *d++ = *s++;
    }
    return dst;
}
Example #17
void
CVMmemManagerDumpStats()
{
    CVMMemPrivateData *d = memList;
    CVMconsolePrintf("Memory status:\n");
    while (d != NULL) {
        if (d->map != NULL) {
            CVMMemType type = d->type;
            int totalPage = (ALIGNEDNEXT(d->end) -  
                             ALIGNED(d->dataStart.start)) / CVMgetPagesize();
            CVMMemDirtyPages *dmap = d->map;
            if (type < CVM_MEM_NUM_TYPES) {
                CVMconsolePrintf("%s: Total Page = %d, Dirty Page = %d\n", 
                                 CVMmemType[type].name,
                                 totalPage, dmap->numberOfDirtypages);
                if (CVMmemType[type].report != NULL) {
                    CVMmemType[type].report();
                }
            } else {
                CVMconsolePrintf("%s: Total Page = %d, Dirty Page = %d\n", 
                        CVMcustomMemType[type - CVM_MEM_NUM_TYPES].name,
                        totalPage, dmap->numberOfDirtypages);
                if (CVMcustomMemType[type - CVM_MEM_NUM_TYPES].report != NULL) {
                    CVMcustomMemType[type - CVM_MEM_NUM_TYPES].report();
                }
            }
        }
        d = d->next;
    }
}
Example #18
void
CVMmemSetMonitorMode(CVMMemHandle *h, CVMMemMonMode mode)
{
    CVMMemPrivateData *d = h2d(h);
    d->mode = mode; /* set the new mode */
    if (mode == CVM_MEM_MON_NONE) {
        /* disable write notify */      
        CVMmemDisableWriteNotify(h);
    } else if (mode == CVM_MEM_MON_FIRST_WRITE ||
               mode == CVM_MEM_MON_ALL_WRITES) {
        if (d->map == NULL) {
            CVMMemDirtyPages *map;
            map = (CVMMemDirtyPages*)malloc(sizeof(CVMMemDirtyPages));
            if (map != NULL) {
                map->memMap = (CVMUint8*)calloc(
                    sizeof(CVMUint8),
                    (ALIGNEDNEXT(d->end) - ALIGNED(d->dataStart.start)) /
                        CVMgetPagesize());
                if (map->memMap == NULL) {
                    free(map);
                    return;
                }
                map->numberOfDirtypages = 0;
                d->map = map;
            } else {
                return;
            }
        }
        CVMmemEnableWriteNotify(
            h, (CVMUint32*)d->dataStart.start, (CVMUint32*)d->end);
    }
    return;
}
Example #19
void
CVMmemEnableWriteNotify(CVMMemHandle *h, CVMUint32* start, CVMUint32* end)
{
    CVMMemPrivateData *r = h2d(h);
    CVMAddr alignedStart = ALIGNED(start);
    CVMAddr alignedEnd = ALIGNEDNEXT(end);

    if (wnlLock == NULL) {
        wnlLock = malloc(sizeof(CVMMutex));
        CVMmutexInit(wnlLock);
    }
    CVMmutexLock(wnlLock);

    /* Add to the write notify list. */
    if (writeNotifyList == NULL) {
        r->nextWriteNotify = NULL;
    } else {
        r->nextWriteNotify = writeNotifyList;
    }
    writeNotifyList = r;

    CVMmutexUnlock(wnlLock);

    /* Protect the aligned region that contains the start and end to
     * enable write notify.
     */
    CVMmprotect((void*)alignedStart, (void*)alignedEnd, CVM_TRUE);
}
Example #20
/* allocates a RUN_SIZE-aligned memory block and adds it to mem_map */ 
static void *run_alloc(malloc_t *heap, int type)
{
    uintptr_t addri, alignedi;
    void *addr;

    /* allocate twice the size so we can align ourselves */
    if (cgc_allocate(RUN_SIZE * 2, 0, &addr) != 0)
        return NULL;

    addri = (uintptr_t) addr;
    alignedi = ALIGNED(addri, RUN_SIZE);

    /* return the excess memory on either side of the aligned run */
    if (addri < alignedi)
        cgc_deallocate((void *)addri, alignedi - addri);
    if (alignedi + RUN_SIZE < addri + RUN_SIZE * 2)
        cgc_deallocate((void *)(alignedi + RUN_SIZE), (addri + RUN_SIZE * 2) - (alignedi + RUN_SIZE));

    /* add run to mem_map */
    DBG_ASSERT(heap->mem_map[alignedi / RUN_SIZE] == MM_UNALLOCATED);
    heap->mem_map[alignedi / RUN_SIZE] = type;

    /* return the aligned memory block */
    return (void *)alignedi;
}
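This allocator uses the explicit two-argument, round-up form of ALIGNED and then returns the slack on both sides of the aligned run. Assumed definition (sketch):

/* Assumed two-argument round-up (sketch); align must be a power of two. */
#define ALIGNED(x, align) \
    (((uintptr_t)(x) + (align) - 1) & ~((uintptr_t)(align) - 1))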
Example #21
char* strncat (char* destination, const char* source, size_t num)
{
	char *s = destination;

	/* Skip over the data already in destination as quickly as possible.  */
	if (ALIGNED (destination))
	{
		unsigned long *aligned_s1 = (unsigned long *)destination;
		
		while (!DETECTNULL (*aligned_s1))
			aligned_s1++;

		destination = (char *)aligned_s1;
	}

	while (*destination)
		destination++;

	/* destination now points to its trailing null character; copy up
		to NUM bytes from SOURCE into DESTINATION, stopping if a null
		is encountered in SOURCE.

		It is not safe to use strncpy here since it copies EXACTLY N
		characters, null padding if necessary.  */
	while (num-- != 0 && (*destination++ = *source++))
	{
		if (num == 0)
			*destination = '\0';
	}
	
	return s;
}
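DETECTNULL is the same zero-byte trick as HAS_ZERO in Example #4; a newlib-style 32-bit form (assumed here):

/* Assumed definition (sketch, 32-bit long): */
#define DETECTNULL(X) (((X) - 0x01010101UL) & ~(X) & 0x80808080UL)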
Example #22
// n64_memset is special-cased to fill zeros
// all but two or three calls in Doom had 0 as the fill value
void *n64_memset(void *ptr, int value, size_t num)
{
    uint32_t *w = (uint32_t *)ptr;
    uint8_t  *p = (uint8_t *)ptr;

    if (ALIGNED(ptr))
    {
        int words = num / 4;
        int bytes = num % 4;

        while (words--)
        {
            *w++ = 0x00000000;
        }

        p = (unsigned char*)w;

        while (bytes--)
        {
            *p++ = 0x00;
        }
    }
    else
    {
        while (num--)
        {
            *p++ = 0x00;
        }
    }

    return ptr;
}
Example #23
static int t_okp_t1bu(const ct_desc *d,
		      const R *rio, const R *iio,
		      INT rs, INT vs, INT m, INT mb, INT me, INT ms,
		      const planner *plnr)
{									
     return  t_okp_commonu(d, rio, iio, rs, vs, m, mb, me, ms, plnr)
	  && rio == iio + 1
	  && ALIGNED(iio);
}
Example #24
/*
 * Add kernel mappings for pa -> va for a section of size bytes.
 * Called only after the va range is known to be unoccupied.
 */
static int
pdmap(uintptr_t pa, int attr, uintptr_t va, usize size)
{
    uintptr_t pae;
    PTE *pd, *pde, *pt, *pte;
    int pdx, pgsz;
    Page *pg;

    pd = (PTE*)(PDMAP+PDX(PDMAP)*4096);

    for(pae = pa + size; pa < pae; pa += pgsz) {
        pdx = PDX(va);
        pde = &pd[pdx];

        /*
         * Check if it can be mapped using a big page,
         * i.e. is big enough and starts on a suitable boundary.
         * Assume processor can do it.
         */
        if(ALIGNED(pa, PGLSZ(1)) && ALIGNED(va, PGLSZ(1)) && (pae-pa) >= PGLSZ(1)) {
            assert(*pde == 0);
            *pde = pa|attr|PtePS|PteP;
            pgsz = PGLSZ(1);
        }
        else {
            if(*pde == 0) {
                pg = mmuptpalloc();
                assert(pg != nil && pg->pa != 0);
                *pde = pg->pa|PteRW|PteP;
                memset((PTE*)(PDMAP+pdx*4096), 0, 4096);
            }
            assert(*pde != 0);

            pt = (PTE*)(PDMAP+pdx*4096);
            pte = &pt[PTX(va)];
            assert(!(*pte & PteP));
            *pte = pa|attr|PteP;
            pgsz = PGLSZ(0);
        }
        va += pgsz;
    }

    return 0;
}
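This kernel code uses the predicate form again: ALIGNED(pa, PGLSZ(1)) asks whether pa sits on a big-page boundary, where PGLSZ(0) is the base page size and PGLSZ(1) the big-page size (e.g. 4KB and 2MB). Assumed helper (sketch):

/* Assumed alignment predicate (sketch): */
#define ALIGNED(x, sz)  (((uintptr_t)(x) & ((sz) - 1)) == 0)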
Example #25
void hh_collect(value aggressive_val) {
#ifdef _WIN32
  // TODO GRGR
  return;
#else
  int aggressive  = Bool_val(aggressive_val);
  int flags       = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;
  int prot        = PROT_READ | PROT_WRITE;
  char* dest;
  size_t mem_size = 0;
  char* tmp_heap;

  float space_overhead = aggressive ? 1.2 : 2.0;
  if(used_heap_size() < (size_t)(space_overhead * heap_init_size)) {
    // Not enough growth to collect: the heap is still under
    // space_overhead times its initial size.
    return;
  }

  tmp_heap = (char*)mmap(NULL, heap_size, prot, flags, 0, 0);
  dest = tmp_heap;

  if(tmp_heap == MAP_FAILED) {
    printf("Error while collecting: %s\n", strerror(errno));
    exit(2);
  }

  assert(my_pid == master_pid); // Comes from the master

  // Walking the table
  size_t i;
  for(i = 0; i < HASHTBL_SIZE; i++) {
    if(hashtbl[i].addr != NULL) { // Found a non empty slot
      size_t bl_size      = Get_buf_size(hashtbl[i].addr);
      size_t aligned_size = ALIGNED(bl_size);
      char* addr          = Get_buf(hashtbl[i].addr);

      memcpy(dest, addr, bl_size);
      // This is where the data ends up after the copy
      hashtbl[i].addr = heap_init + mem_size + sizeof(size_t);
      dest     += aligned_size;
      mem_size += aligned_size;
    }
  }

  // Copying the result back into shared memory
  memcpy(heap_init, tmp_heap, mem_size);
  *heap = heap_init + mem_size;

  if(munmap(tmp_heap, heap_size) == -1) {
    printf("Error while collecting: %s\n", strerror(errno));
    exit(2);
  }
#endif
}
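Get_buf and Get_buf_size undo the size prefix that hh_alloc (next example) writes in front of every block. Plausible definitions (assumed):

/* Assumed helpers (sketch): each block is prefixed by its size_t length. */
#define Get_buf_size(x)  (*((size_t *)(x) - 1))
#define Get_buf(x)       ((char *)(x) - sizeof(size_t))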
Example #26
static char* hh_alloc(size_t size) {
  size_t slot_size  = ALIGNED(size + sizeof(size_t));
  char* chunk       = __sync_fetch_and_add(heap, (char*)slot_size);
#ifdef _WIN32
  if (!VirtualAlloc(chunk, slot_size, MEM_COMMIT, PAGE_READWRITE)) {
    win32_maperr(GetLastError());
    uerror("VirtualAlloc1", Nothing);
  }
#endif
  *((size_t*)chunk) = size;
  return (chunk + sizeof(size_t));
}
Example #27
/* Wakes up at most one thread waiting on the futex if the kernel supports
 * SYS_futex syscall. Does nothing if the kernel doesn't support SYS_futex.
 */
ptr_int_t
ksynch_wake(volatile int *futex)
{
    ptr_int_t res;
    ASSERT(ALIGNED(futex, sizeof(int)));
    if (kernel_futex_support) {
        res = dynamorio_syscall(SYS_futex, 6, futex, FUTEX_WAKE, 1, NULL, NULL, 0);
    } else {
        res = -1;
    }
    return res;
}
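A hedged sketch of the waiting side, mirroring the wake call above under the same 6-argument syscall convention (ksynch_wait_sketch is hypothetical, not DynamoRIO's actual API):

/* Hypothetical counterpart (sketch): block while *futex is still 0. */
static ptr_int_t
ksynch_wait_sketch(volatile int *futex)
{
    ASSERT(ALIGNED(futex, sizeof(int)));
    return dynamorio_syscall(SYS_futex, 6, futex, FUTEX_WAIT, 0,
                             NULL, NULL, 0);
}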
Example #28
static void *
new_st (size_t s)
{
  void *p = (void *) stack;
  stack += ALIGNED (s);
  if (MPFR_UNLIKELY (stack > (char *) &Buffer[BUFFER_SIZE]))
    {
      printf ("Stack overflow.\n");
      exit (1);
    }
  return p;
}
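Assumed globals tying Examples #2, #3, and #28 together (a sketch; the real test file's sizes may differ):

/* Assumed globals (sketch): */
#define BUFFER_SIZE 100000
static long Buffer[BUFFER_SIZE];
static char *stack = (char *) Buffer;   /* bump pointer into Buffer */
static mpfr_prec_t p = 100;             /* working precision, assumed */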
Example #29
static int okp(const kdft_desc *d,
               const R *ri, const R *ii, const R *ro, const R *io,
               INT is, INT os, INT vl, INT ivs, INT ovs, 
	       const planner *plnr)
{
     return (RIGHT_CPU()
             && ALIGNED(ii)
             && ALIGNED(io)
	     && !NO_SIMDP(plnr)
	     && SIMD_STRIDE_OK(is)
	     && SIMD_STRIDE_OK(os)
	     && SIMD_VSTRIDE_OK(ivs)
	     && SIMD_VSTRIDE_OK(ovs)
             && ri == ii + 1
             && ro == io + 1
             && (vl % VL) == 0
             && (!d->is || (d->is == is))
             && (!d->os || (d->os == os))
             && (!d->ivs || (d->ivs == ivs))
             && (!d->ovs || (d->ovs == ovs))
          );
}
Example #30
static int n1f_okp(const kdft_desc *d,
		   const R *ri, const R *ii, const R *ro, const R *io,
		   INT is, INT os, INT vl, INT ivs, INT ovs, 
		   const planner *plnr)
{
     return (1
             && ALIGNED(ri)
             && ALIGNED(ro)
	     && !NO_SIMDP(plnr)
	     && SIMD_STRIDE_OK(is)
	     && SIMD_STRIDE_OK(os)
	     && SIMD_VSTRIDE_OK(ivs)
	     && SIMD_VSTRIDE_OK(ovs)
             && ii == ri + 1
             && io == ro + 1
             && (vl % VL) == 0
             && (!d->is || (d->is == is))
             && (!d->os || (d->os == os))
             && (!d->ivs || (d->ivs == ivs))
             && (!d->ovs || (d->ovs == ovs))
          );
}