Example no. 1
  unsigned char* borrow(size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    if (as != allocSize_ || freeList_.empty()) {
      return nullptr;
    }

    auto p = freeList_.back().first;
    if (!freeList_.back().second) {
      PCHECK(0 == ::mprotect(p, pagesize(), PROT_NONE));
    }
    freeList_.pop_back();

    /* We allocate minimum number of pages required, plus a guard page.
       Since we use this for stack storage, requested allocation is aligned
       at the top of the allocated pages, while the guard page is at the bottom.

               -- increasing addresses -->
             Guard page     Normal pages
            |xxxxxxxxxx|..........|..........|
            <- allocSize_ ------------------->
         p -^                <- size -------->
                      limit -^
    */
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }
Example no. 2
    void * allocate( std::size_t size) const
    {
        BOOST_ASSERT( minimum_stacksize() <= size);
        BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

        const std::size_t pages( page_count( size) + 1); // add one guard page
        const std::size_t size_ = pages * pagesize();
        BOOST_ASSERT( 0 < size && 0 < size_);

        void * limit = ::VirtualAlloc( 0, size_, MEM_COMMIT, PAGE_READWRITE);
        if ( ! limit) throw std::bad_alloc();

        std::memset( limit, '\0', size_);

        DWORD old_options;
#if defined(BOOST_DISABLE_ASSERTS)
        ::VirtualProtect(
            limit, pagesize(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options);
#else
        const BOOL result = ::VirtualProtect(
            limit, pagesize(), PAGE_READWRITE | PAGE_GUARD /*PAGE_NOACCESS*/, & old_options);
        BOOST_ASSERT( FALSE != result);
#endif

        return static_cast< char * >( limit) + size_;
    }
Example no. 3
void Fl_Scrollbar::increment_cb()
{
    double i;
    switch (which_pushed)
    {
        case UP_ARROW:     i = -linesize(); break;
        case ABOVE_SLIDER: i = -pagesize(); break;
        case BELOW_SLIDER: i =  pagesize(); break;
        default:           i =  linesize(); break;
    }
    handle_drag(value()+i);
}
Example no. 4
inline
std::size_t page_count( std::size_t stacksize)
{
    return static_cast< std::size_t >(
        std::ceil(
            static_cast< float >( stacksize) / pagesize() ) );
}
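
page_count() above rounds the requested stack size up to whole pages; the guarded allocators in this listing then add one extra guard page. A minimal sketch of that arithmetic, assuming a 4096-byte page (the real pagesize() queries the OS):

#include <cassert>
#include <cmath>
#include <cstddef>

static std::size_t pagesize() { return 4096; }   // assumed page size

static std::size_t page_count(std::size_t stacksize) {
    return static_cast<std::size_t>(
        std::ceil(static_cast<float>(stacksize) / pagesize()));
}

int main() {
    // A 10000-byte stack needs ceil(10000 / 4096) = 3 data pages; the
    // allocators above add one guard page, so 4 pages = 16384 bytes.
    const std::size_t pages = page_count(10000) + 1;
    assert(pages * pagesize() == 16384);
    return 0;
}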
Example no. 5
int
buffer_file_initw (buffer_file_t* bf,
		   bd_t bd)
{
  bf->bd = bd;
  bf->bd_size = buffer_size (bd);
  if (bf->bd_size == -1) {
    return -1;
  }
  bf->capacity = bf->bd_size * pagesize ();
  if (bf->bd_size != 0) {
    bf->ptr = buffer_map (bd);
    if (bf->ptr == 0) {
      return -1;
    }
  }
  else {
    bf->ptr = 0;
  }
  bf->size = sizeof (size_t);
  bf->position = sizeof (size_t);
  bf->can_update = true;

  return 0;
}
Example no. 6
BOOST_CONTEXT_DECL
std::size_t page_count( std::size_t stacksize)
{
    return static_cast< std::size_t >( 
        std::ceil(
            static_cast< float >( stacksize) / pagesize() ) );
}
Example no. 7
int
vga_op_list_read_bassign (vga_op_list_t* vol,
			  size_t* address,
			  const void** data,
			  size_t* size)
{
  vga_op_type_t type;
  size_t bd_offset;
  if (buffer_file_read (&vol->bf, &type, sizeof (vga_op_type_t)) != 0 ||
      type != VGA_BASSIGN ||
      buffer_file_read (&vol->bf, address, sizeof (size_t)) != 0 ||
      buffer_file_read (&vol->bf, size, sizeof (size_t)) != 0 ||
      buffer_file_read (&vol->bf, &bd_offset, sizeof (size_t)) != 0) {
    return -1;
  }

  if (vol->ptr == 0) {
    return -1;
  }

  if (vol->bdb_size == -1 || bd_offset >= vol->bdb_size) {
    return -1;
  }

  *data = vol->ptr + bd_offset * pagesize ();

  return 0;
}
Example no. 8
int
vga_op_list_write_bassign (vga_op_list_t* vol,
			   size_t address,
			   size_t size,
			   bd_t bd)
{
  if (reset (vol) != 0) {
    return -1;
  }
  size_t bd_size = buffer_size (bd);
  if (bd_size == -1 || size > bd_size * pagesize ()) {
    /* The buffer was too small. */
    return -1;
  }

  /* Find the offset in pages of the data. */
  size_t bd_offset = buffer_size (vol->bdb);
  /* Append the data. */
  if (buffer_append (vol->bdb, bd) != 0) {
    return -1;
  }
  
  vga_op_type_t type = VGA_BASSIGN;
  if (buffer_file_write (&vol->bf, &type, sizeof (vga_op_type_t)) != 0 ||
      buffer_file_write (&vol->bf, &address, sizeof (size_t)) != 0 ||
      buffer_file_write (&vol->bf, &size, sizeof (size_t)) != 0 ||
      buffer_file_write (&vol->bf, &bd_offset, sizeof (size_t)) != 0) {
    return -1;
  }

  return increment_count (vol);
}
Example no. 9
    void deallocate( void * vp, std::size_t size) const
    {
        BOOST_ASSERT( vp);
        BOOST_ASSERT( minimum_stacksize() <= size);
        BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

        const std::size_t pages = page_count( size) + 1;
        const std::size_t size_ = pages * pagesize();
        BOOST_ASSERT( 0 < size && 0 < size_);
        void * limit = static_cast< char * >( vp) - size_;
        ::VirtualFree( limit, 0, MEM_RELEASE);
    }
Example no. 10
int
buffer_file_write (buffer_file_t* bf,
		   const void* ptr,
		   size_t size)
{
  if (!bf->can_update) {
    return -1;
  }

  size_t new_position = bf->position + size;
  if (new_position < bf->position) {
    /* Overflow. */
    return -1;
  }

  /* Resize if necessary. */
  if (bf->capacity < new_position) {
    buffer_unmap (bf->bd);
    bf->capacity = ALIGN_UP (new_position, pagesize ());
    bf->bd_size = bf->capacity / pagesize ();
    if (buffer_resize (bf->bd, bf->bd_size) != 0) {
      return -1;
    }
    bf->ptr = buffer_map (bf->bd);
    if (bf->ptr == 0) {
      return -1;
    }
  }

  memcpy (bf->ptr + bf->position, ptr, size);
  bf->position = new_position;
  if (bf->position > bf->size) {
    bf->size = bf->position;
    *((size_t*)bf->ptr) = bf->size;
  }

  return 0;
}
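
buffer_file_write() grows the backing buffer to the next page boundary with ALIGN_UP before recomputing bd_size in pages. A minimal sketch of that step, assuming ALIGN_UP is the usual round-up-to-a-multiple macro (the project's own headers define the real one) and a 4096-byte page:

#include <cassert>
#include <cstddef>

#define ALIGN_UP(value, align) ((((value) + (align) - 1) / (align)) * (align))

int main() {
    const std::size_t page = 4096;                               // assumed page size
    const std::size_t new_position = 5000;
    const std::size_t capacity = ALIGN_UP(new_position, page);   // 8192
    const std::size_t bd_size = capacity / page;                 // 2 pages
    assert(capacity == 8192 && bd_size == 2);
    return 0;
}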
Example no. 11
void
guarded_stack_allocator::deallocate( void * vp, std::size_t size) const
{
    BOOST_ASSERT( vp);
    BOOST_ASSERT( minimum_stacksize() <= size);
    BOOST_ASSERT( is_stack_unbound() || ( maximum_stacksize() >= size) );

    const std::size_t pages = page_count( size) + 1;
    const std::size_t size_ = pages * pagesize();
    BOOST_ASSERT( 0 < size && 0 < size_);
    void * limit = static_cast< char * >( vp) - size_;
    // conform to POSIX.4 (POSIX.1b-1993, _POSIX_C_SOURCE=199309L)
    ::munmap( limit, size_);
}
Example no. 12
int
buffer_file_put (buffer_file_t* bf,
		 char c)
{
  if (!bf->can_update) {
    return -1;
  }
  
  size_t new_position = bf->position + 1;
  if (new_position < bf->position) {
    /* Overflow. */
    return -1;
  }
  
  /* Resize if necessary. */
  if (bf->capacity < new_position) {
    buffer_unmap (bf->bd);
    bf->capacity = ALIGN_UP (new_position, pagesize ());
    bf->bd_size = bf->capacity / pagesize ();
    if (buffer_resize (bf->bd, bf->bd_size) != 0) {
      return -1;
    }
    bf->ptr = buffer_map (bf->bd);
    if (bf->ptr == 0) {
      return -1;
    }
  }
  
  *((char*)(bf->ptr + bf->position)) = c;
  bf->position = new_position;
  if (bf->position > bf->size) {
    bf->size = bf->position;
    *((size_t*)bf->ptr) = bf->size;
  }

  return 0;
}
Example no. 13
  /* virtual */ void
  isosurface_renderer_fraglist_raycasting::draw ()
  {
    _clean_textures();
    
    std::cout << "page: " << pagesize() 
              << "-" << allocation_grid_width() 
              << "| " << allocation_grid_height()<< " ";
              
    gpucast::gl::time_duration time_generation;

    gpucast::gl::timer t;
    t.start();

    // generate fragment lists and sort them
    basetype::draw();
    glFinish();
    t.stop();
 
    time_generation = t.result();

    basetype::readback();

    // compose result
    glDepthFunc(GL_LESS);
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);

    // make sure everything is initialized
    _init_glresources      ();
    _init_cuda             ();
    _init_platform         ();
    _init_shader           ();

    register_cuda_resources ();

    // intersect surfaces and write result back to imagebuffer
    _update_matrices();

    // execute raycasting
    raycast_fragment_lists();

    // draw final result as a quad
    _draw_result();

    std::cout << " ms fraggen: " << time_generation.as_seconds() * 1000.0f
              << " ~ #frags : " << _usage_indexbuffer
              << "      \r";
  }
Example no. 14
/*
 * What is the preferred I/O block size?
 */
static size_t
blksize(int fd)
{
#if defined(HAVE_ST_BLKSIZE)
	struct stat sb;
	if (fstat(fd, &sb) > -1)
	{
		if(sb.st_blksize >= 8192)
			return (size_t) sb.st_blksize;
		return 8192;
	}
	/* else, silent in the face of error */
#endif
	return (size_t) 2 * pagesize();
}
Example no. 15
/*ARGSUSED*/
static int
look_xmap(void *data,
          const prxmap_t *pmp,
          const char *object_name,
          int last, int doswap)
{
    struct totals *t = data;
    const pstatus_t *Psp = Pstatus(Pr);
    char mname[PATH_MAX];
    char *lname = NULL;
    char *ln;

    /*
     * If the mapping is not anon or not part of the heap, make a name
     * for it.  We don't want to report the heap as a.out's data.
     */
    if (!(pmp->pr_mflags & MA_ANON) ||
            pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
            pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
        lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
                          mname, sizeof (mname));
    }

    if (lname != NULL) {
        if ((ln = strrchr(lname, '/')) != NULL)
            lname = ln + 1;
    } else if ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD) {
        lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
                          pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
    }

    (void) printf("%.*lX", addr_width, (ulong_t)pmp->pr_vaddr);

    printK(ROUNDUP_KB(pmp->pr_size), size_width);
    printK(pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE), size_width);
    printK(ANON(pmp) * (pmp->pr_pagesize / KILOBYTE), size_width);
    printK(pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE), size_width);
    (void) printf(lname ? " %4s %-6s %s\n" : " %4s %s\n",
                  pagesize(pmp), mflags(pmp->pr_mflags), lname);

    t->total_size += ROUNDUP_KB(pmp->pr_size);
    t->total_rss += pmp->pr_rss * (pmp->pr_pagesize / KILOBYTE);
    t->total_anon += ANON(pmp) * (pmp->pr_pagesize / KILOBYTE);
    t->total_locked += (pmp->pr_locked * (pmp->pr_pagesize / KILOBYTE));

    return (0);
}
Example no. 16
int
buffer_file_initr (buffer_file_t* bf,
		   bd_t bd)
{
  bf->bd = bd;
  bf->bd_size = buffer_size (bd);
  if (bf->bd_size == -1) {
    return -1;
  }
  bf->capacity = bf->bd_size * pagesize ();
  bf->ptr = buffer_map (bd);
  if (bf->ptr == 0) {
    return -1;
  }
  bf->size = *((size_t*)bf->ptr);
  bf->position = sizeof (size_t);
  bf->can_update = false;

  return 0;
}
Example no. 17
int Fl_Scrollbar::value(int p, int w, int t, int l)
{
    //	p = position, first line displayed
    //	w = window, number of lines displayed
    //	t = top, number of first line
    //	l = length, total number of lines
    if (p+w > t+l) l = p+w-t;
    if (l <= 0) l = 1;
    int b = l-w+t;
    int X=0; int Y=0; int W=this->w(); int H=h(); box()->inset(X,Y,W,H);
    if (vertical()) {int T = W; W = H; H = T; T = b; b = t; t = T;}
    if (W >= 3*H) W -= 2*H;
    int S = W*w/l; if (S < H) S = H; if (S > W) S = W;
    if (S != slider_size() || t != minimum() || b != maximum())
    {
        slider_size(S); minimum(t); maximum(b); redraw();
    }
    int ls = int(linesize());
    pagesize(w>2*ls ? w-ls : ls);
    return Fl_Slider::value(p);
}
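
The last lines of value() above set the scrollbar's page size to the visible window minus one line, so paging keeps one line of overlap, and fall back to a single line for very small windows. A minimal sketch of that rule with assumed numbers; the helper name scroll_pagesize is ours, not FLTK's:

#include <cassert>

static int scroll_pagesize(int w, int ls) {   // w = visible lines, ls = linesize()
    return w > 2 * ls ? w - ls : ls;
}

int main() {
    assert(scroll_pagesize(20, 1) == 19);  // one line of overlap per page
    assert(scroll_pagesize(2, 1) == 1);    // tiny window: page = one line
    return 0;
}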
Example no. 18
MP_GLOBAL
void
__mp_newmemory(memoryinfo *i)
{
#if MP_WATCH_SUPPORT
    char b[64];
#endif /* MP_WATCH_SUPPORT */

#if MP_ARRAY_SUPPORT
    memorysize = 0;
#endif /* MP_ARRAY_SUPPORT */
    i->align = minalign();
    i->page = pagesize();
    i->stackdir = __mp_stackdirection(NULL);
    i->prog = progname();
#if MP_MMAP_SUPPORT
    /* On UNIX systems that support the mmap() function call, we default to
     * using sbrk() for user memory and mmap() for internal memory.  If the
     * MP_MMAP_ANONYMOUS macro is set then we don't need to open a file for
     * mapping.
     */
#if MP_MMAP_ANONYMOUS
    i->mfile = 0;
#else /* MP_MMAP_ANONYMOUS */
    i->mfile = open(MP_MMAP_FILENAME, O_RDWR);
#endif /* MP_MMAP_ANONYMOUS */
#else /* MP_MMAP_SUPPORT */
    i->mfile = -1;
#endif /* MP_MMAP_SUPPORT */
#if MP_WATCH_SUPPORT
    sprintf(b, MP_PROCFS_CTLNAME, __mp_processid());
    i->wfile = open(b, O_WRONLY);
#else /* MP_WATCH_SUPPORT */
    i->wfile = -1;
#endif /* MP_WATCH_SUPPORT */
    i->flags = 0;
}
Example no. 19
int main(int argc, char **argv) {
    int      fd;
    uint8_t *full_map;
    char     path[] = "tmp/testrand.txt";
    _(  fd=open(path,O_RDWR|O_NONBLOCK|O_CREAT|O_APPEND, S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)) _abort();
    //_(  fstat(fd, &filestat)                                                             ) _raise(-1);
    //curr_size = filestat.st_size;
    //mfd->map_size = max(curr_size, pagesize());
    _M( full_map = mmap(NULL, pagesize() << 1, PROT_READ|PROT_WRITE, MAP_SHARED, fd,0)) _abort();

    printf("%x | %x\n", full_map[0], full_map[pagesize()]);

#ifdef __LINUX__
    _M( full_map = mremap(full_map, pagesize() << 1, pagesize() * 3, MREMAP_MAYMOVE)) _abort();
#else
    #error mremap not yet implemented on mac (TODO)
#endif

    printf("%x | %x | %x\n", full_map[0], full_map[pagesize()], full_map[pagesize() << 1]);

    return 0;
}
Example no. 20
static int initAsm(void)
{
#ifdef I386_ASM
	/* Self-modifying code needs access to modify it self */
	{
		int fd;
		char *file=strdup("/tmp/ocpXXXXXX");
		char *start1, *stop1/*, *start2, *stop2*/;
		int len1/*, len2*/;
		fd=mkstemp(file);

		start1=(void *)remap_range1_start;
		stop1=(void *)remap_range1_stop;
		/*start2=(void *)remap_range2_start;
		stop2=(void *)remap_range2_stop;*/
#ifdef MIXER_DEBUG
		fprintf(stderr, "range1: %p - %p\n", start1, stop1);
		/*fprintf(stderr, "range2: %p - %p\n", start2, stop2);*/
#endif

		start1=(char *)(((int)start1)&~(pagesize()-1));
		/*start2=(char *)(((int)start2)&~(pagesize()-1));*/
		len1=((stop1-start1)+pagesize()-1)& ~(pagesize()-1);
		/*len2=((stop2-start2)+pagesize-1)& ~(pagesize()-1);*/
#ifdef MIXER_DEBUG
		fprintf(stderr, "mprot: %p + %08x\n", start1, len1);
		/*fprintf(stderr, "mprot: %p + %08x\n", start2, len2);*/
#endif
		if (write(fd, start1, len1)!=len1)
		{
#ifdef MIXER_DEBUG
			fprintf(stderr, "write 1 failed\n");
#endif
			return 1;
		}
		/*
		if (write(fd, start2, len2)!=len2)
		{
#ifdef MIXER_DEBUG
	 		fprintf(stderr, "write 2 failed\n");
#endif
			return 0;
		}*/

		if (mmap(start1, len1, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED, fd, 0)==MAP_FAILED)
		{
			perror("mmap()");
			return 1;
		}
		/*
		if (mmap(start2, len2, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED, fd, len1)==MAP_FAILED)
		{
			perror("mmap()");
			return 0;
		}
		*/
/*	if (mprotect((char *)(((int)remap_range1_start)&~(pagesize()-1)), (((char *)remap_range1_stop-(char *)remap_range1_start)+pagesize()-1)& ~(pagesize()-1), PROT_READ|PROT_WRITE|PROT_EXEC) ||
	    mprotect((char *)(((int)remap_range2_start)&~(pagesize()-1)), (((char *)remap_range2_stop-(char *)remap_range2_start)+pagesize()-1)& ~(pagesize()-1), PROT_READ|PROT_WRITE|PROT_EXEC) )
	    {
			perror("Couldn't mprotect"); 
 			return 0;
		}*/
#ifdef MIXER_DEBUG
		fprintf(stderr, "Done ?\n");
#endif
		close(fd);
		unlink(file);
		free(file);
	}
#endif
	return 0;
}
Example no. 21
    }
    freeList_.pop_back();

    /* We allocate minimum number of pages required, plus a guard page.
       Since we use this for stack storage, requested allocation is aligned
       at the top of the allocated pages, while the guard page is at the bottom.

               -- increasing addresses -->
             Guard page     Normal pages
            |xxxxxxxxxx|..........|..........|
            <- allocSize_ ------------------->
         p -^                <- size -------->
                      limit -^
    */
    auto limit = p + allocSize_ - size;
    assert(limit >= p + pagesize());
    return limit;
  }

  bool giveBack(unsigned char* limit, size_t size) {
    std::lock_guard<folly::SpinLock> lg(lock_);

    assert(storage_);

    auto as = allocSize(size);
    auto p = limit + size - as;
    if (p < storage_ || p >= storage_ + allocSize_ * kNumGuarded) {
      /* not mine */
      return false;
    }
Example no. 22
MP_API
void *
__mp_alloc(size_t l, size_t a, alloctype f, char *s, char *t, unsigned long u,
           char *g, size_t h, size_t k)
{
    void *p;
    size_t n;

    checkalloca(&l);
    if (l == 0)
        l = 1;
    switch (f)
    {
      case AT_MALLOC:
        p = malloc(l);
        break;
      case AT_CALLOC:
        if ((p = malloc(l)) != NULL)
            memset(p, 0, l);
        break;
      case AT_MEMALIGN:
      case AT_VALLOC:
      case AT_PVALLOC:
        /* We cannot rely on any system having implementations of memalign(),
         * valloc() or pvalloc() and so we must either implement them with
         * malloc() or with memalign() if it exists.  For the former
         * implementation, this is done by allocating extra space and then
         * rounding up the start address of the new allocation to the specified
         * alignment.  This means that there is likely to be some space wasted
         * for each allocation and the memory allocated by such functions
         * cannot be freed with free().  The latter point is also likely to be
         * true even if we allocated the memory with memalign().
         */
        n = pagesize();
        if (f == AT_PVALLOC)
            l = ((l - 1) & ~(n - 1)) + n;
        if ((f == AT_VALLOC) || (f == AT_PVALLOC) || (a > n))
            a = n;
        else if (a < sizeof(long))
            a = sizeof(long);
        else
            a = poweroftwo(a);
#if MP_MEMALIGN_SUPPORT
        p = memalign(a, l);
#else /* MP_MEMALIGN_SUPPORT */
        if (p = malloc(l + a - 1))
            p = (void *) ((((unsigned long) p - 1) & ~(a - 1)) + a);
#endif /* MP_MEMALIGN_SUPPORT */
        break;
      case AT_ALLOCA:
        p = __mp_xmalloc(l + sizeof(allocaheader), s, t, u, g, h);
        ((allocaheader *) p)->data.next = allocastack;
        ((allocaheader *) p)->data.frame = (void *) &l;
        allocastack = (allocaheader *) p;
        p = (char *) p + sizeof(allocaheader);
        break;
      case AT_XMALLOC:
        p = __mp_xmalloc(l, s, t, u, g, h);
        break;
      case AT_XCALLOC:
        p = __mp_xcalloc(l, s, t, u, g, h);
        break;
      case AT_NEW:
      case AT_NEWVEC:
        /* This implementation should really call the new handler if it
         * has been installed, but for now just abort if no memory can be
         * allocated.
         */
        p = __mp_xmalloc(l, s, t, u, g, h);
        break;
      default:
        illegalfunction("__mp_alloc", s, t, u);
        break;
    }
    return p;
}
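
The malloc()-based memalign emulation in __mp_alloc() over-allocates by a - 1 bytes and rounds the result up to the next multiple of the (power-of-two) alignment. A minimal sketch of that rounding step, using hypothetical pointer values; the helper name align_up is ours:

#include <cassert>
#include <cstddef>
#include <cstdint>

static void* align_up(void* p, std::size_t a) {   // a must be a power of two
    return reinterpret_cast<void*>(
        ((reinterpret_cast<std::uintptr_t>(p) - 1) & ~(a - 1)) + a);
}

int main() {
    assert(align_up(reinterpret_cast<void*>(0x1003), 16)
           == reinterpret_cast<void*>(0x1010));   // rounded up to 16 bytes
    assert(align_up(reinterpret_cast<void*>(0x1000), 16)
           == reinterpret_cast<void*>(0x1000));   // already aligned: unchanged
    return 0;
}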
Example no. 23
/* Depends on the first two fields of LUKS1 header format, magic and version */
static int device_check(struct reenc_ctx *rc, header_magic set_magic)
{
	char *buf = NULL;
	int r, devfd;
	ssize_t s;
	uint16_t version;
	size_t buf_size = pagesize();

	devfd = open(rc->device, O_RDWR | O_EXCL | O_DIRECT);
	if (devfd == -1) {
		if (errno == EBUSY) {
			log_err(_("Cannot exclusively open %s, device in use.\n"),
				rc->device);
			return -EBUSY;
		}
		log_err(_("Cannot open device %s\n"), rc->device);
		return -EINVAL;
	}

	if (set_magic == CHECK_OPEN) {
		r = 0;
		goto out;
	}

	if (posix_memalign((void *)&buf, alignment(devfd), buf_size)) {
		log_err(_("Allocation of aligned memory failed.\n"));
		r = -ENOMEM;
		goto out;
	}

	s = read(devfd, buf, buf_size);
	if (s < 0 || s != (ssize_t)buf_size) {
		log_err(_("Cannot read device %s.\n"), rc->device);
		r = -EIO;
		goto out;
	}

	/* Be sure that we do not process new version of header */
	memcpy((void*)&version, &buf[MAGIC_L], sizeof(uint16_t));
	version = ntohs(version);

	if (set_magic == MAKE_UNUSABLE && !memcmp(buf, MAGIC, MAGIC_L) &&
	    version == 1) {
		log_verbose(_("Marking LUKS device %s unusable.\n"), rc->device);
		memcpy(buf, NOMAGIC, MAGIC_L);
		r = 0;
	} else if (set_magic == MAKE_USABLE && !memcmp(buf, NOMAGIC, MAGIC_L) &&
		   version == 1) {
		log_verbose(_("Marking LUKS device %s usable.\n"), rc->device);
		memcpy(buf, MAGIC, MAGIC_L);
		r = 0;
	} else if (set_magic == CHECK_UNUSABLE && version == 1) {
		r = memcmp(buf, NOMAGIC, MAGIC_L) ? -EINVAL : 0;
		if (!r)
			rc->device_uuid = strndup(&buf[0xa8], 40);
		goto out;
	} else
		r = -EINVAL;

	if (!r) {
		if (lseek(devfd, 0, SEEK_SET) == -1)
			goto out;
		s = write(devfd, buf, buf_size);
		if (s < 0 || s != (ssize_t)buf_size) {
			log_err(_("Cannot write device %s.\n"), rc->device);
			r = -EIO;
		}
	} else
		log_dbg("LUKS signature check failed for %s.", rc->device);
out:
	if (buf)
		memset(buf, 0, buf_size);
	free(buf);
	close(devfd);
	return r;
}
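
device_check() reads one pagesize() worth of header through an O_DIRECT descriptor, which is why the buffer comes from posix_memalign() rather than malloc(). A minimal sketch of that pattern, assuming page alignment satisfies the device (the real code queries the required alignment with its alignment() helper); the function name read_header_direct is ours:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static ssize_t read_header_direct(const char* device, void** out) {
    const size_t page = (size_t)sysconf(_SC_PAGESIZE);
    int fd = open(device, O_RDONLY | O_DIRECT);
    if (fd == -1)
        return -1;
    void* buf = NULL;
    if (posix_memalign(&buf, page, page) != 0) {   // O_DIRECT needs aligned I/O
        close(fd);
        return -1;
    }
    ssize_t s = read(fd, buf, page);               // aligned, page-sized read
    close(fd);
    if (s < 0) {
        free(buf);
        return -1;
    }
    *out = buf;
    return s;
}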
Example no. 24
/*ARGSUSED*/
static int
look_smap(void *data,
          const prxmap_t *pmp,
          const char *object_name,
          int last, int doswap)
{
    struct totals *t = data;
    const pstatus_t *Psp = Pstatus(Pr);
    size_t size;
    char mname[PATH_MAX];
    char *lname = NULL;
    const char *format;
    size_t	psz = pmp->pr_pagesize;
    uintptr_t vaddr = pmp->pr_vaddr;
    uintptr_t segment_end = vaddr + pmp->pr_size;
    lgrp_id_t lgrp;
    memory_chunk_t mchunk;

    /*
     * If the mapping is not anon or not part of the heap, make a name
     * for it.  We don't want to report the heap as a.out's data.
     */
    if (!(pmp->pr_mflags & MA_ANON) ||
            pmp->pr_vaddr + pmp->pr_size <= Psp->pr_brkbase ||
            pmp->pr_vaddr >= Psp->pr_brkbase + Psp->pr_brksize) {
        lname = make_name(Pr, lflag, pmp->pr_vaddr, pmp->pr_mapname,
                          mname, sizeof (mname));
    }

    if (lname == NULL &&
            ((pmp->pr_mflags & MA_ANON) || Pstate(Pr) == PS_DEAD)) {
        lname = anon_name(mname, Psp, stacks, nstacks, pmp->pr_vaddr,
                          pmp->pr_size, pmp->pr_mflags, pmp->pr_shmid, NULL);
    }

    /*
     * Adjust the address range if -A is specified.
     */
    size = adjust_addr_range(pmp->pr_vaddr, segment_end, psz,
                             &vaddr, &segment_end);

    if (size == 0)
        return (0);

    if (!Lflag) {
        /*
         * Display the whole mapping
         */
        if (lname != NULL)
            format = "%.*lX %*luK %4s %-6s %s\n";
        else
            format = "%.*lX %*luK %4s %s\n";

        size = ROUNDUP_KB(size);

        (void) printf(format, addr_width, vaddr, size_width - 1, size,
                      pagesize(pmp), mflags(pmp->pr_mflags), lname);

        t->total_size += size;
        return (0);
    }

    if (lname != NULL)
        format = "%.*lX %*luK %4s %-6s%s %s\n";
    else
        format = "%.*lX %*luK %4s%s %s\n";

    /*
     * We need to display lgroups backing physical memory, so we break the
     * segment into individual pages and coalesce pages with the same lgroup
     * into one "segment".
     */

    /*
     * Initialize address descriptions for the mapping.
     */
    mem_chunk_init(&mchunk, segment_end, psz);
    size = 0;

    /*
     * Walk mapping (page by page) and display contiguous ranges of memory
     * allocated to same lgroup.
     */
    do {
        size_t		size_contig;

        /*
         * Get contiguous region of memory starting from vaddr allocated
         * from the same lgroup.
         */
        size_contig = get_contiguous_region(&mchunk, vaddr,
                                            segment_end, pmp->pr_pagesize, &lgrp);

        (void) printf(format, addr_width, vaddr,
                      size_width - 1, size_contig / KILOBYTE,
                      pagesize(pmp), mflags(pmp->pr_mflags),
                      lgrp2str(lgrp), lname);

        vaddr += size_contig;
        size += size_contig;
    } while (vaddr < segment_end && !interrupt);

    t->total_size += ROUNDUP_KB(size);
    return (0);
}
Example no. 25
int Fl_Scrollbar::handle(int event)
{
    // area of scrollbar:
    int X=0; int Y=0; int W=w(); int H=h(); box()->inset(X,Y,W,H);

    // adjust slider area to be inside the arrow buttons:
    if (vertical())
    {
        if (H >= 3*W) {Y += W; H -= 2*W;}
    }
    else
    {
        if (W >= 3*H) {X += H; W -= 2*H;}
    }

    // which widget part is highlighted?
    int mx = Fl::event_x();
    int my = Fl::event_y();
    int which_part;
    if (!Fl::event_inside(0, 0, w(), h())) which_part = NOTHING;
    else if (vertical())
    {
        if (my < Y) which_part = UP_ARROW;
        else if (my >= Y+H) which_part = DOWN_ARROW;
        else
        {
            int slidery = slider_position(value(), H);
            if (my < Y+slidery) which_part = ABOVE_SLIDER;
            else if (my >= Y+slidery+slider_size()) which_part = BELOW_SLIDER;
            else which_part = SLIDER;
        }
    }                            // horizontal
    else
    {
        if (mx < X) which_part = UP_ARROW;
        else if (mx >= X+W) which_part = DOWN_ARROW;
        else
        {
            int sliderx = slider_position(value(), W);
            if (mx < X+sliderx) which_part = ABOVE_SLIDER;
            else if (mx >= X+sliderx+slider_size()) which_part = BELOW_SLIDER;
            else which_part = SLIDER;
        }
    }
    switch (event)
    {
        case FL_FOCUS:
            return 0;
        case FL_ENTER:
        case FL_MOVE:
            if (!highlight_color()) return 1;
            if (which_part != which_highlight)
            {
                which_highlight = which_part;
                redraw(FL_DAMAGE_HIGHLIGHT);
            }
            return 1;
        case FL_LEAVE:
            if (which_highlight)
            {
                which_highlight = 0;
                redraw(FL_DAMAGE_HIGHLIGHT);
            }
            return 1;
        case FL_PUSH:
            // Clicking on the slider or middle or right click on the trough
            // gives us normal slider behavior:
            if (which_part == SLIDER ||
                (Fl::event_button() > 1 && which_part > DOWN_ARROW))
            {
                which_pushed = SLIDER;
                return Fl_Slider::handle(event, X,Y,W,H);
            }
            handle_push();
            goto J1;
        case FL_DRAG:
            if (which_pushed==SLIDER) return Fl_Slider::handle(event, X,Y,W,H);
            if (which_part == SLIDER) which_part = NOTHING;
            // it is okay to switch between arrows and nothing, but no other
            // changes are allowed:
            if (!which_pushed && which_part <= DOWN_ARROW) ;
            else if (!which_part && which_pushed <= DOWN_ARROW) ;
            else which_part = which_pushed;
            J1:
            if (which_part != which_pushed)
            {
                Fl::remove_timeout(timeout_cb, this);
                which_highlight = which_pushed = which_part;
                redraw(FL_DAMAGE_HIGHLIGHT);
                if (which_part)
                {
                    Fl::add_timeout(INITIALREPEAT, timeout_cb, this);
                    increment_cb();
                }
            }
            return 1;
        case FL_RELEASE:
            if (which_pushed == SLIDER)
            {
                Fl_Slider::handle(event, X,Y,W,H);
            }
            else if (which_pushed)
            {
                Fl::remove_timeout(timeout_cb, this);
                handle_release();
                redraw(FL_DAMAGE_HIGHLIGHT);
            }
            which_pushed = NOTHING;
            return 1;
        case FL_MOUSEWHEEL:
            {
                float n = (vertical() ? Fl::event_dy() : Fl::event_dx())
                    * Fl_Style::wheel_scroll_lines * linesize();
                if (fabsf(n) > pagesize()) n = (n<0)?-pagesize():pagesize();
                handle_drag(value()+n);
                return 1;
            }
        case FL_KEY:
            if (vertical()) switch(Fl::event_key())
            {
                case FL_Home: handle_drag(maximum()); return 1;
                case FL_End:  handle_drag(minimum()); return 1;
                case FL_Page_Up: handle_drag(value()-pagesize()); return 1;
                case FL_Page_Down: handle_drag(value()+pagesize()); return 1;
            }                    // else fall through...
        default:
            return Fl_Slider::handle(event);
    }
}
Example no. 26
 /* Returns a multiple of pagesize() enough to store size + one guard page */
 static size_t allocSize(size_t size) {
   return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
 }
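
A minimal check of the arithmetic in allocSize(), assuming a 4096-byte page: a 5000-byte request needs two data pages plus the guard page, i.e. three pages in total.

#include <cassert>
#include <cstddef>

static std::size_t pagesize() { return 4096; }    // assumed page size

static std::size_t allocSize(std::size_t size) {
    return pagesize() * ((size + pagesize() - 1) / pagesize() + 1);
}

int main() {
    assert(allocSize(5000) == 3 * 4096);   // 2 data pages + 1 guard page
    assert(allocSize(4096) == 2 * 4096);   // exactly one page + guard
    return 0;
}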