Example #1
int	SYSTEM_SWAP_SIZE(const char *cmd, const char *param, unsigned flags, AGENT_RESULT *result)
{
/*
 *  FreeBSD 7.0 i386
 */
#ifdef XSWDEV_VERSION	/* defined in <vm/vm_param.h> */
	char		swapdev[64], mode[64];
	int		mib[16], *mib_dev;
	size_t		sz, mib_sz;
	struct xswdev	xsw;
	zbx_uint64_t	total = 0, used = 0;

	assert(result);

	init_result(result);

	if (num_param(param) > 2)
		return SYSINFO_RET_FAIL;

	if (0 != get_param(param, 1, swapdev, sizeof(swapdev)))
		return SYSINFO_RET_FAIL;

	if (0 != get_param(param, 2, mode, sizeof(mode)))
		*mode = '\0';

	sz = sizeof(mib) / sizeof(mib[0]);
	if (-1 == sysctlnametomib("vm.swap_info", mib, &sz))
		return SYSINFO_RET_FAIL;

	mib_sz = sz + 1;
	mib_dev = &(mib[sz]);

	*mib_dev = 0;
	sz = sizeof(xsw);

	while (-1 != sysctl(mib, mib_sz, &xsw, &sz, NULL, 0))
	{
		if ('\0' == *swapdev || 0 == strcmp(swapdev, "all")	/* default parameter */
				|| 0 == strcmp(swapdev, devname(xsw.xsw_dev, S_IFCHR)))
		{
			total += (zbx_uint64_t)xsw.xsw_nblks;
			used += (zbx_uint64_t)xsw.xsw_used;
		}
		(*mib_dev)++;
	}

	if ('\0' == *mode || 0 == strcmp(mode, "free"))	/* default parameter */
	{
		SET_UI64_RESULT(result, (total - used) * getpagesize());
	}
	else if (0 == strcmp(mode, "total"))
	{
		SET_UI64_RESULT(result, total * getpagesize());
	}
	else if (0 == strcmp(mode, "used"))
	{
		SET_UI64_RESULT(result, used * getpagesize());
	}
	else if (0 == strcmp(mode, "pfree"))
	{
		SET_DBL_RESULT(result, total ? ((double)(total - used) * 100.0 / (double)total) : 0.0);
	}
	else if (0 == strcmp(mode, "pused"))
	{
		SET_DBL_RESULT(result, total ? ((double)used * 100.0 / (double)total) : 0.0);
	}
	else
		return SYSINFO_RET_FAIL;

	return SYSINFO_RET_OK;
#else
	return SYSINFO_RET_FAIL;
#endif
}
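The counters returned by vm.swap_info are in pages, which is why every branch above multiplies by getpagesize(). A minimal standalone sketch of the same pages-to-bytes arithmetic, using made-up page counts rather than values read from the kernel:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical page counts; a real agent reads these via sysctl(vm.swap_info) */
	uint64_t total = 262144;	/* swap pages in total */
	uint64_t used  = 32768;		/* swap pages in use */
	uint64_t page  = (uint64_t)getpagesize();

	printf("total: %llu bytes\n", (unsigned long long)(total * page));
	printf("free:  %llu bytes\n", (unsigned long long)((total - used) * page));
	printf("pused: %.2f%%\n", total ? (double)used * 100.0 / (double)total : 0.0);
	return 0;
}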
Example #2
int initialize_cape(){
	FILE *fd; 			// opened and closed for each file
	char path[MAX_BUF]; // buffer to store file path string
	int i = 0; 			// general use counter
	
	printf("\n");

	// check if another project was using resources
	// kill that process cleanly with sigint if so
	fd = fopen(LOCKFILE, "r");
	if (fd != NULL) {
		int old_pid;
		fscanf(fd,"%d", &old_pid);
		if(old_pid != 0){
			printf("warning, shutting down existing robotics project\n");
			kill((pid_t)old_pid, SIGINT);
			sleep(1);
		}
		// close and delete the old file
		fclose(fd);
		remove(LOCKFILE);
	}
	
	
	// create new lock file with process id
	fd = fopen(LOCKFILE, "ab+");
	if (fd == NULL) {
		printf("\n error opening LOCKFILE for writing\n");
		return -1;
	}
	pid_t current_pid = getpid();
	printf("Current Process ID: %d\n", (int)current_pid);
	fprintf(fd,"%d",(int)current_pid);
	fflush(fd);
	fclose(fd);
	
	// ensure gpios are exported
	printf("Initializing GPIO\n");
	for(i=0; i<NUM_OUT_PINS; i++){
		if(gpio_export(out_gpio_pins[i])){
			printf("failed to export gpio %d", out_gpio_pins[i]);
			return -1;
		};
		gpio_set_dir(out_gpio_pins[i], OUTPUT_PIN);
	}
	
	// set up default values for some gpio
	disable_motors();
	deselect_spi1_slave(1);	
	deselect_spi1_slave(2);	
	

	//Set up PWM
	printf("Initializing PWM\n");
	i=0;
	for(i=0; i<4; i++){
		strcpy(path, pwm_files[i]);
		strcat(path, "polarity");
		fd = fopen(path, "a");
		if(fd == NULL){
			printf("PWM polarity not available in /sys/class/devices/ocp.3\n");
			return -1;
		}
		//set correct polarity such that 'duty' is time spent HIGH
		fprintf(fd,"%c",'0');
		fflush(fd);
		fclose(fd);
	}
	
	//leave duty cycle file open for future writes
	for(i=0; i<4; i++){
		strcpy(path, pwm_files[i]);
		strcat(path, "duty");
		pwm_duty_pointers[i] = fopen(path, "a");
	}
	
	//read in the pwm period defined in device tree overlay .dts
	strcpy(path, pwm_files[0]);
	strcat(path, "period");
	fd = fopen(path, "r");
	if(fd == NULL){
		printf("PWM period not available in /sys/class/devices/ocp.3\n");
		return -1;
	}
	fscanf(fd,"%i", &pwm_period_ns);
	fclose(fd);
	
	// mmap pwm modules to get fast access to eQep encoder position
	// see mmap_eqep example program for more mmap and encoder info
	printf("Initializing eQep Encoders\n");
	int dev_mem;
	if ((dev_mem = open("/dev/mem", O_RDWR | O_SYNC))==-1){
	  printf("Could not open /dev/mem \n");
	  return -1;
	}
	pwm_map_base[0] = mmap(0,getpagesize(),PROT_READ|PROT_WRITE,MAP_SHARED,dev_mem,PWM0_BASE);
	pwm_map_base[1] = mmap(0,getpagesize(),PROT_READ|PROT_WRITE,MAP_SHARED,dev_mem,PWM1_BASE);
	pwm_map_base[2] = mmap(0,getpagesize(),PROT_READ|PROT_WRITE,MAP_SHARED,dev_mem,PWM2_BASE);
	if(pwm_map_base[0] == (void *) -1) {
		printf("Unable to mmap pwm \n");
		return(-1);
	}
	close(dev_mem);
	
	// Test eqep and reset position
	for(i=1;i<3;i++){
		if(set_encoder_pos(i,0)){
			printf("failed to access eQep register\n");
			printf("eQep driver not loaded\n");
			return -1;
		}
	}
	
	//set up function pointers for button press events
	set_pause_pressed_func(&null_func);
	set_pause_unpressed_func(&null_func);
	set_mode_pressed_func(&null_func);
	set_mode_unpressed_func(&null_func);
	
	//event handler thread for buttons
	printf("Starting Event Handler\n");
	pthread_t event_thread;
	pthread_create(&event_thread, NULL, read_events, (void*) NULL);
	
	// Load binary into PRU
	printf("Starting PRU servo controller\n");
	if(initialize_pru_servos()){
		printf("WARNING: PRU init FAILED");
	}
	
	// Print current battery voltage
	printf("Battery Voltage = %fV\n", getBattVoltage());
	
	// Start Signal Handler
	printf("Enabling exit signal handler\n");
	signal(SIGINT, ctrl_c);	
	
	// all done
	set_state(PAUSED);
	printf("\nRobotics Cape Initialized\n");

	return 0;
}
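One detail worth noting above is that dev_mem is closed right after mmap(); an established mapping stays valid after its descriptor is closed. A minimal sketch of that /dev/mem pattern, assuming a hypothetical page-aligned physical address PHYS_BASE rather than the PWM/eQEP bases used by the cape library:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define PHYS_BASE 0x48300000UL	/* hypothetical page-aligned physical address */

int main(void)
{
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd == -1) {
		perror("open /dev/mem");
		return 1;
	}

	volatile uint32_t *regs = mmap(NULL, getpagesize(),
	                               PROT_READ | PROT_WRITE, MAP_SHARED,
	                               fd, PHYS_BASE);
	close(fd);	/* the mapping stays valid after the descriptor is closed */

	if (regs == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("first register word: 0x%08x\n", (unsigned)regs[0]);
	munmap((void *)regs, getpagesize());
	return 0;
}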
Example #3
void *
uxAllocateMemory(usqInt minHeapSize, usqInt desiredHeapSize)
{
	if (heap) {
		fprintf(stderr, "uxAllocateMemory: already called\n");
		exit(1);
	}
	pageSize= getpagesize();
	pageMask= ~(pageSize - 1);
#else /* SPURVM */
void *uxAllocateMemory(usqInt minHeapSize, usqInt desiredHeapSize)
{
# if !ALWAYS_USE_MMAP
  if (!useMmap)
    return malloc(desiredHeapSize);
# endif

  if (heap) {
      fprintf(stderr, "uxAllocateMemory: already called\n");
      exit(1);
  }
  pageSize= getpagesize();
  pageMask= ~(pageSize - 1);

  DPRINTF(("uxAllocateMemory: pageSize 0x%x (%d), mask 0x%x\n", pageSize, pageSize, pageMask));

# if (!MAP_ANON)
  if ((devZero= open("/dev/zero", O_RDWR)) < 0) {
      perror("uxAllocateMemory: /dev/zero");
      return 0;
  }
# endif

  DPRINTF(("uxAllocateMemory: /dev/zero descriptor %d\n", devZero));
  DPRINTF(("uxAllocateMemory: min heap %d, desired %d\n", minHeapSize, desiredHeapSize));

  heapLimit= valign(max(desiredHeapSize, useMmap));

  while ((!heap) && (heapLimit >= minHeapSize)) {
      DPRINTF(("uxAllocateMemory: mapping 0x%08x bytes (%d Mbytes)\n", heapLimit, heapLimit >> 20));
      if (MAP_FAILED == (heap= mmap(0, heapLimit, MAP_PROT, MAP_FLAGS, devZero, 0))) {
	  heap= 0;
	  heapLimit= valign(heapLimit / 4 * 3);
	}
  }

  if (!heap) {
      fprintf(stderr, "uxAllocateMemory: failed to allocate at least %lld bytes)\n", (long long)minHeapSize);
      useMmap= 0;
      return malloc(desiredHeapSize);
  }

  heapSize= heapLimit;

  if (overallocateMemory)
    uxShrinkMemoryBy(heap + heapLimit, heapLimit - desiredHeapSize);

  return heap;
}
#endif /* SPURVM */


static int log_mem_delta = 0;
#define MDPRINTF(foo) if (log_mem_delta) DPRINTF(foo); else 0

/* grow the heap by delta bytes.  answer the new end of memory. */

char *uxGrowMemoryBy(char *oldLimit, sqInt delta)
{
  if (useMmap)
    {
      int newSize=  min(valign(oldLimit - heap + delta), heapLimit);
      int newDelta= newSize - heapSize;
      MDPRINTF(("uxGrowMemory: %p By: %d(%d) (%d -> %d)\n", oldLimit, newDelta, delta, heapSize, newSize));
      assert(0 == (newDelta & ~pageMask));
      assert(0 == (newSize  & ~pageMask));
      assert(newDelta >= 0);
      if (newDelta)
	{
	  MDPRINTF(("was: %p %p %p = 0x%x (%d) bytes\n", heap, heap + heapSize, heap + heapLimit, heapSize, heapSize));
	  if (overallocateMemory)
	    {
	      char *base= heap + heapSize;
	      MDPRINTF(("remap: %p + 0x%x (%d)\n", base, newDelta, newDelta));
	      if (MAP_FAILED == mmap(base, newDelta, MAP_PROT, MAP_FLAGS | MAP_FIXED, devZero, heapSize))
		{
		  perror("mmap");
		  return oldLimit;
		}
	    }
	  heapSize += newDelta;
	  MDPRINTF(("now: %p %p %p = 0x%x (%d) bytes\n", heap, heap + heapSize, heap + heapLimit, heapSize, heapSize));
	  assert(0 == (heapSize  & ~pageMask));
	}
      return heap + heapSize;
    }
  return oldLimit;
}


/* shrink the heap by delta bytes.  answer the new end of memory. */

char *uxShrinkMemoryBy(char *oldLimit, sqInt delta)
{
  if (useMmap)
    {
      int newSize=  max(0, valign((char *)oldLimit - heap - delta));
      int newDelta= heapSize - newSize;
      MDPRINTF(("uxGrowMemory: %p By: %d(%d) (%d -> %d)\n", oldLimit, newDelta, delta, heapSize, newSize));
      assert(0 == (newDelta & ~pageMask));
      assert(0 == (newSize  & ~pageMask));
      assert(newDelta >= 0);
      if (newDelta)
	{
	  MDPRINTF(("was: %p %p %p = 0x%x (%d) bytes\n", heap, heap + heapSize, heap + heapLimit, heapSize, heapSize));
	  if (overallocateMemory)
	    {
	      char *base= heap + heapSize - newDelta;
	      MDPRINTF(("unmap: %p + 0x%x (%d)\n", base, newDelta, newDelta));
	      if (munmap(base, newDelta) < 0)
		{
		  perror("unmap");
		  return oldLimit;
		}
	    }
	  heapSize -= newDelta;
	  MDPRINTF(("now: %p %p %p = 0x%x (%d) bytes\n", heap, heap + heapSize, heap + heapLimit, heapSize, heapSize));
	  assert(0 == (heapSize  & ~pageMask));
	}
      return heap + heapSize;
    }
  return oldLimit;
}


/* answer the number of bytes available for growing the heap. */

sqInt uxMemoryExtraBytesLeft(sqInt includingSwap)
{
  return useMmap ? (heapLimit - heapSize) : 0;
}


#else  /* HAVE_MMAP */

# if COG
void *
uxAllocateMemory(sqInt minHeapSize, sqInt desiredHeapSize)
{
	if (pageMask) {
		fprintf(stderr, "uxAllocateMemory: already called\n");
		exit(1);
	}
	pageSize = getpagesize();
	pageMask = ~(pageSize - 1);
#	if SPURVM
	return malloc(desiredHeapSize);
#	else
	return malloc(desiredHeapSize);
#	endif
}
# else /* COG */
void *uxAllocateMemory(sqInt minHeapSize, sqInt desiredHeapSize)	{ return malloc(desiredHeapSize); }
# endif /* COG */
char *uxGrowMemoryBy(char * oldLimit, sqInt delta)			{ return oldLimit; }
char *uxShrinkMemoryBy(char *oldLimit, sqInt delta)			{ return oldLimit; }
sqInt uxMemoryExtraBytesLeft(sqInt includingSwap)			{ return 0; }

#endif /* HAVE_MMAP */



#if defined(SQ_IMAGE32) && defined(SQ_HOST64)

usqInt sqAllocateMemory(usqInt minHeapSize, usqInt desiredHeapSize)
{
  sqMemoryBase= uxAllocateMemory(minHeapSize, desiredHeapSize);
  if (!sqMemoryBase) return 0;
  sqMemoryBase -= SQ_FAKE_MEMORY_OFFSET;
  return (sqInt)SQ_FAKE_MEMORY_OFFSET;
}
Example #4
// FreeConfigReg unmaps a memory location that has been mapped with the MallocConfigReg()
// function...
// 
// Parameter: ConfigRegVirtualAddr      - Virtual address of configuration register.
// 
void FreeConfigReg(int* ConfigRegVirtualAddr) {
  t_vcharp MappedAddr;
  unsigned int alignedAddr = (int)ConfigRegVirtualAddr & (~(getpagesize()-1));
  munmap((void*)alignedAddr, getpagesize());
  return;
}
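The alignment step used by FreeConfigReg(), masking an address down to the start of its page, can be written portably with uintptr_t. A small standalone sketch (the address value is made up):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uintptr_t page_mask = ~((uintptr_t)getpagesize() - 1);
	uintptr_t addr = 0x2000123u;	/* hypothetical virtual address */

	uintptr_t page_start     = addr & page_mask;
	uintptr_t offset_in_page = addr - page_start;

	printf("page start: 0x%lx, offset in page: %lu\n",
	       (unsigned long)page_start, (unsigned long)offset_in_page);
	return 0;
}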
Example #5
inline void proc_find_top(struct process **cpu, struct process **mem)
{
	struct kinfo_proc *p;
	int n_processes = 0;
	int i, j = 0;
	struct process *processes;
	int mib[2];

	u_int total_pages;
	int64_t usermem;
	int pagesize = getpagesize();

	/* we get total pages count again to be sure it is up to date */
	mib[0] = CTL_HW;
	mib[1] = HW_USERMEM64;
	size_t size = sizeof(usermem);

	if (sysctl(mib, 2, &usermem, &size, NULL, 0) == -1) {
		NORM_ERR("error reading usermem");
	}

	/* translate bytes into page count */
	total_pages = usermem / pagesize;

	int max_size = sizeof(struct kinfo_proc);


	pthread_mutex_lock(&kvm_mutex);
	p = kvm_getprocs(kd, KERN_PROC_ALL, 0, max_size, &n_processes);
	processes = malloc(n_processes * sizeof(struct process));

	for (i = 0; i < n_processes; i++) {
		if (!((p[i].p_flag & P_SYSTEM)) && p[i].p_comm != NULL) {
			processes[j].pid = p[i].p_pid;
			processes[j].name = strndup(p[i].p_comm, text_buffer_size);
			processes[j].amount = 100.0 * p[i].p_pctcpu / FSCALE;
			processes[j].vsize = p[i].p_vm_map_size;
			processes[j].rss = p[i].p_vm_rssize * PAGE_SIZE;
			j++;
		}
	}
	pthread_mutex_unlock(&kvm_mutex);

	qsort(processes, j - 1, sizeof(struct process), comparemem);
	for (i = 0; i < 10; i++) {
		struct process *tmp, *ttmp;

		tmp = malloc(sizeof(struct process));
		tmp->pid = processes[i].pid;
		tmp->amount = processes[i].amount;
		tmp->name = strndup(processes[i].name, text_buffer_size);
		tmp->vsize = processes[i].vsize;
		tmp->rss = processes[i].rss;

		ttmp = mem[i];
		mem[i] = tmp;
		if (ttmp != NULL) {
			free(ttmp->name);
			free(ttmp);
		}
	}

	qsort(processes, j - 1, sizeof(struct process), comparecpu);
	for (i = 0; i < 10; i++) {
		struct process *tmp, *ttmp;

		tmp = malloc(sizeof(struct process));
		tmp->pid = processes[i].pid;
		tmp->amount = processes[i].amount;
		tmp->name = strndup(processes[i].name, text_buffer_size);
		tmp->vsize = processes[i].vsize;
		tmp->rss = processes[i].rss;

		ttmp = cpu[i];
		cpu[i] = tmp;
		if (ttmp != NULL) {
			free(ttmp->name);
			free(ttmp);
		}
	}

	for (i = 0; i < j; i++) {
		free(processes[i].name);
	}
	free(processes);
}
Example #6
void* valloc(size_t bytes) {
    /* return memory aligned to the system page size */
    return memalign( getpagesize(), bytes );
}
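A short usage sketch for a page-aligned allocation like the valloc() wrapper above; it calls posix_memalign() directly instead of memalign() (same alignment contract) so the example stands alone, then checks the alignment:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)getpagesize();
	void *buf = NULL;

	/* posix_memalign used here in place of memalign(); page size is a valid alignment */
	if (posix_memalign(&buf, page, 3 * page) != 0) {
		fprintf(stderr, "posix_memalign failed\n");
		return 1;
	}

	printf("page size %zu, buffer %p, page-aligned: %s\n",
	       page, buf, ((uintptr_t)buf % page == 0) ? "yes" : "no");
	free(buf);
	return 0;
}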
Example #7

/* Create a view of SIZE bytes from DESCRIPTOR at OFFSET.  */

int
backtrace_get_view (struct backtrace_state *state ATTRIBUTE_UNUSED,
		    int descriptor, off_t offset, size_t size,
		    backtrace_error_callback error_callback,
		    void *data, struct backtrace_view *view)
{
  size_t pagesize;
  unsigned int inpage;
  off_t pageoff;
  void *map;

  pagesize = getpagesize ();
  inpage = offset % pagesize;
  pageoff = offset - inpage;

  size += inpage;
  size = (size + (pagesize - 1)) & ~ (pagesize - 1);

  map = mmap (NULL, size, PROT_READ, MAP_PRIVATE, descriptor, pageoff);
  if (map == MAP_FAILED)
    {
      error_callback (data, "mmap", errno);
      return 0;
    }

  view->data = (char *) map + inpage;
  view->base = map;
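The excerpt above widens the requested [offset, offset + size) range so that the mmap offset is page-aligned and the length is a whole number of pages. The same arithmetic in isolation, with made-up numbers:

#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	size_t pagesize = (size_t)getpagesize();
	off_t  offset   = 10000;	/* hypothetical file offset of the view */
	size_t size     = 3000;		/* hypothetical view size in bytes */

	size_t inpage  = (size_t)(offset % (off_t)pagesize);
	off_t  pageoff = offset - (off_t)inpage;

	size += inpage;						/* cover the bytes between pageoff and offset */
	size = (size + (pagesize - 1)) & ~(pagesize - 1);	/* round up to a page multiple */

	printf("mmap %zu bytes starting at file offset %lld\n",
	       size, (long long)pageoff);
	return 0;
}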
Example #8
static size_t GetHighResClock(void *buf, size_t maxbuf)
{
    unsigned phys_addr, raddr, cycleval;
    static volatile unsigned *iotimer_addr = NULL;
    static int tries = 0;
    static int cntr_size;
    int mfd;
    long s0[2];
    struct timeval tv;

#ifndef SGI_CYCLECNTR_SIZE
#define SGI_CYCLECNTR_SIZE      165     /* Size user needs to use to read CC */
#endif

    if (iotimer_addr == NULL) {
	if (tries++ > 1) {
	    /* Don't keep trying if it didn't work */
	    return 0;
	}

	/*
	** For SGI machines we can use the cycle counter, if it has one,
	** to generate some truly random numbers
	*/
	phys_addr = syssgi(SGI_QUERY_CYCLECNTR, &cycleval);
	if (phys_addr) {
	    int pgsz = getpagesize();
	    int pgoffmask = pgsz - 1;

	    raddr = phys_addr & ~pgoffmask;
	    mfd = open("/dev/mmem", O_RDONLY);
	    if (mfd < 0) {
		return 0;
	    }
	    iotimer_addr = (unsigned *)
		mmap(0, pgoffmask, PROT_READ, MAP_PRIVATE, mfd, (int)raddr);
	    if (iotimer_addr == (void*)-1) {
		close(mfd);
		iotimer_addr = NULL;
		return 0;
	    }
	    iotimer_addr = (unsigned*)
		((__psint_t)iotimer_addr | (phys_addr & pgoffmask));
	    /*
	     * The file 'mfd' is purposefully not closed.
	     */
	    cntr_size = syssgi(SGI_CYCLECNTR_SIZE);
	    if (cntr_size < 0) {
		struct utsname utsinfo;

		/* 
		 * We must be executing on a 6.0 or earlier system, since the
		 * SGI_CYCLECNTR_SIZE call is not supported.
		 * 
		 * The only pre-6.1 platforms with 64-bit counters are
		 * IP19 and IP21 (Challenge, PowerChallenge, Onyx).
		 */
		uname(&utsinfo);
		if (!strncmp(utsinfo.machine, "IP19", 4) ||
		    !strncmp(utsinfo.machine, "IP21", 4))
			cntr_size = 64;
		else
			cntr_size = 32;
	    }
	    cntr_size /= 8;	/* Convert from bits to bytes */
	}
    }

    s0[0] = *iotimer_addr;
    if (cntr_size > 4)
	s0[1] = *(iotimer_addr + 1);
    memcpy(buf, (char *)&s0[0], cntr_size);
    return CopyLowBits(buf, maxbuf, &s0, cntr_size);
}
Example #9
Ipp32u vm_mmap_get_page_size(void)
{
    return getpagesize();

} /* Ipp32u vm_mmap_get_page_size(void) */
Example #10
int XDecode_main_Initialize(XDecode_main *InstancePtr, const char* InstanceName) {
	XDecode_main_uio_info *InfoPtr = &uio_info;
	struct dirent **namelist;
    int i, n;
    char* s;
    char file[ MAX_UIO_PATH_SIZE ];
    char name[ MAX_UIO_NAME_SIZE ];
    int flag = 0;

    assert(InstancePtr != NULL);

    n = scandir("/sys/class/uio", &namelist, 0, alphasort);
    if (n < 0)  return XST_DEVICE_NOT_FOUND;
    for (i = 0;  i < n; i++) {
    	strcpy(file, "/sys/class/uio/");
    	strcat(file, namelist[i]->d_name);
    	strcat(file, "/name");
        if ((line_from_file(file, name) == 0) && (strcmp(name, InstanceName) == 0)) {
            flag = 1;
            s = namelist[i]->d_name;
            s += 3; // "uio"
            InfoPtr->uio_num = atoi(s);
            break;
        }
    }
    if (flag == 0)  return XST_DEVICE_NOT_FOUND;

    uio_info_read_name(InfoPtr);
    uio_info_read_version(InfoPtr);
    for (n = 0; n < MAX_UIO_MAPS; ++n) {
        uio_info_read_map_addr(InfoPtr, n);
        uio_info_read_map_size(InfoPtr, n);
    }

    sprintf(file, "/dev/uio%d", InfoPtr->uio_num);
    if ((InfoPtr->uio_fd = open(file, O_RDWR)) < 0) {
        return XST_OPEN_DEVICE_FAILED;
    }

    // NOTE: slave interface 'Axi4lites' should be mapped to uioX/map0
    InstancePtr->Axi4lites_BaseAddress = (u32)mmap(NULL, InfoPtr->maps[0].size, PROT_READ|PROT_WRITE, MAP_SHARED, InfoPtr->uio_fd, 0 * getpagesize());
    assert(InstancePtr->Axi4lites_BaseAddress);

    InstancePtr->IsReady = XIL_COMPONENT_IS_READY;

    return XST_SUCCESS;
}
Example #11
/****************************************************************//**
Allocates large pages memory.
@return	allocated memory */
UNIV_INTERN
void*
os_mem_alloc_large(
/*===============*/
	ulint*	n)			/*!< in/out: number of bytes */
{
	void*	ptr;
	ulint	size;
#if defined HAVE_LARGE_PAGES && defined UNIV_LINUX
	int shmid;
	struct shmid_ds buf;

	if (!os_use_large_pages || !os_large_page_size) {
		goto skip;
	}

	/* Align block size to os_large_page_size */
	ut_ad(ut_is_2pow(os_large_page_size));
	size = ut_2pow_round(*n + (os_large_page_size - 1),
			     os_large_page_size);

	shmid = shmget(IPC_PRIVATE, (size_t)size, SHM_HUGETLB | SHM_R | SHM_W);
	if (shmid < 0) {
		fprintf(stderr, "InnoDB: HugeTLB: Warning: Failed to allocate"
			" %lu bytes. errno %d\n", size, errno);
		ptr = NULL;
	} else {
		ptr = shmat(shmid, NULL, 0);
		if (ptr == (void *)-1) {
			fprintf(stderr, "InnoDB: HugeTLB: Warning: Failed to"
				" attach shared memory segment, errno %d\n",
				errno);
			ptr = NULL;
		}

		/* Remove the shared memory segment so that it will be
		automatically freed after memory is detached or
		process exits */
		shmctl(shmid, IPC_RMID, &buf);
	}

	if (ptr) {
		*n = size;
		os_fast_mutex_lock(&ut_list_mutex);
		ut_total_allocated_memory += size;
		os_fast_mutex_unlock(&ut_list_mutex);
# ifdef UNIV_SET_MEM_TO_ZERO
		memset(ptr, '\0', size);
# endif
		UNIV_MEM_ALLOC(ptr, size);
		return(ptr);
	}

	fprintf(stderr, "InnoDB HugeTLB: Warning: Using conventional"
		" memory pool\n");
skip:
#endif /* HAVE_LARGE_PAGES && UNIV_LINUX */

#ifdef __WIN__
	SYSTEM_INFO	system_info;
	GetSystemInfo(&system_info);

	/* Align block size to system page size */
	ut_ad(ut_is_2pow(system_info.dwPageSize));
	/* system_info.dwPageSize is only 32-bit. Casting to ulint is required
	on 64-bit Windows. */
	size = *n = ut_2pow_round(*n + (system_info.dwPageSize - 1),
				  (ulint) system_info.dwPageSize);
	ptr = VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE,
			   PAGE_READWRITE);
	if (!ptr) {
		fprintf(stderr, "InnoDB: VirtualAlloc(%lu bytes) failed;"
			" Windows error %lu\n",
			(ulong) size, (ulong) GetLastError());
	} else {
		os_fast_mutex_lock(&ut_list_mutex);
		ut_total_allocated_memory += size;
		os_fast_mutex_unlock(&ut_list_mutex);
		UNIV_MEM_ALLOC(ptr, size);
	}
#elif !defined OS_MAP_ANON
	size = *n;
	ptr = ut_malloc_low(size, TRUE, FALSE);
#else
# ifdef HAVE_GETPAGESIZE
	size = getpagesize();
# else
	size = UNIV_PAGE_SIZE;
# endif
	/* Align block size to system page size */
	ut_ad(ut_is_2pow(size));
	size = *n = ut_2pow_round(*n + (size - 1), size);
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | OS_MAP_ANON, -1, 0);
	if (UNIV_UNLIKELY(ptr == (void*) -1)) {
		fprintf(stderr, "InnoDB: mmap(%lu bytes) failed;"
			" errno %lu\n",
			(ulong) size, (ulong) errno);
		ptr = NULL;
	} else {
		os_fast_mutex_lock(&ut_list_mutex);
		ut_total_allocated_memory += size;
		os_fast_mutex_unlock(&ut_list_mutex);
		UNIV_MEM_ALLOC(ptr, size);
	}
#endif
	return(ptr);
}
Example #12
static void
look_rset(int sdl, hwloc_obj_type_t type, struct hwloc_topology *topology, int level)
{
  rsethandle_t rset, rad;
  int i,maxcpus,j;
  int nbnodes;
  struct hwloc_obj *obj;

  if ((topology->flags & HWLOC_TOPOLOGY_FLAG_WHOLE_SYSTEM))
    rset = rs_alloc(RS_ALL);
  else
    rset = rs_alloc(RS_PARTITION);
  rad = rs_alloc(RS_EMPTY);
  nbnodes = rs_numrads(rset, sdl, 0);
  if (nbnodes == -1) {
    perror("rs_numrads");
    return;
  }

  for (i = 0; i < nbnodes; i++) {
    if (rs_getrad(rset, rad, sdl, i, 0)) {
      fprintf(stderr,"rs_getrad(%d) failed: %s\n", i, strerror(errno));
      continue;
    }
    if (!rs_getinfo(rad, R_NUMPROCS, 0))
      continue;

    /* It seems logical processors are numbered from 1 here, while the
     * bindprocessor functions numbers them from 0... */
    obj = hwloc_alloc_setup_object(type, i - (type == HWLOC_OBJ_PU));
    obj->cpuset = hwloc_bitmap_alloc();
    obj->os_level = sdl;
    maxcpus = rs_getinfo(rad, R_MAXPROCS, 0);
    for (j = 0; j < maxcpus; j++) {
      if (rs_op(RS_TESTRESOURCE, rad, NULL, R_PROCS, j))
	hwloc_bitmap_set(obj->cpuset, j);
    }
    switch(type) {
      case HWLOC_OBJ_NODE:
	obj->nodeset = hwloc_bitmap_alloc();
	hwloc_bitmap_set(obj->nodeset, i);
	obj->memory.local_memory = 0; /* TODO: odd, rs_getinfo(rad, R_MEMSIZE, 0) << 10 returns the total memory ... */
	obj->memory.page_types_len = 2;
	obj->memory.page_types = malloc(2*sizeof(*obj->memory.page_types));
	memset(obj->memory.page_types, 0, 2*sizeof(*obj->memory.page_types));
	obj->memory.page_types[0].size = getpagesize();
#ifdef HAVE__SC_LARGE_PAGESIZE
	obj->memory.page_types[1].size = sysconf(_SC_LARGE_PAGESIZE);
#endif
	/* TODO: obj->memory.page_types[1].count = rs_getinfo(rset, R_LGPGFREE, 0) / hugepagesize */
	break;
      case HWLOC_OBJ_CACHE:
	obj->attr->cache.size = _system_configuration.L2_cache_size;
	obj->attr->cache.associativity = _system_configuration.L2_cache_asc;
	obj->attr->cache.linesize = 0; /* TODO: ? */
	obj->attr->cache.depth = 2;
	break;
      case HWLOC_OBJ_GROUP:
	obj->attr->group.depth = level;
	break;
      case HWLOC_OBJ_CORE:
      {
	hwloc_obj_t obj2 = hwloc_alloc_setup_object(HWLOC_OBJ_CACHE, i);
	obj2->cpuset = hwloc_bitmap_dup(obj->cpuset);
	obj2->attr->cache.size = _system_configuration.dcache_size;
	obj2->attr->cache.associativity = _system_configuration.dcache_asc;
	obj2->attr->cache.linesize = _system_configuration.dcache_line;
	obj2->attr->cache.depth = 1;
	hwloc_debug("Adding an L1 cache for core %d\n", i);
	hwloc_insert_object_by_cpuset(topology, obj2);
	break;
      }
      default:
	break;
    }
    hwloc_debug_2args_bitmap("%s %d has cpuset %s\n",
	       hwloc_obj_type_string(type),
	       i, obj->cpuset);
    hwloc_insert_object_by_cpuset(topology, obj);
  }

  rs_free(rset);
  rs_free(rad);
}
Example #13
static void
swaplist(int lflag, int sflag, int hflag)
{
	size_t mibsize, size;
	struct xswdev xsw;
	int hlen, mib[16], n, pagesize;
	long blocksize;
	long long total = 0;
	long long used = 0;
	long long tmp_total;
	long long tmp_used;
	char buf[32];
	
	pagesize = getpagesize();
	switch(hflag) {
	case 'G':
		blocksize = 1024 * 1024 * 1024;
		strlcpy(buf, "1GB-blocks", sizeof(buf));
		hlen = 10;
		break;
	case 'H':
		blocksize = -1;
		strlcpy(buf, "Bytes", sizeof(buf));
		hlen = 10;
		break;
	case 'K':
		blocksize = 1024;
		strlcpy(buf, "1kB-blocks", sizeof(buf));
		hlen = 10;
		break;
	case 'M':
		blocksize = 1024 * 1024;
		strlcpy(buf, "1MB-blocks", sizeof(buf));
		hlen = 10;
		break;
	default:
		getbsize(&hlen, &blocksize);
		snprintf(buf, sizeof(buf), "%ld-blocks", blocksize);
		break;
	}
	
	mibsize = nitems(mib);
	if (sysctlnametomib("vm.swap_info", mib, &mibsize) == -1)
		err(1, "sysctlnametomib()");
	
	if (lflag) {
		printf("%-13s %*s %*s\n",
		    "Device:", 
		    hlen, buf,
		    hlen, "Used:");
	}
	
	for (n = 0; ; ++n) {
		mib[mibsize] = n;
		size = sizeof xsw;
		if (sysctl(mib, mibsize + 1, &xsw, &size, NULL, 0) == -1)
			break;
		if (xsw.xsw_version != XSWDEV_VERSION)
			errx(1, "xswdev version mismatch");
		
		tmp_total = (long long)xsw.xsw_nblks * pagesize;
		tmp_used  = (long long)xsw.xsw_used * pagesize;
		total += tmp_total;
		used  += tmp_used;
		if (lflag) {
			sizetobuf(buf, sizeof(buf), hflag, tmp_total, hlen,
			    blocksize);
			printf("/dev/%-8s %s ", devname(xsw.xsw_dev, S_IFCHR),
			    buf);
			sizetobuf(buf, sizeof(buf), hflag, tmp_used, hlen,
			    blocksize);
			printf("%s\n", buf);
		}
	}
	if (errno != ENOENT)
		err(1, "sysctl()");
	
	if (sflag) {
		sizetobuf(buf, sizeof(buf), hflag, total, hlen, blocksize);
		printf("Total:        %s ", buf);
		sizetobuf(buf, sizeof(buf), hflag, used, hlen, blocksize);
		printf("%s\n", buf);
	}
}
Example #14
/* Build geli(8) arguments from mntops */
static char *
swap_on_geli_args(const char *mntops)
{
	const char *aalgo, *ealgo, *keylen_str, *sectorsize_str;
	const char *aflag, *eflag, *lflag, *sflag;
	char *p, *args, *token, *string, *ops;
	int argsize, pagesize;
	size_t pagesize_len;
	u_long ul;

	/* Use built-in defaults for geli(8). */
	aalgo = ealgo = keylen_str = "";
	aflag = eflag = lflag = "";

	/* We will always specify sectorsize. */
	sflag = " -s ";
	sectorsize_str = NULL;

	if (mntops != NULL) {
		string = ops = strdup(mntops);

		while ((token = strsep(&string, ",")) != NULL) {
			if ((p = strstr(token, "aalgo=")) == token) {
				aalgo = p + sizeof("aalgo=") - 1;
				aflag = " -a ";
			} else if ((p = strstr(token, "ealgo=")) == token) {
				ealgo = p + sizeof("ealgo=") - 1;
				eflag = " -e ";
			} else if ((p = strstr(token, "keylen=")) == token) {
				keylen_str = p + sizeof("keylen=") - 1;
				errno = 0;
				ul = strtoul(keylen_str, &p, 10);
				if (errno == 0) {
					if (*p != '\0' || ul > INT_MAX)
						errno = EINVAL;
				}
				if (errno) {
					warn("Invalid keylen: %s", keylen_str);
					free(ops);
					return (NULL);
				}
				lflag = " -l ";
			} else if ((p = strstr(token, "sectorsize=")) == token) {
				sectorsize_str = p + sizeof("sectorsize=") - 1;
				errno = 0;
				ul = strtoul(sectorsize_str, &p, 10);
				if (errno == 0) {
					if (*p != '\0' || ul > INT_MAX)
						errno = EINVAL;
				}
				if (errno) {
					warn("Invalid sectorsize: %s",
					    sectorsize_str);
					free(ops);
					return (NULL);
				}
			} else if (strcmp(token, "sw") != 0) {
				warnx("Invalid option: %s", token);
				free(ops);
				return (NULL);
			}
		}
	} else
		ops = NULL;

	/*
	 * If we do not have a sector size at this point, fill in
	 * pagesize as sector size.
	 */
	if (sectorsize_str == NULL) {
		/* Use pagesize as default sectorsize. */
		pagesize = getpagesize();
		pagesize_len = snprintf(NULL, 0, "%d", pagesize) + 1;
		p = alloca(pagesize_len);
		snprintf(p, pagesize_len, "%d", pagesize);
		sectorsize_str = p;
	}

	argsize = asprintf(&args, "%s%s%s%s%s%s%s%s -d",
	    aflag, aalgo, eflag, ealgo, lflag, keylen_str,
	    sflag, sectorsize_str);

	free(ops);
	return (args);
}
Example #15
/*
 * This creates the memory mappings in the secondary process to match that of
 * the server process. It goes through each memory segment in the DPDK runtime
 * configuration, mapping them in order to form a contiguous block in the
 * virtual memory space
 */
int
rte_xen_dom0_memory_attach(void)
{
	const struct rte_mem_config *mcfg;
	unsigned s = 0; /* s used to track the segment number */
	int xen_fd = -1;
	int ret = -1;
	void *vir_addr;
	char name[DOM0_NAME_MAX] = {0};
	int page_size = getpagesize();

	mcfg = rte_eal_get_configuration()->mem_config;

	/* Check FD and open once */
	if (xen_fd < 0) {
		xen_fd = open(DOM0_MM_DEV, O_RDWR);
		if (xen_fd < 0) {
			RTE_LOG(ERR, EAL, "Can not open %s\n",DOM0_MM_DEV);
			goto error;
		}
	}

	/* construct memory management name for Dom0 */
	snprintf(name, DOM0_NAME_MAX, "%s-%s",
		internal_config.hugefile_prefix, DEFAUL_DOM0_NAME);
	/* attach to memory segments of primary process */
	ret = ioctl(xen_fd, RTE_DOM0_IOCTL_ATTACH_TO_MEMSEG, name);
	if (ret) {
		RTE_LOG(ERR, EAL, "Failed to attach memory segments.\n");
		goto error;
	}

	/* map all segments into memory to make sure we get the addrs */
	for (s = 0; s < RTE_MAX_MEMSEG; ++s) {

		/*
		 * the first memory segment with len==0 is the one that
		 * follows the last valid segment.
		 */
		if (mcfg->memseg[s].len == 0)
			break;

		vir_addr = mmap(mcfg->memseg[s].addr, mcfg->memseg[s].len,
				PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, xen_fd,
				s * page_size);
		if (vir_addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "Could not mmap %llu bytes "
				"in %s to requested address [%p]\n",
				(unsigned long long)mcfg->memseg[s].len, DOM0_MM_DEV,
				mcfg->memseg[s].addr);
			goto error;
		}
	}
	return 0;

error:
	if (xen_fd >= 0) {
		close(xen_fd);
		xen_fd = -1;
	}
	return -1;
}
Example #16
Ipp32u vm_mmap_get_alloc_granularity(void)
{
    return 16 * getpagesize();

} /* Ipp32u vm_mmap_get_alloc_granularity(void) */
Example #17
int main(int argc, char **argv) {

  FILE *input;
  signed char op;

  void (*preprocess_fun)(void) = NULL;
  void (*postprocess_fun)(void) = print_summary;

  progname = "hmine";
  inputfile = "stdin";
  inputline = 0;

  /* set up internationalization */
  if( !setlocale(LC_ALL, "") ) {
    errormsg(E_WARNING,
	    "could not set locale, internationalization disabled\n");
  } else {
    if( u_options & (1<<U_OPTION_VERBOSE) ) {
      errormsg(E_WARNING,
	      "international locales not supported\n");
    }
  }

#if defined(HAVE_GETPAGESIZE)
  system_pagesize = getpagesize();
#endif
  if( system_pagesize == -1 ) { system_pagesize = BUFSIZ; }

  init_signal_handling();

  /* parse the options */
  while( (op = getopt(argc, argv, 
		      "aDvV")) > -1 ) {
    hset_option(op, optarg);
  }


  /* set up callbacks */


  if( preprocess_fun ) { (*preprocess_fun)(); }


  init_header_handling();

  /* now process only the first file on the command line,
     or if none provided read stdin */
  if( (optind > -1) && *(argv + optind) ) {
    /* if it's a filename, process it */
    input = fopen(argv[optind], "rb");
    if( input ) {
      inputfile = argv[optind];

      u_options |= (1<<U_OPTION_STDIN);

      if( (u_options & (1<<U_OPTION_VERBOSE)) && 
	  !(u_options & (1<<U_OPTION_CLASSIFY))) {
	fprintf(stdout, "processing file %s\n", argv[optind]);
      }

      /* set some initial options */
      hprocess_file(input, &head);

      fclose(input);

    } else { /* unrecognized file name */

      errormsg(E_ERROR, "couldn't open %s\n", argv[optind]);
      usage(argv);
      return 0;

    }

  }
  /* in case no files were specified, get input from stdin */
  if( !(u_options & (1<<U_OPTION_STDIN)) &&
      (input = fdopen(fileno(stdin), "rb")) ) {

    if( (u_options & (1<<U_OPTION_VERBOSE)) && 
	!(u_options & (1<<U_OPTION_CLASSIFY)) ) {
      fprintf(stdout, "taking input from stdin\n");
    }

    hprocess_file(input, &head);

    /* must close before freeing in_iobuf, in case setvbuf was called */
    fclose(input); 

  }
  
  if( postprocess_fun ) { (*postprocess_fun)(); }

  cleanup_header_handling();

  cleanup_signal_handling();

  return exit_code;
}
Example #18
template <typename PointT> int
pcl::PCDWriter::writeBinaryCompressed (const std::string &file_name, 
                                       const pcl::PointCloud<PointT> &cloud)
{
  if (cloud.points.empty ())
  {
    throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Input point cloud has no data!");
    return (-1);
  }
  int data_idx = 0;
  std::ostringstream oss;
  oss << generateHeader<PointT> (cloud) << "DATA binary_compressed\n";
  oss.flush ();
  data_idx = static_cast<int> (oss.tellp ());

#if _WIN32
  HANDLE h_native_file = CreateFileA (file_name.c_str (), GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
  if (h_native_file == INVALID_HANDLE_VALUE)
  {
    throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during CreateFile!");
    return (-1);
  }
#else
  int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
  if (fd < 0)
  {
    throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during open!");
    return (-1);
  }
#endif

  // Mandatory lock file
  boost::interprocess::file_lock file_lock;
  setLockingPermissions (file_name, file_lock);

  std::vector<pcl::PCLPointField> fields;
  size_t fsize = 0;
  size_t data_size = 0;
  size_t nri = 0;
  pcl::getFields (cloud, fields);
  std::vector<int> fields_sizes (fields.size ());
  // Compute the total size of the fields
  for (size_t i = 0; i < fields.size (); ++i)
  {
    if (fields[i].name == "_")
      continue;
    
    fields_sizes[nri] = fields[i].count * pcl::getFieldSize (fields[i].datatype);
    fsize += fields_sizes[nri];
    fields[nri] = fields[i];
    ++nri;
  }
  fields_sizes.resize (nri);
  fields.resize (nri);
 
  // Compute the size of data
  data_size = cloud.points.size () * fsize;

  //////////////////////////////////////////////////////////////////////
  // Empty array holding only the valid data
  // data_size = nr_points * point_size 
  //           = nr_points * (sizeof_field_1 + sizeof_field_2 + ... sizeof_field_n)
  //           = sizeof_field_1 * nr_points + sizeof_field_2 * nr_points + ... sizeof_field_n * nr_points
  char *only_valid_data = static_cast<char*> (malloc (data_size));

  // Convert the XYZRGBXYZRGB structure to XXYYZZRGBRGB to aid compression. For
  // this, we need a vector of fields.size () (4 in this case), which points to
  // each individual plane:
  //   pters[0] = &only_valid_data[offset_of_plane_x];
  //   pters[1] = &only_valid_data[offset_of_plane_y];
  //   pters[2] = &only_valid_data[offset_of_plane_z];
  //   pters[3] = &only_valid_data[offset_of_plane_RGB];
  //
  std::vector<char*> pters (fields.size ());
  int toff = 0;
  for (size_t i = 0; i < pters.size (); ++i)
  {
    pters[i] = &only_valid_data[toff];
    toff += fields_sizes[i] * static_cast<int> (cloud.points.size ());
  }
  
  // Go over all the points, and copy the data in the appropriate places
  for (size_t i = 0; i < cloud.points.size (); ++i)
  {
    for (size_t j = 0; j < fields.size (); ++j)
    {
      memcpy (pters[j], reinterpret_cast<const char*> (&cloud.points[i]) + fields[j].offset, fields_sizes[j]);
      // Increment the pointer
      pters[j] += fields_sizes[j];
    }
  }

  char* temp_buf = static_cast<char*> (malloc (static_cast<size_t> (static_cast<float> (data_size) * 1.5f + 8.0f)));
  // Compress the valid data
  unsigned int compressed_size = pcl::lzfCompress (only_valid_data, 
                                                   static_cast<uint32_t> (data_size), 
                                                   &temp_buf[8], 
                                                   static_cast<uint32_t> (static_cast<float>(data_size) * 1.5f));
  unsigned int compressed_final_size = 0;
  // Was the compression successful?
  if (compressed_size)
  {
    char *header = &temp_buf[0];
    memcpy (&header[0], &compressed_size, sizeof (unsigned int));
    memcpy (&header[4], &data_size, sizeof (unsigned int));
    data_size = compressed_size + 8;
    compressed_final_size = static_cast<uint32_t> (data_size) + data_idx;
  }
  else
  {
#if !_WIN32
    pcl_close (fd);
#endif
    resetLockingPermissions (file_name, file_lock);
    throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during compression!");
    return (-1);
  }

#if !_WIN32
  // Stretch the file size to the size of the data
  off_t result = pcl_lseek (fd, getpagesize () + data_size - 1, SEEK_SET);
  if (result < 0)
  {
    pcl_close (fd);
    resetLockingPermissions (file_name, file_lock);
    PCL_ERROR ("[pcl::PCDWriter::writeBinary] lseek errno: %d strerror: %s\n", errno, strerror (errno));
    
    throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during lseek ()!");
    return (-1);
  }
  // Write a bogus entry so that the new file size comes in effect
  result = static_cast<int> (::write (fd, "", 1));
  if (result != 1)
  {
    pcl_close (fd);
    resetLockingPermissions (file_name, file_lock);
    throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during write ()!");
    return (-1);
  }
#endif

  // Prepare the map
#if _WIN32
  HANDLE fm = CreateFileMapping (h_native_file, NULL, PAGE_READWRITE, 0, compressed_final_size, NULL);
  char *map = static_cast<char*>(MapViewOfFile (fm, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, compressed_final_size));
  CloseHandle (fm);

#else
  char *map = static_cast<char*> (mmap (0, compressed_final_size, PROT_WRITE, MAP_SHARED, fd, 0));
  if (map == reinterpret_cast<char*> (-1)) //MAP_FAILED)
  {
    pcl_close (fd);
    resetLockingPermissions (file_name, file_lock);
    throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during mmap ()!");
    return (-1);
  }
#endif

  // Copy the header
  memcpy (&map[0], oss.str ().c_str (), data_idx);
  // Copy the compressed data
  memcpy (&map[data_idx], temp_buf, data_size);

#if !_WIN32
  // If the user set the synchronization flag on, call msync
  if (map_synchronization_)
    msync (map, compressed_final_size, MS_SYNC);
#endif

  // Unmap the pages of memory
#if _WIN32
    UnmapViewOfFile (map);
#else
  if (munmap (map, (compressed_final_size)) == -1)
  {
    pcl_close (fd);
    resetLockingPermissions (file_name, file_lock);
    throw pcl::IOException ("[pcl::PCDWriter::writeBinaryCompressed] Error during munmap ()!");
    return (-1);
  }
#endif

  // Close file
#if _WIN32
  CloseHandle (h_native_file);
#else
  pcl_close (fd);
#endif
  resetLockingPermissions (file_name, file_lock);

  free (only_valid_data);
  free (temp_buf);
  return (0);
}
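On the POSIX side above, the file is written through a memory mapping: stretch the file with lseek() plus a single one-byte write, mmap it, memcpy the data in, then msync and munmap. A minimal sketch of just that pattern, writing a short buffer to a placeholder path (/tmp/mmap_write_demo):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello via mmap\n";
	size_t size = sizeof(msg) - 1;

	int fd = open("/tmp/mmap_write_demo", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0) { perror("open"); return 1; }

	/* stretch the file to its final size, then write one byte so the size takes effect */
	if (lseek(fd, size - 1, SEEK_SET) < 0 || write(fd, "", 1) != 1) {
		perror("stretch");
		close(fd);
		return 1;
	}

	char *map = mmap(NULL, size, PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) { perror("mmap"); close(fd); return 1; }

	memcpy(map, msg, size);		/* write the payload through the mapping */
	msync(map, size, MS_SYNC);	/* flush to the file */
	munmap(map, size);
	close(fd);
	return 0;
}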
Example #19
void
pebs_init(int nRecords, uint64_t *counter, uint64_t *reset_val ){
	// 1. Set up the precise event buffering utilities.
	// 	a.  Place values in the
	// 		i.   precise event buffer base,
	//		ii.  precise event index
	//		iii. precise event absolute maximum,
	//		iv.  precise event interrupt threshold,
	//		v.   and precise event counter reset fields
	//	    of the DS buffer management area.
	//
	// 2.  Enable PEBS.  Set the Enable PEBS on PMC0 flag 
	// 	(bit 0) in IA32_PEBS_ENABLE_MSR.
	//
	// 3.  Set up the IA32_PMC0 performance counter and 
	// 	IA32_PERFEVTSEL0 for an event listed in Table 
	// 	18-10.
	
	// IA32_DS_AREA points to 0x58 bytes of memory.  
	// (11 entries * 8 bytes each = 88 bytes.)
	
	// Each PEBS record is 0xB0 byes long.
	int pagesize = getpagesize();
	
	init_stomp();	
	
	// I think we can only have one mapping per process, so put the 
	// pds_area on the first page and the pebs records on the second
	// and successive pages.

	pds_area = mmap(
			NULL,						// let kernel choose address
			pagesize + 
				(pagesize*(((sizeof(struct pebs)*nRecords)/pagesize)+1)),	// keep ds and records separate.
			PROT_READ | PROT_WRITE, 
			MAP_ANONYMOUS | MAP_LOCKED | MAP_PRIVATE,
			-1,						// dummy file descriptor
			0);						// offset (ignored).
	if(pds_area == (void*)-1){
		perror("mmap for pds_area failed.");
		assert(0);
	}

	struct pebs *ppebs = (struct pebs*) ( (uint64_t)pds_area + pagesize );

	pds_area->bts_buffer_base 		= 0;
	pds_area->bts_index			= 0;
	pds_area->bts_absolute_maximum		= 0;
	pds_area->bts_interrupt_threshold	= 0;
	pds_area->pebs_buffer_base		= ppebs;
	pds_area->pebs_index			= ppebs;
	pds_area->pebs_absolute_maximum		= ppebs + (nRecords-1) * sizeof(struct pebs);
	pds_area->pebs_interrupt_threshold	= ppebs + (nRecords+1) * sizeof(struct pebs);
	pds_area->pebs_counter0_reset		= reset_val[0];
	pds_area->pebs_counter1_reset		= reset_val[1];
	pds_area->pebs_counter2_reset		= reset_val[2];
	pds_area->pebs_counter3_reset		= reset_val[3];
	pds_area->reserved			= 0;

	write_msr(0, PERF_GLOBAL_CTRL, 0);			// known good state.
	write_msr(0, IA32_DS_AREA, (uint64_t)pds_area);
	write_msr(0, IA32_PEBS_ENABLE, 0xf | ((uint64_t)0xf << 32) );	// Figure 18-14.

	write_msr(0, PMC0, reset_val[0]);
	write_msr(1, PMC1, reset_val[1]);
	write_msr(2, PMC2, reset_val[2]);
	write_msr(3, PMC3, reset_val[3]);

	write_msr(0, IA32_PERFEVTSEL0, 0x410000 | counter[0]);
	write_msr(0, IA32_PERFEVTSEL1, 0x410000 | counter[1]);
	write_msr(0, IA32_PERFEVTSEL2, 0x410000 | counter[2]);
	write_msr(0, IA32_PERFEVTSEL3, 0x410000 | counter[3]);

	write_msr(0, PERF_GLOBAL_CTRL, 0xf);

	stomp();

	write_msr(0, PERF_GLOBAL_CTRL, 0x0);
}
Example #20
template <typename PointT> int
pcl::PCDWriter::writeBinary (const std::string &file_name, 
                             const pcl::PointCloud<PointT> &cloud, 
                             const std::vector<int> &indices)
{
  if (cloud.points.empty () || indices.empty ())
  {
    throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Input point cloud has no data or empty indices given!");
    return (-1);
  }
  int data_idx = 0;
  std::ostringstream oss;
  oss << generateHeader<PointT> (cloud, static_cast<int> (indices.size ())) << "DATA binary\n";
  oss.flush ();
  data_idx = static_cast<int> (oss.tellp ());

#if _WIN32
  HANDLE h_native_file = CreateFileA (file_name.c_str (), GENERIC_READ | GENERIC_WRITE, 0, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
  if (h_native_file == INVALID_HANDLE_VALUE)
  {
    throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during CreateFile!");
    return (-1);
  }
#else
  int fd = pcl_open (file_name.c_str (), O_RDWR | O_CREAT | O_TRUNC, static_cast<mode_t> (0600));
  if (fd < 0)
  {
    throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during open!");
    return (-1);
  }
#endif
  // Mandatory lock file
  boost::interprocess::file_lock file_lock;
  setLockingPermissions (file_name, file_lock);

  std::vector<pcl::PCLPointField> fields;
  std::vector<int> fields_sizes;
  size_t fsize = 0;
  size_t data_size = 0;
  size_t nri = 0;
  pcl::getFields (cloud, fields);
  // Compute the total size of the fields
  for (size_t i = 0; i < fields.size (); ++i)
  {
    if (fields[i].name == "_")
      continue;
    
    int fs = fields[i].count * getFieldSize (fields[i].datatype);
    fsize += fs;
    fields_sizes.push_back (fs);
    fields[nri++] = fields[i];
  }
  fields.resize (nri);
  
  data_size = indices.size () * fsize;

  // Prepare the map
#if _WIN32
  HANDLE fm = CreateFileMapping (h_native_file, NULL, PAGE_READWRITE, 0, data_idx + data_size, NULL);
  char *map = static_cast<char*>(MapViewOfFile (fm, FILE_MAP_READ | FILE_MAP_WRITE, 0, 0, data_idx + data_size));
  CloseHandle (fm);

#else
  // Stretch the file size to the size of the data
  off_t result = pcl_lseek (fd, getpagesize () + data_size - 1, SEEK_SET);
  if (result < 0)
  {
    pcl_close (fd);
    resetLockingPermissions (file_name, file_lock);
    PCL_ERROR ("[pcl::PCDWriter::writeBinary] lseek errno: %d strerror: %s\n", errno, strerror (errno));
    
    throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during lseek ()!");
    return (-1);
  }
  // Write a bogus entry so that the new file size comes in effect
  result = static_cast<int> (::write (fd, "", 1));
  if (result != 1)
  {
    pcl_close (fd);
    resetLockingPermissions (file_name, file_lock);
    throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during write ()!");
    return (-1);
  }

  char *map = static_cast<char*> (mmap (0, data_idx + data_size, PROT_WRITE, MAP_SHARED, fd, 0));
  if (map == reinterpret_cast<char*> (-1)) //MAP_FAILED)
  {
    pcl_close (fd);
    resetLockingPermissions (file_name, file_lock);
    throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during mmap ()!");
    return (-1);
  }
#endif

  // Copy the header
  memcpy (&map[0], oss.str ().c_str (), data_idx);

  char *out = &map[0] + data_idx;
  // Copy the data
  for (size_t i = 0; i < indices.size (); ++i)
  {
    int nrj = 0;
    for (size_t j = 0; j < fields.size (); ++j)
    {
      memcpy (out, reinterpret_cast<const char*> (&cloud.points[indices[i]]) + fields[j].offset, fields_sizes[nrj]);
      out += fields_sizes[nrj++];
    }
  }

#if !_WIN32
  // If the user set the synchronization flag on, call msync
  if (map_synchronization_)
    msync (map, data_idx + data_size, MS_SYNC);
#endif

  // Unmap the pages of memory
#if _WIN32
    UnmapViewOfFile (map);
#else
  if (munmap (map, (data_idx + data_size)) == -1)
  {
    pcl_close (fd);
    resetLockingPermissions (file_name, file_lock);
    throw pcl::IOException ("[pcl::PCDWriter::writeBinary] Error during munmap ()!");
    return (-1);
  }
#endif
  // Close file
#if _WIN32
  CloseHandle(h_native_file);
#else
  pcl_close (fd);
#endif
  
  resetLockingPermissions (file_name, file_lock);
  return (0);
}
Example #21
/*
 * mem_pagesize() - returns the page size of the system
 */
size_t mem_pagesize()
{
    return (size_t)getpagesize();
}
Example #22
/**
 * @brief Gets the memory page size.
 *
 * The page size is used when calling the \c SYS_MemoryMap() and
 * \c SYS_PublicMemoryMap() functions.
 *
 * @return Number of bytes per page.
 */
INTERNAL int SYS_GetPageSize(void)
{
	return getpagesize();
}
Example #23
int main (int argc, char *argv[]) {
        int i, n, fd, c;
        unsigned long chunk_size[2];
        int rank, noProcessors, done;
        int error;
        off_t offset;
        char **chunk_buf;
        char *read_buf;
        struct stat stat_buf;
        ssize_t ret;
        char *filename = "/mnt/lustre/write_disjoint";
        int numloops = 1000;
        int random = 0;

        error = MPI_Init(&argc, &argv);
        if (error != MPI_SUCCESS)
                rprintf(-1, -1, "MPI_Init failed: %d\n", error);
        /* Parse command line options */
        while ((c = getopt(argc, argv, "f:n:")) != EOF) {
                switch (c) {
                case 'f':
                        filename = optarg;
                        break;
                case 'n':
                        numloops = strtoul(optarg, NULL, 0);
                        break;
                }
        }

        MPI_Comm_size(MPI_COMM_WORLD, &noProcessors);
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);

        chunk_buf = malloc(noProcessors * sizeof(chunk_buf[0]));
        for (i=0; i < noProcessors; i++) {
                chunk_buf[i] = malloc(CHUNK_MAX_SIZE);
                memset(chunk_buf[i], 'A'+ i, CHUNK_MAX_SIZE);
        }
        read_buf = malloc(noProcessors * CHUNK_MAX_SIZE);

        if (rank == 0) {
                fd = open(filename, O_WRONLY|O_CREAT|O_TRUNC, 0666);
                if (fd < 0)
                        rprintf(rank, -1, "open() returned %s\n",
                                strerror(errno));
        }
        MPI_Barrier(MPI_COMM_WORLD);

        fd = open(filename, O_RDWR);
        if (fd < 0)
                rprintf(rank, -1, "open() returned %s\n", strerror(errno));

        for (n = 0; n < numloops; n++) {
                /* reset the environment */
                if (rank == 0) {
                        ret = truncate(filename, 0);
                        if (ret != 0)
                                rprintf(rank, n, "truncate() returned %s\n",
                                        strerror(errno) );
                        random = rand();
                }
                MPI_Bcast(&random, 1, MPI_INT, 0, MPI_COMM_WORLD);
                CHUNK_SIZE(n) = random % CHUNK_MAX_SIZE;

                if (n % 1000 == 0 && rank == 0)
                        printf("loop %d: chunk_size %lu\n", n, CHUNK_SIZE(n));

                if (stat(filename, &stat_buf) < 0)
                        rprintf(rank, n, "error stating %s: %s\n",
                                filename, strerror(errno));

                if (stat_buf.st_size != 0)
                        rprintf(rank, n, "filesize = %lu. "
                                "Should be zero after truncate\n",
                                stat_buf.st_size);

                MPI_Barrier(MPI_COMM_WORLD);

                /* Do the race */
                offset = rank * CHUNK_SIZE(n);
                lseek(fd, offset, SEEK_SET);

                done = 0;
                do {
                        ret = write(fd, chunk_buf[rank] + done,
                                    CHUNK_SIZE(n) - done);
                        if (ret < 0 && errno != EINTR)
                                rprintf(rank, n, "write() returned %s\n",
                                        strerror(errno));
                        if (ret > 0)
                                done += ret;
                } while (done != CHUNK_SIZE(n));

                MPI_Barrier(MPI_COMM_WORLD);

                /* Check the result */
                if (stat(filename, &stat_buf) < 0)
                        rprintf(rank, n, "error stating %s: %s\n",
                                filename, strerror(errno));

                if (stat_buf.st_size != CHUNK_SIZE(n) * noProcessors) {
                        if (n > 0)
                                printf("loop %d: chunk_size %lu, "
                                       "file size was %lu\n",
                                       n - 1, CHUNK_SIZE(n - 1),
                                       CHUNK_SIZE(n - 1) *noProcessors);
                        rprintf(rank, n, "invalid file size %lu"
                                " instead of %lu = %lu * %u\n",
                                (unsigned long)stat_buf.st_size,
                                CHUNK_SIZE(n) * noProcessors,
                                CHUNK_SIZE(n), noProcessors);
                }

                if (rank == 0) {
                        if (lseek(fd, 0, SEEK_SET) < 0)
                                rprintf(rank, n, "error seeking to 0: %s\n",
                                        strerror(errno));

                        done = 0;
                        do {
                                ret = read(fd, read_buf + done,
                                           CHUNK_SIZE(n) * noProcessors - done);
                                if (ret < 0)
                                        rprintf(rank, n, "read returned %s\n",
                                                strerror(errno));

                                done += ret;
                        } while (done != CHUNK_SIZE(n) * noProcessors);

                        for (i = 0; i < noProcessors; i++) {
                                char command[4096];
                                int j;
				
                                if (!memcmp(read_buf + (i * CHUNK_SIZE(n)),
                                            chunk_buf[i], CHUNK_SIZE(n)))
                                        continue;

                                /* print out previous chunk sizes */
                                if (n > 0)
                                        printf("loop %d: chunk_size %lu\n",
                                               n - 1, CHUNK_SIZE(n - 1));

                                printf("loop %d: chunk %d corrupted "
                                       "with chunk_size %lu, page_size %d\n",
                                       n, i, CHUNK_SIZE(n), getpagesize());
                                printf("ranks:\tpage boundary\tchunk boundary\t"
                                       "page boundary\n");
                                for (j = 1 ; j < noProcessors; j++) {
                                        int b = j * CHUNK_SIZE(n);
                                        printf("%c -> %c:\t%d\t%d\t%d\n",
                                               'A' + j - 1, 'A' + j,
                                               b & ~(getpagesize()-1), b,
                                               (b + getpagesize()) &
                                               ~(getpagesize()-1));
                                }

                                sprintf(command, "od -Ad -a %s", filename);
                                ret = system(command);
                                rprintf(0, n, "data check error - exiting\n");
                        }
                }
                MPI_Barrier(MPI_COMM_WORLD);
        }

        printf("Finished after %d loops\n", n);
        MPI_Finalize();
        return 0;
}
Example #24
uv_err_t uv_resident_set_memory(size_t* rss) {
  FILE* f;
  int itmp;
  char ctmp;
  unsigned int utmp;
  size_t page_size = getpagesize();
  char *cbuf;
  int foundExeEnd;

  f = fopen("/proc/self/stat", "r");
  if (!f) return uv__new_sys_error(errno);

  /* PID */
  if (fscanf(f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* Exec file */
  cbuf = buf;
  foundExeEnd = 0;
  if (fscanf (f, "%c", cbuf++) == 0) goto error;
  while (1) {
    if (fscanf(f, "%c", cbuf) == 0) goto error;
    if (*cbuf == ')') {
      foundExeEnd = 1;
    } else if (foundExeEnd && *cbuf == ' ') {
      *cbuf = 0;
      break;
    }

    cbuf++;
  }
  /* State */
  if (fscanf (f, "%c ", &ctmp) == 0) goto error; /* coverity[secure_coding] */
  /* Parent process */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* Process group */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* Session id */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* TTY */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* TTY owner process group */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* Flags */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* Minor faults (no memory page) */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* Minor faults, children */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* Major faults (memory page faults) */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* Major faults, children */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* utime */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* stime */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* utime, children */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* stime, children */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* jiffies remaining in current time slice */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* 'nice' value */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */
  /* jiffies until next timeout */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* jiffies until next SIGALRM */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* start time (jiffies since system boot) */
  if (fscanf (f, "%d ", &itmp) == 0) goto error; /* coverity[secure_coding] */

  /* Virtual memory size */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */

  /* Resident set size */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  *rss = (size_t) utmp * page_size;

  /* rlim */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* Start of text */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* End of text */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */
  /* Start of stack */
  if (fscanf (f, "%u ", &utmp) == 0) goto error; /* coverity[secure_coding] */

  fclose (f);
  return uv_ok_;

error:
  fclose (f);
  return uv__new_sys_error(errno);
}
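For comparison, on Linux the resident set can also be read from /proc/self/statm, whose second field is the RSS in pages (per proc(5)), and then scaled by getpagesize(). A small sketch of that alternative, independent of the function above:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long total_pages = 0, resident_pages = 0;

	FILE *f = fopen("/proc/self/statm", "r");
	if (!f) { perror("fopen"); return 1; }

	/* statm fields: size resident shared text lib data dt (all in pages) */
	if (fscanf(f, "%ld %ld", &total_pages, &resident_pages) != 2) {
		fclose(f);
		fprintf(stderr, "unexpected /proc/self/statm format\n");
		return 1;
	}
	fclose(f);

	printf("rss: %ld bytes\n", resident_pages * getpagesize());
	return 0;
}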
Example #25
/* extern int	getpagesize(); */
size_t
Page_Size(void)
{
	return getpagesize();
}
Example #26
CAMLprim value caml_ba_map_file(value vfd, value vkind, value vlayout,
                                value vshared, value vdim, value vstart)
{
  int fd, flags, major_dim, shared;
  intnat num_dims, i;
  intnat dim[CAML_BA_MAX_NUM_DIMS];
  file_offset startpos, file_size, data_size;
  struct stat st;
  uintnat array_size, page, delta;
  void * addr;

  fd = Int_val(vfd);
  flags = Int_val(vkind) | Int_val(vlayout);
  startpos = File_offset_val(vstart);
  num_dims = Wosize_val(vdim);
  major_dim = flags & CAML_BA_FORTRAN_LAYOUT ? num_dims - 1 : 0;
  /* Extract dimensions from OCaml array */
  if (num_dims < 1 || num_dims > CAML_BA_MAX_NUM_DIMS)
    caml_invalid_argument("Bigarray.mmap: bad number of dimensions");
  for (i = 0; i < num_dims; i++) {
    dim[i] = Long_val(Field(vdim, i));
    if (dim[i] == -1 && i == major_dim) continue;
    if (dim[i] < 0)
      caml_invalid_argument("Bigarray.create: negative dimension");
  }
  /* Determine file size. We avoid lseek here because it is fragile,
     and because some mappable file types do not support it
   */
  caml_enter_blocking_section();
  if (fstat(fd, &st) == -1) {
    caml_leave_blocking_section();
    caml_sys_error(NO_ARG);
  }
  file_size = st.st_size;
  /* Determine array size in bytes (or size of array without the major
     dimension if that dimension wasn't specified) */
  array_size = caml_ba_element_size[flags & CAML_BA_KIND_MASK];
  for (i = 0; i < num_dims; i++)
    if (dim[i] != -1) array_size *= dim[i];
  /* Check if the major dimension is unknown */
  if (dim[major_dim] == -1) {
    /* Determine major dimension from file size */
    if (file_size < startpos) {
      caml_leave_blocking_section();
      caml_failwith("Bigarray.mmap: file position exceeds file size");
    }
    data_size = file_size - startpos;
    dim[major_dim] = (uintnat) (data_size / array_size);
    array_size = dim[major_dim] * array_size;
    if (array_size != data_size) {
      caml_leave_blocking_section();
      caml_failwith("Bigarray.mmap: file size doesn't match array dimensions");
    }
  } else {
    /* Check that file is large enough, and grow it otherwise */
    if (file_size < startpos + array_size) {
      if (caml_grow_file(fd, startpos + array_size) == -1) { /* PR#5543 */
        caml_leave_blocking_section();
        caml_sys_error(NO_ARG);
      }
    }
  }
  /* Determine offset so that the mapping starts at the given file pos */
  page = getpagesize();
  delta = (uintnat) startpos % page;
  /* Do the mmap */
  shared = Bool_val(vshared) ? MAP_SHARED : MAP_PRIVATE;
  if (array_size > 0)
    addr = mmap(NULL, array_size + delta, PROT_READ | PROT_WRITE,
                shared, fd, startpos - delta);
  else
    addr = NULL;                /* PR#5463 - mmap fails on empty region */
  caml_leave_blocking_section();
  if (addr == (void *) MAP_FAILED) caml_sys_error(NO_ARG);
  addr = (void *) ((uintnat) addr + delta);
  /* Build and return the OCaml bigarray */
  return caml_ba_alloc(flags | CAML_BA_MAPPED_FILE, num_dims, addr, dim);
}
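/* mmap() requires a page-aligned file offset, which is why the code above
 * maps from startpos - delta and then adds delta back to the returned
 * pointer.  A minimal standalone sketch of that alignment step, assuming a
 * POSIX system; the protection/flags are arbitrary and the helper name is
 * ours.  A later munmap() needs the aligned base and length + delta. */
#include <sys/mman.h>
#include <unistd.h>

static void *map_at_offset(int fd, off_t offset, size_t length)
{
  size_t page = (size_t) sysconf(_SC_PAGESIZE);
  size_t delta = (size_t) (offset % (off_t) page);  /* distance past the page boundary */
  char *base = mmap(NULL, length + delta, PROT_READ, MAP_PRIVATE,
                    fd, offset - (off_t) delta);    /* aligned offset */

  if (base == MAP_FAILED)
    return MAP_FAILED;
  return base + delta;  /* caller's data starts here */
}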
Exemple #27
0
// MPBalloc allocates MPBSIZE bytes of message-passing buffer (MPB) memory at MPB_ADDR(x,y,core).
// 
// Parameter: MPB                   - Pointer to MPB area (return value, virtual address)
//            x,y,core              - Position of tile (x,y) and core...
// 
void MPBalloc(t_vcharp *MPB, int x, int y, int core, unsigned char isOwnMPB) {
  t_vcharp MappedAddr;
  unsigned int alignedAddr = (isOwnMPB?(MPB_OWN+(MPBSIZE*core)):MPB_ADDR(x,y,core)) & (~(getpagesize()-1));
  unsigned int pageOffset = (isOwnMPB?(MPB_OWN+(MPBSIZE*core)):MPB_ADDR(x,y,core)) - alignedAddr;
  
  if ((x>=NUM_COLS) || (y>=NUM_ROWS) || (core>=NUM_CORES)) {
    printf("MPBalloc: Invalid coordinates (x=%0d, y=%0d, core=%0d)\n", x,y,core);
    *MPB = NULL;
    return;
  }
  
  MappedAddr = (t_vcharp) mmap(NULL, MPBSIZE, PROT_WRITE|PROT_READ, MAP_SHARED, MPBDeviceFD, alignedAddr);
  if (MappedAddr == MAP_FAILED)
  {
          perror("mmap");
          exit(-1);
  }

  *MPB = MappedAddr+pageOffset;
}
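/* The "& ~(getpagesize() - 1)" expression above rounds an address down to
 * its page boundary; it relies on the page size being a power of two.
 * A minimal sketch of the same split into an aligned base plus an in-page
 * offset, assuming POSIX sysconf(); the helper name is ours. */
#include <stdint.h>
#include <unistd.h>

static void split_page_aligned(uintptr_t addr, uintptr_t *base, uintptr_t *off)
{
  uintptr_t page = (uintptr_t) sysconf(_SC_PAGESIZE);

  *base = addr & ~(page - 1);  /* round down to the start of the page */
  *off  = addr - *base;        /* remaining offset inside the page */
}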
Exemple #28
0
int
rte_xen_dom0_memory_init(void)
{
	void *vir_addr, *vma_addr = NULL;
	int err, ret = 0;
	uint32_t i, requested, mem_size, memseg_idx, num_memseg = 0;
	size_t vma_len = 0;
	struct memory_info meminfo;
	struct memseg_info seginfo[RTE_MAX_MEMSEG];
	int flags, page_size = getpagesize();
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	struct rte_memseg *memseg = mcfg->memseg;
	uint64_t total_mem = internal_config.memory;

	memset(seginfo, 0, sizeof(seginfo));
	memset(&meminfo, 0, sizeof(struct memory_info));

	mem_size = get_xen_memory_size();
	requested = (unsigned) (total_mem / 0x100000);
	if (requested > mem_size)
		/* if we didn't satisfy total memory requirements */
		rte_exit(EXIT_FAILURE,"Not enough memory available! Requested: %uMB,"
				" available: %uMB\n", requested, mem_size);
	else if (total_mem != 0)
		mem_size = requested;

	/* Check FD and open once */
	if (xen_fd < 0) {
		xen_fd = open(DOM0_MM_DEV, O_RDWR);
		if (xen_fd < 0) {
			RTE_LOG(ERR, EAL, "Can not open %s\n",DOM0_MM_DEV);
			return -1;
		}
	}

	meminfo.size = mem_size;

	/* construct memory management name for Dom0 */
	snprintf(meminfo.name, DOM0_NAME_MAX, "%s-%s",
		internal_config.hugefile_prefix, DEFAUL_DOM0_NAME);

	/* Notify kernel driver to allocate memory */
	ret = ioctl(xen_fd, RTE_DOM0_IOCTL_PREPARE_MEMSEG, &meminfo);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memory\n");
		err = -EIO;
		goto fail;
	}

	/* Get number of memory segment from driver */
	ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_NUM_MEMSEG, &num_memseg);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memseg count.\n");
		err = -EIO;
		goto fail;
	}

	if(num_memseg > RTE_MAX_MEMSEG){
		RTE_LOG(ERR, EAL, "XEN DOM0: the memseg count %d is greater"
			" than max memseg %d.\n",num_memseg, RTE_MAX_MEMSEG);
		err = -EIO;
		goto fail;
	}

	/* get information about all memory segments */
	ret = ioctl(xen_fd, RTE_DOM0_IOCTL_GET_MEMSEG_INFO, seginfo);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "XEN DOM0:failed to get memseg info.\n");
		err = -EIO;
		goto fail;
	}

	/* map all memory segments to contiguous user space */
	for (memseg_idx = 0; memseg_idx < num_memseg; memseg_idx++)
	{
		vma_len = seginfo[memseg_idx].size;

		/**
		 * get the biggest virtual memory area up to vma_len. If it fails,
		 * vma_addr is NULL, so let the kernel provide the address.
		 */
		vma_addr = xen_get_virtual_area(&vma_len, RTE_PGSIZE_2M);
		if (vma_addr == NULL) {
			flags = MAP_SHARED;
			vma_len = RTE_PGSIZE_2M;
		} else
			flags = MAP_SHARED | MAP_FIXED;

		seginfo[memseg_idx].size = vma_len;
		vir_addr = mmap(vma_addr, seginfo[memseg_idx].size,
			PROT_READ|PROT_WRITE, flags, xen_fd,
			memseg_idx * page_size);
		if (vir_addr == MAP_FAILED) {
			RTE_LOG(ERR, EAL, "XEN DOM0:Could not mmap %s\n",
				DOM0_MM_DEV);
			err = -EIO;
			goto fail;
		}

		memseg[memseg_idx].addr = vir_addr;
		memseg[memseg_idx].phys_addr = page_size *
			seginfo[memseg_idx].pfn;
		memseg[memseg_idx].len = seginfo[memseg_idx].size;
		for ( i = 0; i < seginfo[memseg_idx].size / RTE_PGSIZE_2M; i++)
			memseg[memseg_idx].mfn[i] = seginfo[memseg_idx].mfn[i];

		/* MFNs are contiguous within each 2M chunk, so assume that the page size is 2M */
		memseg[memseg_idx].hugepage_sz = RTE_PGSIZE_2M;

		memseg[memseg_idx].nchannel = mcfg->nchannel;
		memseg[memseg_idx].nrank = mcfg->nrank;

		/* NUMA is not supported in Xen Dom0, so only set socket 0 */
		memseg[memseg_idx].socket_id = 0;
	}

	return 0;
fail:
	if (xen_fd > 0) {
		close(xen_fd);
		xen_fd = -1;
	}
	return err;
}
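/* xen_get_virtual_area() above hands back a contiguous virtual address range
 * that the loop then overwrites with MAP_SHARED | MAP_FIXED.  A minimal
 * sketch of one common way to make such a reservation, assuming POSIX mmap();
 * the real DPDK helper differs in details, and the reservation is racy if
 * other threads map memory concurrently. */
#include <stddef.h>
#include <sys/mman.h>

static void *reserve_virtual_area(size_t len)
{
	/* PROT_NONE keeps the range inaccessible until it is remapped. */
	void *addr = mmap(NULL, len, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (addr == MAP_FAILED)
		return NULL;
	/* Drop the mapping but remember the address; a later
	 * mmap(addr, ..., MAP_FIXED, ...) can reuse the range. */
	munmap(addr, len);
	return addr;
}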
static twin_bool_t twin_fbdev_apply_config(twin_fbdev_t *tf)
{
	off_t off, pgsize = getpagesize();
	struct fb_cmap cmap;
	size_t len;

	/* Tweak fields to default to 32 bpp argb and virtual == phys */
	tf->fb_var.xres_virtual = tf->fb_var.xres;
	tf->fb_var.yres_virtual = tf->fb_var.yres;
	tf->fb_var.bits_per_pixel = 32;
	tf->fb_var.red.length = 8;
	tf->fb_var.green.length = 8;
	tf->fb_var.blue.length = 8;
	tf->fb_var.transp.length = 8;
	tf->fb_var.red.offset = 0;
	tf->fb_var.green.offset = 0;
	tf->fb_var.blue.offset = 0;
	tf->fb_var.transp.offset = 0;

	/* Apply fbdev settings */
	if (ioctl(tf->fb_fd, FBIOPUT_VSCREENINFO, &tf->fb_var) < 0) {
		SERROR("can't set fb mode");
		return 0;
	}

	/* Get new fbdev configuration */
	if (ioctl(tf->fb_fd, FBIOGET_VSCREENINFO, &tf->fb_var) < 0) {
		SERROR("can't get framebuffer config");
		return 0;
	}

	DEBUG("fbdev set config set to:\n");
	DEBUG(" xres          = %d\n", tf->fb_var.xres);
	DEBUG(" yres          = %d\n", tf->fb_var.yres);
	DEBUG(" xres_virtual  = %d\n", tf->fb_var.xres_virtual);
	DEBUG(" yres_virtual  = %d\n", tf->fb_var.yres_virtual);
	DEBUG(" bits_per_pix  = %d\n", tf->fb_var.bits_per_pixel);
	DEBUG(" red.len/off   = %d/%d\n",
	      tf->fb_var.red.length, tf->fb_var.red.offset);
	DEBUG(" green.len/off = %d/%d\n",
	      tf->fb_var.green.length, tf->fb_var.green.offset);
	DEBUG(" blue.len/off  = %d/%d\n",
	      tf->fb_var.blue.length, tf->fb_var.blue.offset);
	DEBUG(" trans.len/off = %d/%d\n",
	      tf->fb_var.transp.length, tf->fb_var.transp.offset);

	/* Check bits per pixel */
	if (tf->fb_var.bits_per_pixel != 32) {
		SERROR("can't set fb bpp to 32");
		return 0;
	}

	/* Set colormap */
	cmap.start = 0;
	cmap.len = 256;
	cmap.red = tf->cmap[0];
	cmap.green = tf->cmap[1];
	cmap.blue = tf->cmap[2];
	cmap.transp = NULL;
	ioctl(tf->fb_fd, FBIOPUTCMAP, &cmap);

	/* Get remaining settings */
	ioctl(tf->fb_fd, FBIOGET_FSCREENINFO, &tf->fb_fix);

	DEBUG(" line_lenght   = %d\n", tf->fb_fix.line_length);

	/* Map the fb */
	off = (off_t)tf->fb_fix.smem_start & (pgsize - 1);
	len = (size_t)tf->fb_fix.smem_len + off + (pgsize - 1);
	len &= ~(pgsize - 1);
	tf->fb_len = len;

	tf->fb_base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			   tf->fb_fd, 0);
	if (tf->fb_base == MAP_FAILED) {
		SERROR("can't mmap framebuffer");
		return 0;
	}
	tf->fb_ptr = tf->fb_base + off;

	return 1;
}
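/* The mapping length above is rounded up to a whole number of pages: add the
 * in-page offset of smem_start, add (pgsize - 1), then mask the low bits.
 * A minimal sketch of the same round-up, assuming a power-of-two page size;
 * the helper name is ours. */
#include <stddef.h>

static size_t round_up_to_pages(size_t bytes, size_t page_size)
{
	return (bytes + page_size - 1) & ~(page_size - 1);
}

/* e.g. round_up_to_pages(5000, 4096) == 8192 */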
Exemple #30
0
void serverStart()
{
	dataPopped = 0;
	//fprintf (stderr, "Hello from server start\n");
	const char *dev_file = "/dev/bsemu0";
	pcie_dev = open(dev_file, O_RDWR);
	if (pcie_dev < 0) {
		fprintf (stderr, "Error: Failed to open %s: %s\n", dev_file, strerror(errno));
		exit(EXIT_FAILURE);
	}

	BSEMU_SharedData drv_data;
	if (ioctl(pcie_dev,BSEMU_IOC_GETDEVICE,&drv_data) != 0)
	{
		fprintf (stderr, "Error: ioctl: %s\n", strerror(errno));
		close(pcie_dev);
		exit(EXIT_FAILURE);
	}

	if (drv_data.vid != MY_VENDOR_ID || drv_data.did != MY_DEVICE_ID)
	{
		fprintf (stderr, "Error: device has Vendor:Device ID of %x:%x but expected %x:%x\n",
		drv_data.vid, drv_data.did, MY_VENDOR_ID, MY_DEVICE_ID);
		close(pcie_dev);
		exit(EXIT_FAILURE);
	}

	if (drv_data.bar[1].log_addr == 0l || drv_data.bar[2].log_addr == 0l)
	{
		fprintf (stderr, "Error: no BAR1 or BAR2 found on device\n");
		close(pcie_dev);
		exit(EXIT_FAILURE);
	}

	pBar1 = (tBar1*) mmap(NULL, drv_data.bar[1].size, PROT_READ | PROT_WRITE, MAP_SHARED, pcie_dev, getpagesize());
	if (pBar1 == MAP_FAILED)
	{
		fprintf (stderr, "Error: mmap of BAR1: %s\n", strerror(errno));
		close(pcie_dev);
		exit(EXIT_FAILURE);
	}

	pBar2 = (tBar2*) mmap(NULL, drv_data.bar[2].size, PROT_READ | PROT_WRITE, MAP_SHARED, pcie_dev, 2*getpagesize());
	if (pBar2 == MAP_FAILED)
	{
		fprintf (stderr, "Error: mmap of BAR2: %s\n", strerror(errno));
		close(pcie_dev);
		exit(EXIT_FAILURE);
	}

	pBar1->command_lo = 0xdeadbeef;
	pBar1->command_hi = 0xdeadbeef;
	if (DEBUG_PCIE) {
		printf("id = %llx\n", (((UInt64) pBar1->bluespec_id_hi) << 32) | ((UInt64) pBar1->bluespec_id_lo));
		printf("version = %lx\n", pBar1->scemi_version);
		printf("loopback = %llx\n", (((UInt64) pBar1->command_hi) << 32) | ((UInt64) pBar1->command_lo));
		printf("bar1rpkt = %lu\n", pBar1->bar1_rpkt_count);
		printf("bar1wpkt = %lu\n\n", pBar1->bar1_wpkt_count);
	}
}
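/* In this driver the mmap() offset selects which BAR gets mapped (one page
 * for BAR1, two pages for BAR2), and the mapping is then read through a
 * struct pointer.  A minimal sketch of combining a 64-bit register from two
 * 32-bit halves, as the id/loopback prints above do; the register layout is
 * hypothetical, and volatile keeps the compiler from caching MMIO reads. */
#include <stdint.h>

struct regpair32 {                 /* hypothetical lo/hi register pair */
	volatile uint32_t lo;
	volatile uint32_t hi;
};

static uint64_t read_reg64(const struct regpair32 *r)
{
	return ((uint64_t) r->hi << 32) | (uint64_t) r->lo;
}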