Example #1
File: ksm02.c Project: 1587/ltp
int main(int argc, char *argv[])
{
	int lc;
	int size = 128, num = 3, unit = 1;
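	/* node bitmask: one bit per possible NUMA node, cleared by default */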
	unsigned long nmask[MAXNODES / BITS_PER_LONG] = { 0 };
	unsigned int node;

	tst_parse_opts(argc, argv, ksm_options, ksm_usage);

	node = get_a_numa_node(tst_exit);
	set_node(nmask, node);

	setup();

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		tst_count = 0;
		check_ksm_options(&size, &num, &unit);

		if (set_mempolicy(MPOL_BIND, nmask, MAXNODES) == -1) {
			if (errno != ENOSYS)
				tst_brkm(TBROK | TERRNO, cleanup,
					 "set_mempolicy");
			else
				tst_brkm(TCONF, cleanup,
					 "set_mempolicy syscall is not "
					 "implemented on your system.");
		}
		create_same_memory(size, num, unit);

		write_cpusets(node);
		create_same_memory(size, num, unit);
	}
	cleanup();
	tst_exit();
}
Example #2
void 
proc_numa_setInterleaved(int* processorList, int numberOfProcessors)
{
    long i;
    int j;
    int ret=0;
    unsigned long numberOfNodes = 65;
    unsigned long mask = 0UL;
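    /* maxnode of 65 = the 64 bits in mask + 1, the same size+1 convention libnuma's set_mempolicy() calls use */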

    for (i=0; i<numa_info.numberOfNodes; i++)
    {
        for (j=0; j<numberOfProcessors; j++)
        {
            if (proc_findProcessor(i,processorList[j]))
            {
                mask |= (1UL<<i);
                break;
            }
        }
    }

    ret = set_mempolicy(MPOL_INTERLEAVE,&mask,numberOfNodes);

    if (ret < 0)
    {
        ERROR;
    }
}
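The same interleave setup can also be written with libnuma's bitmask helpers instead of a hand-built unsigned long mask. A minimal sketch, assuming libnuma is available and numa_available() has already been checked; interleaving over all nodes here stands in for the processor-list filtering done above:

#include <numa.h>
#include <numaif.h>
#include <stdio.h>

static void set_interleave_all_nodes(void)
{
    struct bitmask *bm = numa_allocate_nodemask();  /* sized for all possible nodes */
    int node;

    for (node = 0; node <= numa_max_node(); node++)
        numa_bitmask_setbit(bm, node);

    /* maxnode is a bit count; pass size + 1, as libnuma itself does */
    if (set_mempolicy(MPOL_INTERLEAVE, bm->maskp, bm->size + 1) < 0)
        perror("set_mempolicy(MPOL_INTERLEAVE)");

    numa_free_nodemask(bm);
}

For the all-nodes case, libnuma's numa_set_interleave_mask(numa_all_nodes_ptr) should achieve the same effect in a single call.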
Example #3
static void mempol_restore(void)
{
	int ret;

	ret = set_mempolicy(MPOL_DEFAULT, NULL, g->p.nr_nodes-1);

	BUG_ON(ret);
}
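With a NULL mask the maxnode argument is effectively ignored by the kernel, so passing g->p.nr_nodes-1 is harmless; the plainer form used elsewhere in these examples is a NULL mask with maxnode 0. A minimal equivalent sketch:

	if (set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
		perror("set_mempolicy(MPOL_DEFAULT)");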
Example #4
/*
 * Set the memory policy for data allocation for a process.
 */
void linux_set_mempol(int mempol, int realnode)
{

  int node = local_topo->nnodes;
  int core, i, error = 0;	/* default to success so the final check is well-defined */
  unsigned long mask = 0;

  switch(mempol)
  {
   case OS:
    if(node > 0)
      error = set_mempolicy(MPOL_DEFAULT,NULL,0); 
   break;
   case LOCAL:   
    if(node >0)
    {  
     core = sched_getcpu();
     realnode = linux_get_nodeid(core);
     mask = 1UL << realnode;
     /* maxnode counts mask bits; 8*sizeof(mask)+1 covers the whole word */
     error = set_mempolicy(MPOL_BIND,&mask,8*sizeof(mask)+1);
    }
   break;
   case INTERLEAVE:
    if(node >0){
      /* build the interleave mask first; set_mempolicy() runs once, after the loop */
      for(i=0;i<node+1;i++)
        mask |= (1UL << i);
      error = set_mempolicy(MPOL_INTERLEAVE,&mask,8*sizeof(mask)+1);
    }
   break;
   case MANUAL:   
    if(node >0)
    {  
     mask = 1UL << realnode;
     error = set_mempolicy(MPOL_BIND,&mask,8*sizeof(mask)+1);
    }
   break;
   default:
    if(node > 0)
      error = set_mempolicy(MPOL_DEFAULT,NULL,0); 
   break;
  }

  if (error < 0)
    printf("\nWARNING: Memory binding not supported or can not be enforced\n");

}
Example #5
/**
 * policy: indicates the memory policy
 * nids: indicates the NUMA nodes to be applied
 * len: indicates how many NUMA nodes nids has
 */
int CmiSetMemAffinity(int policy, int *nids, int len) {
    int i;
    mem_aff_mask myMask;
    unsigned int masksize = 8*sizeof(mem_aff_mask);
    MEM_MASK_ZERO(&myMask);
    for (i=0; i<len; i++) MEM_MASK_SET(nids[i], &myMask);
    if (set_mempolicy(policy, &myMask, masksize)<0) {
        CmiPrintf("Error> setting memory policy (%d) error with mask %X\n", policy, myMask);
        return -1;
    } else
        return 0;
}
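A hypothetical call site for this helper; the MPOL_BIND mode and the node list are illustrative values, not taken from the original project:

    int nids[] = { 0 };                      /* bind to NUMA node 0 */

    if (CmiSetMemAffinity(MPOL_BIND, nids, 1) != 0) {
        /* keep the default memory policy if binding is not possible */
    }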
Example #6
static void verify_mempolicy(unsigned int node, int mode)
{
	struct bitmask *bm = numa_allocate_nodemask();
	unsigned int i;

	numa_bitmask_setbit(bm, node);

	TEST(set_mempolicy(mode, bm->maskp, bm->size+1));

	if (TST_RET) {
		tst_res(TFAIL | TTERRNO,
		        "set_mempolicy(%s) node %u",
		        tst_numa_mode_name(mode), node);
		numa_free_nodemask(bm);
		return;
	}

	tst_res(TPASS, "set_mempolicy(%s) node %u",
	        tst_numa_mode_name(mode), node);

	numa_free_nodemask(bm);

	const char *prefix = "child: ";

	if (SAFE_FORK()) {
		prefix = "parent: ";
		tst_reap_children();
	}

	tst_nodemap_reset_counters(nodes);
	alloc_fault_count(nodes, NULL, PAGES_ALLOCATED * page_size);
	tst_nodemap_print_counters(nodes);

	for (i = 0; i < nodes->cnt; i++) {
		if (nodes->map[i] == node) {
			if (nodes->counters[i] == PAGES_ALLOCATED) {
				tst_res(TPASS, "%sNode %u allocated %u",
				        prefix, node, PAGES_ALLOCATED);
			} else {
				tst_res(TFAIL, "%sNode %u allocated %u, expected %u",
				        prefix, node, nodes->counters[i],
				        PAGES_ALLOCATED);
			}
			continue;
		}

		if (nodes->counters[i]) {
			tst_res(TFAIL, "%sNode %u allocated %u, expected 0",
			        prefix, i, nodes->counters[i]);
		}
	}
}
Example #7
static void bind_to_memnode(int node)
{
	unsigned long nodemask;
	int ret;

	if (node == -1)
		return;

	/* the single-word mask holds one bit per node */
	BUG_ON(g->p.nr_nodes > (int)(8*sizeof(nodemask)));
	nodemask = 1L << node;

	ret = set_mempolicy(MPOL_BIND, &nodemask, sizeof(nodemask)*8);
	dprintf("binding to node %d, mask: %016lx => %d\n", node, nodemask, ret);

	BUG_ON(ret);
}
Example #8
File: mem.c Project: Nan619/ltp-ddt
void oom(int testcase, int mempolicy, int lite)
{
	pid_t pid;
	int status;
#if HAVE_NUMA_H && HAVE_LINUX_MEMPOLICY_H && HAVE_NUMAIF_H \
	&& HAVE_MPOL_CONSTANTS
	unsigned long nmask = 0;
	unsigned int node;

	if (mempolicy) {
		node = get_a_numa_node(cleanup);
		nmask = 1UL << node;	/* set the bit only when a node was actually chosen */
	}
#endif

	switch (pid = fork()) {
	case -1:
		tst_brkm(TBROK | TERRNO, cleanup, "fork");
	case 0:
#if HAVE_NUMA_H && HAVE_LINUX_MEMPOLICY_H && HAVE_NUMAIF_H \
	&& HAVE_MPOL_CONSTANTS
		if (mempolicy)
			if (set_mempolicy(MPOL_BIND, &nmask, MAXNODES) == -1)
				tst_brkm(TBROK | TERRNO, cleanup,
					 "set_mempolicy");
#endif
		_test_alloc(testcase, lite);
		exit(0);
	default:
		break;
	}
	tst_resm(TINFO, "expected victim is %d.", pid);
	if (waitpid(-1, &status, 0) == -1)
		tst_brkm(TBROK | TERRNO, cleanup, "waitpid");

	if (testcase == OVERCOMMIT) {
		if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
			tst_resm(TFAIL, "the victim unexpectedly failed: %d",
				 status);
	} else {
		if (!WIFSIGNALED(status) || WTERMSIG(status) != SIGKILL)
			tst_resm(TFAIL, "the victim unexpectedly failed: %d",
				 status);
	}
}
Example #9
static void set_global_mempolicy(int mempolicy)
{
#if HAVE_NUMA_H && HAVE_LINUX_MEMPOLICY_H && HAVE_NUMAIF_H \
	&& HAVE_MPOL_CONSTANTS
	unsigned long nmask[MAXNODES / BITS_PER_LONG] = { 0 };
	int num_nodes, *nodes;
	int ret;

	if (mempolicy) {
		ret = get_allowed_nodes_arr(NH_MEMS|NH_CPUS, &num_nodes, &nodes);
		if (ret != 0)
			tst_brkm(TBROK|TERRNO, cleanup,
				 "get_allowed_nodes_arr");
		if (num_nodes < 2) {
			tst_resm(TINFO, "mempolicy need NUMA system support");
			free(nodes);
			return;
		}
		switch(mempolicy) {
		case MPOL_BIND:
			/* bind the second node */
			set_node(nmask, nodes[1]);
			break;
		case MPOL_INTERLEAVE:
		case MPOL_PREFERRED:
			if (num_nodes == 2) {
				tst_resm(TINFO, "The mempolicy need "
					 "more than 2 numa nodes");
				free(nodes);
				return;
			} else {
				/* Using the 2nd,3rd node */
				set_node(nmask, nodes[1]);
				set_node(nmask, nodes[2]);
			}
			break;
		default:
			tst_brkm(TBROK|TERRNO, cleanup, "Bad mempolicy mode");
		}
		if (set_mempolicy(mempolicy, nmask, MAXNODES) == -1)
			tst_brkm(TBROK|TERRNO, cleanup, "set_mempolicy");
	}
#endif
}
Example #10
int main(int argc, char *argv[])
{
	int lc;
	char *msg;
	int size = 128, num = 3, unit = 1;
	unsigned long nnodes = 1;
	unsigned long nmask = 2;

	msg = parse_opts(argc, argv, ksm_options, ksm_usage);
	if (msg != NULL)
		tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg);

	nnodes = count_numa();
	if (nnodes <= 1)
		tst_brkm(TCONF, NULL, "required a NUMA system.");

	setup();

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		Tst_count = 0;
		check_ksm_options(&size, &num, &unit);

		write_memcg();

		if (set_mempolicy(MPOL_BIND, &nmask, MAXNODES) == -1) {
			if (errno != ENOSYS)
				tst_brkm(TBROK|TERRNO, cleanup, "set_mempolicy");
			else
				tst_brkm(TCONF, cleanup,
					"set_mempolicy syscall is not implemented on your system.");
		}
		create_same_memory(size, num, unit);

		write_cpusets();
		create_same_memory(size, num, unit);
	}
	cleanup();
	tst_exit();
}
Example #11
static void testcpuset(void)
{
	int lc;
	int child, i, status;
	unsigned long nmask[MAXNODES / BITS_PER_LONG] = { 0 };
	char mems[BUFSIZ], buf[BUFSIZ];

	read_cpuset_files(CPATH, "cpus", buf);
	write_cpuset_files(CPATH_NEW, "cpus", buf);
	read_cpuset_files(CPATH, "mems", mems);
	write_cpuset_files(CPATH_NEW, "mems", mems);
	SAFE_FILE_PRINTF(cleanup, CPATH_NEW "/tasks", "%d", getpid());

	switch (child = fork()) {
	case -1:
		tst_brkm(TBROK | TERRNO, cleanup, "fork");
	case 0:
		for (i = 0; i < nnodes; i++) {
			if (nodes[i] >= MAXNODES)
				continue;
			set_node(nmask, nodes[i]);
		}
		if (set_mempolicy(MPOL_BIND, nmask, MAXNODES) == -1)
			tst_brkm(TBROK | TERRNO, cleanup, "set_mempolicy");
		exit(mem_hog_cpuset(ncpus > 1 ? ncpus : 1));
	}
	for (lc = 0; TEST_LOOPING(lc); lc++) {
		tst_count = 0;
		snprintf(buf, BUFSIZ, "%d", nodes[0]);
		write_cpuset_files(CPATH_NEW, "mems", buf);
		snprintf(buf, BUFSIZ, "%d", nodes[1]);
		write_cpuset_files(CPATH_NEW, "mems", buf);
	}

	if (waitpid(child, &status, WUNTRACED | WCONTINUED) == -1)
		tst_brkm(TBROK | TERRNO, cleanup, "waitpid");
	if (WEXITSTATUS(status) != 0)
		tst_resm(TFAIL, "child exit status is %d", WEXITSTATUS(status));
}
Example #12
static void *
s_numa_alloc(size_t sz, int cpu) {
  void *ret = NULL;

  if (likely(sz > 0)) {
    if (likely(cpu >= 0)) {
      if (likely(s_numa_nodes != NULL && s_n_cpus > 0)) {
        unsigned int node = s_numa_nodes[cpu];
        unsigned int allocd_node = UINT_MAX;
        struct bitmask *bmp;
        int r;
  
        bmp = numa_allocate_nodemask();
        numa_bitmask_setbit(bmp, node);

        errno = 0;
        r = (int)set_mempolicy(MPOL_BIND, bmp->maskp, bmp->size + 1);
        if (likely(r == 0)) {
          errno = 0;
          ret = numa_alloc_onnode(sz, (int)node);
          if (likely(ret != NULL)) {
            lagopus_result_t rl;

            /*
             * We need this "first touch" even using the
             * numa_alloc_onnode().
             */
            (void)memset(ret, 0, sz);

            errno = 0;
            r = (int)get_mempolicy((int *)&allocd_node, NULL, 0, ret,
                                   MPOL_F_NODE|MPOL_F_ADDR);
            if (likely(r == 0)) {
              if (unlikely(node != allocd_node)) {
                /*
                 * The memory is not allocated on the node, but it is
                 * still usable. Just return it.
                 */
                lagopus_msg_warning("can't allocate " PFSZ(u) " bytes memory "
                                    "for CPU %d (NUMA node %d).\n",
                                    sz, cpu, node);
              }
            } else {
              lagopus_perror(LAGOPUS_RESULT_POSIX_API_ERROR);
              lagopus_msg_error("get_mempolicy() returned %d.\n", r);
            }

            rl = s_add_addr(ret, sz);
            if (unlikely(rl != LAGOPUS_RESULT_OK)) {
              lagopus_perror(rl);
              lagopus_msg_error("can't register the allocated address.\n");
              numa_free(ret, sz);
              ret = NULL;
            }
          }

        } else {	/* r == 0 */
          lagopus_perror(LAGOPUS_RESULT_POSIX_API_ERROR);
          lagopus_msg_error("set_mempolicy() returned %d.\n", r);
        }

        numa_free_nodemask(bmp);
        set_mempolicy(MPOL_DEFAULT, NULL, 0);

      } else {	/* s_numa_nodes != NULL && s_n_cpus > 0 */
        /*
         * Not initialized or initialization failure.
         */
        lagopus_msg_warning("The NUMA related information is not initialized. "
                            "Use malloc(3) instead.\n");
        ret = malloc(sz);
      }

    } else {	/* cpu >= 0 */
      /*
       * Use pure malloc(3).
       */
      ret = malloc(sz);
    }

  }

  return ret;
}
Example #13
int main(int argc, char *argv[]) {
	int i;
	int ret;
	int nr = 2;
	int c;	/* getopt() returns int; a plain char may never compare equal to -1 */
	char *p;
	int mapflag = MAP_ANONYMOUS;
	int protflag = PROT_READ|PROT_WRITE;
        unsigned long nr_nodes = numa_max_node() + 1;
        struct bitmask *new_nodes;
        unsigned long nodemask;
	int do_unpoison = 0;
	int loop = 3;

	while ((c = getopt(argc, argv, "vp:m:n:ul:h:")) != -1) {
		switch(c) {
                case 'v':
                        verbose = 1;
                        break;
                case 'p':
                        testpipe = optarg;
                        {
                                struct stat stat;
                                lstat(testpipe, &stat);
                                if (!S_ISFIFO(stat.st_mode))
                                        errmsg("Given file is not fifo.\n");
                        }
                        break;
		case 'm':
			if (!strcmp(optarg, "private"))
				mapflag |= MAP_PRIVATE;
			else if (!strcmp(optarg, "shared"))
				mapflag |= MAP_SHARED;
			else
				errmsg("invalid optarg for -m\n");
			break;
		case 'n':
			nr = strtoul(optarg, NULL, 10);
			break;
		case 'u':
			do_unpoison = 1;
			break;
		case 'l':
			loop = strtoul(optarg, NULL, 10);
			break;
		case 'h':
			HPS = strtoul(optarg, NULL, 10) * 1024;
			mapflag |= MAP_HUGETLB;
			/* todo: arch independent */
			if (HPS != 2097152 && HPS != 1073741824)
				errmsg("Invalid hugepage size\n");
			break;
		default:
			errmsg("invalid option\n");
			break;
		}
	}

        if (nr_nodes < 2)
                errmsg("A minimum of 2 nodes is required for this test.\n");

        new_nodes = numa_bitmask_alloc(nr_nodes);
        numa_bitmask_setbit(new_nodes, 1);

        nodemask = 1; /* only node 0 allowed */
        if (set_mempolicy(MPOL_BIND, &nodemask, nr_nodes) == -1)
                err("set_mempolicy");

	signal(SIGUSR2, sig_handle);
	pprintf("start background migration\n");
	pause();

	signal(SIGUSR1, sig_handle_flag);
	pprintf("hugepages prepared\n");

	while (flag) {
		p = checked_mmap((void *)ADDR_INPUT, nr * HPS, protflag, mapflag, -1, 0);
		/* fault in */
		memset(p, 'a', nr * HPS);
		for (i = 0; i < nr; i++) {
			ret = madvise(p + i * HPS, 4096, MADV_HWPOISON);
			if (ret) {
				perror("madvise");
				pprintf("madvise returned %d\n", ret);
			}
		}
		if (do_unpoison) {
			pprintf("need unpoison\n");
			pause();
		}
		checked_munmap(p, nr * HPS);
		if (loop-- <= 0)
			break;
	}
	pprintf("exit\n");
	pause();
	return 0;
}
Example #14
static void init_matrix(int rank)
{
#ifdef STARPU_HAVE_LIBNUMA
	if (numa)
	{
		fprintf(stderr, "Using INTERLEAVE policy\n");
		unsigned long nodemask = ((1<<0)|(1<<1));
		int ret = set_mempolicy(MPOL_INTERLEAVE, &nodemask, 3);
		if (ret)
			perror("set_mempolicy failed");
	}
#endif

	/* Allocate a grid of data handles, not all of them have to be allocated later on */
	dataA_handles = calloc(nblocks*nblocks, sizeof(starpu_data_handle_t));
	dataA = calloc(nblocks*nblocks, sizeof(TYPE *));
	allocated_memory_extra += nblocks*nblocks*(sizeof(starpu_data_handle_t) + sizeof(TYPE *));

	size_t blocksize = (size_t)(size/nblocks)*(size/nblocks)*sizeof(TYPE);

	/* Allocate all the blocks that belong to this mpi node */
	unsigned long i,j;
	for (j = 0; j < nblocks; j++)
	{
		for (i = 0; i < nblocks; i++)
		{
			TYPE **blockptr = &dataA[j+i*nblocks];
//			starpu_data_handle_t *handleptr = &dataA_handles[j+nblocks*i];
			starpu_data_handle_t *handleptr = &dataA_handles[j+nblocks*i];

			if (get_block_rank(i, j) == rank)
			{
				/* This block should be handled by the current MPI process */
				/* Allocate and fill it */
				starpu_malloc((void **)blockptr, blocksize);
				allocated_memory += blocksize;

				//fprintf(stderr, "Rank %d : fill block (i = %d, j = %d)\n", rank, i, j);
				fill_block_with_random(*blockptr, size, nblocks);
				//fprintf(stderr, "Rank %d : fill block (i = %d, j = %d)\n", rank, i, j);
				if (i == j)
				{
					unsigned tmp;
					for (tmp = 0; tmp < size/nblocks; tmp++)
					{
						(*blockptr)[tmp*((size/nblocks)+1)] += (TYPE)10*nblocks;
					}
				}

				/* Register it to StarPU */
				starpu_matrix_data_register(handleptr, STARPU_MAIN_RAM,
					(uintptr_t)*blockptr, size/nblocks,
					size/nblocks, size/nblocks, sizeof(TYPE));
			}
			else {
				*blockptr = STARPU_POISON_PTR;
				*handleptr = STARPU_POISON_PTR;
			}
		}
	}

	/* Allocate the temporary buffers required for the distributed algorithm */

	unsigned k;

	/* tmp buffer 11 */
#ifdef SINGLE_TMP11
	starpu_malloc((void **)&tmp_11_block, blocksize);
	allocated_memory_extra += blocksize;
	starpu_matrix_data_register(&tmp_11_block_handle, STARPU_MAIN_RAM, (uintptr_t)tmp_11_block,
			size/nblocks, size/nblocks, size/nblocks, sizeof(TYPE));
#else
	tmp_11_block_handles = calloc(nblocks, sizeof(starpu_data_handle_t));
	tmp_11_block = calloc(nblocks, sizeof(TYPE *));
	allocated_memory_extra += nblocks*(sizeof(starpu_data_handle_t) + sizeof(TYPE *));

	for (k = 0; k < nblocks; k++)
	{
		if (tmp_11_block_is_needed(rank, nblocks, k))
		{
			starpu_malloc((void **)&tmp_11_block[k], blocksize);
			allocated_memory_extra += blocksize;
			STARPU_ASSERT(tmp_11_block[k]);

			starpu_matrix_data_register(&tmp_11_block_handles[k], STARPU_MAIN_RAM,
				(uintptr_t)tmp_11_block[k],
				size/nblocks, size/nblocks, size/nblocks, sizeof(TYPE));
		}
	}
#endif

	/* tmp buffers 12 and 21 */
#ifdef SINGLE_TMP1221
	tmp_12_block_handles = calloc(nblocks, sizeof(starpu_data_handle_t));
	tmp_21_block_handles = calloc(nblocks, sizeof(starpu_data_handle_t));
	tmp_12_block = calloc(nblocks, sizeof(TYPE *));
	tmp_21_block = calloc(nblocks, sizeof(TYPE *));

	allocated_memory_extra += 2*nblocks*(sizeof(starpu_data_handle_t) + sizeof(TYPE *));
#else
	for (i = 0; i < 2; i++) {
		tmp_12_block_handles[i] = calloc(nblocks, sizeof(starpu_data_handle_t));
		tmp_21_block_handles[i] = calloc(nblocks, sizeof(starpu_data_handle_t));
		tmp_12_block[i] = calloc(nblocks, sizeof(TYPE *));
		tmp_21_block[i] = calloc(nblocks, sizeof(TYPE *));

		allocated_memory_extra += 2*nblocks*(sizeof(starpu_data_handle_t) + sizeof(TYPE *));
	}
#endif

	for (k = 0; k < nblocks; k++)
	{
#ifdef SINGLE_TMP1221
		if (tmp_12_block_is_needed(rank, nblocks, k))
		{
			starpu_malloc((void **)&tmp_12_block[k], blocksize);
			allocated_memory_extra += blocksize;
			STARPU_ASSERT(tmp_12_block[k]);

			starpu_matrix_data_register(&tmp_12_block_handles[k], STARPU_MAIN_RAM,
				(uintptr_t)tmp_12_block[k],
				size/nblocks, size/nblocks, size/nblocks, sizeof(TYPE));
		}

		if (tmp_21_block_is_needed(rank, nblocks, k))
		{
			starpu_malloc((void **)&tmp_21_block[k], blocksize);
			allocated_memory_extra += blocksize;
			STARPU_ASSERT(tmp_21_block[k]);

			starpu_matrix_data_register(&tmp_21_block_handles[k], STARPU_MAIN_RAM,
				(uintptr_t)tmp_21_block[k],
				size/nblocks, size/nblocks, size/nblocks, sizeof(TYPE));
		}
#else
	for (i = 0; i < 2; i++) {
		if (tmp_12_block_is_needed(rank, nblocks, k))
		{
			starpu_malloc((void **)&tmp_12_block[i][k], blocksize);
			allocated_memory_extra += blocksize;
			STARPU_ASSERT(tmp_12_block[i][k]);

			starpu_matrix_data_register(&tmp_12_block_handles[i][k], STARPU_MAIN_RAM,
				(uintptr_t)tmp_12_block[i][k],
				size/nblocks, size/nblocks, size/nblocks, sizeof(TYPE));
		}

		if (tmp_21_block_is_needed(rank, nblocks, k))
		{
			starpu_malloc((void **)&tmp_21_block[i][k], blocksize);
			allocated_memory_extra += blocksize;
			STARPU_ASSERT(tmp_21_block[i][k]);

			starpu_matrix_data_register(&tmp_21_block_handles[i][k], STARPU_MAIN_RAM,
				(uintptr_t)tmp_21_block[i][k],
				size/nblocks, size/nblocks, size/nblocks, sizeof(TYPE));
		}
	}
#endif
	}

	//display_all_blocks(nblocks, size/nblocks);
}
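Taken together, the examples follow one pattern: build a node bitmask, call set_mempolicy() with the mask width in bits (plus one) as maxnode, check the -1/errno result (the LTP tests treat ENOSYS as "not implemented"), and restore MPOL_DEFAULT afterwards. A minimal self-contained sketch of that pattern, assuming libnuma is installed (compile with -lnuma):

#include <errno.h>
#include <numa.h>
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	struct bitmask *bm;
	char *p;
	size_t len = 1 << 20;

	if (numa_available() < 0) {
		fprintf(stderr, "NUMA is not available on this system\n");
		return EXIT_FAILURE;
	}

	/* bind all further allocations of this thread to node 0 */
	bm = numa_allocate_nodemask();
	numa_bitmask_setbit(bm, 0);

	if (set_mempolicy(MPOL_BIND, bm->maskp, bm->size + 1) < 0) {
		fprintf(stderr, "set_mempolicy(MPOL_BIND): %s\n", strerror(errno));
		numa_free_nodemask(bm);
		return EXIT_FAILURE;
	}
	numa_free_nodemask(bm);

	/* pages faulted in from here on are restricted to node 0 */
	p = malloc(len);
	if (p) {
		memset(p, 0, len);
		free(p);
	}

	/* restore the default policy for the rest of the process */
	if (set_mempolicy(MPOL_DEFAULT, NULL, 0) < 0)
		perror("set_mempolicy(MPOL_DEFAULT)");

	return EXIT_SUCCESS;
}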