static ssize_t drv_attr_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct driver_attribute *drv_attr = to_drv_attr(attr);
        struct device_driver *drv = to_driver(kobj);
        ssize_t ret = -EIO;
#if defined(__VMKLNX__)
        vmk_ModuleID moduleID = vmklnx_get_driver_module_id(drv);
#endif

        if (drv_attr->show) {
#if defined(__VMKLNX__)
                VMKAPI_MODULE_CALL(moduleID, ret, drv_attr->show, drv, buf);
#else
                ret = drv_attr->show(drv, buf);
#endif
        }
        return ret;
}
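/*
 * Illustrative sketch, not part of the original source: a driver-side
 * attribute whose show() callback drv_attr_show() above would dispatch
 * to (on ESX, wrapped in VMKAPI_MODULE_CALL so it runs in the owning
 * module's context). The attribute name, value, and example_driver are
 * hypothetical; DRIVER_ATTR() and driver_create_file() are the stock
 * 2.6-era sysfs helpers. Guarded out with #if 0 so it does not build.
 */
#if 0
static ssize_t example_version_show(struct device_driver *drv, char *buf)
{
        /* sysfs hands the callback a PAGE_SIZE buffer; return bytes written */
        return snprintf(buf, PAGE_SIZE, "1.0.0\n");
}
static DRIVER_ATTR(version, S_IRUGO, example_version_show, NULL);

/* registered from the driver's init path, e.g.:
 *      driver_create_file(&example_driver, &driver_attr_version);
 */
#endif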
/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
/* _VMKLNX_CODECHECK_: mempool_alloc */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;
        wait_queue_t wait;
        gfp_t gfp_temp;

#if defined(__VMKLNX__)
        VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
        might_sleep_if(gfp_mask & __GFP_WAIT);

        gfp_mask |= __GFP_NOMEMALLOC;   /* don't allocate emergency reserves */
        gfp_mask |= __GFP_NORETRY;      /* don't loop in __alloc_pages */
        gfp_mask |= __GFP_NOWARN;       /* failures are OK */

        gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

#if defined(__VMKLNX__) && defined(VMX86_DEBUG)
        if (gfp_mask & __GFP_WAIT) {
                vmk_WorldAssertIsSafeToBlock();
        }
#endif /* defined(__VMKLNX__) */

repeat_alloc:
#if defined(__VMKLNX__)
        VMKAPI_MODULE_CALL(pool->module_id, element, pool->alloc, gfp_temp,
                           pool->pool_data);
#else /* !defined(__VMKLNX__) */
        element = pool->alloc(gfp_temp, pool->pool_data);
#endif /* defined(__VMKLNX__) */
        if (likely(element != NULL))
                return element;

        spin_lock_irqsave(&pool->lock, flags);
        if (likely(pool->curr_nr)) {
                element = remove_element(pool);
                spin_unlock_irqrestore(&pool->lock, flags);
                return element;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* We must not sleep in the GFP_ATOMIC case */
        if (!(gfp_mask & __GFP_WAIT))
                return NULL;

        /* Now start performing page reclaim */
        gfp_temp = gfp_mask;
        init_wait(&wait);
        prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
        smp_mb();
        if (!pool->curr_nr) {
                /*
                 * FIXME: this should be io_schedule(). The timeout is there
                 * as a workaround for some DM problems in 2.6.18.
                 */
#if defined(__VMKLNX__)
                schedule_timeout(5*HZ);
#else /* !defined(__VMKLNX__) */
                io_schedule_timeout(5*HZ);
#endif /* defined(__VMKLNX__) */
        }
        finish_wait(&pool->wait, &wait);

        goto repeat_alloc;
}
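/*
 * Illustrative sketch, not part of the original source: a typical caller
 * of mempool_alloc()/mempool_free(). The ctx_pool, struct request_ctx,
 * and the helper itself are hypothetical; the GFP-flag choice is the
 * point of the example. Guarded out with #if 0 so it does not build.
 */
#if 0
static struct request_ctx *get_request_ctx(mempool_t *ctx_pool, int atomic)
{
        /*
         * With __GFP_WAIT set (e.g. GFP_NOIO), mempool_alloc() may sleep
         * but does not return NULL while the min_nr reserve holds, so
         * process-context callers are guaranteed forward progress.
         * GFP_ATOMIC callers must be prepared for a NULL return.
         */
        return mempool_alloc(ctx_pool, atomic ? GFP_ATOMIC : GFP_NOIO);
}

/* release with mempool_free(ctx, ctx_pool); elements returned while the
 * pool is below min_nr refill the reserve instead of being freed. */
#endif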
mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                               mempool_free_t *free_fn, void *pool_data,
                               int node_id)
{
        mempool_t *pool;
#if defined(__VMKLNX__)
        vmk_ModuleID moduleID;
        vmk_HeapID heapID;

        VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
        moduleID = vmk_ModuleStackTop();
        heapID = vmk_ModuleGetHeapID(moduleID);
        VMK_ASSERT(heapID != VMK_INVALID_HEAP_ID);
        pool = vmklnx_kmalloc(heapID, sizeof(*pool), GFP_KERNEL, NULL);
#else /* !defined(__VMKLNX__) */
        pool = kmalloc_node(sizeof(*pool), GFP_KERNEL, node_id);
#endif /* defined(__VMKLNX__) */
        if (!pool)
                return NULL;
        memset(pool, 0, sizeof(*pool));
#if defined(__VMKLNX__)
        pool->elements = vmklnx_kmalloc(heapID, min_nr * sizeof(void *),
                                        GFP_KERNEL, NULL);
#else /* !defined(__VMKLNX__) */
        pool->elements = kmalloc_node(min_nr * sizeof(void *),
                                      GFP_KERNEL, node_id);
#endif /* defined(__VMKLNX__) */
        if (!pool->elements) {
                kfree(pool);
                return NULL;
        }
        spin_lock_init(&pool->lock);
        pool->min_nr = min_nr;
        pool->pool_data = pool_data;
        init_waitqueue_head(&pool->wait);
        pool->alloc = alloc_fn;
        pool->free = free_fn;
#if defined(__VMKLNX__)
        pool->module_id = moduleID;
#endif /* defined(__VMKLNX__) */

        /*
         * First pre-allocate the guaranteed number of buffers.
         */
        while (pool->curr_nr < pool->min_nr) {
                void *element;

#if defined(__VMKLNX__)
                VMKAPI_MODULE_CALL(pool->module_id, element, pool->alloc,
                                   GFP_KERNEL, pool->pool_data);
#else /* !defined(__VMKLNX__) */
                element = pool->alloc(GFP_KERNEL, pool->pool_data);
#endif /* defined(__VMKLNX__) */
                if (unlikely(!element)) {
                        free_pool(pool);
                        return NULL;
                }
                add_element(pool, element);
        }
        return pool;
}
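/*
 * Illustrative sketch, not part of the original source: building a pool
 * on top of a slab cache with the stock mempool_alloc_slab/
 * mempool_free_slab helpers (mempool_create() forwards here with
 * node_id == -1). The cache name, element type, and reserve size are
 * hypothetical; kmem_cache_create() is shown with its 2.6.18-era
 * six-argument signature. Guarded out with #if 0 so it does not build.
 */
#if 0
static struct kmem_cache *ctx_cache;
static mempool_t *ctx_pool;

static int __init example_pool_init(void)
{
        ctx_cache = kmem_cache_create("example_ctx",
                                      sizeof(struct request_ctx),
                                      0, 0, NULL, NULL);
        if (!ctx_cache)
                return -ENOMEM;
        /* keep 16 elements in reserve for progress under memory pressure */
        ctx_pool = mempool_create(16, mempool_alloc_slab, mempool_free_slab,
                                  ctx_cache);
        if (!ctx_pool) {
                kmem_cache_destroy(ctx_cache);
                return -ENOMEM;
        }
        return 0;
}
#endif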
/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note, the caller must guarantee that no mempool_destroy is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
        void *element;
        void **new_elements;
        unsigned long flags;

#if defined(__VMKLNX__)
        VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
        BUG_ON(new_min_nr <= 0);

        spin_lock_irqsave(&pool->lock, flags);
        if (new_min_nr <= pool->min_nr) {
                while (new_min_nr < pool->curr_nr) {
                        element = remove_element(pool);
                        spin_unlock_irqrestore(&pool->lock, flags);
#if defined(__VMKLNX__)
                        VMKAPI_MODULE_CALL_VOID(pool->module_id, pool->free,
                                                element, pool->pool_data);
#else /* !defined(__VMKLNX__) */
                        pool->free(element, pool->pool_data);
#endif /* defined(__VMKLNX__) */
                        spin_lock_irqsave(&pool->lock, flags);
                }
                pool->min_nr = new_min_nr;
                goto out_unlock;
        }
        spin_unlock_irqrestore(&pool->lock, flags);

        /* Grow the pool */
        new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
        if (!new_elements)
                return -ENOMEM;

        spin_lock_irqsave(&pool->lock, flags);
        if (unlikely(new_min_nr <= pool->min_nr)) {
                /* Raced, other resize will do our work */
                spin_unlock_irqrestore(&pool->lock, flags);
                kfree(new_elements);
                goto out;
        }
        memcpy(new_elements, pool->elements,
               pool->curr_nr * sizeof(*new_elements));
        kfree(pool->elements);
        pool->elements = new_elements;
        pool->min_nr = new_min_nr;

        while (pool->curr_nr < pool->min_nr) {
                spin_unlock_irqrestore(&pool->lock, flags);
#if defined(__VMKLNX__)
                VMKAPI_MODULE_CALL(pool->module_id, element, pool->alloc,
                                   gfp_mask, pool->pool_data);
#else /* !defined(__VMKLNX__) */
                element = pool->alloc(gfp_mask, pool->pool_data);
#endif /* defined(__VMKLNX__) */
                if (!element)
                        goto out;
                spin_lock_irqsave(&pool->lock, flags);
                if (pool->curr_nr < pool->min_nr) {
                        add_element(pool, element);
                } else {
                        spin_unlock_irqrestore(&pool->lock, flags);
#if defined(__VMKLNX__)
                        VMKAPI_MODULE_CALL_VOID(pool->module_id, pool->free,
                                                element, pool->pool_data);
#else /* !defined(__VMKLNX__) */
                        pool->free(element, pool->pool_data);   /* Raced */
#endif /* defined(__VMKLNX__) */
                        goto out;
                }
        }
out_unlock:
        spin_unlock_irqrestore(&pool->lock, flags);
out:
        return 0;
}
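/*
 * Illustrative sketch, not part of the original source: growing the
 * reserve when, say, a device reports a deeper queue. The trigger,
 * helper name, and pool are hypothetical; only the mempool_resize()
 * call itself is real. Guarded out with #if 0 so it does not build.
 */
#if 0
static int example_queue_depth_changed(mempool_t *ctx_pool, int new_depth)
{
        /*
         * May return before the pool actually reaches the new size when
         * growing; subsequent mempool_free() calls top it up. Fails with
         * -ENOMEM only if the new element array cannot be allocated.
         */
        return mempool_resize(ctx_pool, new_depth, GFP_KERNEL);
}
#endif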