/**
 * klist_node_attached - report whether a node is currently on a klist
 * @n: node to query
 *
 * Returns nonzero when @n is linked into a klist, zero otherwise.
 */
int klist_node_attached(struct klist_node * n)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	return n->n_klist != NULL;
}
/* _VMKLNX_CODECHECK_: driver_unregister */
/**
 * driver_unregister - remove a driver from its bus
 * @drv: driver being unregistered
 */
void driver_unregister(struct device_driver * drv)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	bus_remove_driver(drv);
}
/**
 * klist_next - advance a klist iterator and return the next node
 * @i: iterator previously set up by klist_iter_init() / klist_iter_init_node()
 *
 * Returns the next node on the list with its refcount raised, or NULL
 * once iteration has passed the end.  The reference held on the node
 * the iterator previously pointed at is dropped; if that drop removed
 * the node, the klist's put() callback is invoked after the spinlock
 * is released.
 */
struct klist_node * klist_next(struct klist_iter * i)
{
	struct list_head * next;
	struct klist_node * lnode = i->i_cur;
	struct klist_node * knode = NULL;
	void (*put)(struct klist_node *) = i->i_klist->put;

#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	spin_lock(&i->i_klist->k_lock);
	if (lnode) {
		/* Pick the successor before dropping our reference. */
		next = lnode->n_node.next;
		/* Only call put() if this drop actually deleted the node. */
		if (!klist_dec_and_del(lnode))
			put = NULL;
	} else
		/* First call on this iterator: start at the list head. */
		next = i->i_head->next;

	if (next != i->i_head) {
		knode = to_klist_node(next);
		/* Pin the node we are about to hand back to the caller. */
		kref_get(&knode->n_ref);
	}
	i->i_cur = knode;
	spin_unlock(&i->i_klist->k_lock);
	/* Run the put() callback outside the spinlock (matches klist_del). */
	if (put && lnode)
		put(lnode);
	return knode;
}
/*
 * free_pool - drain every reserved element, then free the pool's
 * bookkeeping.  On vmklinux the memory goes back to the heap of the
 * module that created the pool; otherwise plain kfree() is used.
 */
static void free_pool(mempool_t *pool)
{
#if defined(__VMKLNX__)
        vmk_HeapID heapID;

        /* Free back to the heap owned by the pool's creating module. */
        heapID = vmk_ModuleGetHeapID(pool->module_id);
        VMK_ASSERT(heapID != VMK_INVALID_HEAP_ID);
#endif /* defined(__VMKLNX__) */

	/* Hand every reserved element to the pool's free callback. */
	while (pool->curr_nr) {
		void *element = remove_element(pool);
#if defined(__VMKLNX__)
                /* Invoke the callback in the owning module's context. */
                VMKAPI_MODULE_CALL_VOID(pool->module_id, pool->free, element, 
                                        pool->pool_data);
#else /* !defined(__VMKLNX__) */
		pool->free(element, pool->pool_data);
#endif /* defined(__VMKLNX__) */
	}

#if defined(__VMKLNX__)
	vmklnx_kfree(heapID, pool->elements);
	vmklnx_kfree(heapID, pool);
#else /* !defined(__VMKLNX__) */
	kfree(pool->elements);
	kfree(pool);
#endif /* defined(__VMKLNX__) */
}
/* Example #5 (score: 0) — file: nvme_module.c, project: vmware/nvme */
/**
 * Create the default heap of the module, and associate heap with the module.
 *
 * This will update the module's global resource data.
 *
 * @return VMK_OK: heap creation successful
 * @return VMK_EXISTS: driver's heap has already been created
 * @return Others: errors returned by vmk_HeapCreate
 */
static VMK_ReturnStatus
HeapCreate(void)
{
   VMK_ReturnStatus vmkStatus;
   vmk_HeapCreateProps props;

   /* Ensures that this function is not called twice. */
   VMK_ASSERT(NVME_DRIVER_RES_HEAP_ID == VMK_INVALID_HEAP_ID);
   if (NVME_DRIVER_RES_HEAP_ID != VMK_INVALID_HEAP_ID) {
      return VMK_EXISTS;
   }

   props.type = VMK_HEAP_TYPE_SIMPLE;
   props.module = vmk_ModuleCurrentID;
   props.initial = NVME_DRIVER_PROPS_HEAP_INITIAL;
   props.max = NVME_DRIVER_PROPS_HEAP_MAX;
   props.creationTimeoutMS = VMK_TIMEOUT_UNLIMITED_MS;
   vmk_NameInitialize(&props.name, NVME_DRIVER_PROPS_HEAP_NAME);

   vmkStatus = vmk_HeapCreate(&props, &(NVME_DRIVER_RES_HEAP_ID));
   if (vmkStatus != VMK_OK) {
      return vmkStatus;
   }

   /* Make the new heap the module default so allocations land on it. */
   vmk_ModuleSetHeapID(vmk_ModuleCurrentID, NVME_DRIVER_RES_HEAP_ID);

   return VMK_OK;
}
/* Example #6 (score: 0) */
/**
 * simple_strtoull - convert a string to an unsigned long long
 * @cp: The start of the string
 * @endp: A pointer to the end of the parsed string will be placed here
 * @base: The number base to use
 */
unsigned long long simple_strtoull(const char *cp,char **endp,unsigned int base)
{
	unsigned long long acc = 0;

#if defined (__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	/* Deduce the base from the prefix when the caller passed 0. */
	if (base == 0) {
		if (*cp != '0') {
			base = 10;
		} else {
			cp++;
			if (toupper(*cp) == 'X' && isxdigit(cp[1])) {
				cp++;
				base = 16;
			} else {
				base = 8;
			}
		}
	} else if (base == 16 && cp[0] == '0' && toupper(cp[1]) == 'X') {
		/* Explicit hex base: skip an optional 0x/0X prefix. */
		cp += 2;
	}

	/* Accumulate digits until a non-digit or an out-of-base digit. */
	while (isxdigit(*cp)) {
		unsigned int digit;

		if (isdigit(*cp))
			digit = *cp - '0';
		else
			digit = (islower(*cp) ? toupper(*cp) : *cp) - 'A' + 10;
		if (digit >= base)
			break;
		acc = acc * base + digit;
		cp++;
	}

	if (endp)
		*endp = (char *)cp;
	return acc;
}
/* _VMKLNX_CODECHECK_: mempool_free */
/**
 * mempool_free - return an element to the pool
 * @element: element to give back
 * @pool: pool it came from
 *
 * If the pool is below its minimum reserve the element refills the
 * reserve and any waiter is woken; otherwise it is handed to the
 * pool's free callback.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	smp_mb();
	/* Cheap unlocked peek first; recheck under the lock before refilling. */
	if (pool->curr_nr < pool->min_nr) {
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			/* A mempool_alloc caller may be blocked on the reserve. */
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
#if defined(__VMKLNX__)
        VMKAPI_MODULE_CALL_VOID(pool->module_id, pool->free,
                                element, pool->pool_data);
#else /* !defined(__VMKLNX__) */
        pool->free(element, pool->pool_data);   
#endif /* defined(__VMKLNX__) */
}
/**
 * klist_iter_init - begin iterating a klist from its head
 * @k: list to iterate
 * @i: iterator to set up
 */
void klist_iter_init(struct klist * k, struct klist_iter * i)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	/* A NULL starting node means "start at the head of the list". */
	klist_iter_init_node(k, i, NULL);
}
/**
 * input_ff_event() - generic handler for force-feedback events
 * @dev: input device to send the effect to
 * @type: event type (anything but EV_FF is ignored)
 * @code: event code
 * @value: event value
 *
 * Always returns 0; unsupported codes and out-of-range values are
 * silently dropped.
 */
int input_ff_event(struct input_dev *dev, unsigned int type,
		   unsigned int code, int value)
{
	struct ff_device *ff = dev->ff;

#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	if (type != EV_FF)
		return 0;

	if (code == FF_GAIN) {
		/* Gain is a 16-bit value and must be supported by the device. */
		if (test_bit(FF_GAIN, dev->ffbit) && value <= 0xffff)
			ff->set_gain(dev, value);
	} else if (code == FF_AUTOCENTER) {
		if (test_bit(FF_AUTOCENTER, dev->ffbit) && value <= 0xffff)
			ff->set_autocenter(dev, value);
	} else {
		/* Anything else is treated as an effect-playback request. */
		if (check_effect_access(ff, code, NULL) == 0)
			ff->playback(dev, code, value);
	}

	return 0;
}
/**
 * klist_remove - take a node off its klist and wait until it is gone
 * @n: node to remove
 *
 * Blocks until the node's removal completion fires.
 */
void klist_remove(struct klist_node * n)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	klist_del(n);
	wait_for_completion(&n->n_removed);
}
/**
 * klist_add_tail - initialize a node and append it to a klist
 * @n: node to add
 * @k: list to append it to
 */
void klist_add_tail(struct klist_node * n, struct klist * k)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	klist_node_init(k, n);
	add_tail(k, n);
}
/* _VMKLNX_CODECHECK_: mempool_create */
/**
 * mempool_create - create a memory pool with no NUMA node preference
 * @min_nr: minimum number of elements kept in reserve
 * @alloc_fn: element allocation callback
 * @free_fn: element free callback
 * @pool_data: opaque argument passed to both callbacks
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	/* -1 selects "any NUMA node". */
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, -1);
}
/*
 * mempool_kfree - mempool free callback backed by kfree.
 * @element: element being released
 * @pool_data: unused by this callback
 *
 * On vmklinux the element is returned to the heap of the module at the
 * top of the module call stack (never vmkernel or vmklinux itself).
 */
void mempool_kfree(void *element, void *pool_data)
{
#if defined(__VMKLNX__)
        vmk_ModuleID moduleID;
        vmk_HeapID heapID;

	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
        /* Free from the calling module's heap, not vmklinux's. */
        moduleID = vmk_ModuleStackTop();
        VMK_ASSERT(moduleID != VMK_VMKERNEL_MODULE_ID);
        VMK_ASSERT(moduleID != vmklinuxModID);
        heapID = vmk_ModuleGetHeapID(moduleID);
        VMK_ASSERT(heapID != VMK_INVALID_HEAP_ID);
	vmklnx_kfree(heapID, element);
#else /* !defined(__VMKLNX__) */
	kfree(element);
#endif /* defined(__VMKLNX__) */
}
/*
 * mempool_kzalloc - mempool alloc callback backed by kzalloc.
 * @gfp_mask: allocation flags
 * @pool_data: element size, smuggled in as a pointer-sized integer
 *
 * On vmklinux the allocation comes from the heap of the module at the
 * top of the module call stack.
 */
void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t) pool_data;
#if defined(__VMKLNX__)
        vmk_ModuleID moduleID;
        vmk_HeapID heapID;

	/* Preemption check, consistent with mempool_kfree and the rest
	 * of the vmklinux entry points in this file (was missing here). */
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
        moduleID = vmk_ModuleStackTop();
        VMK_ASSERT(moduleID != VMK_VMKERNEL_MODULE_ID);
        VMK_ASSERT(moduleID != vmklinuxModID);
        heapID = vmk_ModuleGetHeapID(moduleID);
        VMK_ASSERT(heapID != VMK_INVALID_HEAP_ID);
	return vmklnx_kzmalloc(heapID, size, gfp_mask);
#else /* !defined(__VMKLNX__) */
	return kzalloc(size, gfp_mask);
#endif /* defined(__VMKLNX__) */
}
/* Standard mempool free callback backed by a kmem_cache. */
void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *cache = pool_data;

#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	kmem_cache_free(cache, element);
}
/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *cache = pool_data;

#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	return kmem_cache_alloc(cache, gfp_mask);
}
/*
 *----------------------------------------------------------------------
 *
 * LinuxUSB_Init
 *
 *      This is the init entry point for USB. Called from vmklinux
 *      init from linux_stubs.c
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      Opens the USB stress options; alerts if any open fails.
 *
 *----------------------------------------------------------------------
 */
void
LinuxUSB_Init(void)
{
    VMK_ReturnStatus status;
    VMK_ReturnStatus firstError = VMK_OK;

    /*
     * Track the first failure so the alert below fires when ANY of the
     * opens fails.  Previously only the final status was tested, so
     * earlier failures were silently ignored in release builds.
     */
    status = vmk_StressOptionOpen(VMK_STRESS_OPT_USB_BULK_DELAY_PROCESS_URB,
                                  &stressUSBBulkDelayProcessURB);
    VMK_ASSERT(status == VMK_OK);
    if (status != VMK_OK && firstError == VMK_OK) {
        firstError = status;
    }
    status = vmk_StressOptionOpen(VMK_STRESS_OPT_USB_BULK_URB_FAKE_TRANSIENT_ERROR,
                                  &stressUSBBulkURBFakeTransientError);
    VMK_ASSERT(status == VMK_OK);
    if (status != VMK_OK && firstError == VMK_OK) {
        firstError = status;
    }
    status = vmk_StressOptionOpen(VMK_STRESS_OPT_USB_DELAY_PROCESS_TD,
                                  &stressUSBDelayProcessTD);
    VMK_ASSERT(status == VMK_OK);
    if (status != VMK_OK && firstError == VMK_OK) {
        firstError = status;
    }
    status = vmk_StressOptionOpen(VMK_STRESS_OPT_USB_FAIL_GP_HEAP_ALLOC,
                                  &stressUSBFailGPHeapAlloc);
    VMK_ASSERT(status == VMK_OK);
    if (status != VMK_OK && firstError == VMK_OK) {
        firstError = status;
    }
    status = vmk_StressOptionOpen(VMK_STRESS_OPT_USB_STORAGE_DELAY_SCSI_DATA_PHASE,
                                  &stressUSBStorageDelaySCSIDataPhase);
    VMK_ASSERT(status == VMK_OK);
    if (status != VMK_OK && firstError == VMK_OK) {
        firstError = status;
    }
    status = vmk_StressOptionOpen(VMK_STRESS_OPT_USB_STORAGE_DELAY_SCSI_TRANSFER,
                                  &stressUSBStorageDelaySCSITransfer);
    VMK_ASSERT(status == VMK_OK);
    if (status != VMK_OK && firstError == VMK_OK) {
        firstError = status;
    }

    if (firstError != VMK_OK) {
        vmk_AlertMessage("%s - Failed to initialize USB common layer",
                         __FUNCTION__);
        VMK_ASSERT(firstError == VMK_OK);
    }

    return;
}
/* _VMKLNX_CODECHECK_: mempool_destroy */
/**
 * mempool_destroy - tear down a memory pool
 * @pool: pool to destroy
 *
 * Every element handed out by the pool must have been returned first.
 */
void mempool_destroy(mempool_t *pool)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	/* All elements must be back in the reserve before teardown. */
	BUG_ON(pool->curr_nr != pool->min_nr);
	free_pool(pool);
}
/* Example #19 (score: 0) */
/**
 * Unregister driver.
 *
 * This will update the module's global resource data.
 */
void
NvmeDriver_Unregister(void)
{
   DPRINT_TEMP("enter.");

   VMK_ASSERT(NVME_DRIVER_RES_DRIVER_HANDLE != VMK_DRIVER_NONE);
   if (NVME_DRIVER_RES_DRIVER_HANDLE == VMK_DRIVER_NONE) {
      /* Guard for release builds (assert compiles out), matching the
       * LogHandleDestroy/HeapDestroy pattern: never pass an invalid
       * handle to vmk_DriverUnregister. */
      return;
   }

   vmk_DriverUnregister(NVME_DRIVER_RES_DRIVER_HANDLE);
   NVME_DRIVER_RES_DRIVER_HANDLE = VMK_DRIVER_NONE;
}
/**
 * klist_iter_exit - finish an iteration early
 * @i: iterator to tear down
 *
 * Drops the reference held on the iterator's current node, if any.
 */
void klist_iter_exit(struct klist_iter * i)
{
	struct klist_node *cur = i->i_cur;

#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	if (cur) {
		klist_del(cur);
		i->i_cur = NULL;
	}
}
void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	i->i_klist = k;
	i->i_head = &k->k_list;
	i->i_cur = n;
	if (n)
		kref_get(&n->n_ref);
}
/* _VMKLNX_CODECHECK_: put_driver */
/**
 * put_driver - drop a reference on a device driver
 * @drv: driver to release (the vmklinux branch tolerates NULL)
 *
 * NOTE(review): the non-__VMKLNX__ branch dereferences @drv without a
 * NULL check (upstream Linux behavior) — confirm callers never pass
 * NULL on that path.
 */
void put_driver(struct device_driver * drv)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	if (drv) {
		kref_put(&drv->kref, device_driver_release);
	}
#else /* !defined(__VMKLNX__) */
	kobject_put(&drv->kobj);
#endif /* defined(__VMKLNX__) */
}
/* Example #23 (score: 0) — file: nvme_module.c, project: vmware/nvme */
/**
 * Destroy log handle
 *
 * This will update the module's global resource data.
 */
static void
LogHandleDestroy(void)
{
   VMK_ASSERT(NVME_DRIVER_RES_LOG_HANDLE != VMK_INVALID_LOG_HANDLE);
   if (NVME_DRIVER_RES_LOG_HANDLE == VMK_INVALID_LOG_HANDLE) {
      /* Nothing to destroy (release-build guard; the assert is no-op). */
      return;
   }

   vmk_LogUnregister(NVME_DRIVER_RES_LOG_HANDLE);
   NVME_DRIVER_RES_LOG_HANDLE = VMK_INVALID_LOG_HANDLE;
}
/* Example #24 (score: 0) — file: nvme_module.c, project: vmware/nvme */
/**
 * Disassociate module default heap from the module and destroy the heap.
 *
 * This will update the module's global resource data.
 */
static void
HeapDestroy(void)
{
   VMK_ASSERT(NVME_DRIVER_RES_HEAP_ID != VMK_INVALID_HEAP_ID);
   if (NVME_DRIVER_RES_HEAP_ID == VMK_INVALID_HEAP_ID) {
      /* Heap was never created (release-build guard). */
      return;
   }

   /* Detach the heap from the module before tearing it down. */
   vmk_ModuleSetHeapID(vmk_ModuleCurrentID, VMK_INVALID_HEAP_ID);
   vmk_HeapDestroy(NVME_DRIVER_RES_HEAP_ID);
   NVME_DRIVER_RES_HEAP_ID = VMK_INVALID_HEAP_ID;
}
/* _VMKLNX_CODECHECK_: driver_register */
/**
 * driver_register - register a driver with its bus
 * @drv: driver to register
 *
 * Returns the result of bus_add_driver().
 */
int driver_register(struct device_driver * drv)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	/* Drivers should rely on bus_type methods, not duplicate them. */
	if ((drv->bus->probe && drv->probe)
	    || (drv->bus->remove && drv->remove)
	    || (drv->bus->shutdown && drv->shutdown)) {
		printk(KERN_WARNING "Driver '%s' needs updating - please use bus_type methods\n", drv->name);
	}
	klist_init(&drv->klist_devices, NULL, NULL);
	return bus_add_driver(drv);
}
/* _VMKLNX_CODECHECK_: get_driver */
/**
 * get_driver - take a reference on a device driver
 * @drv: driver to reference, or NULL
 *
 * Returns @drv with its reference count raised, or NULL when @drv is
 * NULL.
 */
struct device_driver * get_driver(struct device_driver * drv)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
	if (drv) {
		kref_get(&drv->kref);
	}

	return drv;
#else /* !defined(__VMKLNX__) */
	return drv ? to_drv(kobject_get(&drv->kobj)) : NULL;
#endif /* defined(__VMKLNX__) */
}
/**
 * klist_init - initialize a klist
 * @k: list to initialize
 * @get: callback run when a node reference is taken (may be NULL)
 * @put: callback run when a node reference is dropped (may be NULL)
 */
void klist_init(struct klist * k, void (*get)(struct klist_node *),
		void (*put)(struct klist_node *))
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	k->get = get;
	k->put = put;
	INIT_LIST_HEAD(&k->k_list);
	spin_lock_init(&k->k_lock);
#if defined(__VMKLNX__)
	/* Remember which module created (and thus owns) this list. */
        k->module_id = vmk_ModuleStackTop();
#endif
}
/**
 * input_ff_free() - frees force feedback portion of input device
 * @dev: input device supporting force feedback
 *
 * This function is only needed in error path as input core will
 * automatically free force feedback structures when device is
 * destroyed.
 */
void input_ff_destroy(struct input_dev *dev)
{
#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	clear_bit(EV_FF, dev->evbit);
	if (dev->ff) {
		if (dev->ff->destroy)
			dev->ff->destroy(dev->ff);
		kfree(dev->ff->private);
		kfree(dev->ff);
		dev->ff = NULL;
	}
}
/**
 * klist_del - remove a node from its klist
 * @n: node to remove
 *
 * Drops the node's reference under the list lock; if that drop
 * actually deleted the node, the klist's put() callback runs after
 * the lock is released.
 */
void klist_del(struct klist_node * n)
{
	struct klist * k = n->n_klist;
	void (*put)(struct klist_node *) = k->put;

#if defined(__VMKLNX__)
	VMK_ASSERT(vmk_PreemptionIsEnabled() == VMK_FALSE);
#endif
	spin_lock(&k->k_lock);
	/* Only invoke put() if this drop deleted the node. */
	if (!klist_dec_and_del(n))
		put = NULL;
	spin_unlock(&k->k_lock);
	/* Run the callback outside the spinlock. */
	if (put)
		put(n);
}
/* Example #30 (score: 0) */
/**
 * Setup INTx mode interrupt handler
 *
 * @param [in] ctrlr controller instance
 *
 * @return VMK_OK on success; on failure every resource acquired here
 *         (intr cookie array, PCI intr cookie) is released before the
 *         error is returned.
 */
static VMK_ReturnStatus
IntxSetup(struct NvmeCtrlr *ctrlr)
{
   VMK_ReturnStatus vmkStatus;
   vmk_uint32 numAllocated;

   ctrlr->ctrlOsResources.intrArray = Nvme_Alloc(sizeof(vmk_IntrCookie), 0, NVME_ALLOC_ZEROED);
   if (ctrlr->ctrlOsResources.intrArray == NULL) {
      return VMK_NO_MEMORY;
   }

   vmkStatus = vmk_PCIAllocIntrCookie(vmk_ModuleCurrentID,
      ctrlr->ctrlOsResources.pciDevice, VMK_PCI_INTERRUPT_TYPE_LEGACY,
      1, 1, NULL, ctrlr->ctrlOsResources.intrArray, &numAllocated);
   if (vmkStatus != VMK_OK) {
      EPRINT("unable to allocate intr cookie, 0x%x.", vmkStatus);
      /* Previously leaked intrArray on this path. */
      goto free_array;
   }

   /* should have just 1 intr cookie allocated for intx */
   VMK_ASSERT(numAllocated == 1);

   ctrlr->ctrlOsResources.msixEnabled = 0;
   ctrlr->numIoQueues = 1;
   ctrlr->ctrlOsResources.numVectors = 1; /* 1 intx for both admin and io */

   /* for intx mode, we should register intr handler here rather than
    * at individual queue creation time.
    */
   vmkStatus = OsLib_IntrRegister(ctrlr->ctrlOsResources.device, ctrlr->ctrlOsResources.intrArray[0],
      ctrlr, /* for intx handler, the data is the controller itself */
      0, /* use default id 0 */
      NvmeCtrlr_IntxAck, NvmeCtrlr_IntxHandler);
   if (vmkStatus != VMK_OK) {
      EPRINT("unable to register intr handler, 0x%x.", vmkStatus);
      goto free_intr;
   }

   return VMK_OK;

free_intr:
   vmk_PCIFreeIntrCookie(vmk_ModuleCurrentID, ctrlr->ctrlOsResources.pciDevice);
   ctrlr->numIoQueues = 0;
   ctrlr->ctrlOsResources.numVectors = 0;

free_array:
   /* Nvme_Free pairs Nvme_Alloc; previously intrArray was leaked on
    * both failure paths. */
   Nvme_Free(ctrlr->ctrlOsResources.intrArray);
   ctrlr->ctrlOsResources.intrArray = NULL;

   return vmkStatus;
}