Example #1
/*
 * Closes the specified connection.
 *
 * Upon return, the connection has been destroyed and cannot be used anymore.
 *
 * This function does nothing if connection is set to NULL.
 */
void tf_close(struct tf_connection *connection)
{
	int error;
	enum TF_CONN_STATE state;

	dprintk(KERN_DEBUG "tf_close(%p)\n", connection);

	if (connection == NULL)
		return;

	/*
	 * Assumption: Linux guarantees that no other operation is in progress
	 * and that no other operation will be started when close is called
	 */
	BUG_ON(atomic_read(&(connection->pending_op_count)) != 0);

	/*
	 * Exchange a Destroy Device Context message if needed.
	 */
	spin_lock(&(connection->state_lock));
	state = connection->state;
	spin_unlock(&(connection->state_lock));
	if (state == TF_CONN_STATE_VALID_DEVICE_CONTEXT) {
		/*
		 * A DestroyDeviceContext operation was not performed. Do it
		 * now.
		 */
		error = tf_destroy_device_context(connection);
		if (error != 0)
			/* avoid cleanup if destroy device context fails */
			goto error;
	}

	/*
	 * Clean up the shared memory
	 */
	tf_cleanup_shared_memories(connection);

#ifdef CONFIG_TF_ION
	if (connection->ion_client != NULL)
		ion_client_destroy(connection->ion_client);
#endif

	spin_lock(&(connection->dev->connection_list_lock));
	list_del(&(connection->list));
	spin_unlock(&(connection->dev->connection_list_lock));

	internal_kfree(connection);

	return;

error:
	dprintk(KERN_DEBUG "tf_close(%p) failed with error code %d\n",
		connection, error);
}
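For context, here is a minimal sketch (not taken from the original driver) of how tf_close() is typically reached: the character device's .release handler fetches the connection stored in file->private_data and closes it. The tf_device_release name and the use of private_data are assumptions made for illustration.

static int tf_device_release(struct inode *inode, struct file *file)
{
	/* hypothetical release handler, assuming the connection was
	 * stored in file->private_data by the open handler */
	struct tf_connection *connection = file->private_data;

	/* tf_close() tolerates a NULL connection, so no check is needed */
	tf_close(connection);
	file->private_data = NULL;

	return 0;
}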
Example #2
/**
 * Unmaps a shared memory block.
 **/
static void SCXLNXConnUnmapShmem(
    struct SCXLNX_CONNECTION *pConn,
    struct SCXLNX_SHMEM_DESC *pShmemDesc,
    u32 nFullCleanup)
{
    /* check pShmemDesc contains a descriptor */
    if (pShmemDesc == NULL)
        return;

    dprintk(KERN_DEBUG "SCXLNXConnUnmapShmem(%p)\n", pShmemDesc);

retry:
    mutex_lock(&(pConn->sharedMemoriesMutex));
    if (atomic_read(&pShmemDesc->nRefCnt) > 1) {
        /*
         * Shared memory still in use; wait for other operations to
         * complete before actually unmapping it.
         */
        dprintk(KERN_INFO "Descriptor in use\n");
        mutex_unlock(&(pConn->sharedMemoriesMutex));
        schedule();
        goto retry;
    }

    SCXLNXCommReleaseSharedMemory(
        &(pConn->sAllocationContext),
        pShmemDesc,
        nFullCleanup);

    list_del(&(pShmemDesc->list));

    if ((pShmemDesc->nType == SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM) ||
            (nFullCleanup != 0)) {
        internal_kfree(pShmemDesc);

        atomic_dec(&(pConn->nShmemAllocated));
    } else {
        /*
         * This is a preallocated shared memory block; add it to the
         * free list. Since the device context is unmapped last, it is
         * always the first element of the free list if no device
         * context has been created.
         */
        pShmemDesc->hIdentifier = 0;
        list_add(&(pShmemDesc->list), &(pConn->sFreeSharedMemoryList));
    }

    mutex_unlock(&(pConn->sharedMemoriesMutex));
}
Example #3
/**
 * Unmaps a shared memory block.
 **/
void tf_unmap_shmem(
		struct tf_connection *connection,
		struct tf_shmem_desc *shmem_desc,
		u32 full_cleanup)
{
	/* check shmem_desc contains a descriptor */
	if (shmem_desc == NULL)
		return;

	dprintk(KERN_DEBUG "tf_unmap_shmem(%p)\n", shmem_desc);

retry:
	mutex_lock(&(connection->shmem_mutex));
	if (atomic_read(&shmem_desc->ref_count) > 1) {
		/*
		 * Shared memory still in use; wait for other operations to
		 * complete before actually unmapping it.
		 */
		dprintk(KERN_INFO "Descriptor in use\n");
		mutex_unlock(&(connection->shmem_mutex));
		schedule();
		goto retry;
	}

	tf_cleanup_shared_memory(
			&(connection->cpt_alloc_context),
			shmem_desc,
			full_cleanup);

	list_del(&(shmem_desc->list));

	if ((shmem_desc->type == TF_SHMEM_TYPE_REGISTERED_SHMEM) ||
			(full_cleanup != 0)) {
		internal_kfree(shmem_desc);

		atomic_dec(&(connection->shmem_count));
	} else {
		/*
		 * This is a preallocated shared memory block; add it to the
		 * free list. Since the device context is unmapped last, it is
		 * always the first element of the free list if no device
		 * context has been created.
		 */
		shmem_desc->block_identifier = 0;
		list_add(&(shmem_desc->list), &(connection->free_shmem_list));
	}

	mutex_unlock(&(connection->shmem_mutex));
}
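As a hedged illustration of the reference-counting protocol the retry loop above relies on, the helpers below (hypothetical names, not part of the original source) show how a caller would pin a descriptor while operating on it, so that tf_unmap_shmem() only proceeds once ref_count drops back to 1.

/* Hypothetical helpers sketching the assumed ref-count protocol */
static inline void tf_shmem_desc_get(struct tf_shmem_desc *shmem_desc)
{
	/* taken by a caller while an operation uses the descriptor */
	atomic_inc(&shmem_desc->ref_count);
}

static inline void tf_shmem_desc_put(struct tf_shmem_desc *shmem_desc)
{
	/* dropped when the operation completes; lets the retry loop
	 * in tf_unmap_shmem() make progress */
	atomic_dec(&shmem_desc->ref_count);
}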
Example #4
/*
 * Closes the specified connection.
 *
 * Upon return, the connection referenced by pConn has been destroyed and cannot
 * be used anymore.
 *
 * This function does nothing if pConn is set to NULL.
 */
void SCXLNXConnClose(struct SCXLNX_CONNECTION *pConn)
{
    int nError;
    enum SCXLNX_CONN_STATE nState;

    dprintk(KERN_DEBUG "SCXLNXConnClose(%p)\n", pConn);

    if (pConn == NULL)
        return;

    /*
     * Assumption: Linux guarantees that no other operation is in progress
     * and that no other operation will be started when close is called
     */
    BUG_ON(atomic_read(&(pConn->nPendingOpCounter)) != 0);

    /*
     * Exchange a Destroy Device Context message if needed.
     */
    spin_lock(&(pConn->stateLock));
    nState = pConn->nState;
    spin_unlock(&(pConn->stateLock));
    if (nState == SCXLNX_CONN_STATE_VALID_DEVICE_CONTEXT) {
        /*
         * A DestroyDeviceContext operation was not performed. Do it
         * now.
         */
        nError = SCXLNXConnDestroyDeviceContext(pConn);
        if (nError != 0)
            /* avoid cleanup if destroy device context fails */
            goto error;
    }

    /*
     * Clean up the shared memory
     */
    SCXLNXConnCleanupSharedMemory(pConn);

    internal_kfree(pConn);

    return;

error:
    dprintk(KERN_DEBUG "SCXLNXConnClose(%p) failed with error code %d\n",
            pConn, nError);
}
Example #5
/**
 * Unmaps a shared memory descriptor.
 **/
static void SCXLNXConnUnmapShmem(SCXLNX_SHMEM_MONITOR *pShmemMonitor,
				 SCXLNX_SHMEM_DESC *pShmemDesc,
				 u32 nFullCleanup)
{
	/* check pShmemDesc contains a descriptor */
	if (pShmemDesc == NULL)
		return;

	dprintk(KERN_INFO "SCXLNXConnUnmapShmem(%p) : "
		"descriptor %p, refcnt=%d\n", pShmemMonitor,
		pShmemDesc, atomic_read(&(pShmemDesc->nRefCnt)));

wait_for_unused:
	down(&(pShmemMonitor->sharedMemoriesMutex));
	if (atomic_read(&(pShmemDesc->nRefCnt)) > 1) {
		dprintk(KERN_INFO "Descriptor in use\n");
		up(&(pShmemMonitor->sharedMemoriesMutex));
		schedule();
		goto wait_for_unused;
	}

	SCXLNXSMCommReleaseDescriptor(pShmemDesc, 1);

	list_del(&(pShmemDesc->list));

	if ((pShmemDesc->nType == SCXLNX_SHMEM_TYPE_REGISTERED_SHMEM)
		 || (nFullCleanup != 0)) {
		/* free descriptor */
		internal_kfree(pShmemDesc);

		atomic_dec(&(pShmemMonitor->nShmemAllocated));
	} else {
		/*
		 * This is a preallocated descriptor; add it to the free
		 * list. Since the device context is unmapped last, it is
		 * always the first element of the free list if no device
		 * context has been created.
		 */
		pShmemDesc->hIdentifier = 0;
		list_add(&(pShmemDesc->list),
			 &(pShmemMonitor->sFreeSharedMemoryList));
	}

	up(&(pShmemMonitor->sharedMemoriesMutex));
}
Example #6
/*
 * Opens a connection to the specified device.
 *
 * The placeholder referenced by ppConn is set to the address of the
 * new connection; it is set to NULL upon failure.
 *
 * Returns zero upon successful completion, or an appropriate error code upon
 * failure.
 */
int SCXLNXConnOpen(struct SCXLNX_DEVICE *pDevice,
                   struct file *file,
                   struct SCXLNX_CONNECTION **ppConn)
{
    int nError;
    struct SCXLNX_CONNECTION *pConn = NULL;

    dprintk(KERN_INFO "SCXLNXConnOpen(%p, %p)\n", file, ppConn);

    /*
     * Allocate and initialize the connection.
     * kmalloc only allocates sizeof(*pConn) virtual memory
     */
    pConn = (struct SCXLNX_CONNECTION *) internal_kmalloc(sizeof(*pConn),
            GFP_KERNEL);
    if (pConn == NULL) {
        printk(KERN_ERR "SCXLNXConnOpen(): "
               "Out of memory for connection!\n");
        nError = -ENOMEM;
        goto error;
    }

    memset(pConn, 0, sizeof(*pConn));

    INIT_LIST_HEAD(&(pConn->list));
    pConn->nState = SCXLNX_CONN_STATE_NO_DEVICE_CONTEXT;
    pConn->pDevice = pDevice;
    spin_lock_init(&(pConn->stateLock));
    atomic_set(&(pConn->nPendingOpCounter), 0);

    /*
     * Initialize the shared memory
     */
    nError = SCXLNXConnInitSharedMemory(pConn);
    if (nError != 0)
        goto error;

#ifdef CONFIG_TF_MSHIELD
    /*
     * Initialize CUS specifics
     */
    SCXPublicCryptoInitDeviceContext(pConn);
#endif

    /*
     * Successful completion.
     */

    *ppConn = pConn;

    dprintk(KERN_INFO "SCXLNXConnOpen(): Success (pConn=%p)\n", pConn);
    return 0;

    /*
     * Error handling.
     */

error:
    dprintk(KERN_ERR "SCXLNXConnOpen(): Failure (error %d)\n", nError);
    /* Deallocate the descriptor pages if necessary */
    internal_kfree(pConn);
    *ppConn = NULL;
    return nError;
}
Example #7
/*
 * Opens a connection to the specified device.
 *
 * The placeholder referenced by connection is set to the address of the
 * new connection; it is set to NULL upon failure.
 *
 * Returns zero upon successful completion, or an appropriate error code upon
 * failure.
 */
int tf_open(struct tf_device *dev,
	struct file *file,
	struct tf_connection **connection)
{
	int error;
	struct tf_connection *conn = NULL;

	dprintk(KERN_INFO "tf_open(%p, %p)\n", file, connection);

	/*
	 * Allocate and initialize the conn.
	 * kmalloc only allocates sizeof(*conn) virtual memory
	 */
	conn = (struct tf_connection *) internal_kmalloc(sizeof(*conn),
		GFP_KERNEL);
	if (conn == NULL) {
		printk(KERN_ERR "tf_open(): "
			"Out of memory for conn!\n");
		error = -ENOMEM;
		goto error;
	}

	memset(conn, 0, sizeof(*conn));

	conn->state = TF_CONN_STATE_NO_DEVICE_CONTEXT;
	conn->dev = dev;
	spin_lock_init(&(conn->state_lock));
	atomic_set(&(conn->pending_op_count), 0);
	INIT_LIST_HEAD(&(conn->list));

	/*
	 * Initialize the shared memory
	 */
	error = tf_init_shared_memory(conn);
	if (error != 0)
		goto error;

#ifdef CONFIG_TF_ZEBRA
	/*
	 * Initialize CUS specifics
	 */
	tf_crypto_init_cus(conn);
#endif

	/*
	 * Attach the conn to the device.
	 */
	spin_lock(&(dev->connection_list_lock));
	list_add(&(conn->list), &(dev->connection_list));
	spin_unlock(&(dev->connection_list_lock));

	/*
	 * Successful completion.
	 */

	*connection = conn;

	dprintk(KERN_INFO "tf_open(): Success (conn=%p)\n", conn);
	return 0;

	/*
	 * Error handling.
	 */

error:
	dprintk(KERN_ERR "tf_open(): Failure (error %d)\n", error);
	/* Deallocate the descriptor pages if necessary */
	internal_kfree(conn);
	*connection = NULL;
	return error;
}
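To show how tf_open() and tf_close() fit together, here is a minimal sketch of a matching .open file operation. The tf_device_open name, the assumption that struct tf_device embeds a struct cdev named cdev, and the use of file->private_data are illustrative assumptions, not part of the original source.

static int tf_device_open(struct inode *inode, struct file *file)
{
	int error;
	struct tf_connection *connection;
	/* assumes struct tf_device embeds a struct cdev named cdev */
	struct tf_device *dev =
		container_of(inode->i_cdev, struct tf_device, cdev);

	error = tf_open(dev, file, &connection);
	if (error != 0)
		return error;

	/* tf_close() can later retrieve the connection from here */
	file->private_data = connection;
	return 0;
}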