Example #1
NTSTATUS smbd_smb2_request_check_tcon(struct smbd_smb2_request *req)
{
	const uint8_t *inhdr;
	int i = req->current_idx;
	uint32_t in_flags;
	uint32_t in_tid;
	void *p;
	struct smbd_smb2_tcon *tcon;

	req->tcon = NULL;

	inhdr = (const uint8_t *)req->in.vector[i+0].iov_base;

	in_flags = IVAL(inhdr, SMB2_HDR_FLAGS);
	in_tid = IVAL(inhdr, SMB2_HDR_TID);

	if (in_flags & SMB2_HDR_FLAG_CHAINED) {
		in_tid = req->last_tid;
	}

	req->last_tid = UINT32_MAX;

	/* lookup an existing tree connect */
	p = idr_find(req->session->tcons.idtree, in_tid);
	if (p == NULL) {
		return NT_STATUS_NETWORK_NAME_DELETED;
	}
	tcon = talloc_get_type_abort(p, struct smbd_smb2_tcon);

	if (!change_to_user(tcon->compat_conn,req->session->vuid)) {
		return NT_STATUS_ACCESS_DENIED;
	}

	/* should we pass FLAG_CASELESS_PATHNAMES here? */
	if (!set_current_service(tcon->compat_conn, 0, true)) {
		return NT_STATUS_ACCESS_DENIED;
	}

	req->tcon = tcon;
	req->last_tid = in_tid;

	return NT_STATUS_OK;
}
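Every example on this page pairs idr_find() with some scheme for keeping the looked-up object alive. As a baseline, here is a minimal sketch of the full IDR lifecycle using the Linux kernel API (the Samba snippets use Samba's own idr/idtree, which mirrors it); my_idr, my_lock and my_object are illustrative names, not taken from any of the code below.

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);		/* illustrative: maps int ids to objects */
static DEFINE_SPINLOCK(my_lock);	/* illustrative: serializes alloc/find/remove */

struct my_object { int id; };

static int my_object_publish(struct my_object *obj)
{
	int id;

	/* Preload outside the lock, then allocate an unused id (end=0 means
	 * no upper bound) and map it to obj while holding the lock. */
	idr_preload(GFP_KERNEL);
	spin_lock(&my_lock);
	id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	if (id < 0)
		return id;
	obj->id = id;
	return 0;
}

static struct my_object *my_object_find(int id)
{
	struct my_object *obj;

	/* idr_find() takes no locks itself; callers must serialize against
	 * idr_remove(), which is what every example below does in its own way. */
	spin_lock(&my_lock);
	obj = idr_find(&my_idr, id);
	spin_unlock(&my_lock);
	return obj;
}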
Example #2
ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *uobj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&ib_uverbs_idr_mutex);

	qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
	if (!qp || qp->uobject->context != file->ucontext)
		goto out;

	uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &uobj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret) {
		uobj = container_of(qp->uobject, struct ib_uqp_object,
				    uevent.uobject);
		list_add_tail(&mcast->list, &uobj->mcast_list);
	} else
		kfree(mcast);

out:
	mutex_unlock(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
Example #3
static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
	int ret;

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		ret = cxl_irq(irq, ctx);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
	return IRQ_HANDLED;
}
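The rcu_read_lock() here is only safe if the teardown path unpublishes the id before freeing the context and waits out concurrent readers. A sketch of what that removal side must look like, with illustrative names rather than the driver's actual teardown code:

/* Sketch of the removal side the RCU lookup above relies on: unpublish
 * the id first, then free only after a grace period so a concurrent
 * idr_find() under rcu_read_lock() never touches freed memory. */
static void my_context_release(struct cxl_afu *afu, struct cxl_context *ctx,
			       int ph)
{
	idr_remove(&afu->contexts_idr, ph);	/* no new lookups can find ctx */
	synchronize_rcu();			/* wait for readers in the IRQ path */
	kfree(ctx);
}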
Example #4
/**
 * drm_minor_acquire - Acquire a DRM minor
 * @minor_id: Minor ID of the DRM-minor
 *
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased, so you must release this
 * object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 *
 * Returns:
 * Pointer to minor-object with increased device-refcount, or PTR_ERR on
 * failure.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_ref(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_device_is_unplugged(minor->dev)) {
		drm_dev_unref(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}
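A sketch of the calling pattern the kerneldoc above prescribes, pairing the acquire with drm_minor_release(); the surrounding function is illustrative:

/* Illustrative caller: hold the minor only for the duration of use. */
static int use_minor(unsigned int minor_id)
{
	struct drm_minor *minor;

	minor = drm_minor_acquire(minor_id);
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	/* minor and minor->dev are guaranteed valid here, though the
	 * device may be unplugged concurrently. */

	drm_minor_release(minor);
	return 0;
}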
Example #5
void *dm_get_mdptr(dev_t dev)
{
	struct mapped_device *md;
	void *mdptr = NULL;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	down(&_minor_lock);

	md = idr_find(&_minor_idr, minor);

	if (md && (dm_disk(md)->first_minor == minor))
		mdptr = md->interface_ptr;

	up(&_minor_lock);

	return mdptr;
}
Example #6
struct drm_mode_object *__drm_mode_object_find(struct drm_device *dev,
					       uint32_t id, uint32_t type)
{
	struct drm_mode_object *obj = NULL;

	mutex_lock(&dev->mode_config.idr_mutex);
	obj = idr_find(&dev->mode_config.crtc_idr, id);
	if (obj && type != DRM_MODE_OBJECT_ANY && obj->type != type)
		obj = NULL;
	if (obj && obj->id != id)
		obj = NULL;

	if (obj && obj->free_cb) {
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	mutex_unlock(&dev->mode_config.idr_mutex);

	return obj;
}
Example #7
/**
 * Open file.
 *
 * \param inode device inode
 * \param filp file pointer.
 * \return zero on success or a negative number on failure.
 *
 * Searches the DRM device with the same minor number, calls open_helper(), and
 * increments the device open count. If the open count was previously zero,
 * i.e., this is the first time the device is opened, it then calls setup().
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev = NULL;
	int minor_id = iminor(inode);
	struct drm_minor *minor;
	int retcode = 0;

	minor = idr_find(&drm_minors_idr, minor_id);
	if (!minor)
		return -ENODEV;

	if (!(dev = minor->dev))
		return -ENODEV;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	retcode = drm_open_helper(inode, filp, dev);
	if (!retcode) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
		if (!dev->open_count++)
			retcode = drm_setup(dev);
	}

	return retcode;
}
Example #8
/*
 * free_ipcs - free all ipcs of one type
 * @ns:   the namespace to remove the ipcs from
 * @ids:  the table of ipcs to free
 * @free: the function called to free each individual ipc
 *
 * Called for each kind of ipc when an ipc_namespace exits.
 */
void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids,
	       void (*free)(struct ipc_namespace *, struct kern_ipc_perm *))
{
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&ids->rw_mutex);

	in_use = ids->in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&ids->ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		ipc_lock_by_ptr(perm);
		free(ns, perm);
		total++;
	}
	up_write(&ids->rw_mutex);
}
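The open-coded walk above counts in_use entries and skips holes manually; the same traversal can be written with idr_for_each(), which visits every populated id in ascending order. A minimal sketch (count_one is an illustrative callback, not from the ipc code):

/* Illustrative idr_for_each() walk: count populated ids. The caller
 * still holds ids->rw_mutex to serialize against removal. */
static int count_one(int id, void *p, void *data)
{
	int *count = data;

	(*count)++;
	return 0;	/* a non-zero return would abort the walk */
}

/* usage: int n = 0; idr_for_each(&ids->ipcs_idr, count_one, &n); */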
Example #9
/*
  we've run out of search handles - clean up those that the client forgot
  to close
*/
static void pvfs_search_cleanup(struct pvfs_state *pvfs)
{
	int i;
	time_t t = time(NULL);

	for (i=0;i<MAX_OLD_SEARCHES;i++) {
		struct pvfs_search_state *search;
		void *p = idr_find(pvfs->search.idtree, i);

		if (p == NULL) return;

		search = talloc_get_type(p, struct pvfs_search_state);
		if (pvfs_list_eos(search->dir, search->current_index) &&
		    search->last_used != 0 &&
		    t > search->last_used + 30) {
			/* it's almost certainly been forgotten about */
			talloc_free(search);
		}
	}
}
Example #10
int tegra_uapi_close_channel(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct tegra_drm_context_v1 *context;

	spin_lock(&tegra->context_lock);
	context = idr_find(&fpriv->uapi_v1_contexts, args->context);
	if (context)
		idr_remove(&fpriv->uapi_v1_contexts, args->context);
	spin_unlock(&tegra->context_lock);

	if (!context)
		return -EINVAL;

	tegra_drm_context_v1_put(context);

	return 0;
}
Example #11
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
                      u32 handle)
{
    struct drm_gem_object *obj;

    spin_lock(&filp->table_lock);

    /* Check if we currently have a reference on the object */
    obj = idr_find(&filp->object_idr, handle);
    if (obj == NULL) {
        spin_unlock(&filp->table_lock);
        return NULL;
    }

    drm_gem_object_reference(obj);

    spin_unlock(&filp->table_lock);

    return obj;
}
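The matching pattern on the caller's side drops the reference that the lookup took; a sketch in the style of this DRM era, where drm_gem_object_unreference_unlocked() is the release counterpart (the surrounding function is illustrative):

/* Illustrative caller: look the object up, use it, drop the reference. */
static int use_gem_object(struct drm_device *dev, struct drm_file *filp,
			  u32 handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, handle);
	if (obj == NULL)
		return -ENOENT;

	/* ... the reference keeps obj alive while we operate on it ... */

	drm_gem_object_unreference_unlocked(obj);
	return 0;
}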
Example #12
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (obj->import_attach)
		drm_prime_remove_imported_buf_handle(&filp->prime,
				obj->import_attach->dmabuf);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
Example #13
static inline void process_eqe(struct ehca_shca *shca, struct ehca_eqe *eqe)
{
    u64 eqe_value;
    u32 token;
    unsigned long flags;
    struct ehca_cq *cq;

    eqe_value = eqe->entry;
    ehca_dbg(&shca->ib_device, "eqe_value=%lx", eqe_value);
    if (EHCA_BMASK_GET(EQE_COMPLETION_EVENT, eqe_value)) {
        ehca_dbg(&shca->ib_device, "Got completion event");
        token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe_value);
        spin_lock_irqsave(&ehca_cq_idr_lock, flags);
        cq = idr_find(&ehca_cq_idr, token);
        if (cq == NULL) {
            spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
            ehca_err(&shca->ib_device,
                     "Invalid eqe for non-existing cq token=%x",
                     token);
            return;
        }
        reset_eq_pending(cq);
        cq->nr_events++;
        spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
        if (ehca_scaling_code)
            queue_comp_task(cq);
        else {
            comp_event_callback(cq);
            spin_lock_irqsave(&ehca_cq_idr_lock, flags);
            cq->nr_events--;
            if (!cq->nr_events)
                wake_up(&cq->wait_completion);
            spin_unlock_irqrestore(&ehca_cq_idr_lock, flags);
        }
    } else {
        ehca_dbg(&shca->ib_device, "Got non completion event");
        parse_identifier(shca, eqe_value);
    }
}
Example #14
ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;
	int                            ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&ib_uverbs_idr_mutex);
	cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
	if (cq && cq->uobject->context == file->ucontext) {
		ib_req_notify_cq(cq, cmd.solicited_only ?
					IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
		ret = in_len;
	}
	mutex_unlock(&ib_uverbs_idr_mutex);

	return ret;
}
Example #15
/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table and if this is the last
 * handle also cleans up linked resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
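For context, the usual way into drm_gem_handle_delete() is the GEM_CLOSE ioctl; a minimal sketch of that entry point, following the standard DRM ioctl signature (the handler name here is illustrative):

/* Sketch of a GEM_CLOSE ioctl handler that funnels userspace handle
 * closes into drm_gem_handle_delete(). */
static int gem_close_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;

	return drm_gem_handle_delete(file_priv, args->handle);
}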
Example #16
static void cq_event_callback(struct ehca_shca *shca,
			      u64 eqe)
{
	struct ehca_cq *cq;
	u32 token = EHCA_BMASK_GET(EQE_CQ_TOKEN, eqe);

	read_lock(&ehca_cq_idr_lock);
	cq = idr_find(&ehca_cq_idr, token);
	if (cq)
		atomic_inc(&cq->nr_events);
	read_unlock(&ehca_cq_idr_lock);

	if (!cq)
		return;

	ehca_error_data(shca, cq, cq->ipz_cq_handle.handle);

	if (atomic_dec_and_test(&cq->nr_events))
		wake_up(&cq->wait_completion);

	return;
}
Example #17
struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
{
	struct amdgpu_bo_list *result;

	rcu_read_lock();
	result = idr_find(&fpriv->bo_list_handles, id);

	if (result) {
		if (kref_get_unless_zero(&result->refcount)) {
			rcu_read_unlock();
			mutex_lock(&result->lock);
		} else {
			rcu_read_unlock();
			result = NULL;
		}
	} else {
		rcu_read_unlock();
	}

	return result;
}
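amdgpu_bo_list_get() returns with result->lock held and a reference taken, so the release side must undo both; a hedged sketch (the kref release callback is a placeholder, not necessarily the driver's real one):

static void my_bo_list_free(struct kref *kref);	/* placeholder release callback */

/* Illustrative counterpart: drop the mutex taken on the lookup path,
 * then drop the reference. */
static void my_bo_list_put(struct amdgpu_bo_list *list)
{
	mutex_unlock(&list->lock);
	kref_put(&list->refcount, my_bo_list_free);
}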
Example #18
/*
 * Find the session structure associated with a VUID
 * (not one from an in-progress session setup)
 */
struct smbsrv_session *smbsrv_session_find(struct smbsrv_connection *smb_conn,
        uint64_t vuid, struct timeval request_time)
{
    void *p;
    struct smbsrv_session *sess;

    if (vuid == 0) return NULL;

    if (vuid > smb_conn->sessions.idtree_limit) return NULL;

    p = idr_find(smb_conn->sessions.idtree_vuid, vuid);
    if (!p) return NULL;

    /* only return a finished session */
    sess = talloc_get_type(p, struct smbsrv_session);
    if (sess && sess->session_info) {
        sess->statistics.last_request_time = request_time;
        return sess;
    }

    return NULL;
}
Example #19
/*
 * Locking issues: We need to protect the result of the id look up until
 * we get the timer locked down so it is not deleted under us.  The
 * removal is done under the idr spinlock so we use that here to bridge
 * the find to the timer lock.  To avoid a deadlock, the timer id MUST
 * be released without holding the timer lock.
 */
static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
{
	struct k_itimer *timr;
	/*
	 * Watch out here.  We do an irqsave on the idr_lock and pass the
	 * flags part over to the timer lock.  Must not let interrupts in
	 * while we are moving the lock.
	 */
	spin_lock_irqsave(&idr_lock, *flags);
	timr = idr_find(&posix_timers_id, (int)timer_id);
	if (timr) {
		spin_lock(&timr->it_lock);
		if (timr->it_signal == current->signal) {
			spin_unlock(&idr_lock);
			return timr;
		}
		spin_unlock(&timr->it_lock);
	}
	spin_unlock_irqrestore(&idr_lock, *flags);

	return NULL;
}
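The comment above only tells half the story; the release side must drop the timer lock with the interrupt flags that lock_timer() carried over from the idr lock. A minimal counterpart sketch:

/* Counterpart sketch: release the timer lock using the flags saved when
 * lock_timer() took the idr lock. */
static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
{
	spin_unlock_irqrestore(&timr->it_lock, flags);
}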
Example #20
ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_umem_object    *memobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&ib_uverbs_idr_mutex);

	mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
	if (!mr || mr->uobject->context != file->ucontext)
		goto out;

	memobj = container_of(mr->uobject, struct ib_umem_object, uobject);

	ret = ib_dereg_mr(mr);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);

	mutex_lock(&file->mutex);
	list_del(&memobj->uobject.list);
	mutex_unlock(&file->mutex);

	ib_umem_release(file->device->ib_dev, &memobj->umem);
	kfree(memobj);

out:
	mutex_unlock(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
Example #21
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (dm_disk(md)->first_minor != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
Example #22
int ipc_get_maxid(struct ipc_ids *ids)
{
	struct kern_ipc_perm *ipc;
	int max_id = -1;
	int total, id;

	if (ids->in_use == 0)
		return -1;

	if (ids->in_use == IPCMNI)
		return IPCMNI - 1;

	/* Look for the last assigned id */
	total = 0;
	for (id = 0; id < IPCMNI && total < ids->in_use; id++) {
		ipc = idr_find(&ids->ipcs_idr, id);
		if (ipc != NULL) {
			max_id = id;
			total++;
		}
	}
	return max_id;
}
Example #23
static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
	struct kern_ipc_perm *ipc;
	int next_id;
	int total;

	for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
		ipc = idr_find(&ids->ipcs_idr, next_id);

		if (ipc == NULL)
			continue;

		if (ipc->key != key) {
			total++;
			continue;
		}

		ipc_lock_by_ptr(ipc);
		return ipc;
	}

	return NULL;
}
Example #24
/*
  dispatch a fully received message

  note that this deliberately can match more than one message handler
  per message. That allows a single messaging context to register
  (for example) a debug handler for more than one piece of code
*/
static void imessaging_dispatch(struct imessaging_context *msg, struct imessaging_rec *rec)
{
	struct dispatch_fn *d, *next;

	/* temporary IDs use an idtree, the rest use an array of pointers */
	if (rec->header->msg_type >= MSG_TMP_BASE) {
		d = (struct dispatch_fn *)idr_find(msg->dispatch_tree, 
						   rec->header->msg_type);
	} else if (rec->header->msg_type < msg->num_types) {
		d = msg->dispatch[rec->header->msg_type];
	} else {
		d = NULL;
	}

	for (; d; d = next) {
		DATA_BLOB data;
		next = d->next;
		data.data = rec->packet.data + sizeof(*rec->header);
		data.length = rec->header->length;
		d->fn(msg, d->private_data, d->msg_type, rec->header->from, &data);
	}
	rec->header->length = 0;
}
Example #25
void shm_exit_ns(struct ipc_namespace *ns)
{
	struct shmid_kernel *shp;
	int next_id;
	int total, in_use;

	down_write(&shm_ids(ns).rw_mutex);

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (shp == NULL)
			continue;
		ipc_lock_by_ptr(&shp->shm_perm);
		do_shm_rmid(ns, shp);
		total++;
	}
	up_write(&shm_ids(ns).rw_mutex);

	kfree(ns->ids[IPC_SHM_IDS]);
	ns->ids[IPC_SHM_IDS] = NULL;
}
Example #26
ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_pd               *pd;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&ib_uverbs_idr_mutex);

	pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
	if (!pd || pd->uobject->context != file->ucontext)
		goto out;

	uobj = pd->uobject;

	ret = ib_dealloc_pd(pd);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	kfree(uobj);

out:
	mutex_unlock(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
Example #27
static int uio_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
    struct inode *inode = filep->f_path.dentry->d_inode;
    struct uio_device *idev;
    int ret = 0;

    idev = idr_find(&uio_idr, iminor(inode));
    if (!idev)
        return -ENODEV;

    if (idev->info) {
        if (idev->info->ioctl) {
            if (!try_module_get(idev->owner))
                return -ENODEV;
            ret = idev->info->ioctl(idev->info, cmd, arg);
            module_put(idev->owner);

            return ret;
        }
    }

    return -EINVAL;
}
Example #28
static void id_map_ent_timeout(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct id_map_entry *ent = container_of(delay, struct id_map_entry, timeout);
	struct id_map_entry *db_ent, *found_ent;
	struct mlx4_ib_dev *dev = ent->dev;
	struct mlx4_ib_sriov *sriov = &dev->sriov;
	struct rb_root *sl_id_map = &sriov->sl_id_map;
	int pv_id = (int) ent->pv_cm_id;

	spin_lock(&sriov->id_map_lock);
	db_ent = (struct id_map_entry *)idr_find(&sriov->pv_id_table, pv_id);
	if (!db_ent)
		goto out;
	found_ent = id_map_find_by_sl_id(&dev->ib_dev, ent->slave_id, ent->sl_cm_id);
	if (found_ent && found_ent == ent)
		rb_erase(&found_ent->node, sl_id_map);
	idr_remove(&sriov->pv_id_table, pv_id);

out:
	list_del(&ent->list);
	spin_unlock(&sriov->id_map_lock);
	kfree(ent);
}
Example #29
ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&ib_uverbs_idr_mutex);

	ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle);
	if (!ah || ah->uobject->context != file->ucontext)
		goto out;

	uobj = ah->uobject;

	ret = ib_destroy_ah(ah);
	if (ret)
		goto out;

	idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	kfree(uobj);

out:
	mutex_unlock(&ib_uverbs_idr_mutex);

	return ret ? ret : in_len;
}
Example #30
/* continue a search */
static NTSTATUS pvfs_search_next_trans2(struct ntvfs_module_context *ntvfs,
					struct ntvfs_request *req, union smb_search_next *io, 
					void *search_private, 
					bool (*callback)(void *, const union smb_search_data *))
{
	struct pvfs_state *pvfs = talloc_get_type(ntvfs->private_data,
				  struct pvfs_state);
	void *p;
	struct pvfs_search_state *search;
	struct pvfs_dir *dir;
	unsigned int reply_count;
	uint16_t handle;
	NTSTATUS status;

	handle = io->t2fnext.in.handle;

	p = idr_find(pvfs->search.idtree, handle);
	if (p == NULL) {
		/* we didn't find the search handle */
		return NT_STATUS_INVALID_HANDLE;
	}

	search = talloc_get_type(p, struct pvfs_search_state);

	dir = search->dir;
	
	status = NT_STATUS_OK;

	/* work out what type of continuation is being used */
	if (io->t2fnext.in.last_name && *io->t2fnext.in.last_name) {
		status = pvfs_list_seek(dir, io->t2fnext.in.last_name, &search->current_index);
		if (!NT_STATUS_IS_OK(status) && io->t2fnext.in.resume_key) {
			status = pvfs_list_seek_ofs(dir, io->t2fnext.in.resume_key, 
						    &search->current_index);
		}
	} else if (!(io->t2fnext.in.flags & FLAG_TRANS2_FIND_CONTINUE)) {
		status = pvfs_list_seek_ofs(dir, io->t2fnext.in.resume_key, 
					    &search->current_index);
	}
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	search->num_ea_names = io->t2fnext.in.num_names;
	search->ea_names = io->t2fnext.in.ea_names;

	status = pvfs_search_fill(pvfs, req, io->t2fnext.in.max_count, search, io->generic.data_level,
				  &reply_count, search_private, callback);
	if (!NT_STATUS_IS_OK(status)) {
		return status;
	}

	io->t2fnext.out.count = reply_count;
	io->t2fnext.out.end_of_search = pvfs_list_eos(dir, search->current_index) ? 1 : 0;

	/* work out if we are going to keep the search state */
	if ((io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE) ||
	    ((io->t2fnext.in.flags & FLAG_TRANS2_FIND_CLOSE_IF_END) && 
	     io->t2fnext.out.end_of_search)) {
		talloc_free(search);
	}

	return NT_STATUS_OK;
}