static int
diag_bridge_probe(struct usb_interface *ifc, const struct usb_device_id *id)
{
	struct diag_bridge		*dev;
	struct usb_host_interface	*ifc_desc;
	struct usb_endpoint_descriptor	*ep_desc;
	int				i;
	int				ret = -ENOMEM;
	__u8				ifc_num;

	pr_debug("id:%lu", id->driver_info);

	ifc_num = ifc->cur_altsetting->desc.bInterfaceNumber;

	/* is this interface supported? */
	if (ifc_num != id->driver_info)
		return -ENODEV;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		pr_err("unable to allocate dev");
		return -ENOMEM;
	}
	dev->pdev = platform_device_alloc("diag_bridge", -1);
	if (!dev->pdev) {
		pr_err("unable to allocate platform device");
		kfree(dev);
		return -ENOMEM;
	}
	__dev = dev;

	dev->udev = usb_get_dev(interface_to_usbdev(ifc));
	dev->ifc = ifc;
	kref_init(&dev->kref);
	mutex_init(&dev->ifc_mutex);
	init_usb_anchor(&dev->submitted);

	ifc_desc = ifc->cur_altsetting;
	for (i = 0; i < ifc_desc->desc.bNumEndpoints; i++) {
		ep_desc = &ifc_desc->endpoint[i].desc;

		if (!dev->in_epAddr && usb_endpoint_is_bulk_in(ep_desc))
			dev->in_epAddr = ep_desc->bEndpointAddress;

		if (!dev->out_epAddr && usb_endpoint_is_bulk_out(ep_desc))
			dev->out_epAddr = ep_desc->bEndpointAddress;
	}

	if (!(dev->in_epAddr && dev->out_epAddr)) {
		pr_err("could not find bulk in and bulk out endpoints");
		ret = -ENODEV;
		goto error;
	}

	usb_set_intfdata(ifc, dev);
	diag_bridge_debugfs_init();
	platform_device_add(dev->pdev);

	dev_dbg(&dev->ifc->dev, "%s: complete\n", __func__);

	return 0;

error:
	if (dev)
		kref_put(&dev->kref, diag_bridge_delete);

	return ret;
}
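The release callback handed to kref_put() above, diag_bridge_delete, is not shown on this page. As a minimal sketch of what such a kref release function looks like (the exact body is an assumption; the real one may also clean up debugfs state and the platform device):

static void diag_bridge_delete(struct kref *kref)
{
	/* recover the containing object from the embedded kref */
	struct diag_bridge *dev = container_of(kref, struct diag_bridge, kref);

	usb_put_dev(dev->udev);	/* balances usb_get_dev() in probe */
	__dev = NULL;
	kfree(dev);
}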
Example #2
static void user_hwctx_put(struct nvhost_hwctx *ctx)
{
	kref_put(&ctx->ref, user_hwctx_free);
}
Example #3
int diag_bridge_write(char *data, int size)
{
	struct urb		*urb = NULL;
	unsigned int		pipe;
	struct diag_bridge	*dev = __dev;
	int			ret;

	pr_debug("writing %d bytes", size);

	if (!dev) {
		pr_err("device is disconnected");
		return -ENODEV;
	}

	mutex_lock(&dev->ifc_mutex);
	if (!dev->ifc) {
		ret = -ENODEV;
		goto error;
	}

	if (!dev->ops) {
		pr_err("bridge is not open");
		ret = -ENODEV;
		goto error;
	}

	if (!size) {
		dev_err(&dev->ifc->dev, "invalid size:%d\n", size);
		ret = -EINVAL;
		goto error;
	}

	/* if there was a previous unrecoverable error, just quit */
	if (dev->err) {
		ret = -ENODEV;
		goto error;
	}

	kref_get(&dev->kref);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		dev_err(&dev->ifc->dev, "unable to allocate urb\n");
		ret = -ENOMEM;
		goto put_error;
	}

	ret = usb_autopm_get_interface(dev->ifc);
	if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
		pr_err_ratelimited("write: autopm_get failed:%d", ret);
		goto free_error;
	}

	pipe = usb_sndbulkpipe(dev->udev, dev->out_epAddr);
	usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
				diag_bridge_write_cb, dev);
	urb->transfer_flags |= URB_ZERO_PACKET;
	usb_anchor_urb(urb, &dev->submitted);
	dev->pending_writes++;

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		pr_err_ratelimited("submitting urb failed err:%d", ret);
		dev->pending_writes--;
		usb_unanchor_urb(urb);
		usb_autopm_put_interface(dev->ifc);
		goto free_error;
	}

free_error:
	usb_free_urb(urb);
put_error:
	if (ret) /* otherwise this is done in the completion handler */
		kref_put(&dev->kref, diag_bridge_delete);
error:
	mutex_unlock(&dev->ifc_mutex);
	return ret;
}
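The "otherwise this is done in the completion handler" comment above is the other half of the refcounting contract: on a successful submit, the URB completion callback drops the reference taken before submission. A minimal sketch of that side, with a hypothetical example_write_cb standing in for the real diag_bridge_write_cb (not shown on this page; the real handler would also decrement pending_writes and release the autopm reference):

static void example_write_cb(struct urb *urb)
{
	struct diag_bridge *dev = urb->context;

	/* balance the kref_get() taken in diag_bridge_write() */
	kref_put(&dev->kref, diag_bridge_delete);
}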
Example #4
File: rxe.c Project: Tyler-D/RXE
/* called by the ifc layer to remove a device */
void rxe_remove(struct rxe_dev *rxe)
{
	rxe_unregister_device(rxe);

	kref_put(&rxe->ref_cnt, rxe_release);
}
Example #5
File: urb.c Project: 274914765/C
/**
 * usb_free_urb - frees the memory used by a urb when all users of it are finished
 * @urb: pointer to the urb to free, may be NULL
 *
 * Must be called when a user of a urb is finished with it.  When the last user
 * of the urb calls this function, the memory of the urb is freed.
 *
 * Note: The transfer buffer associated with the urb is not freed, that must be
 * done elsewhere.
 */
void usb_free_urb(struct urb *urb)
{
    if (urb)
        kref_put(&urb->kref, urb_destroy);
}
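Because usb_free_urb() only drops the caller's reference, the common submit pattern frees the URB immediately after a successful usb_submit_urb(), which holds its own reference until completion. A minimal sketch of that lifecycle (names and buffer handling are illustrative, not from this page):

static int example_submit_bulk(struct usb_device *udev, unsigned int pipe,
			       void *buf, int len,
			       usb_complete_t done, void *ctx)
{
	struct urb *urb;
	int ret;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, pipe, buf, len, done, ctx);
	ret = usb_submit_urb(urb, GFP_KERNEL);

	/* drop our reference; the URB core keeps its own until completion */
	usb_free_urb(urb);
	return ret;
}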
Example #6
void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_cmd *orig_io_req, *new_io_req;
	struct bnx2fc_cmd *rec_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct fc_els_ls_rjt *rjt;
	struct fc_els_rec_acc *acc;
	struct bnx2fc_rport *tgt;
	struct fcoe_err_report_entry *err_entry;
	struct scsi_cmnd *sc_cmd;
	enum fc_rctl r_ctl;
	unsigned char *buf;
	void *resp_buf;
	struct fc_frame *fp;
	u8 opcode;
	u32 offset;
	u32 e_stat;
	u32 resp_len, hdr_len;
	int rc = 0;
	bool send_seq_clnp = false;
	bool abort_io = false;

	BNX2FC_MISC_DBG("Entered rec_compl callback\n");
	rec_req = cb_arg->io_req;
	orig_io_req = cb_arg->aborted_io_req;
	BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid);
	tgt = orig_io_req->tgt;

	/* Handle REC timeout case */
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		/* els req is timed out. send abts for els */
		rc = bnx2fc_initiate_abts(rec_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(rec_req);
		}
		orig_io_req->rec_retry++;
		/* REC timedout. send ABTS to the orig IO req */
		if (orig_io_req->rec_retry <= REC_RETRY_COUNT) {
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_rec(orig_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto rec_compl_done;
		}
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto rec_compl_done;
	}

	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "completed"
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(rec_req, "abts in prog "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		goto rec_compl_done;
	}

	mp_req = &(rec_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	acc = resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);

	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "rec buf: mem alloc failure\n");
		goto rec_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		BNX2FC_IO_DBG(rec_req, "opcode is RJT\n");
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		if ((rjt->er_reason == ELS_RJT_LOGIC ||
		    rjt->er_reason == ELS_RJT_UNAB) &&
		    rjt->er_explan == ELS_EXPL_OXID_RXID) {
			BNX2FC_IO_DBG(rec_req, "handle CMD LOST case\n");
			new_io_req = bnx2fc_cmd_alloc(tgt);
			if (!new_io_req)
				goto abort_io;
			new_io_req->sc_cmd = orig_io_req->sc_cmd;
			/* cleanup orig_io_req that is with the FW */
			set_bit(BNX2FC_FLAG_CMD_LOST,
				&orig_io_req->req_flags);
			bnx2fc_initiate_cleanup(orig_io_req);
			/* Post a new IO req with the same sc_cmd */
			BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_post_io_req(tgt, new_io_req);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto free_frame;
			BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
		}
abort_io:
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(rec_req, "rec_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(orig_io_req);
		}
	} else if (opcode == ELS_LS_ACC) {
		/* REVISIT: Check if the exchange is already aborted */
		offset = ntohl(acc->reca_fc4value);
		e_stat = ntohl(acc->reca_e_stat);
		if (e_stat & ESB_ST_SEQ_INIT)  {
			BNX2FC_IO_DBG(rec_req, "target has the seq init\n");
			goto free_frame;
		}
		BNX2FC_IO_DBG(rec_req, "e_stat = 0x%x, offset = 0x%x\n",
			e_stat, offset);
		/* Seq initiative is with us */
		err_entry = (struct fcoe_err_report_entry *)
			     &orig_io_req->err_entry;
		sc_cmd = orig_io_req->sc_cmd;
		if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
			/* SCSI WRITE command */
			if (offset == orig_io_req->data_xfer_len) {
				BNX2FC_IO_DBG(rec_req, "WRITE - resp lost\n");
				/* FCP_RSP lost */
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* start transmitting from offset */
				BNX2FC_IO_DBG(rec_req, "XFER_RDY/DATA lost\n");
				send_seq_clnp = true;
				r_ctl = FC_RCTL_DD_DATA_DESC;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
				/* XFER_RDY */
			}
		} else {
			/* SCSI READ command */
			if (err_entry->data.rx_buf_off ==
					orig_io_req->data_xfer_len) {
				/* FCP_RSP lost */
				BNX2FC_IO_DBG(rec_req, "READ - resp lost\n");
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				offset = 0;
			} else  {
				/* request retransmission from this offset */
				send_seq_clnp = true;
				offset = err_entry->data.rx_buf_off;
				BNX2FC_IO_DBG(rec_req, "RD DATA lost\n");
				/* FCP_DATA lost */
				r_ctl = FC_RCTL_DD_SOL_DATA;
				if (bnx2fc_initiate_seq_cleanup(orig_io_req,
								offset, r_ctl))
					abort_io = true;
			}
		}
		if (abort_io) {
			rc = bnx2fc_initiate_abts(orig_io_req);
			if (rc != SUCCESS) {
				BNX2FC_IO_DBG(rec_req, "rec_compl:initiate_abts"
					      " failed. issue cleanup\n");
				bnx2fc_initiate_cleanup(orig_io_req);
			}
		} else if (!send_seq_clnp) {
			BNX2FC_IO_DBG(rec_req, "Send SRR - FCP_RSP\n");
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl);
			spin_lock_bh(&tgt->tgt_lock);

			if (rc) {
				BNX2FC_IO_DBG(rec_req, "Unable to send SRR"
					" IO will abort\n");
			}
		}
	}
free_frame:
	fc_frame_free(fp);
free_buf:
	kfree(buf);
rec_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
	kfree(cb_arg);
}
Example #7
static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}
Example #8
File: device.c Project: Anjali05/linux
void hl_hpriv_put(struct hl_fpriv *hpriv)
{
	kref_put(&hpriv->refcount, hpriv_release);
}
Example #9
File: fdtap.c Project: borisnorm/akaros
/* Adds a tap with the file/qid of the underlying device for the requested FD.
 * The FD must be a chan, and the device must support the filter requested.
 *
 * Returns -1 or some other device-specific non-zero number on failure, 0 on
 * success. */
int add_fd_tap(struct proc *p, struct fd_tap_req *tap_req)
{
	struct fd_table *fdt = &p->open_files;
	struct fd_tap *tap;
	int ret = 0;
	struct chan *chan;
	int fd = tap_req->fd;

	if (fd < 0) {
		set_errno(EBADF);
		return -1;
	}
	tap = kzmalloc(sizeof(struct fd_tap), MEM_WAIT);
	tap->proc = p;
	tap->fd = fd;
	tap->filter = tap_req->filter;
	tap->ev_q = tap_req->ev_q;
	tap->ev_id = tap_req->ev_id;
	tap->data = tap_req->data;

	spin_lock(&fdt->lock);
	if (fd >= fdt->max_fdset) {
		set_errno(ENFILE);
		goto out_with_lock;
	}
	if (!GET_BITMASK_BIT(fdt->open_fds->fds_bits, fd)) {
		set_errno(EBADF);
		goto out_with_lock;
	}
	if (!fdt->fd[fd].fd_chan) {
		set_error(EINVAL, "Can't tap a VFS file");
		goto out_with_lock;
	}
	chan = fdt->fd[fd].fd_chan;
	if (fdt->fd[fd].fd_tap) {
		set_error(EBUSY, "FD %d already has a tap", fd);
		goto out_with_lock;
	}
	if (!devtab[chan->type].tapfd) {
		set_error(ENOSYS, "Device %s does not handle taps",
				  devtab[chan->type].name);
		goto out_with_lock;
	}
	/* need to keep chan alive for our call to the device.  someone else
	 * could come in and close the FD and the chan, once we unlock */
	chan_incref(chan);
	tap->chan = chan;
	/* One for the FD table, one for us to keep the removal of *this* tap from
	 * happening until we've attempted to register with the device. */
	kref_init(&tap->kref, tap_full_release, 2);
	fdt->fd[fd].fd_tap = tap;
	/* As soon as we unlock, another thread can come in and remove our old tap
	 * from the table and decref it.  Our ref keeps us from removing it yet,
	 * as well as keeps the memory safe.  However, a new tap can be installed
	 * and registered with the device before we even attempt to register.  The
	 * devices should be able to handle multiple, distinct taps, even if they
	 * happen to have the same {proc, fd} tuple. */
	spin_unlock(&fdt->lock);
	/* For refcnting fans, the tap ref is weak/uncounted.  We'll protect the
	 * memory and call the device when tap is being released. */
	ret = devtab[chan->type].tapfd(chan, tap, FDTAP_CMD_ADD);
	if (ret) {
		/* we failed, so we need to make sure *our* tap is removed.  We haven't
		 * decreffed, so we know our tap pointer is unique. */
		spin_lock(&fdt->lock);
		if (fdt->fd[fd].fd_tap == tap) {
			fdt->fd[fd].fd_tap = 0;
			/* normally we can't decref a tap while holding a lock, but we
			 * know we have another reference so this won't trigger a release */
			kref_put(&tap->kref);
		}
		spin_unlock(&fdt->lock);
		/* Regardless of whether someone else removed it or not, *we* are the
		 * only ones that know that registration failed and that we shouldn't
		 * remove it.  Since we still hold a ref, we can change the release
		 * method to skip the device dereg. */
		tap->kref.release = tap_min_release;
	}
	kref_put(&tap->kref);
	return ret;
out_with_lock:
	spin_unlock(&fdt->lock);
	kfree(tap);
	return -1;
}
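A minimal caller-side sketch for add_fd_tap(), assuming an akaros kernel context where an event queue has already been set up; FDTAP_FILT_READABLE and the field layout of struct fd_tap_req are assumptions inferred from the fields the function reads:

static int tap_fd_readable(struct proc *p, int fd, struct event_queue *ev_q)
{
	struct fd_tap_req req = {
		.fd = fd,
		.filter = FDTAP_FILT_READABLE,	/* assumed filter constant */
		.ev_q = ev_q,
		.ev_id = fd,	/* echoed back so the handler can identify the FD */
		.data = 0,
	};

	/* 0 on success; -1 or a device-specific non-zero value on failure,
	 * with errno set (see the comment above add_fd_tap) */
	return add_fd_tap(p, &req);
}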
Example #10
void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
{
	kref_put(&submit->refcount, submit_cleanup);
}
Example #11
static inline void autogroup_kref_put(struct autogroup *ag)
{
	kref_put(&ag->kref, autogroup_destroy);
}
Example #12
static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev,
			     struct rpmsg_hdr *msg, unsigned int len)
{
	struct rpmsg_endpoint *ept;
	struct scatterlist sg;
	int err;

	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->reserved);
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);

	/*
	 * We currently use fixed-sized buffers, so trivially sanitize
	 * the reported payload length.
	 */
	if (len > RPMSG_BUF_SIZE ||
		msg->len > (len - sizeof(struct rpmsg_hdr))) {
		dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
		return -EINVAL;
	}

	/* use the dst addr to fetch the callback of the appropriate user */
	mutex_lock(&vrp->endpoints_lock);

	ept = idr_find(&vrp->endpoints, msg->dst);

	/* let's make sure no one deallocates ept while we use it */
	if (ept)
		kref_get(&ept->refcount);

	mutex_unlock(&vrp->endpoints_lock);

	if (ept) {
		/* make sure ept->cb doesn't go away while we use it */
		mutex_lock(&ept->cb_lock);

		if (ept->cb)
			ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
				msg->src);

		mutex_unlock(&ept->cb_lock);

		/* farewell, ept, we don't need you anymore */
		kref_put(&ept->refcount, __ept_release);
	} else
		dev_warn(dev, "msg received with no recipient\n");

	/* publish the real size of the buffer */
	sg_init_one(&sg, msg, RPMSG_BUF_SIZE);

	/* add the buffer back to the remote processor's virtqueue */
	err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
		return err;
	}

	return 0;
}
Example #13
static void ctx3d_put(struct nvhost_hwctx *ctx)
{
	kref_put(&ctx->ref, ctx3d_free);
}
Example #14
File: bsg-lib.c Project: Anjali05/linux
void bsg_job_put(struct bsg_job *job)
{
	kref_put(&job->kref, bsg_teardown_job);
}
Example #15
static inline void qh_put (struct ehci_qh *qh)
{
	kref_put(&qh->kref, qh_destroy);
}
Example #16
/**
 * batadv_dat_entry_put - decrement the dat_entry refcounter and possibly
 *  release it
 * @dat_entry: dat_entry to be free'd
 */
static void batadv_dat_entry_put(struct batadv_dat_entry *dat_entry)
{
	kref_put(&dat_entry->refcount, batadv_dat_entry_release);
}
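A minimal sketch of the matching acquire/release pattern this helper implies; the lookup pairing with kref_get_unless_zero() is an assumption, not batman-adv API shown on this page:

static void example_use_dat_entry(struct batadv_dat_entry *dat_entry)
{
	/* caller is assumed to hold a reference, e.g. taken with
	 * kref_get_unless_zero(&dat_entry->refcount) during lookup */
	if (!dat_entry)
		return;

	/* ... use dat_entry while the reference pins it ... */

	batadv_dat_entry_put(dat_entry);	/* may free the entry */
}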
Example #17
void bnx2fc_srr_compl(struct bnx2fc_els_cb_arg *cb_arg)
{
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr, *fh;
	struct bnx2fc_cmd *srr_req;
	struct bnx2fc_cmd *orig_io_req;
	struct fc_frame *fp;
	unsigned char *buf;
	void *resp_buf;
	u32 resp_len, hdr_len;
	u8 opcode;
	int rc = 0;

	orig_io_req = cb_arg->aborted_io_req;
	srr_req = cb_arg->io_req;
	if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &srr_req->req_flags)) {
		/* SRR timedout */
		BNX2FC_IO_DBG(srr_req, "srr timed out, abort "
		       "orig_io - 0x%x\n",
			orig_io_req->xid);
		rc = bnx2fc_initiate_abts(srr_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed. issue cleanup\n");
			bnx2fc_initiate_cleanup(srr_req);
		}
		if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
		    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
			BNX2FC_IO_DBG(srr_req, "srr_compl:xid 0x%x flags = %lx",
				      orig_io_req->xid, orig_io_req->req_flags);
			goto srr_compl_done;
		}
		orig_io_req->srr_retry++;
		if (orig_io_req->srr_retry <= SRR_RETRY_COUNT) {
			struct bnx2fc_rport *tgt = orig_io_req->tgt;
			spin_unlock_bh(&tgt->tgt_lock);
			rc = bnx2fc_send_srr(orig_io_req,
					     orig_io_req->srr_offset,
					     orig_io_req->srr_rctl);
			spin_lock_bh(&tgt->tgt_lock);
			if (!rc)
				goto srr_compl_done;
		}

		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		goto srr_compl_done;
	}
	if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags) ||
	    test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) {
		BNX2FC_IO_DBG(srr_req, "srr_compl:xid - 0x%x flags = %lx",
			      orig_io_req->xid, orig_io_req->req_flags);
		goto srr_compl_done;
	}
	mp_req = &(srr_req->mp_req);
	fc_hdr = &(mp_req->resp_fc_hdr);
	resp_len = mp_req->resp_len;
	resp_buf = mp_req->resp_buf;

	hdr_len = sizeof(*fc_hdr);
	buf = kzalloc(PAGE_SIZE, GFP_ATOMIC);
	if (!buf) {
		printk(KERN_ERR PFX "srr buf: mem alloc failure\n");
		goto srr_compl_done;
	}
	memcpy(buf, fc_hdr, hdr_len);
	memcpy(buf + hdr_len, resp_buf, resp_len);

	fp = fc_frame_alloc(NULL, resp_len);
	if (!fp) {
		printk(KERN_ERR PFX "fc_frame_alloc failure\n");
		goto free_buf;
	}

	fh = (struct fc_frame_header *) fc_frame_header_get(fp);
	/* Copy FC Frame header and payload into the frame */
	memcpy(fh, buf, hdr_len + resp_len);

	opcode = fc_frame_payload_op(fp);
	switch (opcode) {
	case ELS_LS_ACC:
		BNX2FC_IO_DBG(srr_req, "SRR success\n");
		break;
	case ELS_LS_RJT:
		BNX2FC_IO_DBG(srr_req, "SRR rejected\n");
		rc = bnx2fc_initiate_abts(orig_io_req);
		if (rc != SUCCESS) {
			BNX2FC_IO_DBG(srr_req, "srr_compl: initiate_abts "
				"failed xid = 0x%x. issue cleanup\n",
				orig_io_req->xid);
			bnx2fc_initiate_cleanup(orig_io_req);
		}
		break;
	default:
		BNX2FC_IO_DBG(srr_req, "srr compl - invalid opcode = %d\n",
			opcode);
		break;
	}
	fc_frame_free(fp);
free_buf:
	kfree(buf);
srr_compl_done:
	kref_put(&orig_io_req->refcount, bnx2fc_cmd_release);
}
Example #18
int diag_bridge_read(char *data, int size)
{
	struct urb		*urb = NULL;
	unsigned int		pipe;
	struct diag_bridge	*dev = __dev;
	int			ret;

	if (!dev || !dev->ifc) {
		pr_err("device is disconnected");
		return -ENODEV;
	}

	pr_debug("reading %d bytes", size);

	if (!dev->ops) {
		pr_err("bridge is not open");
		return -ENODEV;
	}

	if (!size) {
		dev_err(&dev->udev->dev, "invalid size:%d\n", size);
		return -EINVAL;
	}

	/* if there was a previous unrecoverable error, just quit */
	if (dev->err) {
		pr_info("diag_bridge_read: dev->err\n");
		return -ENODEV;
	}

	kref_get(&dev->kref);

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		dev_err(&dev->udev->dev, "unable to allocate urb\n");
		ret = -ENOMEM;
		goto error;
	}

	ret = usb_autopm_get_interface(dev->ifc);
	if (ret < 0 && ret != -EAGAIN && ret != -EACCES) {
		pr_err_ratelimited("read: autopm_get failed:%d", ret);
		goto free_error;
	}

	pipe = usb_rcvbulkpipe(dev->udev, dev->in_epAddr);
	usb_fill_bulk_urb(urb, dev->udev, pipe, data, size,
				diag_bridge_read_cb, dev);
	usb_anchor_urb(urb, &dev->submitted);
	dev->pending_reads++;

	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret) {
		pr_err_ratelimited("submitting urb failed err:%d", ret);
		dev->pending_reads--;
		usb_unanchor_urb(urb);
	}

	usb_autopm_put_interface(dev->ifc);

free_error:
	usb_free_urb(urb);
error:
	if (ret) /* otherwise this is done in the completion handler */
		kref_put(&dev->kref, diag_bridge_delete);
	return ret;
}
Example #19
static int bnx2fc_initiate_els(struct bnx2fc_rport *tgt, unsigned int op,
			void *data, u32 data_len,
			void (*cb_func)(struct bnx2fc_els_cb_arg *cb_arg),
			struct bnx2fc_els_cb_arg *cb_arg, u32 timer_msec)
{
	struct fcoe_port *port = tgt->port;
	struct bnx2fc_interface *interface = port->priv;
	struct fc_rport *rport = tgt->rport;
	struct fc_lport *lport = port->lport;
	struct bnx2fc_cmd *els_req;
	struct bnx2fc_mp_req *mp_req;
	struct fc_frame_header *fc_hdr;
	struct fcoe_task_ctx_entry *task;
	struct fcoe_task_ctx_entry *task_page;
	int rc = 0;
	int task_idx, index;
	u32 did, sid;
	u16 xid;

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		printk(KERN_ERR PFX "els 0x%x: rport not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
		printk(KERN_ERR PFX "els 0x%x: link is not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	if (!(test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) ||
	     (test_bit(BNX2FC_FLAG_EXPL_LOGO, &tgt->flags))) {
		printk(KERN_ERR PFX "els 0x%x: tgt not ready\n", op);
		rc = -EINVAL;
		goto els_err;
	}
	els_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ELS);
	if (!els_req) {
		rc = -ENOMEM;
		goto els_err;
	}

	els_req->sc_cmd = NULL;
	els_req->port = port;
	els_req->tgt = tgt;
	els_req->cb_func = cb_func;
	cb_arg->io_req = els_req;
	els_req->cb_arg = cb_arg;

	mp_req = (struct bnx2fc_mp_req *)&(els_req->mp_req);
	rc = bnx2fc_init_mp_req(els_req);
	if (rc == FAILED) {
		printk(KERN_ERR PFX "ELS MP request init failed\n");
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -ENOMEM;
		goto els_err;
	} else {
		/* rc SUCCESS */
		rc = 0;
	}

	/* Set the data_xfer_len to the size of ELS payload */
	mp_req->req_len = data_len;
	els_req->data_xfer_len = mp_req->req_len;

	/* Fill ELS Payload */
	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS)) {
		memcpy(mp_req->req_buf, data, data_len);
	} else {
		printk(KERN_ERR PFX "Invalid ELS op 0x%x\n", op);
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		spin_lock_bh(&tgt->tgt_lock);
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		rc = -EINVAL;
	}

	if (rc)
		goto els_err;

	/* Fill FC header */
	fc_hdr = &(mp_req->req_fc_hdr);

	did = tgt->rport->port_id;
	sid = tgt->sid;

	if (op == ELS_SRR)
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS4_REQ, did, sid,
				   FC_TYPE_FCP, FC_FC_FIRST_SEQ |
				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
	else
		__fc_fill_fc_hdr(fc_hdr, FC_RCTL_ELS_REQ, did, sid,
				   FC_TYPE_ELS, FC_FC_FIRST_SEQ |
				   FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	/* Obtain exchange id */
	xid = els_req->xid;
	task_idx = xid/BNX2FC_TASKS_PER_PAGE;
	index = xid % BNX2FC_TASKS_PER_PAGE;

	/* Initialize task context for this IO request */
	task_page = (struct fcoe_task_ctx_entry *)
			interface->hba->task_ctx[task_idx];
	task = &(task_page[index]);
	bnx2fc_init_mp_task(els_req, task);

	spin_lock_bh(&tgt->tgt_lock);

	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
		printk(KERN_ERR PFX "initiate_els.. session not ready\n");
		els_req->cb_func = NULL;
		els_req->cb_arg = NULL;
		kref_put(&els_req->refcount, bnx2fc_cmd_release);
		spin_unlock_bh(&tgt->tgt_lock);
		return -EINVAL;
	}

	if (timer_msec)
		bnx2fc_cmd_timer_set(els_req, timer_msec);
	bnx2fc_add_2_sq(tgt, xid);

	els_req->on_active_queue = 1;
	list_add_tail(&els_req->link, &tgt->els_queue);

	/* Ring doorbell */
	bnx2fc_ring_doorbell(tgt);
	spin_unlock_bh(&tgt->tgt_lock);

els_err:
	return rc;
}
Example #20
static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}