Example #1
static int
handle_interrupt (pixma_t * s, int timeout)
{
  uint8_t buf[16];
  int len;

  len = pixma_wait_interrupt (s->io, buf, sizeof (buf), timeout);
  if (len == PIXMA_ETIMEDOUT)
    return 0;
  if (len < 0)
    return len;
  switch (s->cfg->pid)
    {
    case MP360_PID:
    case MP370_PID:
    case MP375R_PID:
    case MP390_PID:
    case MF5730_PID:
    case MF5750_PID:
    case MF5770_PID:
    case MF3110_PID:
    case IR1020_PID:
      if (len != 16)
	{
	  PDBG (pixma_dbg
		(1, "WARNING:unexpected interrupt packet length %d\n", len));
	  return PIXMA_EPROTO;
	}
      if (buf[12] & 0x40)
	query_status (s);
      if (buf[10] & 0x40)
	send_time (s);
      /* FIXME: following is unverified! */
      if (buf[15] & 1)
	s->events = PIXMA_EV_BUTTON2;	/* b/w scan */
      if (buf[15] & 2)
	s->events = PIXMA_EV_BUTTON1;	/* color scan */
      break;

    case MP5_PID:
    case MP10_PID:
    case MP700_PID:
    case MP730_PID:
    case MP710_PID:
    case MP740_PID:
      if (len != 8)
	{
	  PDBG (pixma_dbg
		(1, "WARNING:unexpected interrupt packet length %d\n", len));
	  return PIXMA_EPROTO;
	}
      if (buf[7] & 0x10)
	s->events = PIXMA_EV_BUTTON1;
      if (buf[5] & 8)
	send_time (s);
      break;

    default:
      PDBG (pixma_dbg (1, "WARNING:unknown interrupt, please report!\n"));
      PDBG (pixma_hexdump (1, buf, len));
    }
  return 1;
}
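Elsewhere in this backend the scan setup drains any stale interrupt packets by calling handle_interrupt() with a zero timeout until it stops returning 1 (see mp750_scan below). A minimal sketch of that pattern; the helper name is illustrative only:

static int
drain_pending_interrupts (pixma_t * s)
{
  int ret;

  /* handle_interrupt() returns 1 when a packet was consumed, 0 on timeout
     (nothing queued) and a negative PIXMA error code on failure. */
  while ((ret = handle_interrupt (s, 0)) > 0)
    ;
  return ret;   /* 0 = queue drained, <0 = error */
}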
Example #2
static void post_qp_event(struct iwch_dev *rnicp, struct iwch_cq *chp,
			  struct respQ_msg_t *rsp_msg,
			  enum ib_event_type ib_event,
			  int send_term)
{
	struct ib_event event;
	struct iwch_qp_attributes attrs;
	struct iwch_qp *qhp;

	spin_lock(&rnicp->lock);
	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));

	if (!qhp) {
		printk(KERN_ERR "%s unaffiliated error 0x%x qpid 0x%x\n",
		       __func__, CQE_STATUS(rsp_msg->cqe),
		       CQE_QPID(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		return;
	}

	if ((qhp->attr.state == IWCH_QP_STATE_ERROR) ||
	    (qhp->attr.state == IWCH_QP_STATE_TERMINATE)) {
		PDBG("%s AE received after RTS - "
		     "qp state %d qpid 0x%x status 0x%x\n", __func__,
		     qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		return;
	}

	printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
	       "type %d wrid.hi 0x%x wrid.lo 0x%x \n", __func__,
	       CQE_QPID(rsp_msg->cqe), CQE_OPCODE(rsp_msg->cqe),
	       CQE_STATUS(rsp_msg->cqe), CQE_TYPE(rsp_msg->cqe),
	       CQE_WRID_HI(rsp_msg->cqe), CQE_WRID_LOW(rsp_msg->cqe));

	atomic_inc(&qhp->refcnt);
	spin_unlock(&rnicp->lock);

	if (qhp->attr.state == IWCH_QP_STATE_RTS) {
		attrs.next_state = IWCH_QP_STATE_TERMINATE;
		iwch_modify_qp(qhp->rhp, qhp, IWCH_QP_ATTR_NEXT_STATE,
			       &attrs, 1);
		if (send_term)
			iwch_post_terminate(qhp, rsp_msg);
	}

	event.event = ib_event;
	event.device = chp->ibcq.device;
	if (ib_event == IB_EVENT_CQ_ERR)
		event.element.cq = &chp->ibcq;
	else
		event.element.qp = &qhp->ibqp;

	if (qhp->ibqp.event_handler)
		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);

	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);

	if (atomic_dec_and_test(&qhp->refcnt))
		wake_up(&qhp->wait);
}
void QNitpickerPlatformWindow::_process_mouse_event(Input::Event *ev)
{
	QPoint global_position(ev->ax(), ev->ay());
	QPoint local_position(global_position.x() - geometry().x(),
			              global_position.y() - geometry().y());

	//qDebug() << "local_position =" << local_position;
	//qDebug() << "global_position =" << global_position;

	switch (ev->type()) {

		case Input::Event::PRESS:

			if (qnpw_verbose)
				PDBG("PRESS");

			/* make this window the focused window */
			requestActivateWindow();

			switch (ev->code()) {
				case Input::BTN_LEFT:
					_mouse_button_state |= Qt::LeftButton;
					break;
				case Input::BTN_RIGHT:
					_mouse_button_state |= Qt::RightButton;
					break;
				case Input::BTN_MIDDLE:
					_mouse_button_state |= Qt::MidButton;
					break;
				case Input::BTN_SIDE:
					_mouse_button_state |= Qt::XButton1;
					break;
				case Input::BTN_EXTRA:
					_mouse_button_state |= Qt::XButton2;
					break;
			}
			break;

		case Input::Event::RELEASE:

			if (qnpw_verbose)
				PDBG("RELEASE");

			switch (ev->code()) {
				case Input::BTN_LEFT:
					_mouse_button_state &= ~Qt::LeftButton;
					break;
				case Input::BTN_RIGHT:
					_mouse_button_state &= ~Qt::RightButton;
					break;
				case Input::BTN_MIDDLE:
					_mouse_button_state &= ~Qt::MidButton;
					break;
				case Input::BTN_SIDE:
					_mouse_button_state &= ~Qt::XButton1;
					break;
				case Input::BTN_EXTRA:
					_mouse_button_state &= ~Qt::XButton2;
					break;
			}
			break;

		case Input::Event::WHEEL:

			if (qnpw_verbose)
				PDBG("WHEEL");

			QWindowSystemInterface::handleWheelEvent(window(),
					                                 local_position,
					                                 local_position,
					                                 ev->ry() * 120,
					                                 Qt::Vertical);
			return;

		default:
			break;
	}

	QWindowSystemInterface::handleMouseEvent(window(),
			                                 local_position,
			                                 global_position,
			                                 _mouse_button_state);
}
Example #4
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct c4iw_wr_wait wr_wait;
	struct sk_buff *skb;
	int ret;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	wq->rq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->rq.qid) {
		ret = -ENOMEM;
		goto free_sq_qid;
	}

	if (!user) {
		wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq,
				 GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid;
		}

		wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq,
				 GFP_KERNEL);
		if (!wq->rq.sw_rq) {
			ret = -ENOMEM;
			goto free_sw_sq;
		}
	}

	/*
	 * RQT must be a power of 2.
	 */
	wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
	wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
	if (!wq->rq.rqt_hwaddr) {
		ret = -ENOMEM;
		goto free_sw_rq;
	}

	if (user) {
		ret = alloc_oc_sq(rdev, &wq->sq);
		if (ret)
			goto free_hwaddr;

		ret = alloc_host_sq(rdev, &wq->sq);
		if (ret)
			goto free_sq;
	} else {
		ret = alloc_host_sq(rdev, &wq->sq);
		if (ret)
			goto free_hwaddr;
	}
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),
					  wq->rq.memsize, &(wq->rq.dma_addr),
					  GFP_KERNEL);
	if (!wq->rq.queue) {
		ret = -ENOMEM;
		goto free_sq;
	}
	PDBG("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
		__func__, wq->sq.queue,
		(unsigned long long)virt_to_phys(wq->sq.queue),
		wq->rq.queue,
		(unsigned long long)virt_to_phys(wq->rq.queue));
	memset(wq->rq.queue, 0, wq->rq.memsize);
	dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);

	wq->db = rdev->lldi.db_reg;
	wq->gts = rdev->lldi.gts_reg;
	if (user) {
		wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->sq.qid << rdev->qpshift);
		wq->sq.udb &= PAGE_MASK;
		wq->rq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(wq->rq.qid << rdev->qpshift);
		wq->rq.udb &= PAGE_MASK;
	}
	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + 2 * sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(2) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
		V_FW_RI_RES_WR_IQID(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
	res++;
	res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
		V_FW_RI_RES_WR_IQID(rcq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		V_FW_RI_RES_WR_DCAEN(0) |
		V_FW_RI_RES_WR_DCACPU(0) |
		V_FW_RI_RES_WR_FBMIN(2) |
		V_FW_RI_RES_WR_FBMAX(2) |
		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
		V_FW_RI_RES_WR_EQSIZE(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto free_dma;
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	PDBG("%s sqid 0x%x rqid 0x%x kdb 0x%p squdb 0x%llx rqudb 0x%llx\n",
	     __func__, wq->sq.qid, wq->rq.qid, wq->db,
	     (unsigned long long)wq->sq.udb, (unsigned long long)wq->rq.udb);

	return 0;
free_dma:
	dma_free_coherent(&(rdev->lldi.pdev->dev),
			  wq->rq.memsize, wq->rq.queue,
			  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}
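create_qp() releases its partially acquired resources through the ladder of goto labels at the end, each label undoing one allocation in reverse order of acquisition. A stripped-down sketch of the same unwind idiom, with hypothetical resources and a hypothetical do_final_step() standing in for the final work-request step:

static int setup_example(void)
{
	void *a, *b;
	int ret;

	a = kzalloc(64, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(64, GFP_KERNEL);
	if (!b) {
		ret = -ENOMEM;
		goto free_a;
	}

	ret = do_final_step(a, b);	/* hypothetical last step that can fail */
	if (ret)
		goto free_b;

	return 0;

free_b:
	kfree(b);
free_a:
	kfree(a);
	return ret;
}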
Example #5
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	if (vector >= rhp->rdev.lldi.nciq)
		return ERR_PTR(-EINVAL);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cdix_inc overflows.
	 */
	hwentries = min(entries * 2, T4_MAX_IQ_SIZE);

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if its a user cq.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof *chp->cq.queue;
		while (hwentries > T4_MAX_IQ_SIZE) {
			memsize -= PAGE_SIZE;
			hwentries = memsize / sizeof *chp->cq.queue;
		}
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;
	chp->cq.vector = vector;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp,
				       sizeof(uresp) - sizeof(uresp.reserved));
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}
Example #6
static int
mp750_scan (pixma_t * s)
{
  mp750_t *mp = (mp750_t *) s->subdriver;
  int error;
  uint8_t *buf;
  unsigned size, dpi, spare;

  dpi = s->param->ydpi;
  /* add a stripe shift for 2400dpi */
  mp->stripe_shift = (dpi == 2400) ? 4 : 0;

  if (mp->state != state_idle)
    return PIXMA_EBUSY;

  /* clear interrupt packets buffer */
  while (handle_interrupt (s, 0) > 0)
    {
    }

  /*  if (s->param->channels == 1)
    mp->raw_width = ALIGN_SUP (s->param->w, 12);
  else
    mp->raw_width = ALIGN_SUP (s->param->w, 4);*/

  /* change to use CCD grayscale mode --- why does this give segmentation error at runtime in mp750_check_param? */
  if ((s->param->channels == 3) || (is_ccd_grayscale (s)))
    mp->raw_width = ALIGN_SUP (s->param->w, 4);
  else
    mp->raw_width = ALIGN_SUP (s->param->w, 12);
  /* not sure about MP750, but there is no need for aligning at 12 for the MP760/770, MP780/790 since always use CCD color mode */

  /* modify for stripe shift */
  spare = 2 * calc_component_shifting (s) + 2 * mp->stripe_shift; /* FIXME: or maybe (2*... + 1)? */
  mp->raw_height = s->param->h + spare;
  PDBG (pixma_dbg (3, "raw_width=%u raw_height=%u dpi=%u\n",
		   mp->raw_width, mp->raw_height, dpi));

  /* PDBG (pixma_dbg (4, "line_size=%"PRIu64"\n",s->param->line_size)); */

  mp->line_size = get_cis_ccd_line_size (s); /* scanner hardware line_size multiplied by 3 for CCD grayscale */

  size = 8 + 2 * IMAGE_BLOCK_SIZE + spare * mp->line_size;
  buf = (uint8_t *) malloc (size);
  if (!buf)
    return PIXMA_ENOMEM;
  mp->buf = buf;
  mp->rawimg = buf;
  mp->imgbuf_ofs = spare * mp->line_size;
  mp->imgcol = mp->rawimg + IMAGE_BLOCK_SIZE + 8; /* added to make rgb->gray */
  mp->img = mp->rawimg + IMAGE_BLOCK_SIZE + 8;
  mp->imgbuf_len = IMAGE_BLOCK_SIZE + mp->imgbuf_ofs;
  mp->rawimg_left = 0;
  mp->last_block_size = 0;
  mp->shifted_bytes = -(int) mp->imgbuf_ofs;

  error = step1 (s);
  if (error >= 0)
    error = start_session (s);
  if (error >= 0)
    mp->state = state_warmup;
  if (error >= 0)
    error = select_source (s);
  if (error >= 0)
    error = send_scan_param (s);
  if (error < 0)
    {
      mp750_finish_scan (s);
      return error;
    }
  return 0;
}
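mp750_scan() rounds the requested width up with ALIGN_SUP before sizing its buffers. ALIGN_SUP rounds its first argument up to the next multiple of the second; a sketch of such a macro (assumed definition, the backend ships its own in the pixma headers):

/* assumed round-up-to-multiple helper */
#define ALIGN_SUP(x, n)  ((((x) + (n) - 1) / (n)) * (n))

/* e.g. ALIGN_SUP (1707, 4) == 1708 and ALIGN_SUP (1707, 12) == 1716,
   matching the 4- and 12-pixel width alignment used above. */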
Example #7
void PluginStarter::_start_plugin(QString &file_name, QByteArray const &file_buf)
{
	if (file_name.endsWith(".gz")) {
		file_name.remove(".gz");

		uint32_t file_size = *(uint32_t*)(file_buf.constData() + file_buf.size() - sizeof(uint32_t));

		PDBG("file_size_uncompressed = %u", file_size);

		size_t ram_quota = Genode::Arg_string::find_arg(_args.constData(), "ram_quota").long_value(0) + file_size;

		if ((long)Genode::env()->ram_session()->avail() - (long)ram_quota < QPluginWidget::RAM_QUOTA) {
			PERR("quota exceeded");
			_plugin_loading_state = QUOTA_EXCEEDED_ERROR;
			return;
		}

		QByteArray ram_quota_string("ram_quota=" + QByteArray::number((unsigned long long) ram_quota));
		QByteArray ds_size_string("ds_size=" + QByteArray::number((unsigned long long)file_size));
		QByteArray plugin_connection_args(ram_quota_string + "," + ds_size_string);
		_pc = new Loader::Connection(plugin_connection_args.constData());

		Genode::Dataspace_capability ds = _pc->dataspace();
		if (ds.valid()) {
			void *ds_addr = Genode::env()->rm_session()->attach(ds);

			z_stream zs;
			zs.next_in = (Bytef*)(file_buf.data());
			zs.avail_in = file_buf.size();
			zs.total_in = 0;
			zs.next_out = (Bytef*)ds_addr;
			zs.avail_out = file_size;
			zs.total_out = 0;
			zs.zalloc = Z_NULL;
			zs.zfree = Z_NULL;

			/* enable gzip format detection */
			if (inflateInit2(&zs, 16 + MAX_WBITS) != Z_OK) {
				PERR("inflateInit2() failed");
				_plugin_loading_state = INFLATE_ERROR;

				inflateEnd(&zs);
				Genode::env()->rm_session()->detach(ds_addr);
				return;
			}

			/* uncompress */
			if (inflate(&zs, Z_SYNC_FLUSH) != Z_STREAM_END) {
				PERR("inflate() failed");
				_plugin_loading_state = INFLATE_ERROR;

				inflateEnd(&zs);
				Genode::env()->rm_session()->detach(ds_addr);
				return;
			}

			inflateEnd(&zs);

			Genode::env()->rm_session()->detach(ds_addr);
		}
	} else {
		size_t ram_quota = Genode::Arg_string::find_arg(_args.constData(), "ram_quota").long_value(0);

		if ((long)Genode::env()->ram_session()->avail() - (long)ram_quota < QPluginWidget::RAM_QUOTA) {
			_plugin_loading_state = QUOTA_EXCEEDED_ERROR;
			return;
		}

		QByteArray ram_quota_string("ram_quota=" + QByteArray::number((unsigned long long) ram_quota));
		QByteArray ds_size_string("ds_size=" + QByteArray::number((unsigned long long)file_buf.size()));
		QByteArray plugin_connection_args(ram_quota_string + "," + ds_size_string);
		_pc = new Loader::Connection(plugin_connection_args.constData());

		Genode::Dataspace_capability ds = _pc->dataspace();
		if (ds.valid()) {
			void *ds_addr = Genode::env()->rm_session()->attach(_pc->dataspace());

			memcpy(ds_addr, file_buf.constData(), file_buf.size());

			Genode::env()->rm_session()->detach(ds_addr);
		}
	}

	try {
		_pc->start(_args.constData(), _max_width, _max_height, 10000, file_name.toLatin1().constData());
		_plugin_loading_state = LOADED;
	} catch (Loader::Session::Rom_access_failed) {
		_plugin_loading_state = ROM_CONNECTION_FAILED_EXCEPTION;
	} catch (Loader::Session::Plugin_start_timed_out) {
		_plugin_loading_state = TIMEOUT_EXCEPTION;
	}
}
Example #8
/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *	0			cqe returned
 *	-ENODATA		EMPTY;
 *	-EAGAIN			caller must try again
 *	any other -errno	fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
	struct c4iw_qp *qhp = NULL;
	struct t4_cqe uninitialized_var(cqe), *rd_cqe;
	struct t4_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie = 0;
	int ret;

	ret = t4_next_cqe(&chp->cq, &rd_cqe);

	if (ret)
		return ret;

	qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
	if (ret)
		goto out;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(&cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__, CQE_QPID(&cqe),
	     CQE_TYPE(&cqe), CQE_OPCODE(&cqe), CQE_STATUS(&cqe), CQE_LEN(&cqe),
	     CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe), (unsigned long long)cookie);

	if (CQE_TYPE(&cqe) == 0) {
		if (!CQE_STATUS(&cqe))
			wc->byte_len = CQE_LEN(&cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
		    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(&cqe)) {
		case FW_RI_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case FW_RI_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(&cqe);
			break;
		case FW_RI_SEND_WITH_INV:
		case FW_RI_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
			break;
		case FW_RI_SEND:
		case FW_RI_SEND_WITH_SE:
			wc->opcode = IB_WC_SEND;
			break;
		case FW_RI_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		case FW_RI_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case FW_RI_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(&cqe)) {
		case T4_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case T4_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case T4_ERR_QPID:
		case T4_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case T4_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case T4_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case T4_ERR_INVALIDATE_SHARED_MR:
		case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case T4_ERR_CRC:
		case T4_ERR_MARKER:
		case T4_ERR_PDU_LEN_ERR:
		case T4_ERR_OUT_OF_RQE:
		case T4_ERR_DDP_VERSION:
		case T4_ERR_RDMA_VERSION:
		case T4_ERR_DDP_QUEUE_NUM:
		case T4_ERR_MSN:
		case T4_ERR_TBIT:
		case T4_ERR_MO:
		case T4_ERR_MSN_RANGE:
		case T4_ERR_IRD_OVERFLOW:
		case T4_ERR_OPCODE:
		case T4_ERR_INTERNAL_ERR:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case T4_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD
			       "Unexpected cqe_status 0x%x for QPID=0x%0x\n",
			       CQE_STATUS(&cqe), CQE_QPID(&cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}
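The -EAGAIN convention above simply means "retry for the same work-completion slot". A sketch of how the driver's poll entry point might drive this helper, loosely following the upstream c4iw_poll_cq() (treat the details as an approximation rather than the exact source):

int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	unsigned long flags;
	int npolled;
	int err = 0;

	spin_lock_irqsave(&chp->lock, flags);
	for (npolled = 0; npolled < num_entries; ++npolled) {
		/* retry the same slot until we get a CQE or the CQ is empty */
		do {
			err = c4iw_poll_cq_one(chp, wc + npolled);
		} while (err == -EAGAIN);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&chp->lock, flags);

	/* -ENODATA just means the CQ ran dry; report what we gathered */
	return !err || err == -ENODATA ? npolled : err;
}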
Example #9
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/*
		 * If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (!wq->sq.oldest_read) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = t4_wq_in_error(wq);
		t4_set_wq_in_error(wq);
		goto proc_cqe;
	}

	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If its not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here its a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}
Example #10
struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
			     int vector, struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct c4iw_dev *rhp;
	struct c4iw_cq *chp;
	struct c4iw_create_cq_resp uresp;
	struct c4iw_ucontext *ucontext = NULL;
	int ret;
	size_t memsize, hwentries;
	struct c4iw_mm_entry *mm, *mm2;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);

	rhp = to_c4iw_dev(ibdev);

	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context)
		ucontext = to_c4iw_ucontext(ib_context);

	/* account for the status page. */
	entries++;

	/* IQ needs one extra entry to differentiate full vs empty. */
	entries++;

	/*
	 * entries must be multiple of 16 for HW.
	 */
	entries = roundup(entries, 16);

	/*
	 * Make actual HW queue 2x to avoid cdix_inc overflows.
	 */
	hwentries = entries * 2;

	/*
	 * Make HW queue at least 64 entries so GTS updates aren't too
	 * frequent.
	 */
	if (hwentries < 64)
		hwentries = 64;

	memsize = hwentries * sizeof *chp->cq.queue;

	/*
	 * memsize must be a multiple of the page size if its a user cq.
	 */
	if (ucontext) {
		memsize = roundup(memsize, PAGE_SIZE);
		hwentries = memsize / sizeof *chp->cq.queue;
		while (hwentries > T4_MAX_IQ_SIZE) {
			memsize -= PAGE_SIZE;
			hwentries = memsize / sizeof *chp->cq.queue;
		}
	}
	chp->cq.size = hwentries;
	chp->cq.memsize = memsize;

	ret = create_cq(&rhp->rdev, &chp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
	if (ret)
		goto err1;

	chp->rhp = rhp;
	chp->cq.size--;				/* status page */
	chp->ibcq.cqe = entries - 2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
	if (ret)
		goto err2;

	if (ucontext) {
		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm)
			goto err3;
		mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
		if (!mm2)
			goto err4;

		uresp.qid_mask = rhp->rdev.cqmask;
		uresp.cqid = chp->cq.cqid;
		uresp.size = chp->cq.size;
		uresp.memsize = chp->cq.memsize;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		uresp.gts_key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		ret = ib_copy_to_udata(udata, &uresp, sizeof uresp);
		if (ret)
			goto err5;

		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = chp->cq.memsize;
		insert_mmap(ucontext, mm);

		mm2->key = uresp.gts_key;
		mm2->addr = chp->cq.ugts;
		mm2->len = PAGE_SIZE;
		insert_mmap(ucontext, mm2);
	}
	PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
	     __func__, chp->cq.cqid, chp, chp->cq.size,
	     chp->cq.memsize,
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
err5:
	kfree(mm2);
err4:
	kfree(mm);
err3:
	remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
	destroy_cq(&chp->rhp->rdev, &chp->cq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
	kfree(chp);
	return ERR_PTR(ret);
}
int build_phys_page_list(struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					u64 *iova_start,
					u64 *total_size,
					int *npages,
					int *shift,
					__be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return -EINVAL;
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return -EINVAL;
		*total_size += buffer_list[i].size;
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	if (*total_size > 0xFFFFFFFFULL)
		return -ENOMEM;

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if (num_phys_buf > 1) {
			if ((1ULL << *shift) & mask)
				break;
		} else
			if (1ULL << *shift >=
			    buffer_list[0].size +
			    (buffer_list[0].addr & ((1ULL << *shift) - 1)))
				break;

	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return -EINVAL;

	*page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL);
	if (!*page_list)
		return -ENOMEM;

	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = cpu_to_be64(buffer_list[i].addr +
			    ((u64) j << *shift));

	PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n",
	     __FUNCTION__, (unsigned long long) *iova_start,
	     (unsigned long long) mask, *shift, (unsigned long long) *total_size,
	     *npages);

	return 0;

}
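A hedged caller sketch for build_phys_page_list(): registering a single physically contiguous buffer. buf_addr and buf_size are illustrative names, not part of the driver:

	struct ib_phys_buf pbuf = { .addr = buf_addr, .size = buf_size };
	u64 iova = buf_addr;
	u64 total_size;
	__be64 *page_list;
	int npages, shift, ret;

	ret = build_phys_page_list(&pbuf, 1, &iova, &total_size,
				   &npages, &shift, &page_list);
	if (!ret) {
		/* hand page_list/npages/shift to the HW-specific PBL writer,
		 * then free the list allocated by build_phys_page_list() */
		kfree(page_list);
	}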
Example #12
static int
mp730_fill_buffer (pixma_t * s, pixma_imagebuf_t * ib)
{
  int error, n;
  mp730_t *mp = (mp730_t *) s->subdriver;
  unsigned block_size, bytes_received;
  uint8_t header[16];

  do
    {
      do
	{
	  if (s->cancel)
	    return PIXMA_ECANCELED;
	  if (mp->last_block)           /* end of image */
	      return 0;

	  error = read_image_block (s, header, mp->imgbuf + mp->imgbuf_len);
	  if (error < 0)
	    return error;

	  bytes_received = error;
	  block_size = pixma_get_be16 (header + 4);
	  mp->last_block = ((header[2] & 0x28) == 0x28);
	  if (mp->last_block)
	    {    /* end of image */
	      mp->state = state_finished;
	    }
	  if ((header[2] & ~0x38) != 0)
	    {
	      PDBG (pixma_dbg (1, "WARNING: Unexpected result header\n"));
	      PDBG (pixma_hexdump (1, header, 16));
	    }
	  PASSERT (bytes_received == block_size);

	  if (block_size == 0)
	    {
	      /* no image data at this moment. */
	      /*pixma_sleep(100000); *//* FIXME: too short, too long? */
	      handle_interrupt (s, 100);
            }
	}
      while (block_size == 0);

      /* TODO: simplify! */
      mp->imgbuf_len += bytes_received;
      n = mp->imgbuf_len / s->param->line_size;
      /* n = number of full lines (rows) we have in the buffer. */
      if (n != 0)
	{
	  if (s->param->channels != 1    &&
	      s->cfg->pid != MF5730_PID  &&
	      s->cfg->pid != MF5750_PID  &&
	      s->cfg->pid != MF5770_PID  &&
	      s->cfg->pid != MF3110_PID  &&
	      s->cfg->pid != IR1020_PID)
	    {
	      /* color, and not an MF57x0 nor MF3110 */
	      pack_rgb (mp->imgbuf, n, mp->raw_width, mp->lbuf);
	    }
	  else
             /* grayscale/lineart or MF57x0 or MF3110 */
             memcpy (mp->lbuf, mp->imgbuf, n * s->param->line_size);

	  block_size = n * s->param->line_size;
	  mp->imgbuf_len -= block_size;
	  memcpy (mp->imgbuf, mp->imgbuf + block_size, mp->imgbuf_len);
	}
    }
  while (n == 0);

  ib->rptr = mp->lbuf;
  ib->rend = mp->lbuf + block_size;
  return ib->rend - ib->rptr;
}
Example #13
static int
step1 (pixma_t * s)
{
  int error;

  error = query_status (s);
  if (error < 0)
    return error;
  if ((s->param->source == PIXMA_SOURCE_ADF
       || s->param->source == PIXMA_SOURCE_ADFDUP)
      && !has_paper (s))
    return PIXMA_ENO_PAPER;
  if (has_ccd_sensor (s))
    {
      switch (s->cfg->pid)
        {
          case MF5730_PID:
          case MF5750_PID:
          case MF5770_PID:
          /* MF57x0: Wait 10 sec before starting for 1st page only */
            if (s->param->adf_pageid == 0)
	      {
                int tmo = 10;  /* like Windows driver, 10 sec CCD calibration ? */
                while (--tmo >= 0)
                  {
                    error = handle_interrupt (s, 1000);
                    if (s->cancel)
                      return PIXMA_ECANCELED;
                    if (error != PIXMA_ECANCELED && error < 0)
                      return error;
                    PDBG (pixma_dbg (2, "CCD Calibration ends in %d sec.\n", tmo));
                  }
              }
            break;

          default:
            break;
        }
        
      activate (s, 0);
      error = calibrate (s);
      
      switch (s->cfg->pid)
        {
          case MF5730_PID:
          case MF5750_PID:
          case MF5770_PID:
          /* MF57x0: calibration returns PIXMA_STATUS_FAILED */
            if (error == PIXMA_ECANCELED)
              error = read_error_info (s, NULL, 0);
            break;

          default:
            break;
        }
    }
  if (error >= 0)
    error = activate (s, 0);
  if (error >= 0)
    error = activate (s, 4);
  return error;
}
Example #14
/*
 * Move all CQEs from the HWCQ into the SWCQ.
 * Deal with out-of-order and/or completions that complete
 * prior unsignalled WRs.
 */
void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
	struct c4iw_qp *qhp;
	struct t4_swsqe *swsqe;
	int ret;

	PDBG("%s  cqid 0x%x\n", __func__, chp->cq.cqid);
	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);

	/*
	 * This logic is similar to poll_cq(), but not quite the same
	 * unfortunately.  Need to move pertinent HW CQEs to the SW CQ but
	 * also do any translation magic that poll_cq() normally does.
	 */
	while (!ret) {
		qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));

		/*
		 * drop CQEs with no associated QP
		 */
		if (qhp == NULL)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
			goto next_cqe;

		if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {

			/* If we have reached here because of async
			 * event or other error, and have egress error
			 * then drop
			 */
			if (CQE_TYPE(hw_cqe) == 1)
				goto next_cqe;

			/* drop peer2peer RTR reads.
			 */
			if (CQE_WRID_STAG(hw_cqe) == 1)
				goto next_cqe;

			/*
			 * Eat completions for unsignaled read WRs.
			 */
			if (!qhp->wq.sq.oldest_read->signaled) {
				advance_oldest_read(&qhp->wq);
				goto next_cqe;
			}

			/*
			 * Don't write to the HWCQ, create a new read req CQE
			 * in local memory and move it into the swcq.
			 */
			create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
			hw_cqe = &read_cqe;
			advance_oldest_read(&qhp->wq);
		}

		/* if its a SQ completion, then do the magic to move all the
		 * unsignaled and now in-order completions into the swcq.
		 */
		if (SQ_TYPE(hw_cqe)) {
			swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
			swsqe->cqe = *hw_cqe;
			swsqe->complete = 1;
			flush_completed_wrs(&qhp->wq, &chp->cq);
		} else {
			swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
			*swcqe = *hw_cqe;
			swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
			t4_swcq_produce(&chp->cq);
		}
next_cqe:
		t4_hwcq_consume(&chp->cq);
		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
	}
}
Example #15
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		if (ib_copy_to_udata(udata, &uresp, sizeof (uresp))) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof (struct t3_cqe));
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
Example #16
/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0		    CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
		   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
	int ret = 0;
	struct t4_cqe *hw_cqe, read_cqe;

	*cqe_flushed = 0;
	*credit = 0;
	ret = t4_next_cqe(cq, &hw_cqe);
	if (ret)
		return ret;

	PDBG("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x"
	     " opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
	     __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
	     CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
	     CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
	     CQE_WRID_LOW(hw_cqe));

	/*
	 * skip cqe's not affiliated with a QP.
	 */
	if (wq == NULL) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	* skip hw cqe's if the wq is flushed.
	*/
	if (wq->flushed && !SW_CQE(hw_cqe)) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * skip TERMINATE cqes...
	 */
	if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
		ret = -EAGAIN;
		goto skip_cqe;
	}

	/*
	 * Gotta tweak READ completions:
	 *	1) the cqe doesn't contain the sq_wptr from the wr.
	 *	2) opcode not reflected from the wr.
	 *	3) read_len not reflected from the wr.
	 *	4) cq_type is RQ_TYPE not SQ_TYPE.
	 */
	if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

		/* If we have reached here because of async
		 * event or other error, and have egress error
		 * then drop
		 */
		if (CQE_TYPE(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/* If this is an unsolicited read response, then the read
		 * was generated by the kernel driver as part of peer-2-peer
		 * connection setup.  So ignore the completion.
		 */
		if (CQE_WRID_STAG(hw_cqe) == 1) {
			if (CQE_STATUS(hw_cqe))
				t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Eat completions for unsignaled read WRs.
		 */
		if (!wq->sq.oldest_read->signaled) {
			advance_oldest_read(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}

		/*
		 * Don't write to the HWCQ, so create a new read req CQE
		 * in local memory.
		 */
		create_read_req_cqe(wq, hw_cqe, &read_cqe);
		hw_cqe = &read_cqe;
		advance_oldest_read(wq);
	}

	if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
		*cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
		t4_set_wq_in_error(wq);
	}

	/*
	 * RECV completion.
	 */
	if (RQ_TYPE(hw_cqe)) {

		/*
		 * HW only validates 4 bits of MSN.  So we must validate that
		 * the MSN in the SEND is the next expected MSN.  If its not,
		 * then we complete this with T4_ERR_MSN and mark the wq in
		 * error.
		 */

		if (t4_rq_empty(wq)) {
			t4_set_wq_in_error(wq);
			ret = -EAGAIN;
			goto skip_cqe;
		}
		if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
			t4_set_wq_in_error(wq);
			hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
			goto proc_cqe;
		}
		goto proc_cqe;
	}

	/*
	 * If we get here its a send completion.
	 *
	 * Handle out of order completion. These get stuffed
	 * in the SW SQ. Then the SW SQ is walked to move any
	 * now in-order completions into the SW CQ.  This handles
	 * 2 cases:
	 *	1) reaping unsignaled WRs when the first subsequent
	 *	   signaled WR is completed.
	 *	2) out of order read completions.
	 */
	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
		struct t4_swsqe *swsqe;

		PDBG("%s out of order completion going in sw_sq at idx %u\n",
		     __func__, CQE_WRID_SQ_IDX(hw_cqe));
		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
		swsqe->cqe = *hw_cqe;
		swsqe->complete = 1;
		ret = -EAGAIN;
		goto flush_wq;
	}

proc_cqe:
	*cqe = *hw_cqe;

	/*
	 * Reap the associated WR(s) that are freed up with this
	 * completion.
	 */
	if (SQ_TYPE(hw_cqe)) {
		int idx = CQE_WRID_SQ_IDX(hw_cqe);
		BUG_ON(idx >= wq->sq.size);

		/*
		* Account for any unsignaled completions completed by
		* this signaled completion.  In this case, cidx points
		* to the first unsignaled one, and idx points to the
		* signaled one.  So adjust in_use based on this delta.
		* if this is not completing any unsigned wrs, then the
		* delta will be 0. Handle wrapping also!
		*/
		if (idx < wq->sq.cidx)
			wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
		else
			wq->sq.in_use -= idx - wq->sq.cidx;
		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);

		wq->sq.cidx = (uint16_t)idx;
		PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
		t4_sq_consume(wq);
	} else {
		PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
		BUG_ON(t4_rq_empty(wq));
		t4_rq_consume(wq);
		goto skip_cqe;
	}

flush_wq:
	/*
	 * Flush any completed cqes that are now in-order.
	 */
	flush_completed_wrs(wq, cq);

skip_cqe:
	if (SW_CQE(hw_cqe)) {
		PDBG("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->sw_cidx);
		t4_swcq_consume(cq);
	} else {
		PDBG("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
		     __func__, cq, cq->cqid, cq->cidx);
		t4_hwcq_consume(cq);
	}
	return ret;
}
Example #17
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
				        sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
			__func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}
Example #18
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP(FW_RI_RES_WR) |
			V_FW_RI_RES_WR_NRES(1) |
			FW_WR_COMPL(1));
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (unsigned long) &wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			V_FW_RI_RES_WR_IQANUS(0) |
			V_FW_RI_RES_WR_IQANUD(1) |
			F_FW_RI_RES_WR_IQANDST |
			V_FW_RI_RES_WR_IQANDSTINDEX(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			F_FW_RI_RES_WR_IQDROPRSS |
			V_FW_RI_RES_WR_IQPCIECH(2) |
			V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
			F_FW_RI_RES_WR_IQO |
			V_FW_RI_RES_WR_IQESIZE(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;
	if (user) {
		cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
					(cq->cqid << rdev->cqshift);
		cq->ugts &= PAGE_MASK;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}
Example #19
static int iwch_poll_cq_one(struct iwch_dev *rhp, struct iwch_cq *chp,
			    struct ib_wc *wc)
{
	struct iwch_qp *qhp = NULL;
	struct t3_cqe cqe, *rd_cqe;
	struct t3_wq *wq;
	u32 credit = 0;
	u8 cqe_flushed;
	u64 cookie;
	int ret = 1;

	rd_cqe = cxio_next_cqe(&chp->cq);

	if (!rd_cqe)
		return 0;

	qhp = get_qhp(rhp, CQE_QPID(*rd_cqe));
	if (!qhp)
		wq = NULL;
	else {
		spin_lock(&qhp->lock);
		wq = &(qhp->wq);
	}
	ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
				   &credit);
	if (t3a_device(chp->rhp) && credit) {
		PDBG("%s updating %d cq credits on id %d\n", __func__,
		     credit, chp->cq.cqid);
		cxio_hal_cq_op(&rhp->rdev, &chp->cq, CQ_CREDIT_UPDATE, credit);
	}

	if (ret) {
		ret = -EAGAIN;
		goto out;
	}
	ret = 1;

	wc->wr_id = cookie;
	wc->qp = &qhp->ibqp;
	wc->vendor_err = CQE_STATUS(cqe);
	wc->wc_flags = 0;

	PDBG("%s qpid 0x%x type %d opcode %d status 0x%x wrid hi 0x%x "
	     "lo 0x%x cookie 0x%llx\n", __func__,
	     CQE_QPID(cqe), CQE_TYPE(cqe),
	     CQE_OPCODE(cqe), CQE_STATUS(cqe), CQE_WRID_HI(cqe),
	     CQE_WRID_LOW(cqe), (unsigned long long) cookie);

	if (CQE_TYPE(cqe) == 0) {
		if (!CQE_STATUS(cqe))
			wc->byte_len = CQE_LEN(cqe);
		else
			wc->byte_len = 0;
		wc->opcode = IB_WC_RECV;
		if (CQE_OPCODE(cqe) == T3_SEND_WITH_INV ||
		    CQE_OPCODE(cqe) == T3_SEND_WITH_SE_INV) {
			wc->ex.invalidate_rkey = CQE_WRID_STAG(cqe);
			wc->wc_flags |= IB_WC_WITH_INVALIDATE;
		}
	} else {
		switch (CQE_OPCODE(cqe)) {
		case T3_RDMA_WRITE:
			wc->opcode = IB_WC_RDMA_WRITE;
			break;
		case T3_READ_REQ:
			wc->opcode = IB_WC_RDMA_READ;
			wc->byte_len = CQE_LEN(cqe);
			break;
		case T3_SEND:
		case T3_SEND_WITH_SE:
		case T3_SEND_WITH_INV:
		case T3_SEND_WITH_SE_INV:
			wc->opcode = IB_WC_SEND;
			break;
		case T3_BIND_MW:
			wc->opcode = IB_WC_BIND_MW;
			break;

		case T3_LOCAL_INV:
			wc->opcode = IB_WC_LOCAL_INV;
			break;
		case T3_FAST_REGISTER:
			wc->opcode = IB_WC_FAST_REG_MR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected opcode %d "
			       "in the CQE received for QPID=0x%0x\n",
			       CQE_OPCODE(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
			goto out;
		}
	}

	if (cqe_flushed)
		wc->status = IB_WC_WR_FLUSH_ERR;
	else {

		switch (CQE_STATUS(cqe)) {
		case TPT_ERR_SUCCESS:
			wc->status = IB_WC_SUCCESS;
			break;
		case TPT_ERR_STAG:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_PDID:
			wc->status = IB_WC_LOC_PROT_ERR;
			break;
		case TPT_ERR_QPID:
		case TPT_ERR_ACCESS:
			wc->status = IB_WC_LOC_ACCESS_ERR;
			break;
		case TPT_ERR_WRAP:
			wc->status = IB_WC_GENERAL_ERR;
			break;
		case TPT_ERR_BOUND:
			wc->status = IB_WC_LOC_LEN_ERR;
			break;
		case TPT_ERR_INVALIDATE_SHARED_MR:
		case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
			wc->status = IB_WC_MW_BIND_ERR;
			break;
		case TPT_ERR_CRC:
		case TPT_ERR_MARKER:
		case TPT_ERR_PDU_LEN_ERR:
		case TPT_ERR_OUT_OF_RQE:
		case TPT_ERR_DDP_VERSION:
		case TPT_ERR_RDMA_VERSION:
		case TPT_ERR_DDP_QUEUE_NUM:
		case TPT_ERR_MSN:
		case TPT_ERR_TBIT:
		case TPT_ERR_MO:
		case TPT_ERR_MSN_RANGE:
		case TPT_ERR_IRD_OVERFLOW:
		case TPT_ERR_OPCODE:
			wc->status = IB_WC_FATAL_ERR;
			break;
		case TPT_ERR_SWFLUSH:
			wc->status = IB_WC_WR_FLUSH_ERR;
			break;
		default:
			printk(KERN_ERR MOD "Unexpected cqe_status 0x%x for "
			       "QPID=0x%0x\n", CQE_STATUS(cqe), CQE_QPID(cqe));
			ret = -EINVAL;
		}
	}
out:
	if (wq)
		spin_unlock(&qhp->lock);
	return ret;
}
static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
#ifdef notyet
	struct iwch_cq *chp = to_iwch_cq(cq);
	struct t3_cq oldcq, newcq;
	int ret;

	PDBG("%s ib_cq %p cqe %d\n", __func__, cq, cqe);

	/* We don't downsize... */
	if (cqe <= cq->cqe)
		return 0;

	/* create new t3_cq with new size */
	cqe = roundup_pow_of_two(cqe+1);
	newcq.size_log2 = ilog2(cqe);

	/* Dont allow resize to less than the current wce count */
	if (cqe < Q_COUNT(chp->cq.rptr, chp->cq.wptr)) {
		return -ENOMEM;
	}

	/* Quiesce all QPs using this CQ */
	ret = iwch_quiesce_qps(chp);
	if (ret) {
		return ret;
	}

	ret = cxio_create_cq(&chp->rhp->rdev, &newcq);
	if (ret) {
		return ret;
	}

	/* copy CQEs */
	memcpy(newcq.queue, chp->cq.queue, (1 << chp->cq.size_log2) *
				        sizeof(struct t3_cqe));

	/* old iwch_qp gets new t3_cq but keeps old cqid */
	oldcq = chp->cq;
	chp->cq = newcq;
	chp->cq.cqid = oldcq.cqid;

	/* resize new t3_cq to update the HW context */
	ret = cxio_resize_cq(&chp->rhp->rdev, &chp->cq);
	if (ret) {
		chp->cq = oldcq;
		return ret;
	}
	chp->ibcq.cqe = (1<<chp->cq.size_log2) - 1;

	/* destroy old t3_cq */
	oldcq.cqid = newcq.cqid;
	ret = cxio_destroy_cq(&chp->rhp->rdev, &oldcq);
	if (ret) {
		printk(KERN_ERR MOD "%s - cxio_destroy_cq failed %d\n",
			__func__, ret);
	}

	/* add user hooks here */

	/* resume qps */
	ret = iwch_resume_qps(chp);
	return ret;
#else
	return -ENOSYS;
#endif
}
Example #21
int main(int argc, char * argv[])
{
  const char * config_file_name = NULL;
  bool daemon_mode = true;
  pid_t pid;


  /* Search command line for config file name
   * */
  for ( int i = 1; i < argc; ++i ) {
    if ( strncmp(argv[i], "--config=", 9) == 0 ) {
      config_file_name = argv[i] + 9;
    }
    else if ( strcmp(argv[i], "--no-daemon") == 0 ) {
      daemon_mode = 0;
    }
  }


  /* Read config file
   * */
  if ( !config_file_name ) {
    config_file_name = ffsrv_find_config_file();
  }

  if ( config_file_name && !ffsrv_read_config_file(config_file_name) ) {
    return EXIT_FAILURE;
  }



  /* Walk over command line again, overriding config file settings
   * */
  for ( int i = 1; i < argc; ++i ) {
    char keyname[256] = "", keyvalue[256] = "";
    sscanf(argv[i], "%255[^=]=%255s", keyname, keyvalue);
    if ( !ffsrv_parse_option(keyname, keyvalue) ) {
      return EXIT_FAILURE;
    }
  }


  /* Become daemon if requested
   * */
  if ( daemon_mode ) {
    if ( (pid = become_daemon()) == -1 ) {
      fprintf(stderr, "become_daemon() fails: %s", strerror(errno));
      return EXIT_FAILURE;
    }
    if ( pid != 0 ) {
      return EXIT_SUCCESS; /* finish parent process */
    }
  }


  /* Setup log file name
   * */
  if ( !ffsrv.logfilename || !*ffsrv.logfilename ) {
    free(ffsrv.logfilename);
    if ( daemon_mode ) {
      ffsrv.logfilename = strdup("ffsrv.log");
    }
    else {
      ffsrv.logfilename = strdup("stderr");
    }
  }
  set_logfilename(ffsrv.logfilename);








  /* Set up signal handler
   * */
  if ( !setup_signal_handler() ) {
    PDBG("setup_signal_handler() fails: %s", strerror(errno));
    return EXIT_FAILURE;
  }


  /* Start actual server
   * */

  if ( !ffsrv_start() ) {
    PDBG("ffsrv_start() fails: %s", strerror(errno));
    return EXIT_FAILURE;
  }


  /* Sleep this thread */
  while ( 42 ) {
    sleep(1);
  }


  ffsrv_finish();

  return EXIT_SUCCESS;
}
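become_daemon() is not shown in this example. A conventional fork/setsid daemonizer that matches how main() interprets its result (child pid returned to the parent, 0 in the daemon, -1 on error) might look like the following sketch; it is hypothetical, not the project's actual implementation:

#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static pid_t become_daemon(void)
{
  pid_t pid = fork();

  if ( pid != 0 ) {
    return pid; /* parent process (or -1 if fork() failed): caller exits */
  }

  if ( setsid() == (pid_t) -1 ) { /* detach from the controlling terminal */
    return -1;
  }

  umask(0);
  if ( chdir("/") != 0 ) {
    return -1;
  }

  /* real daemonizers often fork() a second time and redirect stdio here */
  return 0; /* continue as the daemon */
}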
Example #22
static int
mp750_fill_buffer (pixma_t * s, pixma_imagebuf_t * ib)
{
  mp750_t *mp = (mp750_t *) s->subdriver;
  int error;
  uint8_t info;
  unsigned block_size, bytes_received, n;
  int shift[3], base_shift;
  int c;

  c = ((is_ccd_grayscale (s)) ? 3 : s->param->channels) * s->param->depth / 8; /* single-byte or double-byte data */

  if (mp->state == state_warmup)
    {
      int tmo = 60;

      query_status (s);
      check_status (s);
 /*SIM*/ while (!is_calibrated (s) && --tmo >= 0)
        {
          if (s->cancel)
            return PIXMA_ECANCELED;
          if (handle_interrupt (s, 1000) > 0)
            {
              block_size = 0;
              error = request_image_block (s, &block_size, &info);
               /*SIM*/ if (error < 0)
          return error;
            }
        }
      if (tmo < 0)
        {
          PDBG (pixma_dbg (1, "WARNING: Timed out waiting for calibration\n"));
          return PIXMA_ETIMEDOUT;
        }
      pixma_sleep (100000);
      query_status (s);
      if (is_warming_up (s) || !is_calibrated (s))
        {
          PDBG (pixma_dbg (1, "WARNING: Wrong status: wup=%d cal=%d\n",
               is_warming_up (s), is_calibrated (s)));
          return PIXMA_EPROTO;
        }
      block_size = 0;
      request_image_block (s, &block_size, &info);
       /*SIM*/ mp->state = state_scanning;
      mp->last_block = 0;
    }

  /* TODO: Move to other place, values are constant. */
  base_shift = calc_component_shifting (s) * mp->line_size;
  if (s->param->source == PIXMA_SOURCE_ADF)
    {
      shift[0] = 0;
      shift[1] = -base_shift;
      shift[2] = -2 * base_shift;
    }
  else
    {
      shift[0] = -2 * base_shift;
      shift[1] = -base_shift;
      shift[2] = 0;
    }

  do
    {
      if (mp->last_block_size > 0)
        {
          block_size = mp->imgbuf_len - mp->last_block_size;
          memcpy (mp->img, mp->img + mp->last_block_size, block_size);
        }

      do
        {
          if (s->cancel)
            return PIXMA_ECANCELED;
          if (mp->last_block)
            {
              /* end of image */
              info = mp->last_block;
              if (info != 0x38)
                {
                  query_status (s);
                   /*SIM*/ while ((info & 0x28) != 0x28)
                    {
                      pixma_sleep (10000);
                      error = request_image_block2 (s, &info);
                      if (s->cancel)
                        return PIXMA_ECANCELED;	/* FIXME: Is it safe to cancel here? */
                      if (error < 0)
                        return error;
                    }
                }
              mp->needs_abort = (info != 0x38);
              mp->last_block = info;
              mp->state = state_finished;
              return 0;
            }

          check_status (s);
           /*SIM*/ while (handle_interrupt (s, 1) > 0);
           /*SIM*/ block_size = IMAGE_BLOCK_SIZE;
          error = request_image_block (s, &block_size, &info);
          if (error < 0)
            {
              if (error == PIXMA_ECANCELED)
                read_error_info (s, NULL, 0);
              return error;
            }
          mp->last_block = info;
          if ((info & ~0x38) != 0)
            {
              PDBG (pixma_dbg (1, "WARNING: Unknown info byte %x\n", info));
            }
          if (block_size == 0)
            {
              /* no image data at this moment. */
              pixma_sleep (10000);
            }
        }
      while (block_size == 0);

      error = read_image_block (s, mp->rawimg + mp->rawimg_left);
      if (error < 0)
	{
	  mp->state = state_transfering;
	  return error;
	}
      bytes_received = error;
      PASSERT (bytes_received == block_size);

      /* TODO: simplify! */
      mp->rawimg_left += bytes_received;
      n = mp->rawimg_left / 3;
      /* n = number of pixels in the buffer? */

      /* Color to grayscale conversion for CCD sensor */
      if (is_ccd_grayscale (s)) {
	shift_rgb (mp->rawimg, n, shift[0], shift[1], shift[2], mp->stripe_shift, mp->line_size, 
		   mp->imgcol + mp->imgbuf_ofs); 
	/* dst: img, src: imgcol */
	rgb_to_gray (mp->img, mp->imgcol, n, c); /* cropping occurs later? */  
	PDBG (pixma_dbg (4, "*fill_buffer: did grayscale conversion \n"));
      }
      /* Color image processing */
      else {
	shift_rgb (mp->rawimg, n, shift[0], shift[1], shift[2], mp->stripe_shift, mp->line_size, 
		   mp->img + mp->imgbuf_ofs);
	PDBG (pixma_dbg (4, "*fill_buffer: no grayscale conversion---keep color \n")); 
      }

      /* keep the remaining unprocessed bytes (after the last complete pixel) in mp->rawimg; they have no influence on mp->img */
      n *= 3;
      mp->shifted_bytes += n;
      mp->rawimg_left -= n;	/* rawimg_left = 0, 1 or 2 bytes left in the buffer. */
      mp->last_block_size = n;
      memcpy (mp->rawimg, mp->rawimg + n, mp->rawimg_left);

    }
  while (mp->shifted_bytes <= 0);

  if ((unsigned) mp->shifted_bytes < mp->last_block_size) 
    {
      if (is_ccd_grayscale (s))
	ib->rptr = mp->img + mp->last_block_size/3 - mp->shifted_bytes/3; /* testing---works OK */
      else
	ib->rptr = mp->img + mp->last_block_size - mp->shifted_bytes;
    }
  else
    ib->rptr = mp->img;
  if (is_ccd_grayscale (s))
    ib->rend = mp->img + mp->last_block_size/3; /* testing---works OK */
  else
    ib->rend = mp->img + mp->last_block_size; 
  return ib->rend - ib->rptr;
}
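
rgb_to_gray() is used in the CCD grayscale path above but is not shown. The following sketch only illustrates the idea with a plain average of the three CCD samples per pixel, assuming 8-bit data; the driver's own routine also covers 16-bit samples (the c parameter above is 3 or 6 bytes per pixel) and may treat the channels differently.

#include <stdint.h>

/* Illustrative sketch only, not the driver's implementation: fold the three
 * colour samples of each pixel into one gray sample by simple averaging.
 * Assumes 8-bit samples; the real routine also handles the 16-bit case. */
static void
rgb_to_gray_sketch (uint8_t * gptr, const uint8_t * sptr, unsigned pixels)
{
  unsigned i;

  for (i = 0; i < pixels; i++)
    {
      unsigned g = sptr[0] + sptr[1] + sptr[2];

      *gptr++ = (uint8_t) (g / 3);
      sptr += 3;
    }
}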
Beispiel #23
0
static int
iclass_fill_buffer (pixma_t * s, pixma_imagebuf_t * ib)
{
  int error, n;
  iclass_t *mf = (iclass_t *) s->subdriver;
  unsigned block_size, lines_size, first_block_size;
  uint8_t info;

/*
 * 1. send a block request cmd (d4 20 00... 04 00 06)
 * 2. examine the response for block size and/or end-of-scan flag
 * 3. read the block one chunk at a time
 * 4. repeat until have enough to process >=1 lines
 */
  do
    {
      do
        {
          if (s->cancel)
            return PIXMA_ECANCELED;
          if (mf->last_block)
            {
              /* end of image */
              mf->state = state_finished;
              return 0;
            }

          first_block_size = 0;
          error = request_image_block (s, 4, &info, &block_size, 
                          mf->blkptr + mf->blk_len, &first_block_size);
          /* add current block to remainder of previous */
          mf->blk_len += first_block_size;
          if (error < 0)
            {
              /* NOTE: seen in traffic logs but don't know the meaning. */
              read_error_info (s, NULL, 0);
              if (error == PIXMA_ECANCELED)
                return error;
            }

          /* info: 0x28 = end; 0x38 = end + ADF empty */
          mf->last_block = info & 0x38;
          if ((info & ~0x38) != 0)
            {
              PDBG (pixma_dbg (1, "WARNING: Unexpected result header\n"));
              PDBG (pixma_hexdump (1, &info, 1));
            }

          if (block_size == 0)
            {
              /* no image data at this moment. */
              /*pixma_sleep(100000); *//* FIXME: too short, too long? */
              handle_interrupt (s, 100);
            }
        }
      while (block_size == 0 && first_block_size == 0);

      error = read_image_block (s, mf->blkptr + mf->blk_len, block_size);
      block_size = error;
      if (error < 0)
        return error;

      /* add current block to remainder of previous */
      mf->blk_len += block_size;
      /* n = number of full lines (rows) we have in the buffer. */
      n = mf->blk_len / s->param->line_size;
      if (n != 0)
        {
          if (s->param->channels != 1 &&
              s->cfg->pid != MF3010_PID &&
              s->cfg->pid != MF4410_PID &&
              s->cfg->pid != MF4770_PID &&
              s->cfg->pid != MF4550_PID &&
              s->cfg->pid != MF4600_PID &&
              s->cfg->pid != MF6500_PID &&
              s->cfg->pid != MF8030_PID)
            {
              /* color and not MF46xx or MF65xx */
              pack_rgb (mf->blkptr, n, mf->raw_width, mf->lineptr);
            }
          else
            {
              /* grayscale */
              memcpy (mf->lineptr, mf->blkptr, n * s->param->line_size);
            }
          lines_size = n * s->param->line_size;
          /* cull remainder and shift left */
          mf->blk_len -= lines_size;
          memcpy (mf->blkptr, mf->blkptr + lines_size, mf->blk_len);
        }
    }
  while (n == 0);

  /* output full lines, keep partial lines for next block */
  ib->rptr = mf->lineptr;
  ib->rend = mf->lineptr + lines_size;
  return ib->rend - ib->rptr;
}
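
Beispiel #22 and #23 implement the same fill_buffer contract: the subdriver fills ib->rptr..ib->rend with whole lines and returns the number of bytes made available, 0 once the image is finished, or a negative PIXMA_E* code. A hypothetical consumer loop of that contract could look as follows; drain_image() and the dst buffer are illustrative names, not part of the driver API.

#include <stdint.h>
#include <string.h>
#include "pixma.h"   /* pixma_t, pixma_imagebuf_t, PIXMA_ENOMEM (driver header) */

/* Illustration of the fill_buffer contract shown above: keep asking the
 * subdriver for data and copy whole lines out of ib->rptr..ib->rend until
 * it reports end of image (0) or an error (< 0). */
static int
drain_image (pixma_t * s, pixma_imagebuf_t * ib,
             int (*fill_buffer) (pixma_t *, pixma_imagebuf_t *),
             uint8_t * dst, unsigned dst_size)
{
  unsigned total = 0;
  int n;

  while ((n = fill_buffer (s, ib)) > 0)
    {
      if (total + (unsigned) n > dst_size)
        return PIXMA_ENOMEM;          /* destination buffer too small */
      memcpy (dst + total, ib->rptr, (size_t) n);
      total += n;
    }
  return (n < 0) ? n : (int) total;   /* error code, or bytes copied */
}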
Beispiel #24
0
long nmc_dev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{	
	int result = 0;
	int gcmd53_buf_size = 0;
	spi_ioc_t spi_data;
	
	PINFO("%s\n", __FUNCTION__);	

	if(_IOC_TYPE(cmd) != NMC_IOC_MAGIC) 
		return -EINVAL;	
	if(_IOC_NR(cmd) > NMC_IOC_MAXNR) 
		return -EINVAL;	
	if(_IOC_DIR(cmd) & _IOC_READ) 
	{		
		if(!access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd))) 
		{		
			PERR("%s: cmd: !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd))\n", __FUNCTION__);			
			return -EINVAL;		
		}	
	} else if(_IOC_DIR(cmd) & _IOC_WRITE) {		
		if(!access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd))) 
		{			
			PERR("%s: cmd: !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd))\n", __FUNCTION__);			
			return -EINVAL;		
		}	
	}	

	switch(cmd)	
	{		
		case NMC_IOPWRON:			
			/* device power on and reset high */			
			PINFO("%s: ************ POWER ON ****************\n", __FUNCTION__);			
			break;		
		case NMC_IOPWROFF:			
			/* device power off and reset low */			
			PINFO("%s: ************ POWER OFF ****************\n", __FUNCTION__);			
			break;		
		/* SDIO */
		case NMC_IOWRCMD52:	/* sdio cmd52 */			
			{				
				sdio_cmd52_t cmd52;				
				PDBG("%s: NMC_IOWRCMD52\n", __FUNCTION__);				
				if(copy_from_user(&cmd52, (sdio_cmd52_t*)arg, sizeof(sdio_cmd52_t)) != 0)
				{					
					PERR("%s: fail copy_from_user(&cmd52, (sdio_cmd52_t*)arg, sizeof(sdio_cmd52_t)) != 0\n", __FUNCTION__);				
					return -EINVAL;				
				}	
				nmi_sdio_cmd52(&cmd52);				
				if(copy_to_user((void*)arg, &cmd52, sizeof(sdio_cmd52_t)) != 0)
					return -EINVAL;			
			} break;		
		case NMC_IOWCMD53:	/* sdio cmd53 */			
			{				
				sdio_cmd53_ioctl_t cmd53_ioctl;				
				PDBG("%s: NMC_IOWCMD53\n", __FUNCTION__);				
				if(copy_from_user(&cmd53_ioctl, (sdio_cmd53_ioctl_t*)arg, sizeof(sdio_cmd53_ioctl_t)) != 0) 
				{					
					PERR("%s: fail copy_from_user(&cmd53, (sdio_cmd53_t*)arg, sizeof(sdio_cmd53_t)) != 0\n", __FUNCTION__);					
					return -EINVAL;				
				}				
				gcmd53.read_write = cmd53_ioctl.read_write;				
				gcmd53.address = cmd53_ioctl.address;				
				gcmd53.block_mode = cmd53_ioctl.block_mode;				
				gcmd53.block_size = cmd53_ioctl.block_size;				
				gcmd53.count = cmd53_ioctl.count;				
				gcmd53.function = cmd53_ioctl.function;				
				gcmd53.increment = cmd53_ioctl.increment;								
				gcmd53_buf_size = gcmd53.count ;				
				gcmd53_buf_size *= gcmd53.block_mode?gcmd53.block_size:1;								

				PDBG("gcmd53.read_write = %d\n", gcmd53.read_write);				
				PDBG("gcmd53.function = %d\n", gcmd53.function);				
				PDBG("gcmd53.block_mode = %d\n", gcmd53.block_mode);				
				PDBG("gcmd53.increment = %d\n", gcmd53.increment);				
				PDBG("gcmd53.address = %d\n", gcmd53.address);				
				PDBG("gcmd53.count = %d\n", gcmd53.count);				
				PDBG("gcmd53.block_size= %d\n", gcmd53.block_size);				
				PDBG("gcmd53_buf_size= %d\n", gcmd53_buf_size);			
			} break;		
		case NMC_IOWRCMD53BUF:			
			PDBG("%s: NMC_IOWRCMD53BUF\n", __FUNCTION__);			
			/* gcmd53_buf_size is local to each ioctl call, so recompute it here
			 * from the CMD53 parameters stored globally by NMC_IOWCMD53 */
			gcmd53_buf_size = gcmd53.count * (gcmd53.block_mode ? gcmd53.block_size : 1);
			if(copy_from_user(cmd53_buf, (unsigned char*)arg, gcmd53_buf_size) != 0) 
			{				
				PERR("%s: fail copy_from_user(buf, (unsigned char*)arg, gcmd53_buf_size) != 0\n", __FUNCTION__);				
				return -EINVAL;			
			}			
			gcmd53.buffer = cmd53_buf;			
			nmi_sdio_cmd53(&gcmd53);			
			if(copy_to_user((void*)arg, cmd53_buf, gcmd53_buf_size) != 0)
				return -EINVAL;			
			break;		
		/* SPI */
		case NMC_IOWSPIWREG:	/* spi write reg */
			PDBG("%s: NMC_IOWSPIWREG\n", __FUNCTION__);	
			if(copy_from_user(&spi_data, (spi_ioc_t*)arg, sizeof(spi_ioc_t)) != 0) 
			{					
				PERR("%s: fail copy_from_user(&spi_data, (spi_ioc_t*)arg, sizeof(spi_ioc_t)) != 0\n", __FUNCTION__);					
				return -EINVAL;				
			}		

			PDBG("spi adr = %x, val = %x\n", spi_data.adr, spi_data.val);		
			gspi.adr = spi_data.adr;
			gspi.val = spi_data.val;
			custom_spi_write_reg(spi_data.adr, spi_data.val);
			break;
		case NMC_IOWSPIRREG:	/* spi read reg */
			{
				unsigned long data = 0;
				PDBG("%s: NMC_IOWSPIRREG\n", __FUNCTION__);	
				if(copy_from_user(&spi_data, (spi_ioc_t*)arg, sizeof(spi_ioc_t)) != 0) 
				{				
					PERR("%s: fail copy_from_user(&spi_data, (spi_ioc_t*)arg, sizeof(spi_ioc_t)) != 0\n", __FUNCTION__);				
					return -EINVAL;			
				}		
				
				gspi.adr = spi_data.adr;
				gspi.val = spi_data.val;
				custom_spi_read_reg(spi_data.adr, &data);
				spi_data.val = data;
				PDBG("spi adr = %x, val = %x\n", spi_data.adr, spi_data.val);	
				if(copy_to_user((void*)arg, &spi_data, sizeof(spi_ioc_t)) != 0)
					return -EINVAL;	
			}
			break;
		case NMC_IOWSPIWBRST: 	/* spi burst write, adr and size */
			PDBG("%s: NMC_IOWSPIWBRST\n", __FUNCTION__);	
			if(copy_from_user(&spi_data, (spi_ioc_t*)arg, sizeof(spi_ioc_t)) != 0) 
			{					
				PERR("%s: fail copy_from_user(&spi_data, (spi_ioc_t*)arg, sizeof(spi_ioc_t)) != 0\n", __FUNCTION__);					
				return -EINVAL;				
			}		

			PDBG("spi adr = %x, val = %x\n", spi_data.adr, spi_data.val);		
			gspi.adr = spi_data.adr;
			gspi.val = spi_data.val;	/* size */
			break;
		case NMC_IOWSPIWBRSTBUF: 	/* spi burst write, buf */
			PDBG("%s: NMC_IOWSPIWBRSTBUF\n", __FUNCTION__);

			spi_rwbuf = kmalloc(gspi.val, GFP_KERNEL);
			if(!spi_rwbuf) {		
				PERR("no memory for spi rw buf\n");		
				return -ENOMEM;	
			}
			
			if(copy_from_user(spi_rwbuf, (unsigned char*)arg, gspi.val) != 0) 
			{				
				PERR("%s: fail copy_from_user(spi_rwbuf, (unsigned char*)arg, gspi.val) != 0\n", __FUNCTION__);				
				return -EINVAL;			
			}		
			custom_spi_write(gspi.adr, spi_rwbuf, gspi.val);

			kfree(spi_rwbuf);
			break;
		default:			
			PERR("%s: unknown command\n", __FUNCTION__);			
			break;	
	}	
	return result;
}
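
A short user-space sketch of how the SPI register ioctls above might be exercised. The device node path is a placeholder, the spi_ioc_t layout is only inferred from the handler's use of .adr and .val, and the NMC_IOW* request codes would come from the driver's own header, which is not part of this example.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
/* #include "nmc_ioctl.h" -- assumed name of the driver header that defines
 * NMC_IOWSPIWREG / NMC_IOWSPIRREG */

/* Layout inferred from the handler above (only .adr and .val are used);
 * the driver's real header may declare this differently. */
typedef struct {
	unsigned int adr;
	unsigned int val;
} spi_ioc_t;

int main(void)
{
	/* "/dev/nmc" is a placeholder; the actual node name is not shown
	 * in the example. */
	spi_ioc_t reg = { .adr = 0x1000, .val = 0x1 };
	int fd = open("/dev/nmc", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (ioctl(fd, NMC_IOWSPIWREG, &reg) < 0)	/* write the register */
		perror("NMC_IOWSPIWREG");

	if (ioctl(fd, NMC_IOWSPIRREG, &reg) < 0)	/* read it back */
		perror("NMC_IOWSPIRREG");
	else
		printf("reg 0x%x = 0x%x\n", reg.adr, reg.val);

	close(fd);
	return 0;
}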
Beispiel #25
0
void iwch_ev_dispatch(struct cxio_rdev *rdev_p, struct sk_buff *skb)
{
	struct iwch_dev *rnicp;
	struct respQ_msg_t *rsp_msg = (struct respQ_msg_t *) skb->data;
	struct iwch_cq *chp;
	struct iwch_qp *qhp;
	u32 cqid = RSPQ_CQID(rsp_msg);

	rnicp = (struct iwch_dev *) rdev_p->ulp;
	spin_lock(&rnicp->lock);
	chp = get_chp(rnicp, cqid);
	qhp = get_qhp(rnicp, CQE_QPID(rsp_msg->cqe));
	if (!chp || !qhp) {
		printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d "
		       "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x \n",
		       cqid, CQE_QPID(rsp_msg->cqe),
		       CQE_OPCODE(rsp_msg->cqe), CQE_STATUS(rsp_msg->cqe),
		       CQE_TYPE(rsp_msg->cqe), CQE_WRID_HI(rsp_msg->cqe),
		       CQE_WRID_LOW(rsp_msg->cqe));
		spin_unlock(&rnicp->lock);
		goto out;
	}
	iwch_qp_add_ref(&qhp->ibqp);
	atomic_inc(&chp->refcnt);
	spin_unlock(&rnicp->lock);

	/*
	 * 1) completion of our sending a TERMINATE.
	 * 2) incoming TERMINATE message.
	 */
	if ((CQE_OPCODE(rsp_msg->cqe) == T3_TERMINATE) &&
	    (CQE_STATUS(rsp_msg->cqe) == 0)) {
		if (SQ_TYPE(rsp_msg->cqe)) {
			PDBG("%s QPID 0x%x ep %p disconnecting\n",
			     __func__, qhp->wq.qpid, qhp->ep);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		} else {
			PDBG("%s post REQ_ERR AE QPID 0x%x\n", __func__,
			     qhp->wq.qpid);
			post_qp_event(rnicp, chp, rsp_msg,
				      IB_EVENT_QP_REQ_ERR, 0);
			iwch_ep_disconnect(qhp->ep, 0, GFP_ATOMIC);
		}
		goto done;
	}

	/* Bad incoming Read request */
	if (SQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_READ_RESP)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	/* Bad incoming write */
	if (RQ_TYPE(rsp_msg->cqe) &&
	    (CQE_OPCODE(rsp_msg->cqe) == T3_RDMA_WRITE)) {
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_REQ_ERR, 1);
		goto done;
	}

	switch (CQE_STATUS(rsp_msg->cqe)) {

	/* Completion Events */
	case TPT_ERR_SUCCESS:

		/*
		 * Confirm the destination entry if this is a RECV completion.
		 */
		if (qhp->ep && SQ_TYPE(rsp_msg->cqe))
			dst_confirm(qhp->ep->dst);
		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
		break;

	case TPT_ERR_STAG:
	case TPT_ERR_PDID:
	case TPT_ERR_QPID:
	case TPT_ERR_ACCESS:
	case TPT_ERR_WRAP:
	case TPT_ERR_BOUND:
	case TPT_ERR_INVALIDATE_SHARED_MR:
	case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_ACCESS_ERR, 1);
		break;

	/* Device Fatal Errors */
	case TPT_ERR_ECC:
	case TPT_ERR_ECC_PSTAG:
	case TPT_ERR_INTERNAL_ERR:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_DEVICE_FATAL, 1);
		break;

	/* QP Fatal Errors */
	case TPT_ERR_OUT_OF_RQE:
	case TPT_ERR_PBL_ADDR_BOUND:
	case TPT_ERR_CRC:
	case TPT_ERR_MARKER:
	case TPT_ERR_PDU_LEN_ERR:
	case TPT_ERR_DDP_VERSION:
	case TPT_ERR_RDMA_VERSION:
	case TPT_ERR_OPCODE:
	case TPT_ERR_DDP_QUEUE_NUM:
	case TPT_ERR_MSN:
	case TPT_ERR_TBIT:
	case TPT_ERR_MO:
	case TPT_ERR_MSN_GAP:
	case TPT_ERR_MSN_RANGE:
	case TPT_ERR_RQE_ADDR_BOUND:
	case TPT_ERR_IRD_OVERFLOW:
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;

	default:
		printk(KERN_ERR MOD "Unknown T3 status 0x%x QPID 0x%x\n",
		       CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
		post_qp_event(rnicp, chp, rsp_msg, IB_EVENT_QP_FATAL, 1);
		break;
	}
done:
	if (atomic_dec_and_test(&chp->refcnt))
	        wake_up(&chp->wait);
	iwch_qp_rem_ref(&qhp->ibqp);
out:
	dev_kfree_skb_irq(skb);
}
Beispiel #26
0
static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int vector,
			     struct ib_ucontext *ib_context,
			     struct ib_udata *udata)
{
	struct iwch_dev *rhp;
	struct iwch_cq *chp;
	struct iwch_create_cq_resp uresp;
	struct iwch_create_cq_req ureq;
	struct iwch_ucontext *ucontext = NULL;
	static int warned;
	size_t resplen;

	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
	rhp = to_iwch_dev(ibdev);
	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
	if (!chp)
		return ERR_PTR(-ENOMEM);

	if (ib_context) {
		ucontext = to_iwch_ucontext(ib_context);
		if (!t3a_device(rhp)) {
			if (ib_copy_from_udata(&ureq, udata, sizeof (ureq))) {
				kfree(chp);
				return ERR_PTR(-EFAULT);
			}
			chp->user_rptr_addr = (u32 __user *)(unsigned long)ureq.user_rptr_addr;
		}
	}

	if (t3a_device(rhp)) {

		/*
		 * T3A: Add some fluff to handle extra CQEs inserted
		 * for various errors.
		 * Additional CQE possibilities:
		 *      TERMINATE,
		 *      incoming RDMA WRITE Failures
		 *      incoming RDMA READ REQUEST FAILUREs
		 * NOTE: We cannot ensure the CQ won't overflow.
		 */
		entries += 16;
	}
	entries = roundup_pow_of_two(entries);
	chp->cq.size_log2 = ilog2(entries);

	if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}
	chp->rhp = rhp;
	chp->ibcq.cqe = 1 << chp->cq.size_log2;
	spin_lock_init(&chp->lock);
	spin_lock_init(&chp->comp_handler_lock);
	atomic_set(&chp->refcnt, 1);
	init_waitqueue_head(&chp->wait);
	if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
		cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
		kfree(chp);
		return ERR_PTR(-ENOMEM);
	}

	if (ucontext) {
		struct iwch_mm_entry *mm;

		mm = kmalloc(sizeof *mm, GFP_KERNEL);
		if (!mm) {
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-ENOMEM);
		}
		uresp.cqid = chp->cq.cqid;
		uresp.size_log2 = chp->cq.size_log2;
		spin_lock(&ucontext->mmap_lock);
		uresp.key = ucontext->key;
		ucontext->key += PAGE_SIZE;
		spin_unlock(&ucontext->mmap_lock);
		mm->key = uresp.key;
		mm->addr = virt_to_phys(chp->cq.queue);
		if (udata->outlen < sizeof uresp) {
			if (!warned++)
				printk(KERN_WARNING MOD "Warning - "
				       "downlevel libcxgb3 (non-fatal).\n");
			mm->len = PAGE_ALIGN((1UL << uresp.size_log2) *
					     sizeof(struct t3_cqe));
			resplen = sizeof(struct iwch_create_cq_resp_v0);
		} else {
			mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
					     sizeof(struct t3_cqe));
			uresp.memsize = mm->len;
			resplen = sizeof uresp;
		}
		if (ib_copy_to_udata(udata, &uresp, resplen)) {
			kfree(mm);
			iwch_destroy_cq(&chp->ibcq);
			return ERR_PTR(-EFAULT);
		}
		insert_mmap(ucontext, mm);
	}
	PDBG("created cqid 0x%0x chp %p size 0x%0x, dma_addr 0x%0llx\n",
	     chp->cq.cqid, chp, (1 << chp->cq.size_log2),
	     (unsigned long long) chp->cq.dma_addr);
	return &chp->ibcq;
}
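
The uresp.key handed back above is intended as an mmap offset: insert_mmap() records key, physical address and length so the driver's mmap handler can resolve the offset later. A hedged sketch of the consuming side, with made-up names for the user-space provider structures, might look like this; the only thing taken from the example is that key and memsize are used as mmap offset and length on the open uverbs fd.

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* Illustrative user-space provider structure; field names are made up. */
struct my_cq {
	void     *queue;	/* mapped CQE ring */
	uint32_t  cqid;
	size_t    memsize;
};

/* Map the CQ ring the kernel described in uresp: key becomes the mmap
 * offset on the already-open uverbs command fd, memsize the length. */
static int map_cq_queue(int uverbs_cmd_fd, uint32_t cqid,
			uint64_t key, size_t memsize, struct my_cq *cq)
{
	void *p = mmap(NULL, memsize, PROT_READ | PROT_WRITE, MAP_SHARED,
		       uverbs_cmd_fd, (off_t) key);

	if (p == MAP_FAILED)
		return -1;

	cq->queue   = p;
	cq->cqid    = cqid;
	cq->memsize = memsize;
	return 0;
}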
Beispiel #27
0
static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
		     struct c4iw_dev_ucontext *uctx)
{
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	int user = (uctx != &rdev->uctx);
	struct c4iw_wr_wait wr_wait;
	int ret;
	struct sk_buff *skb;

	cq->cqid = c4iw_get_cqid(rdev, uctx);
	if (!cq->cqid) {
		ret = -ENOMEM;
		goto err1;
	}

	if (!user) {
		cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
		if (!cq->sw_queue) {
			ret = -ENOMEM;
			goto err2;
		}
	}
	cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
				       &cq->dma_addr, GFP_KERNEL);
	if (!cq->queue) {
		ret = -ENOMEM;
		goto err3;
	}
	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
	memset(cq->queue, 0, cq->memsize);

	/* build fw_ri_res_wr */
	wr_len = sizeof *res_wr + sizeof *res;

	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto err4;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
	memset(res_wr, 0, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)&wr_wait;
	res = res_wr->res;
	res->u.cq.restype = FW_RI_RES_TYPE_CQ;
	res->u.cq.op = FW_RI_RES_OP_WRITE;
	res->u.cq.iqid = cpu_to_be32(cq->cqid);
	res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
			FW_RI_RES_WR_IQANUS_V(0) |
			FW_RI_RES_WR_IQANUD_V(1) |
			FW_RI_RES_WR_IQANDST_F |
			FW_RI_RES_WR_IQANDSTINDEX_V(
				rdev->lldi.ciq_ids[cq->vector]));
	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
			FW_RI_RES_WR_IQDROPRSS_F |
			FW_RI_RES_WR_IQPCIECH_V(2) |
			FW_RI_RES_WR_IQINTCNTTHRESH_V(0) |
			FW_RI_RES_WR_IQO_F |
			FW_RI_RES_WR_IQESIZE_V(1));
	res->u.cq.iqsize = cpu_to_be16(cq->size);
	res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

	c4iw_init_wr_wait(&wr_wait);

	ret = c4iw_ofld_send(rdev, skb);
	if (ret)
		goto err4;
	PDBG("%s wait_event wr_wait %p\n", __func__, &wr_wait);
	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
	if (ret)
		goto err4;

	cq->gen = 1;
	cq->gts = rdev->lldi.gts_reg;
	cq->rdev = rdev;

	cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
				      &cq->bar2_qid,
				      user ? &cq->bar2_pa : NULL);
	if (user && !cq->bar2_va) {
		pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
			pci_name(rdev->lldi.pdev), cq->cqid);
		ret = -EINVAL;
		goto err4;
	}
	return 0;
err4:
	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
			  dma_unmap_addr(cq, mapping));
err3:
	kfree(cq->sw_queue);
err2:
	c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
	return ret;
}